pre-release commit

Author: koalazf.99
Date: 2025-07-22 19:59:07 +08:00
parent c5dee4bb17
commit a9d6965bef
485 changed files with 111444 additions and 2 deletions

View File

@@ -0,0 +1,762 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { describe, it, expect, vi, beforeEach, afterEach, Mock } from 'vitest';
import type { Mocked } from 'vitest';
import { handleAtCommand } from './atCommandProcessor.js';
import { Config, FileDiscoveryService } from '@qwen/qwen-code-core';
import { ToolCallStatus } from '../types.js';
import { UseHistoryManagerReturn } from './useHistoryManager.js';
import * as fsPromises from 'fs/promises';
import type { Stats } from 'fs';
const mockGetToolRegistry = vi.fn();
const mockGetTargetDir = vi.fn();
const mockConfig = {
getToolRegistry: mockGetToolRegistry,
getTargetDir: mockGetTargetDir,
isSandboxed: vi.fn(() => false),
getFileService: vi.fn(),
getFileFilteringRespectGitIgnore: vi.fn(() => true),
getEnableRecursiveFileSearch: vi.fn(() => true),
} as unknown as Config;
const mockReadManyFilesExecute = vi.fn();
const mockReadManyFilesTool = {
name: 'read_many_files',
displayName: 'Read Many Files',
description: 'Reads multiple files.',
execute: mockReadManyFilesExecute,
getDescription: vi.fn((params) => `Read files: ${params.paths.join(', ')}`),
};
const mockGlobExecute = vi.fn();
const mockGlobTool = {
name: 'glob',
displayName: 'Glob Tool',
execute: mockGlobExecute,
getDescription: vi.fn(() => 'Glob tool description'),
};
const mockAddItem: Mock<UseHistoryManagerReturn['addItem']> = vi.fn();
const mockOnDebugMessage: Mock<(message: string) => void> = vi.fn();
vi.mock('fs/promises', async () => {
const actual = await vi.importActual('fs/promises');
return {
...actual,
stat: vi.fn(),
};
});
vi.mock('@qwen/qwen-code-core', async () => {
const actual = await vi.importActual('@qwen/qwen-code-core');
return {
...actual,
FileDiscoveryService: vi.fn(),
};
});
describe('handleAtCommand', () => {
let abortController: AbortController;
let mockFileDiscoveryService: Mocked<FileDiscoveryService>;
beforeEach(() => {
vi.resetAllMocks();
abortController = new AbortController();
mockGetTargetDir.mockReturnValue('/test/dir');
mockGetToolRegistry.mockReturnValue({
getTool: vi.fn((toolName: string) => {
if (toolName === 'read_many_files') return mockReadManyFilesTool;
if (toolName === 'glob') return mockGlobTool;
return undefined;
}),
});
vi.mocked(fsPromises.stat).mockResolvedValue({
isDirectory: () => false,
} as Stats);
mockReadManyFilesExecute.mockResolvedValue({
llmContent: '',
returnDisplay: '',
});
mockGlobExecute.mockResolvedValue({
llmContent: 'No files found',
returnDisplay: '',
});
// Mock FileDiscoveryService
mockFileDiscoveryService = {
initialize: vi.fn(),
shouldIgnoreFile: vi.fn(() => false),
filterFiles: vi.fn((files) => files),
getIgnoreInfo: vi.fn(() => ({ gitIgnored: [] })),
isGitRepository: vi.fn(() => true),
};
vi.mocked(FileDiscoveryService).mockImplementation(
() => mockFileDiscoveryService,
);
// Mock getFileService to return the mocked FileDiscoveryService
mockConfig.getFileService = vi
.fn()
.mockReturnValue(mockFileDiscoveryService);
});
afterEach(() => {
abortController.abort();
});
it('should pass through query if no @ command is present', async () => {
const query = 'regular user query';
const result = await handleAtCommand({
query,
config: mockConfig,
addItem: mockAddItem,
onDebugMessage: mockOnDebugMessage,
messageId: 123,
signal: abortController.signal,
});
expect(mockAddItem).toHaveBeenCalledWith(
{ type: 'user', text: query },
123,
);
expect(result.processedQuery).toEqual([{ text: query }]);
expect(result.shouldProceed).toBe(true);
expect(mockReadManyFilesExecute).not.toHaveBeenCalled();
});
it('should pass through original query if only a lone @ symbol is present', async () => {
const queryWithSpaces = ' @ ';
const result = await handleAtCommand({
query: queryWithSpaces,
config: mockConfig,
addItem: mockAddItem,
onDebugMessage: mockOnDebugMessage,
messageId: 124,
signal: abortController.signal,
});
expect(mockAddItem).toHaveBeenCalledWith(
{ type: 'user', text: queryWithSpaces },
124,
);
expect(result.processedQuery).toEqual([{ text: queryWithSpaces }]);
expect(result.shouldProceed).toBe(true);
expect(mockOnDebugMessage).toHaveBeenCalledWith(
'Lone @ detected, will be treated as text in the modified query.',
);
});
it('should process a valid text file path', async () => {
const filePath = 'path/to/file.txt';
const query = `@${filePath}`;
const fileContent = 'This is the file content.';
mockReadManyFilesExecute.mockResolvedValue({
llmContent: [`--- ${filePath} ---\n\n${fileContent}\n\n`],
returnDisplay: 'Read 1 file.',
});
const result = await handleAtCommand({
query,
config: mockConfig,
addItem: mockAddItem,
onDebugMessage: mockOnDebugMessage,
messageId: 125,
signal: abortController.signal,
});
expect(mockAddItem).toHaveBeenCalledWith(
{ type: 'user', text: query },
125,
);
expect(mockReadManyFilesExecute).toHaveBeenCalledWith(
{ paths: [filePath], respect_git_ignore: true },
abortController.signal,
);
expect(mockAddItem).toHaveBeenCalledWith(
expect.objectContaining({
type: 'tool_group',
tools: [expect.objectContaining({ status: ToolCallStatus.Success })],
}),
125,
);
expect(result.processedQuery).toEqual([
{ text: `@${filePath}` },
{ text: '\n--- Content from referenced files ---' },
{ text: `\nContent from @${filePath}:\n` },
{ text: fileContent },
{ text: '\n--- End of content ---' },
]);
expect(result.shouldProceed).toBe(true);
});
it('should process a valid directory path and convert to glob', async () => {
const dirPath = 'path/to/dir';
const query = `@${dirPath}`;
const resolvedGlob = `${dirPath}/**`;
const fileContent = 'Directory content.';
vi.mocked(fsPromises.stat).mockResolvedValue({
isDirectory: () => true,
} as Stats);
mockReadManyFilesExecute.mockResolvedValue({
llmContent: [`--- ${resolvedGlob} ---\n\n${fileContent}\n\n`],
returnDisplay: 'Read directory contents.',
});
const result = await handleAtCommand({
query,
config: mockConfig,
addItem: mockAddItem,
onDebugMessage: mockOnDebugMessage,
messageId: 126,
signal: abortController.signal,
});
expect(mockAddItem).toHaveBeenCalledWith(
{ type: 'user', text: query },
126,
);
expect(mockReadManyFilesExecute).toHaveBeenCalledWith(
{ paths: [resolvedGlob], respect_git_ignore: true },
abortController.signal,
);
expect(mockOnDebugMessage).toHaveBeenCalledWith(
`Path ${dirPath} resolved to directory, using glob: ${resolvedGlob}`,
);
expect(result.processedQuery).toEqual([
{ text: `@${resolvedGlob}` },
{ text: '\n--- Content from referenced files ---' },
{ text: `\nContent from @${resolvedGlob}:\n` },
{ text: fileContent },
{ text: '\n--- End of content ---' },
]);
expect(result.shouldProceed).toBe(true);
});
it('should process a valid image file path (as text content for now)', async () => {
const imagePath = 'path/to/image.png';
const query = `@${imagePath}`;
// For @-commands, read_many_files is expected to return text or structured text.
// If it were to return an actual image Part, the test and handling would be different.
// The current implementation of read_many_files returns base64 image data embedded in text.
const imageFileTextContent = '[base64 image data for path/to/image.png]';
const imagePart = {
mimeType: 'image/png',
inlineData: imageFileTextContent,
};
mockReadManyFilesExecute.mockResolvedValue({
llmContent: [imagePart],
returnDisplay: 'Read 1 image.',
});
const result = await handleAtCommand({
query,
config: mockConfig,
addItem: mockAddItem,
onDebugMessage: mockOnDebugMessage,
messageId: 127,
signal: abortController.signal,
});
expect(result.processedQuery).toEqual([
{ text: `@${imagePath}` },
{ text: '\n--- Content from referenced files ---' },
imagePart,
{ text: '\n--- End of content ---' },
]);
expect(result.shouldProceed).toBe(true);
});
it('should handle query with text before and after @command', async () => {
const textBefore = 'Explain this: ';
const filePath = 'doc.md';
const textAfter = ' in detail.';
const query = `${textBefore}@${filePath}${textAfter}`;
const fileContent = 'Markdown content.';
mockReadManyFilesExecute.mockResolvedValue({
llmContent: [`--- ${filePath} ---\n\n${fileContent}\n\n`],
returnDisplay: 'Read 1 doc.',
});
const result = await handleAtCommand({
query,
config: mockConfig,
addItem: mockAddItem,
onDebugMessage: mockOnDebugMessage,
messageId: 128,
signal: abortController.signal,
});
expect(mockAddItem).toHaveBeenCalledWith(
{ type: 'user', text: query },
128,
);
expect(result.processedQuery).toEqual([
{ text: `${textBefore}@${filePath}${textAfter}` },
{ text: '\n--- Content from referenced files ---' },
{ text: `\nContent from @${filePath}:\n` },
{ text: fileContent },
{ text: '\n--- End of content ---' },
]);
expect(result.shouldProceed).toBe(true);
});
it('should correctly unescape paths with escaped spaces', async () => {
const rawPath = 'path/to/my\\ file.txt';
const unescapedPath = 'path/to/my file.txt';
const query = `@${rawPath}`;
const fileContent = 'Content of file with space.';
mockReadManyFilesExecute.mockResolvedValue({
llmContent: [`--- ${unescapedPath} ---\n\n${fileContent}\n\n`],
returnDisplay: 'Read 1 file.',
});
await handleAtCommand({
query,
config: mockConfig,
addItem: mockAddItem,
onDebugMessage: mockOnDebugMessage,
messageId: 129,
signal: abortController.signal,
});
expect(mockReadManyFilesExecute).toHaveBeenCalledWith(
{ paths: [unescapedPath], respect_git_ignore: true },
abortController.signal,
);
});
it('should handle multiple @file references', async () => {
const file1 = 'file1.txt';
const content1 = 'Content file1';
const file2 = 'file2.md';
const content2 = 'Content file2';
const query = `@${file1} @${file2}`;
mockReadManyFilesExecute.mockResolvedValue({
llmContent: [
`--- ${file1} ---\n\n${content1}\n\n`,
`--- ${file2} ---\n\n${content2}\n\n`,
],
returnDisplay: 'Read 2 files.',
});
const result = await handleAtCommand({
query,
config: mockConfig,
addItem: mockAddItem,
onDebugMessage: mockOnDebugMessage,
messageId: 130,
signal: abortController.signal,
});
expect(mockReadManyFilesExecute).toHaveBeenCalledWith(
{ paths: [file1, file2], respect_git_ignore: true },
abortController.signal,
);
expect(result.processedQuery).toEqual([
{ text: `@${file1} @${file2}` },
{ text: '\n--- Content from referenced files ---' },
{ text: `\nContent from @${file1}:\n` },
{ text: content1 },
{ text: `\nContent from @${file2}:\n` },
{ text: content2 },
{ text: '\n--- End of content ---' },
]);
expect(result.shouldProceed).toBe(true);
});
it('should handle multiple @file references with interleaved text', async () => {
const text1 = 'Check ';
const file1 = 'f1.txt';
const content1 = 'C1';
const text2 = ' and ';
const file2 = 'f2.md';
const content2 = 'C2';
const text3 = ' please.';
const query = `${text1}@${file1}${text2}@${file2}${text3}`;
mockReadManyFilesExecute.mockResolvedValue({
llmContent: [
`--- ${file1} ---\n\n${content1}\n\n`,
`--- ${file2} ---\n\n${content2}\n\n`,
],
returnDisplay: 'Read 2 files.',
});
const result = await handleAtCommand({
query,
config: mockConfig,
addItem: mockAddItem,
onDebugMessage: mockOnDebugMessage,
messageId: 131,
signal: abortController.signal,
});
expect(mockReadManyFilesExecute).toHaveBeenCalledWith(
{ paths: [file1, file2], respect_git_ignore: true },
abortController.signal,
);
expect(result.processedQuery).toEqual([
{ text: `${text1}@${file1}${text2}@${file2}${text3}` },
{ text: '\n--- Content from referenced files ---' },
{ text: `\nContent from @${file1}:\n` },
{ text: content1 },
{ text: `\nContent from @${file2}:\n` },
{ text: content2 },
{ text: '\n--- End of content ---' },
]);
expect(result.shouldProceed).toBe(true);
});
it('should handle a mix of valid, invalid, and lone @ references', async () => {
const file1 = 'valid1.txt';
const content1 = 'Valid content 1';
const invalidFile = 'nonexistent.txt';
const query = `Look at @${file1} then @${invalidFile} and also just @ symbol, then @valid2.glob`;
const file2Glob = 'valid2.glob';
const resolvedFile2 = 'resolved/valid2.actual';
const content2 = 'Globbed content';
// Mock fs.stat for file1 (valid)
vi.mocked(fsPromises.stat).mockImplementation(async (p) => {
if (p.toString().endsWith(file1))
return { isDirectory: () => false } as Stats;
if (p.toString().endsWith(invalidFile))
throw Object.assign(new Error('ENOENT'), { code: 'ENOENT' });
// For valid2.glob, stat will fail, triggering glob
if (p.toString().endsWith(file2Glob))
throw Object.assign(new Error('ENOENT'), { code: 'ENOENT' });
return { isDirectory: () => false } as Stats; // Default
});
// Mock glob to find resolvedFile2 for valid2.glob
mockGlobExecute.mockImplementation(async (params) => {
if (params.pattern.includes('valid2.glob')) {
return {
llmContent: `Found files:\n${mockGetTargetDir()}/${resolvedFile2}`,
returnDisplay: 'Found 1 file',
};
}
return { llmContent: 'No files found', returnDisplay: '' };
});
mockReadManyFilesExecute.mockResolvedValue({
llmContent: [
`--- ${file1} ---\n\n${content1}\n\n`,
`--- ${resolvedFile2} ---\n\n${content2}\n\n`,
],
returnDisplay: 'Read 2 files.',
});
const result = await handleAtCommand({
query,
config: mockConfig,
addItem: mockAddItem,
onDebugMessage: mockOnDebugMessage,
messageId: 132,
signal: abortController.signal,
});
expect(mockReadManyFilesExecute).toHaveBeenCalledWith(
{ paths: [file1, resolvedFile2], respect_git_ignore: true },
abortController.signal,
);
expect(result.processedQuery).toEqual([
// Original query has @nonexistent.txt and @, but resolved has @resolved/valid2.actual
{
text: `Look at @${file1} then @${invalidFile} and also just @ symbol, then @${resolvedFile2}`,
},
{ text: '\n--- Content from referenced files ---' },
{ text: `\nContent from @${file1}:\n` },
{ text: content1 },
{ text: `\nContent from @${resolvedFile2}:\n` },
{ text: content2 },
{ text: '\n--- End of content ---' },
]);
expect(result.shouldProceed).toBe(true);
expect(mockOnDebugMessage).toHaveBeenCalledWith(
`Path ${invalidFile} not found directly, attempting glob search.`,
);
expect(mockOnDebugMessage).toHaveBeenCalledWith(
`Glob search for '**/*${invalidFile}*' found no files or an error. Path ${invalidFile} will be skipped.`,
);
expect(mockOnDebugMessage).toHaveBeenCalledWith(
'Lone @ detected, will be treated as text in the modified query.',
);
});
it('should return original query if all @paths are invalid or lone @', async () => {
const query = 'Check @nonexistent.txt and @ also';
vi.mocked(fsPromises.stat).mockRejectedValue(
Object.assign(new Error('ENOENT'), { code: 'ENOENT' }),
);
mockGlobExecute.mockResolvedValue({
llmContent: 'No files found',
returnDisplay: '',
});
const result = await handleAtCommand({
query,
config: mockConfig,
addItem: mockAddItem,
onDebugMessage: mockOnDebugMessage,
messageId: 133,
signal: abortController.signal,
});
expect(mockReadManyFilesExecute).not.toHaveBeenCalled();
// The modified query string will be "Check @nonexistent.txt and @ also" because no paths were resolved for reading.
expect(result.processedQuery).toEqual([
{ text: 'Check @nonexistent.txt and @ also' },
]);
expect(result.shouldProceed).toBe(true);
});
it('should process a file path case-insensitively', async () => {
// const actualFilePath = 'path/to/MyFile.txt'; // Unused, path in llmContent should match queryPath
const queryPath = 'path/to/myfile.txt'; // Different case
const query = `@${queryPath}`;
const fileContent = 'This is the case-insensitive file content.';
// Mock fs.stat to "find" MyFile.txt when looking for myfile.txt
// This simulates a case-insensitive file system or resolution
vi.mocked(fsPromises.stat).mockImplementation(async (p) => {
if (p.toString().toLowerCase().endsWith('myfile.txt')) {
return {
isDirectory: () => false,
// You might need to add other Stats properties if your code uses them
} as Stats;
}
throw Object.assign(new Error('ENOENT'), { code: 'ENOENT' });
});
mockReadManyFilesExecute.mockResolvedValue({
llmContent: [`--- ${queryPath} ---\n\n${fileContent}\n\n`],
returnDisplay: 'Read 1 file.',
});
const result = await handleAtCommand({
query,
config: mockConfig,
addItem: mockAddItem,
onDebugMessage: mockOnDebugMessage,
messageId: 134, // New messageId
signal: abortController.signal,
});
expect(mockAddItem).toHaveBeenCalledWith(
{ type: 'user', text: query },
134,
);
// The atCommandProcessor resolves the path before calling read_many_files, so we
// expect the call to use the path that fs.stat "found". On a truly
// case-insensitive FS this could resolve to the on-disk casing (MyFile.txt);
// with this simpler mock, the query path is used as long as stat succeeds for it.
expect(mockReadManyFilesExecute).toHaveBeenCalledWith(
{ paths: [queryPath], respect_git_ignore: true },
abortController.signal,
);
expect(mockAddItem).toHaveBeenCalledWith(
expect.objectContaining({
type: 'tool_group',
tools: [expect.objectContaining({ status: ToolCallStatus.Success })],
}),
134,
);
expect(result.processedQuery).toEqual([
{ text: `@${queryPath}` }, // Query uses the input path
{ text: '\n--- Content from referenced files ---' },
{ text: `\nContent from @${queryPath}:\n` }, // Content display also uses input path
{ text: fileContent },
{ text: '\n--- End of content ---' },
]);
expect(result.shouldProceed).toBe(true);
});
describe('git-aware filtering', () => {
it('should skip git-ignored files in @ commands', async () => {
const gitIgnoredFile = 'node_modules/package.json';
const query = `@${gitIgnoredFile}`;
// Mock the file discovery service to report this file as git-ignored
mockFileDiscoveryService.shouldIgnoreFile.mockImplementation(
(path: string, options?: { respectGitIgnore?: boolean }) =>
path === gitIgnoredFile && options?.respectGitIgnore !== false,
);
const result = await handleAtCommand({
query,
config: mockConfig,
addItem: mockAddItem,
onDebugMessage: mockOnDebugMessage,
messageId: 200,
signal: abortController.signal,
});
expect(mockFileDiscoveryService.shouldIgnoreFile).toHaveBeenCalledWith(
gitIgnoredFile,
{ respectGitIgnore: true },
);
expect(mockOnDebugMessage).toHaveBeenCalledWith(
`Path ${gitIgnoredFile} is git-ignored and will be skipped.`,
);
expect(mockOnDebugMessage).toHaveBeenCalledWith(
'Ignored 1 git-ignored files: node_modules/package.json',
);
expect(mockReadManyFilesExecute).not.toHaveBeenCalled();
expect(result.processedQuery).toEqual([{ text: query }]);
expect(result.shouldProceed).toBe(true);
});
it('should process non-git-ignored files normally', async () => {
const validFile = 'src/index.ts';
const query = `@${validFile}`;
const fileContent = 'console.log("Hello world");';
mockFileDiscoveryService.shouldIgnoreFile.mockReturnValue(false);
mockReadManyFilesExecute.mockResolvedValue({
llmContent: [`--- ${validFile} ---\n\n${fileContent}\n\n`],
returnDisplay: 'Read 1 file.',
});
const result = await handleAtCommand({
query,
config: mockConfig,
addItem: mockAddItem,
onDebugMessage: mockOnDebugMessage,
messageId: 201,
signal: abortController.signal,
});
expect(mockFileDiscoveryService.shouldIgnoreFile).toHaveBeenCalledWith(
validFile,
{ respectGitIgnore: true },
);
expect(mockReadManyFilesExecute).toHaveBeenCalledWith(
{ paths: [validFile], respect_git_ignore: true },
abortController.signal,
);
expect(result.processedQuery).toEqual([
{ text: `@${validFile}` },
{ text: '\n--- Content from referenced files ---' },
{ text: `\nContent from @${validFile}:\n` },
{ text: fileContent },
{ text: '\n--- End of content ---' },
]);
expect(result.shouldProceed).toBe(true);
});
it('should handle mixed git-ignored and valid files', async () => {
const validFile = 'README.md';
const gitIgnoredFile = '.env';
const query = `@${validFile} @${gitIgnoredFile}`;
const fileContent = '# Project README';
mockFileDiscoveryService.shouldIgnoreFile.mockImplementation(
(path: string, options?: { respectGitIgnore?: boolean }) =>
path === gitIgnoredFile && options?.respectGitIgnore !== false,
);
mockReadManyFilesExecute.mockResolvedValue({
llmContent: [`--- ${validFile} ---\n\n${fileContent}\n\n`],
returnDisplay: 'Read 1 file.',
});
const result = await handleAtCommand({
query,
config: mockConfig,
addItem: mockAddItem,
onDebugMessage: mockOnDebugMessage,
messageId: 202,
signal: abortController.signal,
});
expect(mockFileDiscoveryService.shouldIgnoreFile).toHaveBeenCalledWith(
validFile,
{ respectGitIgnore: true },
);
expect(mockFileDiscoveryService.shouldIgnoreFile).toHaveBeenCalledWith(
gitIgnoredFile,
{ respectGitIgnore: true },
);
expect(mockOnDebugMessage).toHaveBeenCalledWith(
`Path ${gitIgnoredFile} is git-ignored and will be skipped.`,
);
expect(mockOnDebugMessage).toHaveBeenCalledWith(
'Ignored 1 git-ignored files: .env',
);
expect(mockReadManyFilesExecute).toHaveBeenCalledWith(
{ paths: [validFile], respect_git_ignore: true },
abortController.signal,
);
expect(result.processedQuery).toEqual([
{ text: `@${validFile} @${gitIgnoredFile}` },
{ text: '\n--- Content from referenced files ---' },
{ text: `\nContent from @${validFile}:\n` },
{ text: fileContent },
{ text: '\n--- End of content ---' },
]);
expect(result.shouldProceed).toBe(true);
});
it('should always ignore .git directory files', async () => {
const gitFile = '.git/config';
const query = `@${gitFile}`;
mockFileDiscoveryService.shouldIgnoreFile.mockReturnValue(true);
const result = await handleAtCommand({
query,
config: mockConfig,
addItem: mockAddItem,
onDebugMessage: mockOnDebugMessage,
messageId: 203,
signal: abortController.signal,
});
expect(mockFileDiscoveryService.shouldIgnoreFile).toHaveBeenCalledWith(
gitFile,
{ respectGitIgnore: true },
);
expect(mockOnDebugMessage).toHaveBeenCalledWith(
`Path ${gitFile} is git-ignored and will be skipped.`,
);
expect(mockReadManyFilesExecute).not.toHaveBeenCalled();
expect(result.processedQuery).toEqual([{ text: query }]);
expect(result.shouldProceed).toBe(true);
});
});
describe('when recursive file search is disabled', () => {
beforeEach(() => {
vi.mocked(mockConfig.getEnableRecursiveFileSearch).mockReturnValue(false);
});
it('should not use glob search for a nonexistent file', async () => {
const invalidFile = 'nonexistent.txt';
const query = `@${invalidFile}`;
vi.mocked(fsPromises.stat).mockRejectedValue(
Object.assign(new Error('ENOENT'), { code: 'ENOENT' }),
);
const result = await handleAtCommand({
query,
config: mockConfig,
addItem: mockAddItem,
onDebugMessage: mockOnDebugMessage,
messageId: 300,
signal: abortController.signal,
});
expect(mockGlobExecute).not.toHaveBeenCalled();
expect(mockOnDebugMessage).toHaveBeenCalledWith(
`Glob tool not found. Path ${invalidFile} will be skipped.`,
);
expect(result.processedQuery).toEqual([{ text: query }]);
expect(result.shouldProceed).toBe(true);
});
});
});

View File

@@ -0,0 +1,423 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import * as fs from 'fs/promises';
import * as path from 'path';
import { PartListUnion, PartUnion } from '@google/genai';
import {
Config,
getErrorMessage,
isNodeError,
unescapePath,
} from '@qwen/qwen-code-core';
import {
HistoryItem,
IndividualToolCallDisplay,
ToolCallStatus,
} from '../types.js';
import { UseHistoryManagerReturn } from './useHistoryManager.js';
interface HandleAtCommandParams {
query: string;
config: Config;
addItem: UseHistoryManagerReturn['addItem'];
onDebugMessage: (message: string) => void;
messageId: number;
signal: AbortSignal;
}
interface HandleAtCommandResult {
processedQuery: PartListUnion | null;
shouldProceed: boolean;
}
interface AtCommandPart {
type: 'text' | 'atPath';
content: string;
}
/**
* Parses a query string to find all '@<path>' commands and text segments.
* Handles \ escaped spaces within paths.
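 *
 * @example
 * // Illustrative only; assumes unescapePath collapses "\ " back into a plain space.
 * parseAllAtCommands('Check @path/to/my\\ file.txt please');
 * // => [
 * //   { type: 'text', content: 'Check ' },
 * //   { type: 'atPath', content: '@path/to/my file.txt' },
 * //   { type: 'text', content: ' please' },
 * // ]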
*/
function parseAllAtCommands(query: string): AtCommandPart[] {
const parts: AtCommandPart[] = [];
let currentIndex = 0;
while (currentIndex < query.length) {
let atIndex = -1;
let nextSearchIndex = currentIndex;
// Find next unescaped '@'
while (nextSearchIndex < query.length) {
if (
query[nextSearchIndex] === '@' &&
(nextSearchIndex === 0 || query[nextSearchIndex - 1] !== '\\')
) {
atIndex = nextSearchIndex;
break;
}
nextSearchIndex++;
}
if (atIndex === -1) {
// No more @
if (currentIndex < query.length) {
parts.push({ type: 'text', content: query.substring(currentIndex) });
}
break;
}
// Add text before @
if (atIndex > currentIndex) {
parts.push({
type: 'text',
content: query.substring(currentIndex, atIndex),
});
}
// Parse @path
let pathEndIndex = atIndex + 1;
let inEscape = false;
while (pathEndIndex < query.length) {
const char = query[pathEndIndex];
if (inEscape) {
inEscape = false;
} else if (char === '\\') {
inEscape = true;
} else if (/\s/.test(char)) {
// Path ends at first whitespace not escaped
break;
}
pathEndIndex++;
}
const rawAtPath = query.substring(atIndex, pathEndIndex);
// unescapePath expects the @ symbol to be present, and will handle it.
const atPath = unescapePath(rawAtPath);
parts.push({ type: 'atPath', content: atPath });
currentIndex = pathEndIndex;
}
// Filter out empty text parts that might result from consecutive @paths or leading/trailing spaces
return parts.filter(
(part) => !(part.type === 'text' && part.content.trim() === ''),
);
}
/**
* Processes user input potentially containing one or more '@<path>' commands.
* If found, it attempts to read the specified files/directories using the
* 'read_many_files' tool. The user query is modified to include resolved paths,
* and the content of the files is appended in a structured block.
*
* @returns An object indicating whether the main hook should proceed with an
* LLM call and the processed query parts (including file content).
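 *
 * @example
 * // Illustrative usage; addItem, config, and the signal are placeholders supplied by the caller.
 * const { processedQuery, shouldProceed } = await handleAtCommand({
 *   query: 'Explain @src/index.ts',
 *   config,
 *   addItem,
 *   onDebugMessage: (msg) => console.debug(msg),
 *   messageId: Date.now(),
 *   signal: new AbortController().signal,
 * });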
*/
export async function handleAtCommand({
query,
config,
addItem,
onDebugMessage,
messageId: userMessageTimestamp,
signal,
}: HandleAtCommandParams): Promise<HandleAtCommandResult> {
const commandParts = parseAllAtCommands(query);
const atPathCommandParts = commandParts.filter(
(part) => part.type === 'atPath',
);
if (atPathCommandParts.length === 0) {
addItem({ type: 'user', text: query }, userMessageTimestamp);
return { processedQuery: [{ text: query }], shouldProceed: true };
}
addItem({ type: 'user', text: query }, userMessageTimestamp);
// Get centralized file discovery service
const fileDiscovery = config.getFileService();
const respectGitIgnore = config.getFileFilteringRespectGitIgnore();
const pathSpecsToRead: string[] = [];
const atPathToResolvedSpecMap = new Map<string, string>();
const contentLabelsForDisplay: string[] = [];
const ignoredPaths: string[] = [];
const toolRegistry = await config.getToolRegistry();
const readManyFilesTool = toolRegistry.getTool('read_many_files');
const globTool = toolRegistry.getTool('glob');
if (!readManyFilesTool) {
addItem(
{ type: 'error', text: 'Error: read_many_files tool not found.' },
userMessageTimestamp,
);
return { processedQuery: null, shouldProceed: false };
}
for (const atPathPart of atPathCommandParts) {
const originalAtPath = atPathPart.content; // e.g., "@file.txt" or "@"
if (originalAtPath === '@') {
onDebugMessage(
'Lone @ detected, will be treated as text in the modified query.',
);
continue;
}
const pathName = originalAtPath.substring(1);
if (!pathName) {
// This case should ideally not be hit if parseAllAtCommands ensures content after @
// but as a safeguard:
addItem(
{
type: 'error',
text: `Error: Invalid @ command '${originalAtPath}'. No path specified.`,
},
userMessageTimestamp,
);
// Decide if this is a fatal error for the whole command or just skip this @ part
// For now, let's be strict and fail the command if one @path is malformed.
return { processedQuery: null, shouldProceed: false };
}
// Check if path should be ignored based on filtering options
if (fileDiscovery.shouldIgnoreFile(pathName, { respectGitIgnore })) {
const reason = respectGitIgnore ? 'git-ignored' : 'custom-ignored';
onDebugMessage(`Path ${pathName} is ${reason} and will be skipped.`);
ignoredPaths.push(pathName);
continue;
}
let currentPathSpec = pathName;
let resolvedSuccessfully = false;
try {
const absolutePath = path.resolve(config.getTargetDir(), pathName);
const stats = await fs.stat(absolutePath);
if (stats.isDirectory()) {
currentPathSpec = pathName.endsWith('/')
? `${pathName}**`
: `${pathName}/**`;
onDebugMessage(
`Path ${pathName} resolved to directory, using glob: ${currentPathSpec}`,
);
} else {
onDebugMessage(`Path ${pathName} resolved to file: ${currentPathSpec}`);
}
resolvedSuccessfully = true;
} catch (error) {
if (isNodeError(error) && error.code === 'ENOENT') {
if (config.getEnableRecursiveFileSearch() && globTool) {
onDebugMessage(
`Path ${pathName} not found directly, attempting glob search.`,
);
try {
const globResult = await globTool.execute(
{ pattern: `**/*${pathName}*`, path: config.getTargetDir() },
signal,
);
if (
globResult.llmContent &&
typeof globResult.llmContent === 'string' &&
!globResult.llmContent.startsWith('No files found') &&
!globResult.llmContent.startsWith('Error:')
) {
const lines = globResult.llmContent.split('\n');
if (lines.length > 1 && lines[1]) {
const firstMatchAbsolute = lines[1].trim();
currentPathSpec = path.relative(
config.getTargetDir(),
firstMatchAbsolute,
);
onDebugMessage(
`Glob search for ${pathName} found ${firstMatchAbsolute}, using relative path: ${currentPathSpec}`,
);
resolvedSuccessfully = true;
} else {
onDebugMessage(
`Glob search for '**/*${pathName}*' did not return a usable path. Path ${pathName} will be skipped.`,
);
}
} else {
onDebugMessage(
`Glob search for '**/*${pathName}*' found no files or an error. Path ${pathName} will be skipped.`,
);
}
} catch (globError) {
console.error(
`Error during glob search for ${pathName}: ${getErrorMessage(globError)}`,
);
onDebugMessage(
`Error during glob search for ${pathName}. Path ${pathName} will be skipped.`,
);
}
} else {
onDebugMessage(
`Glob tool not found. Path ${pathName} will be skipped.`,
);
}
} else {
console.error(
`Error stating path ${pathName}: ${getErrorMessage(error)}`,
);
onDebugMessage(
`Error stating path ${pathName}. Path ${pathName} will be skipped.`,
);
}
}
if (resolvedSuccessfully) {
pathSpecsToRead.push(currentPathSpec);
atPathToResolvedSpecMap.set(originalAtPath, currentPathSpec);
contentLabelsForDisplay.push(pathName);
}
}
// Construct the initial part of the query for the LLM
let initialQueryText = '';
for (let i = 0; i < commandParts.length; i++) {
const part = commandParts[i];
if (part.type === 'text') {
initialQueryText += part.content;
} else {
// type === 'atPath'
const resolvedSpec = atPathToResolvedSpecMap.get(part.content);
if (
i > 0 &&
initialQueryText.length > 0 &&
!initialQueryText.endsWith(' ') &&
resolvedSpec
) {
// Add space if previous part was text and didn't end with space, or if previous was @path
const prevPart = commandParts[i - 1];
if (
prevPart.type === 'text' ||
(prevPart.type === 'atPath' &&
atPathToResolvedSpecMap.has(prevPart.content))
) {
initialQueryText += ' ';
}
}
if (resolvedSpec) {
initialQueryText += `@${resolvedSpec}`;
} else {
// If not resolved for reading (e.g. lone @ or invalid path that was skipped),
// add the original @-string back, ensuring spacing if it's not the first element.
if (
i > 0 &&
initialQueryText.length > 0 &&
!initialQueryText.endsWith(' ') &&
!part.content.startsWith(' ')
) {
initialQueryText += ' ';
}
initialQueryText += part.content;
}
}
}
initialQueryText = initialQueryText.trim();
// Inform user about ignored paths
if (ignoredPaths.length > 0) {
const ignoreType = respectGitIgnore ? 'git-ignored' : 'custom-ignored';
onDebugMessage(
`Ignored ${ignoredPaths.length} ${ignoreType} files: ${ignoredPaths.join(', ')}`,
);
}
// Fallback for lone "@" or completely invalid @-commands resulting in empty initialQueryText
if (pathSpecsToRead.length === 0) {
onDebugMessage('No valid file paths found in @ commands to read.');
if (initialQueryText === '@' && query.trim() === '@') {
// If the only thing was a lone @, pass original query (which might have spaces)
return { processedQuery: [{ text: query }], shouldProceed: true };
} else if (!initialQueryText && query) {
// If all @-commands were invalid and no surrounding text, pass original query
return { processedQuery: [{ text: query }], shouldProceed: true };
}
// Otherwise, proceed with the (potentially modified) query text that doesn't involve file reading
return {
processedQuery: [{ text: initialQueryText || query }],
shouldProceed: true,
};
}
const processedQueryParts: PartUnion[] = [{ text: initialQueryText }];
const toolArgs = {
paths: pathSpecsToRead,
respect_git_ignore: respectGitIgnore, // Use configuration setting
};
let toolCallDisplay: IndividualToolCallDisplay;
try {
const result = await readManyFilesTool.execute(toolArgs, signal);
toolCallDisplay = {
callId: `client-read-${userMessageTimestamp}`,
name: readManyFilesTool.displayName,
description: readManyFilesTool.getDescription(toolArgs),
status: ToolCallStatus.Success,
resultDisplay:
result.returnDisplay ||
`Successfully read: ${contentLabelsForDisplay.join(', ')}`,
confirmationDetails: undefined,
};
if (Array.isArray(result.llmContent)) {
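// Illustrative match (format assumed to be produced by read_many_files):
// "--- src/index.ts ---\n\n<file body>\n\n" -> match[1] === 'src/index.ts', match[2] === '<file body>'.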
const fileContentRegex = /^--- (.*?) ---\n\n([\s\S]*?)\n\n$/;
processedQueryParts.push({
text: '\n--- Content from referenced files ---',
});
for (const part of result.llmContent) {
if (typeof part === 'string') {
const match = fileContentRegex.exec(part);
if (match) {
const filePathSpecInContent = match[1]; // This is a resolved pathSpec
const fileActualContent = match[2].trim();
processedQueryParts.push({
text: `\nContent from @${filePathSpecInContent}:\n`,
});
processedQueryParts.push({ text: fileActualContent });
} else {
processedQueryParts.push({ text: part });
}
} else {
// part is a Part object.
processedQueryParts.push(part);
}
}
processedQueryParts.push({ text: '\n--- End of content ---' });
} else {
onDebugMessage(
'read_many_files tool returned no content or empty content.',
);
}
addItem(
{ type: 'tool_group', tools: [toolCallDisplay] } as Omit<
HistoryItem,
'id'
>,
userMessageTimestamp,
);
return { processedQuery: processedQueryParts, shouldProceed: true };
} catch (error: unknown) {
toolCallDisplay = {
callId: `client-read-${userMessageTimestamp}`,
name: readManyFilesTool.displayName,
description: readManyFilesTool.getDescription(toolArgs),
status: ToolCallStatus.Error,
resultDisplay: `Error reading files (${contentLabelsForDisplay.join(', ')}): ${getErrorMessage(error)}`,
confirmationDetails: undefined,
};
addItem(
{ type: 'tool_group', tools: [toolCallDisplay] } as Omit<
HistoryItem,
'id'
>,
userMessageTimestamp,
);
return { processedQuery: null, shouldProceed: false };
}
}

View File

@@ -0,0 +1,179 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { act, renderHook } from '@testing-library/react';
import { vi } from 'vitest';
import { useShellCommandProcessor } from './shellCommandProcessor';
import { Config, GeminiClient } from '@qwen/qwen-code-core';
import * as fs from 'fs';
import EventEmitter from 'events';
// Mock dependencies
vi.mock('child_process');
vi.mock('fs');
vi.mock('os', () => ({
default: {
platform: () => 'linux',
tmpdir: () => '/tmp',
},
platform: () => 'linux',
tmpdir: () => '/tmp',
}));
vi.mock('@qwen/qwen-code-core');
vi.mock('../utils/textUtils.js', () => ({
isBinary: vi.fn(),
}));
describe('useShellCommandProcessor', () => {
let spawnEmitter: EventEmitter;
let addItemToHistoryMock: vi.Mock;
let setPendingHistoryItemMock: vi.Mock;
let onExecMock: vi.Mock;
let onDebugMessageMock: vi.Mock;
let configMock: Config;
let geminiClientMock: GeminiClient;
beforeEach(async () => {
const { spawn } = await import('child_process');
spawnEmitter = new EventEmitter();
spawnEmitter.stdout = new EventEmitter();
spawnEmitter.stderr = new EventEmitter();
(spawn as vi.Mock).mockReturnValue(spawnEmitter);
vi.spyOn(fs, 'existsSync').mockReturnValue(false);
vi.spyOn(fs, 'readFileSync').mockReturnValue('');
vi.spyOn(fs, 'unlinkSync').mockReturnValue(undefined);
addItemToHistoryMock = vi.fn();
setPendingHistoryItemMock = vi.fn();
onExecMock = vi.fn();
onDebugMessageMock = vi.fn();
configMock = {
getTargetDir: () => '/test/dir',
} as unknown as Config;
geminiClientMock = {
addHistory: vi.fn(),
} as unknown as GeminiClient;
});
afterEach(() => {
vi.restoreAllMocks();
});
const renderProcessorHook = () =>
renderHook(() =>
useShellCommandProcessor(
addItemToHistoryMock,
setPendingHistoryItemMock,
onExecMock,
onDebugMessageMock,
configMock,
geminiClientMock,
),
);
it('should execute a command and update history on success', async () => {
const { result } = renderProcessorHook();
const abortController = new AbortController();
act(() => {
result.current.handleShellCommand('ls -l', abortController.signal);
});
expect(onExecMock).toHaveBeenCalledTimes(1);
const execPromise = onExecMock.mock.calls[0][0];
// Simulate stdout
act(() => {
spawnEmitter.stdout.emit('data', Buffer.from('file1.txt\nfile2.txt'));
});
// Simulate process exit
act(() => {
spawnEmitter.emit('exit', 0, null);
});
await act(async () => {
await execPromise;
});
expect(addItemToHistoryMock).toHaveBeenCalledTimes(2);
expect(addItemToHistoryMock.mock.calls[1][0]).toEqual({
type: 'info',
text: 'file1.txt\nfile2.txt',
});
expect(geminiClientMock.addHistory).toHaveBeenCalledTimes(1);
});
it('should handle binary output', async () => {
const { result } = renderProcessorHook();
const abortController = new AbortController();
const { isBinary } = await import('../utils/textUtils.js');
(isBinary as vi.Mock).mockReturnValue(true);
act(() => {
result.current.handleShellCommand(
'cat myimage.png',
abortController.signal,
);
});
expect(onExecMock).toHaveBeenCalledTimes(1);
const execPromise = onExecMock.mock.calls[0][0];
act(() => {
spawnEmitter.stdout.emit('data', Buffer.from([0x89, 0x50, 0x4e, 0x47]));
});
act(() => {
spawnEmitter.emit('exit', 0, null);
});
await act(async () => {
await execPromise;
});
expect(addItemToHistoryMock).toHaveBeenCalledTimes(2);
expect(addItemToHistoryMock.mock.calls[1][0]).toEqual({
type: 'info',
text: '[Command produced binary output, which is not shown.]',
});
});
it('should handle command failure', async () => {
const { result } = renderProcessorHook();
const abortController = new AbortController();
act(() => {
result.current.handleShellCommand(
'a-bad-command',
abortController.signal,
);
});
const execPromise = onExecMock.mock.calls[0][0];
act(() => {
spawnEmitter.stderr.emit('data', Buffer.from('command not found'));
});
act(() => {
spawnEmitter.emit('exit', 127, null);
});
await act(async () => {
await execPromise;
});
expect(addItemToHistoryMock).toHaveBeenCalledTimes(2);
expect(addItemToHistoryMock.mock.calls[1][0]).toEqual({
type: 'error',
text: 'Command exited with code 127.\ncommand not found',
});
});
});

View File

@@ -0,0 +1,348 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { spawn } from 'child_process';
import { StringDecoder } from 'string_decoder';
import type { HistoryItemWithoutId } from '../types.js';
import { useCallback } from 'react';
import { Config, GeminiClient } from '@qwen/qwen-code-core';
import { type PartListUnion } from '@google/genai';
import { formatMemoryUsage } from '../utils/formatters.js';
import { isBinary } from '../utils/textUtils.js';
import { UseHistoryManagerReturn } from './useHistoryManager.js';
import crypto from 'crypto';
import path from 'path';
import os from 'os';
import fs from 'fs';
import stripAnsi from 'strip-ansi';
const OUTPUT_UPDATE_INTERVAL_MS = 1000;
const MAX_OUTPUT_LENGTH = 10000;
/**
* A structured result from a shell command execution.
*/
interface ShellExecutionResult {
rawOutput: Buffer;
output: string;
exitCode: number | null;
signal: NodeJS.Signals | null;
error: Error | null;
aborted: boolean;
}
/**
* Executes a shell command using `spawn`, capturing all output and lifecycle events.
* This is the single, unified implementation for shell execution.
*
* @param commandToExecute The exact command string to run.
* @param cwd The working directory to execute the command in.
* @param abortSignal An AbortSignal to terminate the process.
* @param onOutputChunk A callback for streaming real-time output.
* @param onDebugMessage A callback for logging debug information.
* @returns A promise that resolves with the complete execution result.
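 *
 * @example
 * // Illustrative call; the command, cwd, and callbacks are placeholders.
 * const result = await executeShellCommand(
 *   'ls -la',
 *   '/path/to/project',
 *   new AbortController().signal,
 *   (chunk) => console.log(chunk),
 *   (msg) => console.debug(msg),
 * );
 * // result.exitCode, result.output, result.aborted, ...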
*/
function executeShellCommand(
commandToExecute: string,
cwd: string,
abortSignal: AbortSignal,
onOutputChunk: (chunk: string) => void,
onDebugMessage: (message: string) => void,
): Promise<ShellExecutionResult> {
return new Promise((resolve) => {
const isWindows = os.platform() === 'win32';
const shell = isWindows ? 'cmd.exe' : 'bash';
const shellArgs = isWindows
? ['/c', commandToExecute]
: ['-c', commandToExecute];
const child = spawn(shell, shellArgs, {
cwd,
stdio: ['ignore', 'pipe', 'pipe'],
detached: !isWindows, // Use process groups on non-Windows for robust killing
});
// Use decoders to handle multi-byte characters safely (for streaming output).
const stdoutDecoder = new StringDecoder('utf8');
const stderrDecoder = new StringDecoder('utf8');
let stdout = '';
let stderr = '';
const outputChunks: Buffer[] = [];
let error: Error | null = null;
let exited = false;
let streamToUi = true;
const MAX_SNIFF_SIZE = 4096;
let sniffedBytes = 0;
const handleOutput = (data: Buffer, stream: 'stdout' | 'stderr') => {
outputChunks.push(data);
if (streamToUi && sniffedBytes < MAX_SNIFF_SIZE) {
// Use a limited-size buffer for the check to avoid performance issues.
const sniffBuffer = Buffer.concat(outputChunks.slice(0, 20));
sniffedBytes = sniffBuffer.length;
if (isBinary(sniffBuffer)) {
streamToUi = false;
// Overwrite any garbled text that may already have streamed to the UI with a clear message.
onOutputChunk('[Binary output detected. Halting stream...]');
}
}
const decodedChunk =
stream === 'stdout'
? stdoutDecoder.write(data)
: stderrDecoder.write(data);
if (stream === 'stdout') {
stdout += stripAnsi(decodedChunk);
} else {
stderr += stripAnsi(decodedChunk);
}
if (!exited && streamToUi) {
// Stream the accumulated combined output so far to the UI.
const combinedOutput = stdout + (stderr ? `\n${stderr}` : '');
onOutputChunk(combinedOutput);
} else if (!exited && !streamToUi) {
// Send progress updates for the binary stream
const totalBytes = outputChunks.reduce(
(sum, chunk) => sum + chunk.length,
0,
);
onOutputChunk(
`[Receiving binary output... ${formatMemoryUsage(totalBytes)} received]`,
);
}
};
child.stdout.on('data', (data) => handleOutput(data, 'stdout'));
child.stderr.on('data', (data) => handleOutput(data, 'stderr'));
child.on('error', (err) => {
error = err;
});
const abortHandler = async () => {
if (child.pid && !exited) {
onDebugMessage(`Aborting shell command (PID: ${child.pid})`);
if (isWindows) {
spawn('taskkill', ['/pid', child.pid.toString(), '/f', '/t']);
} else {
try {
// Kill the entire process group (negative PID).
// SIGTERM first, then SIGKILL if it doesn't die.
process.kill(-child.pid, 'SIGTERM');
await new Promise((res) => setTimeout(res, 200));
if (!exited) {
process.kill(-child.pid, 'SIGKILL');
}
} catch (_e) {
// Fallback to killing just the main process if group kill fails.
if (!exited) child.kill('SIGKILL');
}
}
}
};
abortSignal.addEventListener('abort', abortHandler, { once: true });
child.on('exit', (code, signal) => {
exited = true;
abortSignal.removeEventListener('abort', abortHandler);
// Handle any final bytes lingering in the decoders
stdout += stdoutDecoder.end();
stderr += stderrDecoder.end();
const finalBuffer = Buffer.concat(outputChunks);
resolve({
rawOutput: finalBuffer,
output: stdout + (stderr ? `\n${stderr}` : ''),
exitCode: code,
signal,
error,
aborted: abortSignal.aborted,
});
});
});
}
function addShellCommandToGeminiHistory(
geminiClient: GeminiClient,
rawQuery: string,
resultText: string,
) {
const modelContent =
resultText.length > MAX_OUTPUT_LENGTH
? resultText.substring(0, MAX_OUTPUT_LENGTH) + '\n... (truncated)'
: resultText;
geminiClient.addHistory({
role: 'user',
parts: [
{
text: `I ran the following shell command:
\`\`\`sh
${rawQuery}
\`\`\`
This produced the following result:
\`\`\`
${modelContent}
\`\`\``,
},
],
});
}
/**
* Hook to process shell commands.
* Orchestrates command execution and updates history and agent context.
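 *
 * @example
 * // Illustrative wiring inside a component; all arguments are placeholders from the caller.
 * const { handleShellCommand } = useShellCommandProcessor(
 *   addItem,
 *   setPendingHistoryItem,
 *   onExec,
 *   onDebugMessage,
 *   config,
 *   geminiClient,
 * );
 * handleShellCommand('ls -l', abortController.signal);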
*/
export const useShellCommandProcessor = (
addItemToHistory: UseHistoryManagerReturn['addItem'],
setPendingHistoryItem: React.Dispatch<
React.SetStateAction<HistoryItemWithoutId | null>
>,
onExec: (command: Promise<void>) => void,
onDebugMessage: (message: string) => void,
config: Config,
geminiClient: GeminiClient,
) => {
const handleShellCommand = useCallback(
(rawQuery: PartListUnion, abortSignal: AbortSignal): boolean => {
if (typeof rawQuery !== 'string' || rawQuery.trim() === '') {
return false;
}
const userMessageTimestamp = Date.now();
addItemToHistory(
{ type: 'user_shell', text: rawQuery },
userMessageTimestamp,
);
const isWindows = os.platform() === 'win32';
const targetDir = config.getTargetDir();
let commandToExecute = rawQuery;
let pwdFilePath: string | undefined;
// On non-windows, wrap the command to capture the final working directory.
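// For example (tmp filename is illustrative), `cd src && ls` becomes:
//   { cd src && ls; }; __code=$?; pwd > "/tmp/shell_pwd_ab12cd34ef56.tmp"; exit $__code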
if (!isWindows) {
let command = rawQuery.trim();
const pwdFileName = `shell_pwd_${crypto.randomBytes(6).toString('hex')}.tmp`;
pwdFilePath = path.join(os.tmpdir(), pwdFileName);
// Ensure command ends with a separator before adding our own.
if (!command.endsWith(';') && !command.endsWith('&')) {
command += ';';
}
commandToExecute = `{ ${command} }; __code=$?; pwd > "${pwdFilePath}"; exit $__code`;
}
const execPromise = new Promise<void>((resolve) => {
let lastUpdateTime = 0;
onDebugMessage(`Executing in ${targetDir}: ${commandToExecute}`);
executeShellCommand(
commandToExecute,
targetDir,
abortSignal,
(streamedOutput) => {
// Throttle pending UI updates to avoid excessive re-renders.
if (Date.now() - lastUpdateTime > OUTPUT_UPDATE_INTERVAL_MS) {
setPendingHistoryItem({ type: 'info', text: streamedOutput });
lastUpdateTime = Date.now();
}
},
onDebugMessage,
)
.then((result) => {
// TODO(abhipatel12) - Consider updating pending item and using timeout to ensure
// there is no jump where intermediate output is skipped.
setPendingHistoryItem(null);
let historyItemType: HistoryItemWithoutId['type'] = 'info';
let mainContent: string;
// The context sent to the model uses a text tokenizer, so raw binary data
// cannot be parsed or understood and would only pollute the context window
// and waste tokens.
if (isBinary(result.rawOutput)) {
mainContent =
'[Command produced binary output, which is not shown.]';
} else {
mainContent =
result.output.trim() || '(Command produced no output)';
}
let finalOutput = mainContent;
if (result.error) {
historyItemType = 'error';
finalOutput = `${result.error.message}\n${finalOutput}`;
} else if (result.aborted) {
finalOutput = `Command was cancelled.\n${finalOutput}`;
} else if (result.signal) {
historyItemType = 'error';
finalOutput = `Command terminated by signal: ${result.signal}.\n${finalOutput}`;
} else if (result.exitCode !== 0) {
historyItemType = 'error';
finalOutput = `Command exited with code ${result.exitCode}.\n${finalOutput}`;
}
if (pwdFilePath && fs.existsSync(pwdFilePath)) {
const finalPwd = fs.readFileSync(pwdFilePath, 'utf8').trim();
if (finalPwd && finalPwd !== targetDir) {
const warning = `WARNING: shell mode is stateless; the directory change to '${finalPwd}' will not persist.`;
finalOutput = `${warning}\n\n${finalOutput}`;
}
}
// Add the complete, contextual result to the local UI history.
addItemToHistory(
{ type: historyItemType, text: finalOutput },
userMessageTimestamp,
);
// Add the same complete, contextual result to the LLM's history.
addShellCommandToGeminiHistory(geminiClient, rawQuery, finalOutput);
})
.catch((err) => {
setPendingHistoryItem(null);
const errorMessage =
err instanceof Error ? err.message : String(err);
addItemToHistory(
{
type: 'error',
text: `An unexpected error occurred: ${errorMessage}`,
},
userMessageTimestamp,
);
})
.finally(() => {
if (pwdFilePath && fs.existsSync(pwdFilePath)) {
fs.unlinkSync(pwdFilePath);
}
resolve();
});
});
onExec(execPromise);
return true; // Command was initiated
},
[
config,
onDebugMessage,
addItemToHistory,
setPendingHistoryItem,
onExec,
geminiClient,
],
);
return { handleShellCommand };
};

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -0,0 +1,88 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { useState, useCallback, useEffect } from 'react';
import { LoadedSettings, SettingScope } from '../../config/settings.js';
import {
AuthType,
Config,
clearCachedCredentialFile,
getErrorMessage,
} from '@qwen/qwen-code-core';
import { runExitCleanup } from '../../utils/cleanup.js';
export const useAuthCommand = (
settings: LoadedSettings,
setAuthError: (error: string | null) => void,
config: Config,
) => {
const [isAuthDialogOpen, setIsAuthDialogOpen] = useState(
settings.merged.selectedAuthType === undefined,
);
const openAuthDialog = useCallback(() => {
setIsAuthDialogOpen(true);
}, []);
const [isAuthenticating, setIsAuthenticating] = useState(false);
useEffect(() => {
const authFlow = async () => {
const authType = settings.merged.selectedAuthType;
if (isAuthDialogOpen || !authType) {
return;
}
try {
setIsAuthenticating(true);
await config.refreshAuth(authType);
console.log(`Authenticated via "${authType}".`);
} catch (e) {
setAuthError(`Failed to login. Message: ${getErrorMessage(e)}`);
openAuthDialog();
} finally {
setIsAuthenticating(false);
}
};
void authFlow();
}, [isAuthDialogOpen, settings, config, setAuthError, openAuthDialog]);
const handleAuthSelect = useCallback(
async (authType: AuthType | undefined, scope: SettingScope) => {
if (authType) {
await clearCachedCredentialFile();
settings.setValue(scope, 'selectedAuthType', authType);
if (authType === AuthType.LOGIN_WITH_GOOGLE && config.getNoBrowser()) {
runExitCleanup();
console.log(
`
----------------------------------------------------------------
Logging in with Google... Please restart Gemini CLI to continue.
----------------------------------------------------------------
`,
);
process.exit(0);
}
}
setIsAuthDialogOpen(false);
setAuthError(null);
},
[settings, setAuthError, config],
);
const cancelAuthentication = useCallback(() => {
setIsAuthenticating(false);
}, []);
return {
isAuthDialogOpen,
openAuthDialog,
handleAuthSelect,
isAuthenticating,
cancelAuthentication,
};
};

View File

@@ -0,0 +1,276 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import {
describe,
it,
expect,
vi,
beforeEach,
type MockedFunction,
type Mock,
} from 'vitest';
import { renderHook, act } from '@testing-library/react';
import { useAutoAcceptIndicator } from './useAutoAcceptIndicator.js';
import {
Config,
Config as ActualConfigType,
ApprovalMode,
} from '@qwen/qwen-code-core';
import { useInput, type Key as InkKey } from 'ink';
vi.mock('ink');
vi.mock('@qwen/qwen-code-core', async () => {
const actualServerModule = (await vi.importActual(
'@qwen/qwen-code-core',
)) as Record<string, unknown>;
return {
...actualServerModule,
Config: vi.fn(),
};
});
interface MockConfigInstanceShape {
getApprovalMode: Mock<() => ApprovalMode>;
setApprovalMode: Mock<(value: ApprovalMode) => void>;
getCoreTools: Mock<() => string[]>;
getToolDiscoveryCommand: Mock<() => string | undefined>;
getTargetDir: Mock<() => string>;
getApiKey: Mock<() => string>;
getModel: Mock<() => string>;
getSandbox: Mock<() => boolean | string>;
getDebugMode: Mock<() => boolean>;
getQuestion: Mock<() => string | undefined>;
getFullContext: Mock<() => boolean>;
getUserAgent: Mock<() => string>;
getUserMemory: Mock<() => string>;
getGeminiMdFileCount: Mock<() => number>;
getToolRegistry: Mock<() => { discoverTools: Mock<() => void> }>;
}
type UseInputKey = InkKey;
type UseInputHandler = (input: string, key: UseInputKey) => void;
describe('useAutoAcceptIndicator', () => {
let mockConfigInstance: MockConfigInstanceShape;
let capturedUseInputHandler: UseInputHandler;
let mockedInkUseInput: MockedFunction<typeof useInput>;
beforeEach(() => {
vi.resetAllMocks();
(
Config as unknown as MockedFunction<() => MockConfigInstanceShape>
).mockImplementation(() => {
const instanceGetApprovalModeMock = vi.fn();
const instanceSetApprovalModeMock = vi.fn();
const instance: MockConfigInstanceShape = {
getApprovalMode: instanceGetApprovalModeMock as Mock<
() => ApprovalMode
>,
setApprovalMode: instanceSetApprovalModeMock as Mock<
(value: ApprovalMode) => void
>,
getCoreTools: vi.fn().mockReturnValue([]) as Mock<() => string[]>,
getToolDiscoveryCommand: vi.fn().mockReturnValue(undefined) as Mock<
() => string | undefined
>,
getTargetDir: vi.fn().mockReturnValue('.') as Mock<() => string>,
getApiKey: vi.fn().mockReturnValue('test-api-key') as Mock<
() => string
>,
getModel: vi.fn().mockReturnValue('test-model') as Mock<() => string>,
getSandbox: vi.fn().mockReturnValue(false) as Mock<
() => boolean | string
>,
getDebugMode: vi.fn().mockReturnValue(false) as Mock<() => boolean>,
getQuestion: vi.fn().mockReturnValue(undefined) as Mock<
() => string | undefined
>,
getFullContext: vi.fn().mockReturnValue(false) as Mock<() => boolean>,
getUserAgent: vi.fn().mockReturnValue('test-user-agent') as Mock<
() => string
>,
getUserMemory: vi.fn().mockReturnValue('') as Mock<() => string>,
getGeminiMdFileCount: vi.fn().mockReturnValue(0) as Mock<() => number>,
getToolRegistry: vi
.fn()
.mockReturnValue({ discoverTools: vi.fn() }) as Mock<
() => { discoverTools: Mock<() => void> }
>,
};
instanceSetApprovalModeMock.mockImplementation((value: ApprovalMode) => {
instanceGetApprovalModeMock.mockReturnValue(value);
});
return instance;
});
mockedInkUseInput = useInput as MockedFunction<typeof useInput>;
mockedInkUseInput.mockImplementation((handler: UseInputHandler) => {
capturedUseInputHandler = handler;
});
// eslint-disable-next-line @typescript-eslint/no-explicit-any
mockConfigInstance = new (Config as any)() as MockConfigInstanceShape;
});
it('should initialize with ApprovalMode.AUTO_EDIT if config.getApprovalMode returns ApprovalMode.AUTO_EDIT', () => {
mockConfigInstance.getApprovalMode.mockReturnValue(ApprovalMode.AUTO_EDIT);
const { result } = renderHook(() =>
useAutoAcceptIndicator({
config: mockConfigInstance as unknown as ActualConfigType,
}),
);
expect(result.current).toBe(ApprovalMode.AUTO_EDIT);
expect(mockConfigInstance.getApprovalMode).toHaveBeenCalledTimes(1);
});
it('should initialize with ApprovalMode.DEFAULT if config.getApprovalMode returns ApprovalMode.DEFAULT', () => {
mockConfigInstance.getApprovalMode.mockReturnValue(ApprovalMode.DEFAULT);
const { result } = renderHook(() =>
useAutoAcceptIndicator({
config: mockConfigInstance as unknown as ActualConfigType,
}),
);
expect(result.current).toBe(ApprovalMode.DEFAULT);
expect(mockConfigInstance.getApprovalMode).toHaveBeenCalledTimes(1);
});
it('should initialize with ApprovalMode.YOLO if config.getApprovalMode returns ApprovalMode.YOLO', () => {
mockConfigInstance.getApprovalMode.mockReturnValue(ApprovalMode.YOLO);
const { result } = renderHook(() =>
useAutoAcceptIndicator({
config: mockConfigInstance as unknown as ActualConfigType,
}),
);
expect(result.current).toBe(ApprovalMode.YOLO);
expect(mockConfigInstance.getApprovalMode).toHaveBeenCalledTimes(1);
});
it('should toggle the indicator and update config when Shift+Tab or Ctrl+Y is pressed', () => {
mockConfigInstance.getApprovalMode.mockReturnValue(ApprovalMode.DEFAULT);
const { result } = renderHook(() =>
useAutoAcceptIndicator({
config: mockConfigInstance as unknown as ActualConfigType,
}),
);
expect(result.current).toBe(ApprovalMode.DEFAULT);
act(() => {
capturedUseInputHandler('', { tab: true, shift: true } as InkKey);
});
expect(mockConfigInstance.setApprovalMode).toHaveBeenCalledWith(
ApprovalMode.AUTO_EDIT,
);
expect(result.current).toBe(ApprovalMode.AUTO_EDIT);
act(() => {
capturedUseInputHandler('y', { ctrl: true } as InkKey);
});
expect(mockConfigInstance.setApprovalMode).toHaveBeenCalledWith(
ApprovalMode.YOLO,
);
expect(result.current).toBe(ApprovalMode.YOLO);
act(() => {
capturedUseInputHandler('y', { ctrl: true } as InkKey);
});
expect(mockConfigInstance.setApprovalMode).toHaveBeenCalledWith(
ApprovalMode.DEFAULT,
);
expect(result.current).toBe(ApprovalMode.DEFAULT);
act(() => {
capturedUseInputHandler('y', { ctrl: true } as InkKey);
});
expect(mockConfigInstance.setApprovalMode).toHaveBeenCalledWith(
ApprovalMode.YOLO,
);
expect(result.current).toBe(ApprovalMode.YOLO);
act(() => {
capturedUseInputHandler('', { tab: true, shift: true } as InkKey);
});
expect(mockConfigInstance.setApprovalMode).toHaveBeenCalledWith(
ApprovalMode.AUTO_EDIT,
);
expect(result.current).toBe(ApprovalMode.AUTO_EDIT);
act(() => {
capturedUseInputHandler('', { tab: true, shift: true } as InkKey);
});
expect(mockConfigInstance.setApprovalMode).toHaveBeenCalledWith(
ApprovalMode.DEFAULT,
);
expect(result.current).toBe(ApprovalMode.DEFAULT);
});
it('should not toggle if only one key or other key combinations are pressed', () => {
mockConfigInstance.getApprovalMode.mockReturnValue(ApprovalMode.DEFAULT);
renderHook(() =>
useAutoAcceptIndicator({
config: mockConfigInstance as unknown as ActualConfigType,
}),
);
act(() => {
capturedUseInputHandler('', { tab: true, shift: false } as InkKey);
});
expect(mockConfigInstance.setApprovalMode).not.toHaveBeenCalled();
act(() => {
capturedUseInputHandler('', { tab: false, shift: true } as InkKey);
});
expect(mockConfigInstance.setApprovalMode).not.toHaveBeenCalled();
act(() => {
capturedUseInputHandler('a', { tab: false, shift: false } as InkKey);
});
expect(mockConfigInstance.setApprovalMode).not.toHaveBeenCalled();
act(() => {
capturedUseInputHandler('y', { tab: true } as InkKey);
});
expect(mockConfigInstance.setApprovalMode).not.toHaveBeenCalled();
act(() => {
capturedUseInputHandler('a', { ctrl: true } as InkKey);
});
expect(mockConfigInstance.setApprovalMode).not.toHaveBeenCalled();
act(() => {
capturedUseInputHandler('y', { shift: true } as InkKey);
});
expect(mockConfigInstance.setApprovalMode).not.toHaveBeenCalled();
act(() => {
capturedUseInputHandler('a', { ctrl: true, shift: true } as InkKey);
});
expect(mockConfigInstance.setApprovalMode).not.toHaveBeenCalled();
});
it('should update indicator when config value changes externally (useEffect dependency)', () => {
mockConfigInstance.getApprovalMode.mockReturnValue(ApprovalMode.DEFAULT);
const { result, rerender } = renderHook(
(props: { config: ActualConfigType }) => useAutoAcceptIndicator(props),
{
initialProps: {
config: mockConfigInstance as unknown as ActualConfigType,
},
},
);
expect(result.current).toBe(ApprovalMode.DEFAULT);
mockConfigInstance.getApprovalMode.mockReturnValue(ApprovalMode.AUTO_EDIT);
rerender({ config: mockConfigInstance as unknown as ActualConfigType });
expect(result.current).toBe(ApprovalMode.AUTO_EDIT);
expect(mockConfigInstance.getApprovalMode).toHaveBeenCalledTimes(3);
});
});

View File

@@ -0,0 +1,49 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { useState, useEffect } from 'react';
import { useInput } from 'ink';
import { ApprovalMode, type Config } from '@qwen/qwen-code-core';
export interface UseAutoAcceptIndicatorArgs {
config: Config;
}
export function useAutoAcceptIndicator({
config,
}: UseAutoAcceptIndicatorArgs): ApprovalMode {
const currentConfigValue = config.getApprovalMode();
const [showAutoAcceptIndicator, setShowAutoAcceptIndicator] =
useState(currentConfigValue);
useEffect(() => {
setShowAutoAcceptIndicator(currentConfigValue);
}, [currentConfigValue]);
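// Ctrl+Y toggles YOLO (any other mode -> YOLO, YOLO -> DEFAULT);
// Shift+Tab toggles auto-edit (any other mode -> AUTO_EDIT, AUTO_EDIT -> DEFAULT).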
useInput((input, key) => {
let nextApprovalMode: ApprovalMode | undefined;
if (key.ctrl && input === 'y') {
nextApprovalMode =
config.getApprovalMode() === ApprovalMode.YOLO
? ApprovalMode.DEFAULT
: ApprovalMode.YOLO;
} else if (key.tab && key.shift) {
nextApprovalMode =
config.getApprovalMode() === ApprovalMode.AUTO_EDIT
? ApprovalMode.DEFAULT
: ApprovalMode.AUTO_EDIT;
}
if (nextApprovalMode) {
config.setApprovalMode(nextApprovalMode);
// Update local state immediately for responsiveness
setShowAutoAcceptIndicator(nextApprovalMode);
}
});
return showAutoAcceptIndicator;
}

View File

@@ -0,0 +1,37 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { useEffect } from 'react';
const ENABLE_BRACKETED_PASTE = '\x1b[?2004h';
const DISABLE_BRACKETED_PASTE = '\x1b[?2004l';
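// While mode ?2004 is enabled, terminals wrap pasted text in marker sequences
// (\x1b[200~ ... \x1b[201~), so a paste of "hello" arrives on stdin roughly as
// '\x1b[200~hello\x1b[201~' and input handlers can tell it apart from typed keystrokes.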
/**
* Enables and disables bracketed paste mode in the terminal.
*
* This hook ensures that bracketed paste mode is enabled when the component
* mounts and disabled when it unmounts or when the process exits.
*/
export const useBracketedPaste = () => {
const cleanup = () => {
process.stdout.write(DISABLE_BRACKETED_PASTE);
};
useEffect(() => {
process.stdout.write(ENABLE_BRACKETED_PASTE);
process.on('exit', cleanup);
process.on('SIGINT', cleanup);
process.on('SIGTERM', cleanup);
return () => {
cleanup();
process.removeListener('exit', cleanup);
process.removeListener('SIGINT', cleanup);
process.removeListener('SIGTERM', cleanup);
};
}, []);
};

View File

@@ -0,0 +1,755 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { describe, it, expect, beforeEach, vi, afterEach } from 'vitest';
import type { Mocked } from 'vitest';
import { renderHook, act } from '@testing-library/react';
import { useCompletion } from './useCompletion.js';
import * as fs from 'fs/promises';
import { glob } from 'glob';
import { CommandContext, SlashCommand } from '../commands/types.js';
import { Config, FileDiscoveryService } from '@qwen/qwen-code-core';
interface MockConfig {
getFileFilteringRespectGitIgnore: () => boolean;
getEnableRecursiveFileSearch: () => boolean;
getFileService: () => FileDiscoveryService | null;
}
// Mock dependencies
vi.mock('fs/promises');
vi.mock('@qwen/qwen-code-core', async () => {
const actual = await vi.importActual('@qwen/qwen-code-core');
return {
...actual,
FileDiscoveryService: vi.fn(),
isNodeError: vi.fn((error) => error.code === 'ENOENT'),
escapePath: vi.fn((path) => path),
unescapePath: vi.fn((path) => path),
getErrorMessage: vi.fn((error) => error.message),
};
});
vi.mock('glob');
describe('useCompletion git-aware filtering integration', () => {
let mockFileDiscoveryService: Mocked<FileDiscoveryService>;
let mockConfig: MockConfig;
const testCwd = '/test/project';
const slashCommands = [
{ name: 'help', description: 'Show help', action: vi.fn() },
{ name: 'clear', description: 'Clear screen', action: vi.fn() },
];
// A minimal mock is sufficient for these tests.
const mockCommandContext = {} as CommandContext;
const mockSlashCommands: SlashCommand[] = [
{
name: 'help',
altName: '?',
description: 'Show help',
action: vi.fn(),
},
{
name: 'clear',
description: 'Clear the screen',
action: vi.fn(),
},
{
name: 'memory',
description: 'Manage memory',
// This command is a parent; it has no action of its own.
subCommands: [
{
name: 'show',
description: 'Show memory',
action: vi.fn(),
},
{
name: 'add',
description: 'Add to memory',
action: vi.fn(),
},
],
},
{
name: 'chat',
description: 'Manage chat history',
subCommands: [
{
name: 'save',
description: 'Save chat',
action: vi.fn(),
},
{
name: 'resume',
description: 'Resume a saved chat',
action: vi.fn(),
// This command provides its own argument completions
completion: vi
.fn()
.mockResolvedValue([
'my-chat-tag-1',
'my-chat-tag-2',
'my-channel',
]),
},
],
},
];
beforeEach(() => {
mockFileDiscoveryService = {
shouldGitIgnoreFile: vi.fn(),
shouldGeminiIgnoreFile: vi.fn(),
shouldIgnoreFile: vi.fn(),
filterFiles: vi.fn(),
getGeminiIgnorePatterns: vi.fn(),
projectRoot: '',
gitIgnoreFilter: null,
geminiIgnoreFilter: null,
} as unknown as Mocked<FileDiscoveryService>;
mockConfig = {
getFileFilteringRespectGitIgnore: vi.fn(() => true),
getFileService: vi.fn().mockReturnValue(mockFileDiscoveryService),
getEnableRecursiveFileSearch: vi.fn(() => true),
};
vi.mocked(FileDiscoveryService).mockImplementation(
() => mockFileDiscoveryService,
);
vi.clearAllMocks();
});
afterEach(() => {
vi.restoreAllMocks();
});
it('should filter git-ignored entries from @ completions', async () => {
const globResults = [`${testCwd}/data`, `${testCwd}/dist`];
vi.mocked(glob).mockResolvedValue(globResults);
// Mock git ignore service to ignore certain files
mockFileDiscoveryService.shouldGitIgnoreFile.mockImplementation(
(path: string) => path.includes('dist'),
);
mockFileDiscoveryService.shouldIgnoreFile.mockImplementation(
(path: string, options) => {
if (options?.respectGitIgnore !== false) {
return mockFileDiscoveryService.shouldGitIgnoreFile(path);
}
return false;
},
);
const { result } = renderHook(() =>
useCompletion(
'@d',
testCwd,
true,
slashCommands,
mockCommandContext,
mockConfig as Config,
),
);
// Wait for async operations to complete
await act(async () => {
await new Promise((resolve) => setTimeout(resolve, 150)); // Account for debounce
});
expect(result.current.suggestions).toHaveLength(1);
expect(result.current.suggestions).toEqual(
expect.arrayContaining([{ label: 'data', value: 'data' }]),
);
expect(result.current.showSuggestions).toBe(true);
});
it('should filter git-ignored directories from @ completions', async () => {
// Mock fs.readdir to return both regular and git-ignored directories
vi.mocked(fs.readdir).mockResolvedValue([
{ name: 'src', isDirectory: () => true },
{ name: 'node_modules', isDirectory: () => true },
{ name: 'dist', isDirectory: () => true },
{ name: 'README.md', isDirectory: () => false },
{ name: '.env', isDirectory: () => false },
] as unknown as Awaited<ReturnType<typeof fs.readdir>>);
// Mock git ignore service to ignore certain files
mockFileDiscoveryService.shouldGitIgnoreFile.mockImplementation(
(path: string) =>
path.includes('node_modules') ||
path.includes('dist') ||
path.includes('.env'),
);
mockFileDiscoveryService.shouldIgnoreFile.mockImplementation(
(path: string, options) => {
if (options?.respectGitIgnore !== false) {
return mockFileDiscoveryService.shouldGitIgnoreFile(path);
}
return false;
},
);
const { result } = renderHook(() =>
useCompletion(
'@',
testCwd,
true,
slashCommands,
mockCommandContext,
mockConfig as Config,
),
);
// Wait for async operations to complete
await act(async () => {
await new Promise((resolve) => setTimeout(resolve, 150)); // Account for debounce
});
expect(result.current.suggestions).toHaveLength(2);
expect(result.current.suggestions).toEqual(
expect.arrayContaining([
{ label: 'src/', value: 'src/' },
{ label: 'README.md', value: 'README.md' },
]),
);
expect(result.current.showSuggestions).toBe(true);
});
it('should handle recursive search with git-aware filtering', async () => {
// Mock the recursive file search scenario
vi.mocked(fs.readdir).mockImplementation(
async (dirPath: string | Buffer | URL) => {
if (dirPath === testCwd) {
return [
{ name: 'src', isDirectory: () => true },
{ name: 'node_modules', isDirectory: () => true },
{ name: 'temp', isDirectory: () => true },
] as Array<{ name: string; isDirectory: () => boolean }>;
}
if (dirPath.endsWith('/src')) {
return [
{ name: 'index.ts', isDirectory: () => false },
{ name: 'components', isDirectory: () => true },
] as Array<{ name: string; isDirectory: () => boolean }>;
}
if (dirPath.endsWith('/temp')) {
return [{ name: 'temp.log', isDirectory: () => false }] as Array<{
name: string;
isDirectory: () => boolean;
}>;
}
return [] as Array<{ name: string; isDirectory: () => boolean }>;
},
);
// Mock git ignore service
mockFileDiscoveryService.shouldGitIgnoreFile.mockImplementation(
(path: string) => path.includes('node_modules') || path.includes('temp'),
);
mockFileDiscoveryService.shouldIgnoreFile.mockImplementation(
(path: string, options) => {
if (options?.respectGitIgnore !== false) {
return mockFileDiscoveryService.shouldGitIgnoreFile(path);
}
return false;
},
);
const { result } = renderHook(() =>
useCompletion(
'@t',
testCwd,
true,
slashCommands,
mockCommandContext,
mockConfig as Config,
),
);
// Wait for async operations to complete
await act(async () => {
await new Promise((resolve) => setTimeout(resolve, 150));
});
// Should not include anything from node_modules or temp
const suggestionLabels = result.current.suggestions.map((s) => s.label);
expect(suggestionLabels).not.toContain('temp/');
expect(suggestionLabels.some((l) => l.includes('node_modules'))).toBe(
false,
);
});
it('should not perform recursive search when disabled in config', async () => {
const globResults = [`${testCwd}/data`, `${testCwd}/dist`];
vi.mocked(glob).mockResolvedValue(globResults);
// Disable recursive search in the mock config
const mockConfigNoRecursive = {
...mockConfig,
getEnableRecursiveFileSearch: vi.fn(() => false),
} as unknown as Config;
vi.mocked(fs.readdir).mockResolvedValue([
{ name: 'data', isDirectory: () => true },
{ name: 'dist', isDirectory: () => true },
] as unknown as Awaited<ReturnType<typeof fs.readdir>>);
renderHook(() =>
useCompletion(
'@d',
testCwd,
true,
slashCommands,
mockCommandContext,
mockConfigNoRecursive,
),
);
await act(async () => {
await new Promise((resolve) => setTimeout(resolve, 150));
});
// `glob` should not be called because recursive search is disabled
expect(glob).not.toHaveBeenCalled();
// `fs.readdir` should be called for the top-level directory instead
expect(fs.readdir).toHaveBeenCalledWith(testCwd, { withFileTypes: true });
});
it('should work without config (fallback behavior)', async () => {
vi.mocked(fs.readdir).mockResolvedValue([
{ name: 'src', isDirectory: () => true },
{ name: 'node_modules', isDirectory: () => true },
{ name: 'README.md', isDirectory: () => false },
] as unknown as Awaited<ReturnType<typeof fs.readdir>>);
const { result } = renderHook(() =>
useCompletion(
'@',
testCwd,
true,
slashCommands,
mockCommandContext,
undefined,
),
);
await act(async () => {
await new Promise((resolve) => setTimeout(resolve, 150));
});
// Without config, should include all files
expect(result.current.suggestions).toHaveLength(3);
expect(result.current.suggestions).toEqual(
expect.arrayContaining([
{ label: 'src/', value: 'src/' },
{ label: 'node_modules/', value: 'node_modules/' },
{ label: 'README.md', value: 'README.md' },
]),
);
});
it('should handle git discovery service initialization failure gracefully', async () => {
vi.mocked(fs.readdir).mockResolvedValue([
{ name: 'src', isDirectory: () => true },
{ name: 'README.md', isDirectory: () => false },
] as unknown as Awaited<ReturnType<typeof fs.readdir>>);
const consoleSpy = vi.spyOn(console, 'warn').mockImplementation(() => {});
const { result } = renderHook(() =>
useCompletion(
'@',
testCwd,
true,
slashCommands,
mockCommandContext,
mockConfig as Config,
),
);
await act(async () => {
await new Promise((resolve) => setTimeout(resolve, 150));
});
// Initialization errors are handled at the config level by the centralized
// FileDiscoveryService, so completions should still be produced as a graceful fallback.
expect(result.current.suggestions.length).toBeGreaterThanOrEqual(0);
// Should still show completions even if git discovery fails
expect(result.current.suggestions.length).toBeGreaterThan(0);
consoleSpy.mockRestore();
});
it('should handle directory-specific completions with git filtering', async () => {
vi.mocked(fs.readdir).mockResolvedValue([
{ name: 'component.tsx', isDirectory: () => false },
{ name: 'temp.log', isDirectory: () => false },
{ name: 'index.ts', isDirectory: () => false },
] as unknown as Awaited<ReturnType<typeof fs.readdir>>);
mockFileDiscoveryService.shouldGitIgnoreFile.mockImplementation(
(path: string) => path.includes('.log'),
);
mockFileDiscoveryService.shouldIgnoreFile.mockImplementation(
(path: string, options) => {
if (options?.respectGitIgnore !== false) {
return mockFileDiscoveryService.shouldGitIgnoreFile(path);
}
return false;
},
);
const { result } = renderHook(() =>
useCompletion(
'@src/comp',
testCwd,
true,
slashCommands,
mockCommandContext,
mockConfig as Config,
),
);
await act(async () => {
await new Promise((resolve) => setTimeout(resolve, 150));
});
// Should filter out .log files but include matching .tsx files
expect(result.current.suggestions).toEqual([
{ label: 'component.tsx', value: 'component.tsx' },
]);
});
it('should use glob for top-level @ completions when available', async () => {
const globResults = [`${testCwd}/src/index.ts`, `${testCwd}/README.md`];
vi.mocked(glob).mockResolvedValue(globResults);
const { result } = renderHook(() =>
useCompletion(
'@s',
testCwd,
true,
slashCommands,
mockCommandContext,
mockConfig as Config,
),
);
await act(async () => {
await new Promise((resolve) => setTimeout(resolve, 150));
});
expect(glob).toHaveBeenCalledWith('**/s*', {
cwd: testCwd,
dot: false,
nocase: true,
});
expect(fs.readdir).not.toHaveBeenCalled(); // Ensure glob is used instead of readdir
expect(result.current.suggestions).toEqual([
{ label: 'README.md', value: 'README.md' },
{ label: 'src/index.ts', value: 'src/index.ts' },
]);
});
it('should include dotfiles in glob search when input starts with a dot', async () => {
const globResults = [
`${testCwd}/.env`,
`${testCwd}/.gitignore`,
`${testCwd}/src/index.ts`,
];
vi.mocked(glob).mockResolvedValue(globResults);
const { result } = renderHook(() =>
useCompletion(
'@.',
testCwd,
true,
slashCommands,
mockCommandContext,
mockConfig as Config,
),
);
await act(async () => {
await new Promise((resolve) => setTimeout(resolve, 150));
});
expect(glob).toHaveBeenCalledWith('**/.*', {
cwd: testCwd,
dot: true,
nocase: true,
});
expect(fs.readdir).not.toHaveBeenCalled();
expect(result.current.suggestions).toEqual([
{ label: '.env', value: '.env' },
{ label: '.gitignore', value: '.gitignore' },
{ label: 'src/index.ts', value: 'src/index.ts' },
]);
});
it('should suggest top-level command names based on partial input', async () => {
const { result } = renderHook(() =>
useCompletion(
'/mem',
'/test/cwd',
true,
mockSlashCommands,
mockCommandContext,
),
);
expect(result.current.suggestions).toEqual([
{ label: 'memory', value: 'memory', description: 'Manage memory' },
]);
expect(result.current.showSuggestions).toBe(true);
});
it('should suggest commands based on altName', async () => {
const { result } = renderHook(() =>
useCompletion(
'/?',
'/test/cwd',
true,
mockSlashCommands,
mockCommandContext,
),
);
expect(result.current.suggestions).toEqual([
{ label: 'help', value: 'help', description: 'Show help' },
]);
});
it('should suggest sub-command names for a parent command', async () => {
const { result } = renderHook(() =>
useCompletion(
'/memory a',
'/test/cwd',
true,
mockSlashCommands,
mockCommandContext,
),
);
expect(result.current.suggestions).toEqual([
{ label: 'add', value: 'add', description: 'Add to memory' },
]);
});
it('should suggest all sub-commands when the query ends with the parent command and a space', async () => {
const { result } = renderHook(() =>
useCompletion(
'/memory ',
'/test/cwd',
true,
mockSlashCommands,
mockCommandContext,
),
);
expect(result.current.suggestions).toHaveLength(2);
expect(result.current.suggestions).toEqual(
expect.arrayContaining([
{ label: 'show', value: 'show', description: 'Show memory' },
{ label: 'add', value: 'add', description: 'Add to memory' },
]),
);
});
it('should call the command.completion function for argument suggestions', async () => {
const availableTags = ['my-chat-tag-1', 'my-chat-tag-2', 'another-channel'];
const mockCompletionFn = vi
.fn()
.mockImplementation(async (context: CommandContext, partialArg: string) =>
availableTags.filter((tag) => tag.startsWith(partialArg)),
);
const mockCommandsWithFiltering = JSON.parse(
JSON.stringify(mockSlashCommands),
) as SlashCommand[];
const chatCmd = mockCommandsWithFiltering.find(
(cmd) => cmd.name === 'chat',
);
if (!chatCmd || !chatCmd.subCommands) {
throw new Error(
"Test setup error: Could not find the 'chat' command with subCommands in the mock data.",
);
}
const resumeCmd = chatCmd.subCommands.find((sc) => sc.name === 'resume');
if (!resumeCmd) {
throw new Error(
"Test setup error: Could not find the 'resume' sub-command in the mock data.",
);
}
resumeCmd.completion = mockCompletionFn;
const { result } = renderHook(() =>
useCompletion(
'/chat resume my-ch',
'/test/cwd',
true,
mockCommandsWithFiltering,
mockCommandContext,
),
);
await act(async () => {
await new Promise((resolve) => setTimeout(resolve, 150));
});
expect(mockCompletionFn).toHaveBeenCalledWith(mockCommandContext, 'my-ch');
expect(result.current.suggestions).toEqual([
{ label: 'my-chat-tag-1', value: 'my-chat-tag-1' },
{ label: 'my-chat-tag-2', value: 'my-chat-tag-2' },
]);
});
it('should not provide suggestions for a fully typed command that has no sub-commands or argument completion', async () => {
const { result } = renderHook(() =>
useCompletion(
'/clear ',
'/test/cwd',
true,
mockSlashCommands,
mockCommandContext,
),
);
expect(result.current.suggestions).toHaveLength(0);
expect(result.current.showSuggestions).toBe(false);
});
it('should not provide suggestions for an unknown command', async () => {
const { result } = renderHook(() =>
useCompletion(
'/unknown-command',
'/test/cwd',
true,
mockSlashCommands,
mockCommandContext,
),
);
expect(result.current.suggestions).toHaveLength(0);
expect(result.current.showSuggestions).toBe(false);
});
it('should suggest sub-commands for a fully typed parent command without a trailing space', async () => {
const { result } = renderHook(() =>
useCompletion(
'/memory', // Note: no trailing space
'/test/cwd',
true,
mockSlashCommands,
mockCommandContext,
),
);
// Assert that suggestions for sub-commands are shown immediately
expect(result.current.suggestions).toHaveLength(2);
expect(result.current.suggestions).toEqual(
expect.arrayContaining([
{ label: 'show', value: 'show', description: 'Show memory' },
{ label: 'add', value: 'add', description: 'Add to memory' },
]),
);
expect(result.current.showSuggestions).toBe(true);
});
it('should NOT provide suggestions for a perfectly typed command that is a leaf node', async () => {
const { result } = renderHook(() =>
useCompletion(
'/clear', // No trailing space
'/test/cwd',
true,
mockSlashCommands,
mockCommandContext,
),
);
expect(result.current.suggestions).toHaveLength(0);
expect(result.current.showSuggestions).toBe(false);
});
it('should call command.completion with an empty string when args start with a space', async () => {
const mockCompletionFn = vi
.fn()
.mockResolvedValue(['my-chat-tag-1', 'my-chat-tag-2', 'my-channel']);
const isolatedMockCommands = JSON.parse(
JSON.stringify(mockSlashCommands),
) as SlashCommand[];
const resumeCommand = isolatedMockCommands
.find((cmd) => cmd.name === 'chat')
?.subCommands?.find((cmd) => cmd.name === 'resume');
if (!resumeCommand) {
throw new Error(
'Test setup failed: could not find resume command in mock',
);
}
resumeCommand.completion = mockCompletionFn;
const { result } = renderHook(() =>
useCompletion(
'/chat resume ', // Trailing space, no partial argument
'/test/cwd',
true,
isolatedMockCommands,
mockCommandContext,
),
);
await act(async () => {
await new Promise((resolve) => setTimeout(resolve, 150));
});
expect(mockCompletionFn).toHaveBeenCalledWith(mockCommandContext, '');
expect(result.current.suggestions).toHaveLength(3);
expect(result.current.showSuggestions).toBe(true);
});
it('should suggest all top-level commands for the root slash', async () => {
const { result } = renderHook(() =>
useCompletion(
'/',
'/test/cwd',
true,
mockSlashCommands,
mockCommandContext,
),
);
expect(result.current.suggestions.length).toBe(mockSlashCommands.length);
expect(result.current.suggestions.map((s) => s.label)).toEqual(
expect.arrayContaining(['help', 'clear', 'memory', 'chat']),
);
});
it('should provide no suggestions for an invalid sub-command', async () => {
const { result } = renderHook(() =>
useCompletion(
'/memory dothisnow',
'/test/cwd',
true,
mockSlashCommands,
mockCommandContext,
),
);
expect(result.current.suggestions).toHaveLength(0);
expect(result.current.showSuggestions).toBe(false);
});
});

View File

@@ -0,0 +1,944 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { describe, it, expect, beforeEach, vi, afterEach } from 'vitest';
import type { Mocked } from 'vitest';
import { renderHook, act } from '@testing-library/react';
import { useCompletion } from './useCompletion.js';
import * as fs from 'fs/promises';
import { glob } from 'glob';
import { CommandContext, SlashCommand } from '../commands/types.js';
import { Config, FileDiscoveryService } from '@qwen/qwen-code-core';
// Mock dependencies
vi.mock('fs/promises');
vi.mock('glob');
vi.mock('@qwen/qwen-code-core', async () => {
const actual = await vi.importActual('@qwen/qwen-code-core');
return {
...actual,
FileDiscoveryService: vi.fn(),
isNodeError: vi.fn((error) => error.code === 'ENOENT'),
escapePath: vi.fn((path) => path),
unescapePath: vi.fn((path) => path),
getErrorMessage: vi.fn((error) => error.message),
};
});
describe('useCompletion', () => {
let mockFileDiscoveryService: Mocked<FileDiscoveryService>;
let mockConfig: Mocked<Config>;
let mockCommandContext: CommandContext;
let mockSlashCommands: SlashCommand[];
const testCwd = '/test/project';
beforeEach(() => {
mockFileDiscoveryService = {
shouldGitIgnoreFile: vi.fn(),
shouldGeminiIgnoreFile: vi.fn(),
shouldIgnoreFile: vi.fn(),
filterFiles: vi.fn(),
getGeminiIgnorePatterns: vi.fn(),
projectRoot: '',
gitIgnoreFilter: null,
geminiIgnoreFilter: null,
} as unknown as Mocked<FileDiscoveryService>;
mockConfig = {
getFileFilteringRespectGitIgnore: vi.fn(() => true),
getFileService: vi.fn().mockReturnValue(mockFileDiscoveryService),
getEnableRecursiveFileSearch: vi.fn(() => true),
} as unknown as Mocked<Config>;
mockCommandContext = {} as CommandContext;
mockSlashCommands = [
{
name: 'help',
altName: '?',
description: 'Show help',
action: vi.fn(),
},
{
name: 'clear',
description: 'Clear the screen',
action: vi.fn(),
},
{
name: 'memory',
description: 'Manage memory',
subCommands: [
{
name: 'show',
description: 'Show memory',
action: vi.fn(),
},
{
name: 'add',
description: 'Add to memory',
action: vi.fn(),
},
],
},
{
name: 'chat',
description: 'Manage chat history',
subCommands: [
{
name: 'save',
description: 'Save chat',
action: vi.fn(),
},
{
name: 'resume',
description: 'Resume a saved chat',
action: vi.fn(),
completion: vi.fn().mockResolvedValue(['chat1', 'chat2']),
},
],
},
];
vi.clearAllMocks();
});
afterEach(() => {
vi.restoreAllMocks();
});
describe('Hook initialization and state', () => {
it('should initialize with default state', () => {
const { result } = renderHook(() =>
useCompletion(
'',
testCwd,
false,
mockSlashCommands,
mockCommandContext,
mockConfig,
),
);
expect(result.current.suggestions).toEqual([]);
expect(result.current.activeSuggestionIndex).toBe(-1);
expect(result.current.visibleStartIndex).toBe(0);
expect(result.current.showSuggestions).toBe(false);
expect(result.current.isLoadingSuggestions).toBe(false);
});
it('should reset state when isActive becomes false', () => {
const { result, rerender } = renderHook(
({ isActive }) =>
useCompletion(
'/help',
testCwd,
isActive,
mockSlashCommands,
mockCommandContext,
mockConfig,
),
{ initialProps: { isActive: true } },
);
rerender({ isActive: false });
expect(result.current.suggestions).toEqual([]);
expect(result.current.activeSuggestionIndex).toBe(-1);
expect(result.current.visibleStartIndex).toBe(0);
expect(result.current.showSuggestions).toBe(false);
expect(result.current.isLoadingSuggestions).toBe(false);
});
it('should provide required functions', () => {
const { result } = renderHook(() =>
useCompletion(
'',
testCwd,
true,
mockSlashCommands,
mockCommandContext,
mockConfig,
),
);
expect(typeof result.current.setActiveSuggestionIndex).toBe('function');
expect(typeof result.current.setShowSuggestions).toBe('function');
expect(typeof result.current.resetCompletionState).toBe('function');
expect(typeof result.current.navigateUp).toBe('function');
expect(typeof result.current.navigateDown).toBe('function');
});
});
describe('resetCompletionState', () => {
it('should reset all state to default values', () => {
const { result } = renderHook(() =>
useCompletion(
'/help',
testCwd,
true,
mockSlashCommands,
mockCommandContext,
mockConfig,
),
);
act(() => {
result.current.setActiveSuggestionIndex(5);
result.current.setShowSuggestions(true);
});
act(() => {
result.current.resetCompletionState();
});
expect(result.current.suggestions).toEqual([]);
expect(result.current.activeSuggestionIndex).toBe(-1);
expect(result.current.visibleStartIndex).toBe(0);
expect(result.current.showSuggestions).toBe(false);
expect(result.current.isLoadingSuggestions).toBe(false);
});
});
describe('Navigation functions', () => {
it('should handle navigateUp with no suggestions', () => {
const { result } = renderHook(() =>
useCompletion(
'',
testCwd,
true,
mockSlashCommands,
mockCommandContext,
mockConfig,
),
);
act(() => {
result.current.navigateUp();
});
expect(result.current.activeSuggestionIndex).toBe(-1);
});
it('should handle navigateDown with no suggestions', () => {
const { result } = renderHook(() =>
useCompletion(
'',
testCwd,
true,
mockSlashCommands,
mockCommandContext,
mockConfig,
),
);
act(() => {
result.current.navigateDown();
});
expect(result.current.activeSuggestionIndex).toBe(-1);
});
it('should navigate up through suggestions with wrap-around', () => {
const { result } = renderHook(() =>
useCompletion(
'/h',
testCwd,
true,
mockSlashCommands,
mockCommandContext,
mockConfig,
),
);
expect(result.current.suggestions.length).toBe(1);
expect(result.current.activeSuggestionIndex).toBe(0);
act(() => {
result.current.navigateUp();
});
expect(result.current.activeSuggestionIndex).toBe(0);
});
it('should navigate down through suggestions with wrap-around', () => {
const { result } = renderHook(() =>
useCompletion(
'/h',
testCwd,
true,
mockSlashCommands,
mockCommandContext,
mockConfig,
),
);
expect(result.current.suggestions.length).toBe(1);
expect(result.current.activeSuggestionIndex).toBe(0);
act(() => {
result.current.navigateDown();
});
expect(result.current.activeSuggestionIndex).toBe(0);
});
it('should handle navigation with multiple suggestions', () => {
const { result } = renderHook(() =>
useCompletion(
'/',
testCwd,
true,
mockSlashCommands,
mockCommandContext,
mockConfig,
),
);
expect(result.current.suggestions.length).toBe(4);
expect(result.current.activeSuggestionIndex).toBe(0);
act(() => {
result.current.navigateDown();
});
expect(result.current.activeSuggestionIndex).toBe(1);
act(() => {
result.current.navigateDown();
});
expect(result.current.activeSuggestionIndex).toBe(2);
act(() => {
result.current.navigateUp();
});
expect(result.current.activeSuggestionIndex).toBe(1);
act(() => {
result.current.navigateUp();
});
expect(result.current.activeSuggestionIndex).toBe(0);
act(() => {
result.current.navigateUp();
});
expect(result.current.activeSuggestionIndex).toBe(3);
});
it('should handle navigation with large suggestion lists and scrolling', () => {
const largeMockCommands = Array.from({ length: 15 }, (_, i) => ({
name: `command${i}`,
description: `Command ${i}`,
action: vi.fn(),
}));
const { result } = renderHook(() =>
useCompletion(
'/command',
testCwd,
true,
largeMockCommands,
mockCommandContext,
mockConfig,
),
);
expect(result.current.suggestions.length).toBe(15);
expect(result.current.activeSuggestionIndex).toBe(0);
expect(result.current.visibleStartIndex).toBe(0);
act(() => {
result.current.navigateUp();
});
expect(result.current.activeSuggestionIndex).toBe(14);
expect(result.current.visibleStartIndex).toBe(Math.max(0, 15 - 8));
});
});
describe('Slash command completion', () => {
it('should show all commands for root slash', () => {
const { result } = renderHook(() =>
useCompletion(
'/',
testCwd,
true,
mockSlashCommands,
mockCommandContext,
mockConfig,
),
);
expect(result.current.suggestions).toHaveLength(4);
expect(result.current.suggestions.map((s) => s.label)).toEqual(
expect.arrayContaining(['help', 'clear', 'memory', 'chat']),
);
expect(result.current.showSuggestions).toBe(true);
expect(result.current.activeSuggestionIndex).toBe(0);
});
it('should filter commands by prefix', () => {
const { result } = renderHook(() =>
useCompletion(
'/h',
testCwd,
true,
mockSlashCommands,
mockCommandContext,
mockConfig,
),
);
expect(result.current.suggestions).toHaveLength(1);
expect(result.current.suggestions[0].label).toBe('help');
expect(result.current.suggestions[0].description).toBe('Show help');
});
it('should suggest commands by altName', () => {
const { result } = renderHook(() =>
useCompletion(
'/?',
testCwd,
true,
mockSlashCommands,
mockCommandContext,
mockConfig,
),
);
expect(result.current.suggestions).toHaveLength(1);
expect(result.current.suggestions[0].label).toBe('help');
});
it('should not show suggestions for exact leaf command match', () => {
const { result } = renderHook(() =>
useCompletion(
'/clear',
testCwd,
true,
mockSlashCommands,
mockCommandContext,
mockConfig,
),
);
expect(result.current.suggestions).toHaveLength(0);
expect(result.current.showSuggestions).toBe(false);
});
it('should show sub-commands for parent commands', () => {
const { result } = renderHook(() =>
useCompletion(
'/memory',
testCwd,
true,
mockSlashCommands,
mockCommandContext,
mockConfig,
),
);
expect(result.current.suggestions).toHaveLength(2);
expect(result.current.suggestions.map((s) => s.label)).toEqual(
expect.arrayContaining(['show', 'add']),
);
});
it('should show all sub-commands after parent command with space', () => {
const { result } = renderHook(() =>
useCompletion(
'/memory ',
testCwd,
true,
mockSlashCommands,
mockCommandContext,
mockConfig,
),
);
expect(result.current.suggestions).toHaveLength(2);
expect(result.current.suggestions.map((s) => s.label)).toEqual(
expect.arrayContaining(['show', 'add']),
);
});
it('should filter sub-commands by prefix', () => {
const { result } = renderHook(() =>
useCompletion(
'/memory a',
testCwd,
true,
mockSlashCommands,
mockCommandContext,
mockConfig,
),
);
expect(result.current.suggestions).toHaveLength(1);
expect(result.current.suggestions[0].label).toBe('add');
});
it('should handle unknown command gracefully', () => {
const { result } = renderHook(() =>
useCompletion(
'/unknown',
testCwd,
true,
mockSlashCommands,
mockCommandContext,
mockConfig,
),
);
expect(result.current.suggestions).toHaveLength(0);
expect(result.current.showSuggestions).toBe(false);
});
});
describe('Command argument completion', () => {
it('should call completion function for command arguments', async () => {
const completionFn = vi.fn().mockResolvedValue(['arg1', 'arg2']);
const commandsWithCompletion = [...mockSlashCommands];
const chatCommand = commandsWithCompletion.find(
(cmd) => cmd.name === 'chat',
);
const resumeCommand = chatCommand?.subCommands?.find(
(cmd) => cmd.name === 'resume',
);
if (resumeCommand) {
resumeCommand.completion = completionFn;
}
const { result } = renderHook(() =>
useCompletion(
'/chat resume ',
testCwd,
true,
commandsWithCompletion,
mockCommandContext,
mockConfig,
),
);
await act(async () => {
await new Promise((resolve) => setTimeout(resolve, 150));
});
expect(completionFn).toHaveBeenCalledWith(mockCommandContext, '');
expect(result.current.suggestions).toHaveLength(2);
expect(result.current.suggestions.map((s) => s.label)).toEqual([
'arg1',
'arg2',
]);
});
it('should call completion function with partial argument', async () => {
const completionFn = vi.fn().mockResolvedValue(['arg1', 'arg2']);
const commandsWithCompletion = [...mockSlashCommands];
const chatCommand = commandsWithCompletion.find(
(cmd) => cmd.name === 'chat',
);
const resumeCommand = chatCommand?.subCommands?.find(
(cmd) => cmd.name === 'resume',
);
if (resumeCommand) {
resumeCommand.completion = completionFn;
}
renderHook(() =>
useCompletion(
'/chat resume ar',
testCwd,
true,
commandsWithCompletion,
mockCommandContext,
mockConfig,
),
);
await act(async () => {
await new Promise((resolve) => setTimeout(resolve, 150));
});
expect(completionFn).toHaveBeenCalledWith(mockCommandContext, 'ar');
});
it('should handle completion function that returns null', async () => {
const completionFn = vi.fn().mockResolvedValue(null);
const commandsWithCompletion = [...mockSlashCommands];
const chatCommand = commandsWithCompletion.find(
(cmd) => cmd.name === 'chat',
);
const resumeCommand = chatCommand?.subCommands?.find(
(cmd) => cmd.name === 'resume',
);
if (resumeCommand) {
resumeCommand.completion = completionFn;
}
const { result } = renderHook(() =>
useCompletion(
'/chat resume ',
testCwd,
true,
commandsWithCompletion,
mockCommandContext,
mockConfig,
),
);
await act(async () => {
await new Promise((resolve) => setTimeout(resolve, 150));
});
expect(result.current.suggestions).toHaveLength(0);
expect(result.current.showSuggestions).toBe(false);
});
});
describe('File path completion (@-syntax)', () => {
beforeEach(() => {
vi.mocked(fs.readdir).mockResolvedValue([
{ name: 'file1.txt', isDirectory: () => false },
{ name: 'file2.js', isDirectory: () => false },
{ name: 'folder1', isDirectory: () => true },
{ name: '.hidden', isDirectory: () => false },
] as unknown as Awaited<ReturnType<typeof fs.readdir>>);
});
it('should show file completions for @ prefix', async () => {
const { result } = renderHook(() =>
useCompletion(
'@',
testCwd,
true,
mockSlashCommands,
mockCommandContext,
mockConfig,
),
);
await act(async () => {
await new Promise((resolve) => setTimeout(resolve, 150));
});
expect(result.current.suggestions).toHaveLength(3);
expect(result.current.suggestions.map((s) => s.label)).toEqual(
expect.arrayContaining(['file1.txt', 'file2.js', 'folder1/']),
);
});
it('should filter files by prefix', async () => {
// Mock for recursive search since enableRecursiveFileSearch is true
vi.mocked(glob).mockResolvedValue([
`${testCwd}/file1.txt`,
`${testCwd}/file2.js`,
]);
const { result } = renderHook(() =>
useCompletion(
'@file',
testCwd,
true,
mockSlashCommands,
mockCommandContext,
mockConfig,
),
);
await act(async () => {
await new Promise((resolve) => setTimeout(resolve, 150));
});
expect(result.current.suggestions).toHaveLength(2);
expect(result.current.suggestions.map((s) => s.label)).toEqual(
expect.arrayContaining(['file1.txt', 'file2.js']),
);
});
it('should include hidden files when prefix starts with dot', async () => {
// Mock for recursive search since enableRecursiveFileSearch is true
vi.mocked(glob).mockResolvedValue([`${testCwd}/.hidden`]);
const { result } = renderHook(() =>
useCompletion(
'@.',
testCwd,
true,
mockSlashCommands,
mockCommandContext,
mockConfig,
),
);
await act(async () => {
await new Promise((resolve) => setTimeout(resolve, 150));
});
expect(result.current.suggestions).toHaveLength(1);
expect(result.current.suggestions[0].label).toBe('.hidden');
});
it('should handle ENOENT error gracefully', async () => {
const enoentError = new Error('No such file or directory');
(enoentError as Error & { code: string }).code = 'ENOENT';
vi.mocked(fs.readdir).mockRejectedValue(enoentError);
const { result } = renderHook(() =>
useCompletion(
'@nonexistent',
testCwd,
true,
mockSlashCommands,
mockCommandContext,
mockConfig,
),
);
await act(async () => {
await new Promise((resolve) => setTimeout(resolve, 150));
});
expect(result.current.suggestions).toHaveLength(0);
expect(result.current.showSuggestions).toBe(false);
});
it('should handle other errors by resetting state', async () => {
const consoleErrorSpy = vi
.spyOn(console, 'error')
.mockImplementation(() => {});
vi.mocked(fs.readdir).mockRejectedValue(new Error('Permission denied'));
const { result } = renderHook(() =>
useCompletion(
'@',
testCwd,
true,
mockSlashCommands,
mockCommandContext,
mockConfig,
),
);
await act(async () => {
await new Promise((resolve) => setTimeout(resolve, 150));
});
expect(consoleErrorSpy).toHaveBeenCalled();
expect(result.current.suggestions).toHaveLength(0);
expect(result.current.showSuggestions).toBe(false);
expect(result.current.isLoadingSuggestions).toBe(false);
consoleErrorSpy.mockRestore();
});
});
describe('Debouncing', () => {
it('should debounce file completion requests', async () => {
// Mock for recursive search since enableRecursiveFileSearch is true
vi.mocked(glob).mockResolvedValue([`${testCwd}/file1.txt`]);
const { rerender } = renderHook(
({ query }) =>
useCompletion(
query,
testCwd,
true,
mockSlashCommands,
mockCommandContext,
mockConfig,
),
{ initialProps: { query: '@f' } },
);
rerender({ query: '@fi' });
rerender({ query: '@fil' });
rerender({ query: '@file' });
await act(async () => {
await new Promise((resolve) => setTimeout(resolve, 150));
});
expect(glob).toHaveBeenCalledTimes(1);
});
});
describe('Query handling edge cases', () => {
it('should handle empty query', () => {
const { result } = renderHook(() =>
useCompletion(
'',
testCwd,
true,
mockSlashCommands,
mockCommandContext,
mockConfig,
),
);
expect(result.current.suggestions).toHaveLength(0);
expect(result.current.showSuggestions).toBe(false);
});
it('should handle query without slash or @', () => {
const { result } = renderHook(() =>
useCompletion(
'regular text',
testCwd,
true,
mockSlashCommands,
mockCommandContext,
mockConfig,
),
);
expect(result.current.suggestions).toHaveLength(0);
expect(result.current.showSuggestions).toBe(false);
});
it('should handle query with whitespace', () => {
const { result } = renderHook(() =>
useCompletion(
' /hel',
testCwd,
true,
mockSlashCommands,
mockCommandContext,
mockConfig,
),
);
expect(result.current.suggestions).toHaveLength(1);
expect(result.current.suggestions[0].label).toBe('help');
});
it('should handle @ at the end of query', async () => {
// Mock for recursive search since enableRecursiveFileSearch is true
vi.mocked(glob).mockResolvedValue([`${testCwd}/file1.txt`]);
const { result } = renderHook(() =>
useCompletion(
'some text @',
testCwd,
true,
mockSlashCommands,
mockCommandContext,
mockConfig,
),
);
// Wait for completion
await act(async () => {
await new Promise((resolve) => setTimeout(resolve, 150));
});
// Should process the @ query and get suggestions
expect(result.current.isLoadingSuggestions).toBe(false);
expect(result.current.suggestions.length).toBeGreaterThanOrEqual(0);
});
});
describe('File sorting behavior', () => {
it('should prioritize source files over test files with same base name', async () => {
// Mock glob to return files with same base name but different extensions
vi.mocked(glob).mockResolvedValue([
`${testCwd}/component.test.ts`,
`${testCwd}/component.ts`,
`${testCwd}/utils.spec.js`,
`${testCwd}/utils.js`,
`${testCwd}/api.test.tsx`,
`${testCwd}/api.tsx`,
]);
mockFileDiscoveryService.shouldIgnoreFile.mockReturnValue(false);
const { result } = renderHook(() =>
useCompletion(
'@comp',
testCwd,
true,
mockSlashCommands,
mockCommandContext,
mockConfig,
),
);
await act(async () => {
await new Promise((resolve) => setTimeout(resolve, 150));
});
expect(result.current.suggestions).toHaveLength(6);
// Extract labels for easier testing
const labels = result.current.suggestions.map((s) => s.label);
// Verify the exact sorted order: source files should come before their test counterparts
expect(labels).toEqual([
'api.tsx',
'api.test.tsx',
'component.ts',
'component.test.ts',
'utils.js',
'utils.spec.js',
]);
});
});
describe('Config and FileDiscoveryService integration', () => {
it('should work without config', async () => {
vi.mocked(fs.readdir).mockResolvedValue([
{ name: 'file1.txt', isDirectory: () => false },
] as unknown as Awaited<ReturnType<typeof fs.readdir>>);
const { result } = renderHook(() =>
useCompletion(
'@',
testCwd,
true,
mockSlashCommands,
mockCommandContext,
undefined,
),
);
await act(async () => {
await new Promise((resolve) => setTimeout(resolve, 150));
});
expect(result.current.suggestions).toHaveLength(1);
expect(result.current.suggestions[0].label).toBe('file1.txt');
});
it('should respect file filtering when config is provided', async () => {
vi.mocked(fs.readdir).mockResolvedValue([
{ name: 'file1.txt', isDirectory: () => false },
{ name: 'ignored.log', isDirectory: () => false },
] as unknown as Awaited<ReturnType<typeof fs.readdir>>);
mockFileDiscoveryService.shouldIgnoreFile.mockImplementation(
(path: string) => path.includes('.log'),
);
const { result } = renderHook(() =>
useCompletion(
'@',
testCwd,
true,
mockSlashCommands,
mockCommandContext,
mockConfig,
),
);
await act(async () => {
await new Promise((resolve) => setTimeout(resolve, 150));
});
expect(result.current.suggestions).toHaveLength(1);
expect(result.current.suggestions[0].label).toBe('file1.txt');
});
});
});

View File

@@ -0,0 +1,543 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { useState, useEffect, useCallback } from 'react';
import * as fs from 'fs/promises';
import * as path from 'path';
import { glob } from 'glob';
import {
isNodeError,
escapePath,
unescapePath,
getErrorMessage,
Config,
FileDiscoveryService,
} from '@qwen/qwen-code-core';
import {
MAX_SUGGESTIONS_TO_SHOW,
Suggestion,
} from '../components/SuggestionsDisplay.js';
import { CommandContext, SlashCommand } from '../commands/types.js';
export interface UseCompletionReturn {
suggestions: Suggestion[];
activeSuggestionIndex: number;
visibleStartIndex: number;
showSuggestions: boolean;
isLoadingSuggestions: boolean;
setActiveSuggestionIndex: React.Dispatch<React.SetStateAction<number>>;
setShowSuggestions: React.Dispatch<React.SetStateAction<boolean>>;
resetCompletionState: () => void;
navigateUp: () => void;
navigateDown: () => void;
}
export function useCompletion(
query: string,
cwd: string,
isActive: boolean,
slashCommands: SlashCommand[],
commandContext: CommandContext,
config?: Config,
): UseCompletionReturn {
const [suggestions, setSuggestions] = useState<Suggestion[]>([]);
const [activeSuggestionIndex, setActiveSuggestionIndex] =
useState<number>(-1);
const [visibleStartIndex, setVisibleStartIndex] = useState<number>(0);
const [showSuggestions, setShowSuggestions] = useState<boolean>(false);
const [isLoadingSuggestions, setIsLoadingSuggestions] =
useState<boolean>(false);
const resetCompletionState = useCallback(() => {
setSuggestions([]);
setActiveSuggestionIndex(-1);
setVisibleStartIndex(0);
setShowSuggestions(false);
setIsLoadingSuggestions(false);
}, []);
const navigateUp = useCallback(() => {
if (suggestions.length === 0) return;
setActiveSuggestionIndex((prevActiveIndex) => {
// Calculate new active index, handling wrap-around
const newActiveIndex =
prevActiveIndex <= 0 ? suggestions.length - 1 : prevActiveIndex - 1;
// Adjust scroll position based on the new active index
setVisibleStartIndex((prevVisibleStart) => {
// Case 1: Wrapped around to the last item
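// (e.g. with 15 suggestions and a window of MAX_SUGGESTIONS_TO_SHOW items,
// wrapping from index 0 to 14 scrolls the window to
// 15 - MAX_SUGGESTIONS_TO_SHOW so the active item stays visible)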
if (
newActiveIndex === suggestions.length - 1 &&
suggestions.length > MAX_SUGGESTIONS_TO_SHOW
) {
return Math.max(0, suggestions.length - MAX_SUGGESTIONS_TO_SHOW);
}
// Case 2: Scrolled above the current visible window
if (newActiveIndex < prevVisibleStart) {
return newActiveIndex;
}
// Otherwise, keep the current scroll position
return prevVisibleStart;
});
return newActiveIndex;
});
}, [suggestions.length]);
const navigateDown = useCallback(() => {
if (suggestions.length === 0) return;
setActiveSuggestionIndex((prevActiveIndex) => {
// Calculate new active index, handling wrap-around
const newActiveIndex =
prevActiveIndex >= suggestions.length - 1 ? 0 : prevActiveIndex + 1;
// Adjust scroll position based on the new active index
setVisibleStartIndex((prevVisibleStart) => {
// Case 1: Wrapped around to the first item
if (
newActiveIndex === 0 &&
suggestions.length > MAX_SUGGESTIONS_TO_SHOW
) {
return 0;
}
// Case 2: Scrolled below the current visible window
const visibleEndIndex = prevVisibleStart + MAX_SUGGESTIONS_TO_SHOW;
if (newActiveIndex >= visibleEndIndex) {
return newActiveIndex - MAX_SUGGESTIONS_TO_SHOW + 1;
}
// Otherwise, keep the current scroll position
return prevVisibleStart;
});
return newActiveIndex;
});
}, [suggestions.length]);
useEffect(() => {
if (!isActive) {
resetCompletionState();
return;
}
const trimmedQuery = query.trimStart();
if (trimmedQuery.startsWith('/')) {
const fullPath = trimmedQuery.substring(1);
const hasTrailingSpace = trimmedQuery.endsWith(' ');
// Get all non-empty parts of the command.
const rawParts = fullPath.split(/\s+/).filter((p) => p);
let commandPathParts = rawParts;
let partial = '';
// If there's no trailing space, the last part is potentially a partial segment.
// We tentatively separate it.
if (!hasTrailingSpace && rawParts.length > 0) {
partial = rawParts[rawParts.length - 1];
commandPathParts = rawParts.slice(0, -1);
}
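// e.g. "/memory ad" yields commandPathParts = ['memory'] and partial = 'ad',
// while "/memory " (trailing space) yields commandPathParts = ['memory'] and partial = ''.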
// Traverse the command tree using the tentatively completed path
let currentLevel: SlashCommand[] | undefined = slashCommands;
let leafCommand: SlashCommand | null = null;
for (const part of commandPathParts) {
if (!currentLevel) {
leafCommand = null;
currentLevel = [];
break;
}
const found: SlashCommand | undefined = currentLevel.find(
(cmd) => cmd.name === part || cmd.altName === part,
);
if (found) {
leafCommand = found;
currentLevel = found.subCommands;
} else {
leafCommand = null;
currentLevel = [];
break;
}
}
// Handle the Ambiguous Case
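// e.g. for "/memory" (no trailing space) the traversal above leaves
// commandPathParts = [] and partial = 'memory'; because 'memory' exactly matches
// a parent command, it is promoted and all of its sub-commands are suggested.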
if (!hasTrailingSpace && currentLevel) {
const exactMatchAsParent = currentLevel.find(
(cmd) =>
(cmd.name === partial || cmd.altName === partial) &&
cmd.subCommands,
);
if (exactMatchAsParent) {
// It's a perfect match for a parent command. Override our initial guess.
// Treat it as a completed command path.
leafCommand = exactMatchAsParent;
currentLevel = exactMatchAsParent.subCommands;
partial = ''; // We now want to suggest ALL of its sub-commands.
}
}
const depth = commandPathParts.length;
// Provide Suggestions based on the now-corrected context
// Argument Completion
if (
leafCommand?.completion &&
(hasTrailingSpace ||
(rawParts.length > depth && depth > 0 && partial !== ''))
) {
const fetchAndSetSuggestions = async () => {
setIsLoadingSuggestions(true);
const argString = rawParts.slice(depth).join(' ');
const results =
(await leafCommand!.completion!(commandContext, argString)) || [];
const finalSuggestions = results.map((s) => ({ label: s, value: s }));
setSuggestions(finalSuggestions);
setShowSuggestions(finalSuggestions.length > 0);
setActiveSuggestionIndex(finalSuggestions.length > 0 ? 0 : -1);
setIsLoadingSuggestions(false);
};
fetchAndSetSuggestions();
return;
}
// Command/Sub-command Completion
const commandsToSearch = currentLevel || [];
if (commandsToSearch.length > 0) {
let potentialSuggestions = commandsToSearch.filter(
(cmd) =>
cmd.description &&
(cmd.name.startsWith(partial) || cmd.altName?.startsWith(partial)),
);
// If a user's input is an exact match and it is a leaf command,
// enter should submit immediately.
if (potentialSuggestions.length > 0 && !hasTrailingSpace) {
const perfectMatch = potentialSuggestions.find(
(s) => s.name === partial,
);
if (perfectMatch && !perfectMatch.subCommands) {
potentialSuggestions = [];
}
}
const finalSuggestions = potentialSuggestions.map((cmd) => ({
label: cmd.name,
value: cmd.name,
description: cmd.description,
}));
setSuggestions(finalSuggestions);
setShowSuggestions(finalSuggestions.length > 0);
setActiveSuggestionIndex(finalSuggestions.length > 0 ? 0 : -1);
setIsLoadingSuggestions(false);
return;
}
// If we fall through, no suggestions are available.
resetCompletionState();
return;
}
// Handle At Command Completion
const atIndex = query.lastIndexOf('@');
if (atIndex === -1) {
resetCompletionState();
return;
}
const partialPath = query.substring(atIndex + 1);
const lastSlashIndex = partialPath.lastIndexOf('/');
const baseDirRelative =
lastSlashIndex === -1
? '.'
: partialPath.substring(0, lastSlashIndex + 1);
const prefix = unescapePath(
lastSlashIndex === -1
? partialPath
: partialPath.substring(lastSlashIndex + 1),
);
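// e.g. "@src/comp" gives baseDirRelative = 'src/' and prefix = 'comp';
// "@comp" (no slash) gives baseDirRelative = '.' and prefix = 'comp'.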
const baseDirAbsolute = path.resolve(cwd, baseDirRelative);
let isMounted = true;
const findFilesRecursively = async (
startDir: string,
searchPrefix: string,
fileDiscovery: FileDiscoveryService | null,
filterOptions: {
respectGitIgnore?: boolean;
respectGeminiIgnore?: boolean;
},
currentRelativePath = '',
depth = 0,
maxDepth = 10, // Limit recursion depth
maxResults = 50, // Limit number of results
): Promise<Suggestion[]> => {
if (depth > maxDepth) {
return [];
}
const lowerSearchPrefix = searchPrefix.toLowerCase();
let foundSuggestions: Suggestion[] = [];
try {
const entries = await fs.readdir(startDir, { withFileTypes: true });
for (const entry of entries) {
if (foundSuggestions.length >= maxResults) break;
const entryPathRelative = path.join(currentRelativePath, entry.name);
const entryPathFromRoot = path.relative(
cwd,
path.join(startDir, entry.name),
);
// Conditionally ignore dotfiles
if (!searchPrefix.startsWith('.') && entry.name.startsWith('.')) {
continue;
}
// Check if this entry should be ignored by filtering options
if (
fileDiscovery &&
fileDiscovery.shouldIgnoreFile(entryPathFromRoot, filterOptions)
) {
continue;
}
if (entry.name.toLowerCase().startsWith(lowerSearchPrefix)) {
foundSuggestions.push({
label: entryPathRelative + (entry.isDirectory() ? '/' : ''),
value: escapePath(
entryPathRelative + (entry.isDirectory() ? '/' : ''),
),
});
}
if (
entry.isDirectory() &&
entry.name !== 'node_modules' &&
!entry.name.startsWith('.')
) {
if (foundSuggestions.length < maxResults) {
foundSuggestions = foundSuggestions.concat(
await findFilesRecursively(
path.join(startDir, entry.name),
searchPrefix, // Pass original searchPrefix for recursive calls
fileDiscovery,
filterOptions,
entryPathRelative,
depth + 1,
maxDepth,
maxResults - foundSuggestions.length,
),
);
}
}
}
} catch (_err) {
// Ignore errors like permission denied or ENOENT during recursive search
}
return foundSuggestions.slice(0, maxResults);
};
const findFilesWithGlob = async (
searchPrefix: string,
fileDiscoveryService: FileDiscoveryService,
filterOptions: {
respectGitIgnore?: boolean;
respectGeminiIgnore?: boolean;
},
maxResults = 50,
): Promise<Suggestion[]> => {
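// e.g. a search prefix of 's' produces the pattern '**/s*' and a prefix of '.'
// produces '**/.*' with dot-files included; matching is case-insensitive.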
const globPattern = `**/${searchPrefix}*`;
const files = await glob(globPattern, {
cwd,
dot: searchPrefix.startsWith('.'),
nocase: true,
});
const suggestions: Suggestion[] = files
.map((file: string) => {
const relativePath = path.relative(cwd, file);
return {
label: relativePath,
value: escapePath(relativePath),
};
})
.filter((s) => {
if (fileDiscoveryService) {
return !fileDiscoveryService.shouldIgnoreFile(
s.label,
filterOptions,
); // relative path
}
return true;
})
.slice(0, maxResults);
return suggestions;
};
const fetchSuggestions = async () => {
setIsLoadingSuggestions(true);
let fetchedSuggestions: Suggestion[] = [];
const fileDiscoveryService = config ? config.getFileService() : null;
const enableRecursiveSearch =
config?.getEnableRecursiveFileSearch() ?? true;
const filterOptions = {
respectGitIgnore: config?.getFileFilteringRespectGitIgnore() ?? true,
respectGeminiIgnore: true,
};
try {
// If the partial path has no '/' and a prefix was typed, do a recursive search from cwd (when enabled)
if (
partialPath.indexOf('/') === -1 &&
prefix &&
enableRecursiveSearch
) {
if (fileDiscoveryService) {
fetchedSuggestions = await findFilesWithGlob(
prefix,
fileDiscoveryService,
filterOptions,
);
} else {
fetchedSuggestions = await findFilesRecursively(
cwd,
prefix,
fileDiscoveryService,
filterOptions,
);
}
} else {
// Original behavior: list files in the specific directory
const lowerPrefix = prefix.toLowerCase();
const entries = await fs.readdir(baseDirAbsolute, {
withFileTypes: true,
});
// Filter entries using git-aware filtering
const filteredEntries = [];
for (const entry of entries) {
// Conditionally ignore dotfiles
if (!prefix.startsWith('.') && entry.name.startsWith('.')) {
continue;
}
if (!entry.name.toLowerCase().startsWith(lowerPrefix)) continue;
const relativePath = path.relative(
cwd,
path.join(baseDirAbsolute, entry.name),
);
if (
fileDiscoveryService &&
fileDiscoveryService.shouldIgnoreFile(relativePath, filterOptions)
) {
continue;
}
filteredEntries.push(entry);
}
fetchedSuggestions = filteredEntries.map((entry) => {
const label = entry.isDirectory() ? entry.name + '/' : entry.name;
return {
label,
value: escapePath(label), // Value for completion should be just the name part
};
});
}
// Sort by depth, then directories first, then alphabetically
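// e.g. 'api.tsx' sorts before 'api.test.tsx' because extensions are stripped
// before comparing ('api' < 'api.test')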
fetchedSuggestions.sort((a, b) => {
const depthA = (a.label.match(/\//g) || []).length;
const depthB = (b.label.match(/\//g) || []).length;
if (depthA !== depthB) {
return depthA - depthB;
}
const aIsDir = a.label.endsWith('/');
const bIsDir = b.label.endsWith('/');
if (aIsDir && !bIsDir) return -1;
if (!aIsDir && bIsDir) return 1;
// Exclude the file extension when comparing base names
const filenameA = a.label.substring(
0,
a.label.length - path.extname(a.label).length,
);
const filenameB = b.label.substring(
0,
b.label.length - path.extname(b.label).length,
);
return (
filenameA.localeCompare(filenameB) || a.label.localeCompare(b.label)
);
});
if (isMounted) {
setSuggestions(fetchedSuggestions);
setShowSuggestions(fetchedSuggestions.length > 0);
setActiveSuggestionIndex(fetchedSuggestions.length > 0 ? 0 : -1);
setVisibleStartIndex(0);
}
} catch (error: unknown) {
if (isNodeError(error) && error.code === 'ENOENT') {
if (isMounted) {
setSuggestions([]);
setShowSuggestions(false);
}
} else {
console.error(
`Error fetching completion suggestions for ${partialPath}: ${getErrorMessage(error)}`,
);
if (isMounted) {
resetCompletionState();
}
}
}
if (isMounted) {
setIsLoadingSuggestions(false);
}
};
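// Debounce: wait 100ms after the latest query change before fetching, and on
// cleanup cancel the timer and suppress state updates from any in-flight fetch.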
const debounceTimeout = setTimeout(fetchSuggestions, 100);
return () => {
isMounted = false;
clearTimeout(debounceTimeout);
};
}, [
query,
cwd,
isActive,
resetCompletionState,
slashCommands,
commandContext,
config,
]);
return {
suggestions,
activeSuggestionIndex,
visibleStartIndex,
showSuggestions,
isLoadingSuggestions,
setActiveSuggestionIndex,
setShowSuggestions,
resetCompletionState,
navigateUp,
navigateDown,
};
}

View File

@@ -0,0 +1,212 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { describe, it, expect, vi } from 'vitest';
import { act, renderHook } from '@testing-library/react';
import { useConsoleMessages } from './useConsoleMessages.js';
import { ConsoleMessageItem } from '../types.js';
// Mock setTimeout and clearTimeout
vi.useFakeTimers();
describe('useConsoleMessages', () => {
it('should initialize with an empty array of console messages', () => {
const { result } = renderHook(() => useConsoleMessages());
expect(result.current.consoleMessages).toEqual([]);
});
it('should add a new message', () => {
const { result } = renderHook(() => useConsoleMessages());
const message: ConsoleMessageItem = {
type: 'log',
content: 'Test message',
count: 1,
};
act(() => {
result.current.handleNewMessage(message);
});
act(() => {
vi.runAllTimers(); // Process the queue
});
expect(result.current.consoleMessages).toEqual([{ ...message, count: 1 }]);
});
it('should consolidate identical consecutive messages', () => {
const { result } = renderHook(() => useConsoleMessages());
const message: ConsoleMessageItem = {
type: 'log',
content: 'Test message',
count: 1,
};
act(() => {
result.current.handleNewMessage(message);
result.current.handleNewMessage(message);
});
act(() => {
vi.runAllTimers();
});
expect(result.current.consoleMessages).toEqual([{ ...message, count: 2 }]);
});
it('should not consolidate different messages', () => {
const { result } = renderHook(() => useConsoleMessages());
const message1: ConsoleMessageItem = {
type: 'log',
content: 'Test message 1',
count: 1,
};
const message2: ConsoleMessageItem = {
type: 'error',
content: 'Test message 2',
count: 1,
};
act(() => {
result.current.handleNewMessage(message1);
result.current.handleNewMessage(message2);
});
act(() => {
vi.runAllTimers();
});
expect(result.current.consoleMessages).toEqual([
{ ...message1, count: 1 },
{ ...message2, count: 1 },
]);
});
it('should not consolidate messages if type is different', () => {
const { result } = renderHook(() => useConsoleMessages());
const message1: ConsoleMessageItem = {
type: 'log',
content: 'Test message',
count: 1,
};
const message2: ConsoleMessageItem = {
type: 'error',
content: 'Test message',
count: 1,
};
act(() => {
result.current.handleNewMessage(message1);
result.current.handleNewMessage(message2);
});
act(() => {
vi.runAllTimers();
});
expect(result.current.consoleMessages).toEqual([
{ ...message1, count: 1 },
{ ...message2, count: 1 },
]);
});
it('should clear console messages', () => {
const { result } = renderHook(() => useConsoleMessages());
const message: ConsoleMessageItem = {
type: 'log',
content: 'Test message',
count: 1,
};
act(() => {
result.current.handleNewMessage(message);
});
act(() => {
vi.runAllTimers();
});
expect(result.current.consoleMessages).toHaveLength(1);
act(() => {
result.current.clearConsoleMessages();
});
expect(result.current.consoleMessages).toEqual([]);
});
it('should clear pending timeout on clearConsoleMessages', () => {
const { result } = renderHook(() => useConsoleMessages());
const message: ConsoleMessageItem = {
type: 'log',
content: 'Test message',
count: 1,
};
act(() => {
result.current.handleNewMessage(message); // This schedules a timeout
});
act(() => {
result.current.clearConsoleMessages();
});
// Ensure the queue is empty and no more messages are processed
act(() => {
vi.runAllTimers(); // If timeout wasn't cleared, this would process the queue
});
expect(result.current.consoleMessages).toEqual([]);
});
it('should clear message queue on clearConsoleMessages', () => {
const { result } = renderHook(() => useConsoleMessages());
const message: ConsoleMessageItem = {
type: 'log',
content: 'Test message',
count: 1,
};
act(() => {
// Add a message but don't process the queue yet
result.current.handleNewMessage(message);
});
act(() => {
result.current.clearConsoleMessages();
});
// Process any pending timeouts (should be none related to message queue)
act(() => {
vi.runAllTimers();
});
// The consoleMessages should be empty because the queue was cleared before processing
expect(result.current.consoleMessages).toEqual([]);
});
it('should cleanup timeout on unmount', () => {
const { result, unmount } = renderHook(() => useConsoleMessages());
const message: ConsoleMessageItem = {
type: 'log',
content: 'Test message',
count: 1,
};
act(() => {
result.current.handleNewMessage(message);
});
unmount();
// This is a bit indirect. We check that clearTimeout was called.
// If clearTimeout was not called, and we run timers, an error might occur
// or the state might change, which it shouldn't after unmount.
// Vitest's vi.clearAllTimers() or specific checks for clearTimeout calls
// would be more direct if available and easy to set up here.
// For now, we rely on the useEffect cleanup pattern.
expect(vi.getTimerCount()).toBe(0); // Check if all timers are cleared
});
});

View File

@@ -0,0 +1,89 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { useCallback, useEffect, useRef, useState } from 'react';
import { ConsoleMessageItem } from '../types.js';
export interface UseConsoleMessagesReturn {
consoleMessages: ConsoleMessageItem[];
handleNewMessage: (message: ConsoleMessageItem) => void;
clearConsoleMessages: () => void;
}
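/**
 * Usage sketch (illustrative; the consuming component is assumed):
 *   const { consoleMessages, handleNewMessage, clearConsoleMessages } =
 *     useConsoleMessages();
 *   handleNewMessage({ type: 'log', content: 'hello', count: 1 });
 */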
export function useConsoleMessages(): UseConsoleMessagesReturn {
const [consoleMessages, setConsoleMessages] = useState<ConsoleMessageItem[]>(
[],
);
const messageQueueRef = useRef<ConsoleMessageItem[]>([]);
const messageQueueTimeoutRef = useRef<number | null>(null);
const processMessageQueue = useCallback(() => {
if (messageQueueRef.current.length === 0) {
return;
}
const newMessagesToAdd = messageQueueRef.current;
messageQueueRef.current = [];
setConsoleMessages((prevMessages) => {
const newMessages = [...prevMessages];
newMessagesToAdd.forEach((queuedMessage) => {
if (
newMessages.length > 0 &&
newMessages[newMessages.length - 1].type === queuedMessage.type &&
newMessages[newMessages.length - 1].content === queuedMessage.content
) {
newMessages[newMessages.length - 1].count =
(newMessages[newMessages.length - 1].count || 1) + 1;
} else {
newMessages.push({ ...queuedMessage, count: 1 });
}
});
return newMessages;
});
messageQueueTimeoutRef.current = null; // Allow next scheduling
}, []);
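  // A single zero-delay timeout batches every message queued in the same tick
  // into one state update, avoiding a re-render per console write.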
const scheduleQueueProcessing = useCallback(() => {
if (messageQueueTimeoutRef.current === null) {
messageQueueTimeoutRef.current = setTimeout(
processMessageQueue,
0,
) as unknown as number;
}
}, [processMessageQueue]);
const handleNewMessage = useCallback(
(message: ConsoleMessageItem) => {
messageQueueRef.current.push(message);
scheduleQueueProcessing();
},
[scheduleQueueProcessing],
);
const clearConsoleMessages = useCallback(() => {
setConsoleMessages([]);
if (messageQueueTimeoutRef.current !== null) {
clearTimeout(messageQueueTimeoutRef.current);
messageQueueTimeoutRef.current = null;
}
messageQueueRef.current = [];
}, []);
useEffect(
() =>
// Cleanup on unmount
() => {
if (messageQueueTimeoutRef.current !== null) {
clearTimeout(messageQueueTimeoutRef.current);
}
},
[],
);
return { consoleMessages, handleNewMessage, clearConsoleMessages };
}

View File

@@ -0,0 +1,283 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import {
afterEach,
beforeEach,
describe,
expect,
it,
vi,
type MockedFunction,
} from 'vitest';
import { act } from 'react';
import { renderHook } from '@testing-library/react';
import { useEditorSettings } from './useEditorSettings.js';
import { LoadedSettings, SettingScope } from '../../config/settings.js';
import { MessageType, type HistoryItem } from '../types.js';
import {
type EditorType,
checkHasEditorType,
allowEditorTypeInSandbox,
} from '@qwen/qwen-code-core';
vi.mock('@qwen/qwen-code-core', async () => {
const actual = await vi.importActual('@qwen/qwen-code-core');
return {
...actual,
checkHasEditorType: vi.fn(() => true),
allowEditorTypeInSandbox: vi.fn(() => true),
};
});
const mockCheckHasEditorType = vi.mocked(checkHasEditorType);
const mockAllowEditorTypeInSandbox = vi.mocked(allowEditorTypeInSandbox);
describe('useEditorSettings', () => {
let mockLoadedSettings: LoadedSettings;
let mockSetEditorError: MockedFunction<(error: string | null) => void>;
let mockAddItem: MockedFunction<
(item: Omit<HistoryItem, 'id'>, timestamp: number) => void
>;
beforeEach(() => {
vi.resetAllMocks();
mockLoadedSettings = {
setValue: vi.fn(),
} as unknown as LoadedSettings;
mockSetEditorError = vi.fn();
mockAddItem = vi.fn();
// Reset mock implementations to default
mockCheckHasEditorType.mockReturnValue(true);
mockAllowEditorTypeInSandbox.mockReturnValue(true);
});
afterEach(() => {
vi.restoreAllMocks();
});
it('should initialize with dialog closed', () => {
const { result } = renderHook(() =>
useEditorSettings(mockLoadedSettings, mockSetEditorError, mockAddItem),
);
expect(result.current.isEditorDialogOpen).toBe(false);
});
it('should open editor dialog when openEditorDialog is called', () => {
const { result } = renderHook(() =>
useEditorSettings(mockLoadedSettings, mockSetEditorError, mockAddItem),
);
act(() => {
result.current.openEditorDialog();
});
expect(result.current.isEditorDialogOpen).toBe(true);
});
it('should close editor dialog when exitEditorDialog is called', () => {
const { result } = renderHook(() =>
useEditorSettings(mockLoadedSettings, mockSetEditorError, mockAddItem),
);
act(() => {
result.current.openEditorDialog();
result.current.exitEditorDialog();
});
expect(result.current.isEditorDialogOpen).toBe(false);
});
it('should handle editor selection successfully', () => {
const { result } = renderHook(() =>
useEditorSettings(mockLoadedSettings, mockSetEditorError, mockAddItem),
);
const editorType: EditorType = 'vscode';
const scope = SettingScope.User;
act(() => {
result.current.openEditorDialog();
result.current.handleEditorSelect(editorType, scope);
});
expect(mockLoadedSettings.setValue).toHaveBeenCalledWith(
scope,
'preferredEditor',
editorType,
);
expect(mockAddItem).toHaveBeenCalledWith(
{
type: MessageType.INFO,
text: 'Editor preference set to "vscode" in User settings.',
},
expect.any(Number),
);
expect(mockSetEditorError).toHaveBeenCalledWith(null);
expect(result.current.isEditorDialogOpen).toBe(false);
});
it('should handle clearing editor preference (undefined editor)', () => {
const { result } = renderHook(() =>
useEditorSettings(mockLoadedSettings, mockSetEditorError, mockAddItem),
);
const scope = SettingScope.Workspace;
act(() => {
result.current.openEditorDialog();
result.current.handleEditorSelect(undefined, scope);
});
expect(mockLoadedSettings.setValue).toHaveBeenCalledWith(
scope,
'preferredEditor',
undefined,
);
expect(mockAddItem).toHaveBeenCalledWith(
{
type: MessageType.INFO,
text: 'Editor preference cleared in Workspace settings.',
},
expect.any(Number),
);
expect(mockSetEditorError).toHaveBeenCalledWith(null);
expect(result.current.isEditorDialogOpen).toBe(false);
});
it('should handle different editor types', () => {
const { result } = renderHook(() =>
useEditorSettings(mockLoadedSettings, mockSetEditorError, mockAddItem),
);
const editorTypes: EditorType[] = ['cursor', 'windsurf', 'vim'];
const scope = SettingScope.User;
editorTypes.forEach((editorType) => {
act(() => {
result.current.handleEditorSelect(editorType, scope);
});
expect(mockLoadedSettings.setValue).toHaveBeenCalledWith(
scope,
'preferredEditor',
editorType,
);
expect(mockAddItem).toHaveBeenCalledWith(
{
type: MessageType.INFO,
text: `Editor preference set to "${editorType}" in User settings.`,
},
expect.any(Number),
);
});
});
it('should handle different setting scopes', () => {
const { result } = renderHook(() =>
useEditorSettings(mockLoadedSettings, mockSetEditorError, mockAddItem),
);
const editorType: EditorType = 'vscode';
const scopes = [SettingScope.User, SettingScope.Workspace];
scopes.forEach((scope) => {
act(() => {
result.current.handleEditorSelect(editorType, scope);
});
expect(mockLoadedSettings.setValue).toHaveBeenCalledWith(
scope,
'preferredEditor',
editorType,
);
expect(mockAddItem).toHaveBeenCalledWith(
{
type: MessageType.INFO,
text: `Editor preference set to "vscode" in ${scope} settings.`,
},
expect.any(Number),
);
});
});
it('should not set preference for unavailable editors', () => {
const { result } = renderHook(() =>
useEditorSettings(mockLoadedSettings, mockSetEditorError, mockAddItem),
);
mockCheckHasEditorType.mockReturnValue(false);
const editorType: EditorType = 'vscode';
const scope = SettingScope.User;
act(() => {
result.current.openEditorDialog();
result.current.handleEditorSelect(editorType, scope);
});
expect(mockLoadedSettings.setValue).not.toHaveBeenCalled();
expect(mockAddItem).not.toHaveBeenCalled();
expect(result.current.isEditorDialogOpen).toBe(true);
});
it('should not set preference for editors not allowed in sandbox', () => {
const { result } = renderHook(() =>
useEditorSettings(mockLoadedSettings, mockSetEditorError, mockAddItem),
);
mockAllowEditorTypeInSandbox.mockReturnValue(false);
const editorType: EditorType = 'vscode';
const scope = SettingScope.User;
act(() => {
result.current.openEditorDialog();
result.current.handleEditorSelect(editorType, scope);
});
expect(mockLoadedSettings.setValue).not.toHaveBeenCalled();
expect(mockAddItem).not.toHaveBeenCalled();
expect(result.current.isEditorDialogOpen).toBe(true);
});
it('should handle errors during editor selection', () => {
const { result } = renderHook(() =>
useEditorSettings(mockLoadedSettings, mockSetEditorError, mockAddItem),
);
const errorMessage = 'Failed to save settings';
(
mockLoadedSettings.setValue as MockedFunction<
typeof mockLoadedSettings.setValue
>
).mockImplementation(() => {
throw new Error(errorMessage);
});
const editorType: EditorType = 'vscode';
const scope = SettingScope.User;
act(() => {
result.current.openEditorDialog();
result.current.handleEditorSelect(editorType, scope);
});
expect(mockSetEditorError).toHaveBeenCalledWith(
`Failed to set editor preference: Error: ${errorMessage}`,
);
expect(mockAddItem).not.toHaveBeenCalled();
expect(result.current.isEditorDialogOpen).toBe(true);
});
});

View File

@@ -0,0 +1,75 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { useState, useCallback } from 'react';
import { LoadedSettings, SettingScope } from '../../config/settings.js';
import { type HistoryItem, MessageType } from '../types.js';
import {
allowEditorTypeInSandbox,
checkHasEditorType,
EditorType,
} from '@qwen/qwen-code-core';
interface UseEditorSettingsReturn {
isEditorDialogOpen: boolean;
openEditorDialog: () => void;
handleEditorSelect: (
editorType: EditorType | undefined,
scope: SettingScope,
) => void;
exitEditorDialog: () => void;
}
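/**
 * Usage sketch (illustrative; `settings`, `setEditorError` and `addItem` are
 * assumed to come from the surrounding app state):
 *   const { openEditorDialog, handleEditorSelect } =
 *     useEditorSettings(settings, setEditorError, addItem);
 *   handleEditorSelect('vscode', SettingScope.User);
 */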
export const useEditorSettings = (
loadedSettings: LoadedSettings,
setEditorError: (error: string | null) => void,
addItem: (item: Omit<HistoryItem, 'id'>, timestamp: number) => void,
): UseEditorSettingsReturn => {
const [isEditorDialogOpen, setIsEditorDialogOpen] = useState(false);
const openEditorDialog = useCallback(() => {
setIsEditorDialogOpen(true);
}, []);
const handleEditorSelect = useCallback(
(editorType: EditorType | undefined, scope: SettingScope) => {
if (
editorType &&
(!checkHasEditorType(editorType) ||
!allowEditorTypeInSandbox(editorType))
) {
return;
}
try {
loadedSettings.setValue(scope, 'preferredEditor', editorType);
addItem(
{
type: MessageType.INFO,
text: `Editor preference ${editorType ? `set to "${editorType}"` : 'cleared'} in ${scope} settings.`,
},
Date.now(),
);
setEditorError(null);
setIsEditorDialogOpen(false);
} catch (error) {
setEditorError(`Failed to set editor preference: ${error}`);
}
},
[loadedSettings, setEditorError, addItem],
);
const exitEditorDialog = useCallback(() => {
setIsEditorDialogOpen(false);
}, []);
return {
isEditorDialogOpen,
openEditorDialog,
handleEditorSelect,
exitEditorDialog,
};
};

File diff suppressed because it is too large

View File

@@ -0,0 +1,888 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { useState, useRef, useCallback, useEffect, useMemo } from 'react';
import { useInput } from 'ink';
import {
Config,
GeminiClient,
GeminiEventType as ServerGeminiEventType,
ServerGeminiStreamEvent as GeminiEvent,
ServerGeminiContentEvent as ContentEvent,
ServerGeminiErrorEvent as ErrorEvent,
ServerGeminiChatCompressedEvent,
getErrorMessage,
isNodeError,
MessageSenderType,
ToolCallRequestInfo,
logUserPrompt,
GitService,
EditorType,
ThoughtSummary,
UnauthorizedError,
UserPromptEvent,
DEFAULT_GEMINI_FLASH_MODEL,
} from '@qwen/qwen-code-core';
import { type Part, type PartListUnion } from '@google/genai';
import {
StreamingState,
HistoryItem,
HistoryItemWithoutId,
HistoryItemToolGroup,
MessageType,
SlashCommandProcessorResult,
ToolCallStatus,
} from '../types.js';
import { isAtCommand } from '../utils/commandUtils.js';
import { parseAndFormatApiError } from '../utils/errorParsing.js';
import { useShellCommandProcessor } from './shellCommandProcessor.js';
import { handleAtCommand } from './atCommandProcessor.js';
import { findLastSafeSplitPoint } from '../utils/markdownUtilities.js';
import { useStateAndRef } from './useStateAndRef.js';
import { UseHistoryManagerReturn } from './useHistoryManager.js';
import { useLogger } from './useLogger.js';
import { promises as fs } from 'fs';
import path from 'path';
import {
useReactToolScheduler,
mapToDisplay as mapTrackedToolCallsToDisplay,
TrackedToolCall,
TrackedCompletedToolCall,
TrackedCancelledToolCall,
} from './useReactToolScheduler.js';
import { useSessionStats } from '../contexts/SessionContext.js';
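/**
 * Flattens a list of PartListUnion values by one level, e.g.
 * mergePartListUnions([[{ text: 'a' }], { text: 'b' }]) yields
 * [{ text: 'a' }, { text: 'b' }], so multiple tool responses can be sent as
 * a single turn.
 */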
export function mergePartListUnions(list: PartListUnion[]): PartListUnion {
const resultParts: PartListUnion = [];
for (const item of list) {
if (Array.isArray(item)) {
resultParts.push(...item);
} else {
resultParts.push(item);
}
}
return resultParts;
}
enum StreamProcessingStatus {
Completed,
UserCancelled,
Error,
}
/**
* Manages the Gemini stream, including user input, command processing,
* API interaction, and tool call lifecycle.
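 *
 * Returns { streamingState, submitQuery, initError, pendingHistoryItems,
 * thought } for the UI to render and drive the conversation.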
*/
export const useGeminiStream = (
geminiClient: GeminiClient,
history: HistoryItem[],
addItem: UseHistoryManagerReturn['addItem'],
setShowHelp: React.Dispatch<React.SetStateAction<boolean>>,
config: Config,
onDebugMessage: (message: string) => void,
handleSlashCommand: (
cmd: PartListUnion,
) => Promise<SlashCommandProcessorResult | false>,
shellModeActive: boolean,
getPreferredEditor: () => EditorType | undefined,
onAuthError: () => void,
performMemoryRefresh: () => Promise<void>,
modelSwitchedFromQuotaError: boolean,
setModelSwitchedFromQuotaError: React.Dispatch<React.SetStateAction<boolean>>,
) => {
const [initError, setInitError] = useState<string | null>(null);
const abortControllerRef = useRef<AbortController | null>(null);
const turnCancelledRef = useRef(false);
const [isResponding, setIsResponding] = useState<boolean>(false);
const [thought, setThought] = useState<ThoughtSummary | null>(null);
const [pendingHistoryItemRef, setPendingHistoryItem] =
useStateAndRef<HistoryItemWithoutId | null>(null);
const processedMemoryToolsRef = useRef<Set<string>>(new Set());
const { startNewPrompt, getPromptCount } = useSessionStats();
const logger = useLogger();
const gitService = useMemo(() => {
if (!config.getProjectRoot()) {
return;
}
return new GitService(config.getProjectRoot());
}, [config]);
const [toolCalls, scheduleToolCalls, markToolsAsSubmitted] =
useReactToolScheduler(
async (completedToolCallsFromScheduler) => {
// This onComplete is called when ALL scheduled tools for a given batch are done.
if (completedToolCallsFromScheduler.length > 0) {
// Add the final state of these tools to the history for display.
addItem(
mapTrackedToolCallsToDisplay(
completedToolCallsFromScheduler as TrackedToolCall[],
),
Date.now(),
);
// Handle tool response submission immediately when tools complete
await handleCompletedTools(
completedToolCallsFromScheduler as TrackedToolCall[],
);
}
},
config,
setPendingHistoryItem,
getPreferredEditor,
);
const pendingToolCallGroupDisplay = useMemo(
() =>
toolCalls.length ? mapTrackedToolCallsToDisplay(toolCalls) : undefined,
[toolCalls],
);
const loopDetectedRef = useRef(false);
const onExec = useCallback(async (done: Promise<void>) => {
setIsResponding(true);
await done;
setIsResponding(false);
}, []);
const { handleShellCommand } = useShellCommandProcessor(
addItem,
setPendingHistoryItem,
onExec,
onDebugMessage,
config,
geminiClient,
);
const streamingState = useMemo(() => {
if (toolCalls.some((tc) => tc.status === 'awaiting_approval')) {
return StreamingState.WaitingForConfirmation;
}
if (
isResponding ||
toolCalls.some(
(tc) =>
tc.status === 'executing' ||
tc.status === 'scheduled' ||
tc.status === 'validating' ||
((tc.status === 'success' ||
tc.status === 'error' ||
tc.status === 'cancelled') &&
!(tc as TrackedCompletedToolCall | TrackedCancelledToolCall)
.responseSubmittedToGemini),
)
) {
return StreamingState.Responding;
}
return StreamingState.Idle;
}, [isResponding, toolCalls]);
useInput((_input, key) => {
if (streamingState === StreamingState.Responding && key.escape) {
if (turnCancelledRef.current) {
return;
}
turnCancelledRef.current = true;
abortControllerRef.current?.abort();
if (pendingHistoryItemRef.current) {
addItem(pendingHistoryItemRef.current, Date.now());
}
addItem(
{
type: MessageType.INFO,
text: 'Request cancelled.',
},
Date.now(),
);
setPendingHistoryItem(null);
setIsResponding(false);
}
});
const prepareQueryForGemini = useCallback(
async (
query: PartListUnion,
userMessageTimestamp: number,
abortSignal: AbortSignal,
prompt_id: string,
): Promise<{
queryToSend: PartListUnion | null;
shouldProceed: boolean;
}> => {
if (turnCancelledRef.current) {
return { queryToSend: null, shouldProceed: false };
}
if (typeof query === 'string' && query.trim().length === 0) {
return { queryToSend: null, shouldProceed: false };
}
let localQueryToSendToGemini: PartListUnion | null = null;
if (typeof query === 'string') {
const trimmedQuery = query.trim();
logUserPrompt(
config,
new UserPromptEvent(
trimmedQuery.length,
prompt_id,
config.getContentGeneratorConfig()?.authType,
trimmedQuery,
),
);
onDebugMessage(`User query: '${trimmedQuery}'`);
await logger?.logMessage(MessageSenderType.USER, trimmedQuery);
// Handle UI-only commands first
const slashCommandResult = await handleSlashCommand(trimmedQuery);
if (slashCommandResult) {
if (slashCommandResult.type === 'schedule_tool') {
const { toolName, toolArgs } = slashCommandResult;
const toolCallRequest: ToolCallRequestInfo = {
callId: `${toolName}-${Date.now()}-${Math.random().toString(16).slice(2)}`,
name: toolName,
args: toolArgs,
isClientInitiated: true,
prompt_id,
};
scheduleToolCalls([toolCallRequest], abortSignal);
}
return { queryToSend: null, shouldProceed: false };
}
if (shellModeActive && handleShellCommand(trimmedQuery, abortSignal)) {
return { queryToSend: null, shouldProceed: false };
}
// Handle @-commands (which might involve tool calls)
if (isAtCommand(trimmedQuery)) {
const atCommandResult = await handleAtCommand({
query: trimmedQuery,
config,
addItem,
onDebugMessage,
messageId: userMessageTimestamp,
signal: abortSignal,
});
if (!atCommandResult.shouldProceed) {
return { queryToSend: null, shouldProceed: false };
}
localQueryToSendToGemini = atCommandResult.processedQuery;
} else {
// Normal query for Gemini
addItem(
{ type: MessageType.USER, text: trimmedQuery },
userMessageTimestamp,
);
localQueryToSendToGemini = trimmedQuery;
}
} else {
// It's a function response (PartListUnion that isn't a string)
localQueryToSendToGemini = query;
}
if (localQueryToSendToGemini === null) {
onDebugMessage(
'Query processing resulted in null, not sending to Gemini.',
);
return { queryToSend: null, shouldProceed: false };
}
return { queryToSend: localQueryToSendToGemini, shouldProceed: true };
},
[
config,
addItem,
onDebugMessage,
handleShellCommand,
handleSlashCommand,
logger,
shellModeActive,
scheduleToolCalls,
],
);
// --- Stream Event Handlers ---
const handleContentEvent = useCallback(
(
eventValue: ContentEvent['value'],
currentGeminiMessageBuffer: string,
userMessageTimestamp: number,
): string => {
if (turnCancelledRef.current) {
// Prevents additional output after a user initiated cancel.
return '';
}
let newGeminiMessageBuffer = currentGeminiMessageBuffer + eventValue;
if (
pendingHistoryItemRef.current?.type !== 'gemini' &&
pendingHistoryItemRef.current?.type !== 'gemini_content'
) {
if (pendingHistoryItemRef.current) {
addItem(pendingHistoryItemRef.current, userMessageTimestamp);
}
setPendingHistoryItem({ type: 'gemini', text: '' });
newGeminiMessageBuffer = eventValue;
}
// Split large messages for better rendering performance. Ideally,
// we should maximize the amount of output sent to <Static />.
const splitPoint = findLastSafeSplitPoint(newGeminiMessageBuffer);
if (splitPoint === newGeminiMessageBuffer.length) {
// Update the existing message with accumulated content
setPendingHistoryItem((item) => ({
type: item?.type as 'gemini' | 'gemini_content',
text: newGeminiMessageBuffer,
}));
} else {
// This indicates that we need to split up this Gemini Message.
// Splitting a message is primarily a performance consideration. There is a
// <Static> component at the root of App.tsx which takes care of rendering
// content statically or dynamically. Everything but the last message is
// treated as static in order to prevent re-rendering an entire message history
// multiple times per-second (as streaming occurs). Prior to this change you'd
        // see heavy flickering of the terminal. This ensures that larger messages get
        // broken up so that more of the output is rendered statically.
const beforeText = newGeminiMessageBuffer.substring(0, splitPoint);
const afterText = newGeminiMessageBuffer.substring(splitPoint);
addItem(
{
type: pendingHistoryItemRef.current?.type as
| 'gemini'
| 'gemini_content',
text: beforeText,
},
userMessageTimestamp,
);
setPendingHistoryItem({ type: 'gemini_content', text: afterText });
newGeminiMessageBuffer = afterText;
}
return newGeminiMessageBuffer;
},
[addItem, pendingHistoryItemRef, setPendingHistoryItem],
);
const handleUserCancelledEvent = useCallback(
(userMessageTimestamp: number) => {
if (turnCancelledRef.current) {
return;
}
if (pendingHistoryItemRef.current) {
if (pendingHistoryItemRef.current.type === 'tool_group') {
const updatedTools = pendingHistoryItemRef.current.tools.map(
(tool) =>
tool.status === ToolCallStatus.Pending ||
tool.status === ToolCallStatus.Confirming ||
tool.status === ToolCallStatus.Executing
? { ...tool, status: ToolCallStatus.Canceled }
: tool,
);
const pendingItem: HistoryItemToolGroup = {
...pendingHistoryItemRef.current,
tools: updatedTools,
};
addItem(pendingItem, userMessageTimestamp);
} else {
addItem(pendingHistoryItemRef.current, userMessageTimestamp);
}
setPendingHistoryItem(null);
}
addItem(
{ type: MessageType.INFO, text: 'User cancelled the request.' },
userMessageTimestamp,
);
setIsResponding(false);
},
[addItem, pendingHistoryItemRef, setPendingHistoryItem],
);
const handleErrorEvent = useCallback(
(eventValue: ErrorEvent['value'], userMessageTimestamp: number) => {
if (pendingHistoryItemRef.current) {
addItem(pendingHistoryItemRef.current, userMessageTimestamp);
setPendingHistoryItem(null);
}
addItem(
{
type: MessageType.ERROR,
text: parseAndFormatApiError(
eventValue.error,
config.getContentGeneratorConfig()?.authType,
undefined,
config.getModel(),
DEFAULT_GEMINI_FLASH_MODEL,
),
},
userMessageTimestamp,
);
},
[addItem, pendingHistoryItemRef, setPendingHistoryItem, config],
);
const handleChatCompressionEvent = useCallback(
(eventValue: ServerGeminiChatCompressedEvent['value']) =>
addItem(
{
type: 'info',
text:
`IMPORTANT: This conversation approached the input token limit for ${config.getModel()}. ` +
`A compressed context will be sent for future messages (compressed from: ` +
`${eventValue?.originalTokenCount ?? 'unknown'} to ` +
`${eventValue?.newTokenCount ?? 'unknown'} tokens).`,
},
Date.now(),
),
[addItem, config],
);
const handleMaxSessionTurnsEvent = useCallback(
() =>
addItem(
{
type: 'info',
text:
`The session has reached the maximum number of turns: ${config.getMaxSessionTurns()}. ` +
          `Please update this limit in your settings.json file.`,
},
Date.now(),
),
[addItem, config],
);
const handleLoopDetectedEvent = useCallback(() => {
addItem(
{
type: 'info',
text: `A potential loop was detected. This can happen due to repetitive tool calls or other model behavior. The request has been halted.`,
},
Date.now(),
);
}, [addItem]);
const processGeminiStreamEvents = useCallback(
async (
stream: AsyncIterable<GeminiEvent>,
userMessageTimestamp: number,
signal: AbortSignal,
): Promise<StreamProcessingStatus> => {
let geminiMessageBuffer = '';
const toolCallRequests: ToolCallRequestInfo[] = [];
for await (const event of stream) {
switch (event.type) {
case ServerGeminiEventType.Thought:
setThought(event.value);
break;
case ServerGeminiEventType.Content:
geminiMessageBuffer = handleContentEvent(
event.value,
geminiMessageBuffer,
userMessageTimestamp,
);
break;
case ServerGeminiEventType.ToolCallRequest:
toolCallRequests.push(event.value);
break;
case ServerGeminiEventType.UserCancelled:
handleUserCancelledEvent(userMessageTimestamp);
break;
case ServerGeminiEventType.Error:
handleErrorEvent(event.value, userMessageTimestamp);
break;
case ServerGeminiEventType.ChatCompressed:
handleChatCompressionEvent(event.value);
break;
case ServerGeminiEventType.ToolCallConfirmation:
case ServerGeminiEventType.ToolCallResponse:
// do nothing
break;
case ServerGeminiEventType.MaxSessionTurns:
handleMaxSessionTurnsEvent();
break;
case ServerGeminiEventType.LoopDetected:
// handle later because we want to move pending history to history
// before we add loop detected message to history
loopDetectedRef.current = true;
break;
default: {
// enforces exhaustive switch-case
const unreachable: never = event;
return unreachable;
}
}
}
if (toolCallRequests.length > 0) {
scheduleToolCalls(toolCallRequests, signal);
}
return StreamProcessingStatus.Completed;
},
[
handleContentEvent,
handleUserCancelledEvent,
handleErrorEvent,
scheduleToolCalls,
handleChatCompressionEvent,
handleMaxSessionTurnsEvent,
],
);
const submitQuery = useCallback(
async (
query: PartListUnion,
options?: { isContinuation: boolean },
prompt_id?: string,
) => {
if (
(streamingState === StreamingState.Responding ||
streamingState === StreamingState.WaitingForConfirmation) &&
!options?.isContinuation
)
return;
const userMessageTimestamp = Date.now();
setShowHelp(false);
// Reset quota error flag when starting a new query (not a continuation)
if (!options?.isContinuation) {
setModelSwitchedFromQuotaError(false);
config.setQuotaErrorOccurred(false);
}
abortControllerRef.current = new AbortController();
const abortSignal = abortControllerRef.current.signal;
turnCancelledRef.current = false;
if (!prompt_id) {
prompt_id = config.getSessionId() + '########' + getPromptCount();
}
const { queryToSend, shouldProceed } = await prepareQueryForGemini(
query,
userMessageTimestamp,
abortSignal,
prompt_id!,
);
if (!shouldProceed || queryToSend === null) {
return;
}
if (!options?.isContinuation) {
startNewPrompt();
}
setIsResponding(true);
setInitError(null);
try {
const stream = geminiClient.sendMessageStream(
queryToSend,
abortSignal,
prompt_id!,
);
const processingStatus = await processGeminiStreamEvents(
stream,
userMessageTimestamp,
abortSignal,
);
if (processingStatus === StreamProcessingStatus.UserCancelled) {
return;
}
if (pendingHistoryItemRef.current) {
addItem(pendingHistoryItemRef.current, userMessageTimestamp);
setPendingHistoryItem(null);
}
if (loopDetectedRef.current) {
loopDetectedRef.current = false;
handleLoopDetectedEvent();
}
} catch (error: unknown) {
if (error instanceof UnauthorizedError) {
onAuthError();
} else if (!isNodeError(error) || error.name !== 'AbortError') {
addItem(
{
type: MessageType.ERROR,
text: parseAndFormatApiError(
getErrorMessage(error) || 'Unknown error',
config.getContentGeneratorConfig()?.authType,
undefined,
config.getModel(),
DEFAULT_GEMINI_FLASH_MODEL,
),
},
userMessageTimestamp,
);
}
} finally {
setIsResponding(false);
}
},
[
streamingState,
setShowHelp,
setModelSwitchedFromQuotaError,
prepareQueryForGemini,
processGeminiStreamEvents,
pendingHistoryItemRef,
addItem,
setPendingHistoryItem,
setInitError,
geminiClient,
onAuthError,
config,
startNewPrompt,
getPromptCount,
handleLoopDetectedEvent,
],
);
const handleCompletedTools = useCallback(
async (completedToolCallsFromScheduler: TrackedToolCall[]) => {
if (isResponding) {
return;
}
const completedAndReadyToSubmitTools =
completedToolCallsFromScheduler.filter(
(
tc: TrackedToolCall,
): tc is TrackedCompletedToolCall | TrackedCancelledToolCall => {
const isTerminalState =
tc.status === 'success' ||
tc.status === 'error' ||
tc.status === 'cancelled';
if (isTerminalState) {
const completedOrCancelledCall = tc as
| TrackedCompletedToolCall
| TrackedCancelledToolCall;
return (
completedOrCancelledCall.response?.responseParts !== undefined
);
}
return false;
},
);
// Finalize any client-initiated tools as soon as they are done.
const clientTools = completedAndReadyToSubmitTools.filter(
(t) => t.request.isClientInitiated,
);
if (clientTools.length > 0) {
markToolsAsSubmitted(clientTools.map((t) => t.request.callId));
}
// Identify new, successful save_memory calls that we haven't processed yet.
const newSuccessfulMemorySaves = completedAndReadyToSubmitTools.filter(
(t) =>
t.request.name === 'save_memory' &&
t.status === 'success' &&
!processedMemoryToolsRef.current.has(t.request.callId),
);
if (newSuccessfulMemorySaves.length > 0) {
// Perform the refresh only if there are new ones.
void performMemoryRefresh();
// Mark them as processed so we don't do this again on the next render.
newSuccessfulMemorySaves.forEach((t) =>
processedMemoryToolsRef.current.add(t.request.callId),
);
}
const geminiTools = completedAndReadyToSubmitTools.filter(
(t) => !t.request.isClientInitiated,
);
if (geminiTools.length === 0) {
return;
}
// If all the tools were cancelled, don't submit a response to Gemini.
const allToolsCancelled = geminiTools.every(
(tc) => tc.status === 'cancelled',
);
if (allToolsCancelled) {
if (geminiClient) {
// We need to manually add the function responses to the history
// so the model knows the tools were cancelled.
const responsesToAdd = geminiTools.flatMap(
(toolCall) => toolCall.response.responseParts,
);
const combinedParts: Part[] = [];
for (const response of responsesToAdd) {
if (Array.isArray(response)) {
combinedParts.push(...response);
} else if (typeof response === 'string') {
combinedParts.push({ text: response });
} else {
combinedParts.push(response);
}
}
geminiClient.addHistory({
role: 'user',
parts: combinedParts,
});
}
const callIdsToMarkAsSubmitted = geminiTools.map(
(toolCall) => toolCall.request.callId,
);
markToolsAsSubmitted(callIdsToMarkAsSubmitted);
return;
}
const responsesToSend: PartListUnion[] = geminiTools.map(
(toolCall) => toolCall.response.responseParts,
);
const callIdsToMarkAsSubmitted = geminiTools.map(
(toolCall) => toolCall.request.callId,
);
const prompt_ids = geminiTools.map(
(toolCall) => toolCall.request.prompt_id,
);
markToolsAsSubmitted(callIdsToMarkAsSubmitted);
// Don't continue if model was switched due to quota error
if (modelSwitchedFromQuotaError) {
return;
}
submitQuery(
mergePartListUnions(responsesToSend),
{
isContinuation: true,
},
prompt_ids[0],
);
},
[
isResponding,
submitQuery,
markToolsAsSubmitted,
geminiClient,
performMemoryRefresh,
modelSwitchedFromQuotaError,
],
);
const pendingHistoryItems = [
pendingHistoryItemRef.current,
pendingToolCallGroupDisplay,
].filter((i) => i !== undefined && i !== null);
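  // Checkpointing: when enabled, snapshot the project via git and persist each
  // pending file-modifying tool call so the change can be restored later.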
useEffect(() => {
const saveRestorableToolCalls = async () => {
if (!config.getCheckpointingEnabled()) {
return;
}
const restorableToolCalls = toolCalls.filter(
(toolCall) =>
(toolCall.request.name === 'replace' ||
toolCall.request.name === 'write_file') &&
toolCall.status === 'awaiting_approval',
);
if (restorableToolCalls.length > 0) {
const checkpointDir = config.getProjectTempDir()
? path.join(config.getProjectTempDir(), 'checkpoints')
: undefined;
if (!checkpointDir) {
return;
}
try {
await fs.mkdir(checkpointDir, { recursive: true });
} catch (error) {
if (!isNodeError(error) || error.code !== 'EEXIST') {
onDebugMessage(
`Failed to create checkpoint directory: ${getErrorMessage(error)}`,
);
return;
}
}
for (const toolCall of restorableToolCalls) {
const filePath = toolCall.request.args['file_path'] as string;
if (!filePath) {
onDebugMessage(
`Skipping restorable tool call due to missing file_path: ${toolCall.request.name}`,
);
continue;
}
try {
let commitHash = await gitService?.createFileSnapshot(
`Snapshot for ${toolCall.request.name}`,
);
if (!commitHash) {
commitHash = await gitService?.getCurrentCommitHash();
}
if (!commitHash) {
onDebugMessage(
`Failed to create snapshot for ${filePath}. Skipping restorable tool call.`,
);
continue;
}
const timestamp = new Date()
.toISOString()
.replace(/:/g, '-')
.replace(/\./g, '_');
const toolName = toolCall.request.name;
const fileName = path.basename(filePath);
const toolCallWithSnapshotFileName = `${timestamp}-${fileName}-${toolName}.json`;
const clientHistory = await geminiClient?.getHistory();
const toolCallWithSnapshotFilePath = path.join(
checkpointDir,
toolCallWithSnapshotFileName,
);
await fs.writeFile(
toolCallWithSnapshotFilePath,
JSON.stringify(
{
history,
clientHistory,
toolCall: {
name: toolCall.request.name,
args: toolCall.request.args,
},
commitHash,
filePath,
},
null,
2,
),
);
} catch (error) {
onDebugMessage(
`Failed to write restorable tool call file: ${getErrorMessage(
error,
)}`,
);
}
}
}
};
saveRestorableToolCalls();
}, [toolCalls, config, onDebugMessage, gitService, history, geminiClient]);
return {
streamingState,
submitQuery,
initError,
pendingHistoryItems,
thought,
};
};

View File

@@ -0,0 +1,237 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import {
afterEach,
beforeEach,
describe,
expect,
it,
vi,
MockedFunction,
} from 'vitest';
import { act } from 'react';
import { renderHook } from '@testing-library/react';
import { useGitBranchName } from './useGitBranchName.js';
import { fs, vol } from 'memfs'; // For mocking fs
import { EventEmitter } from 'node:events';
import { exec as mockExec, type ChildProcess } from 'node:child_process';
import type { FSWatcher } from 'memfs/lib/volume.js';
// Mock child_process
vi.mock('child_process');
// Mock fs and fs/promises
vi.mock('node:fs', async () => {
const memfs = await vi.importActual<typeof import('memfs')>('memfs');
return memfs.fs;
});
vi.mock('node:fs/promises', async () => {
const memfs = await vi.importActual<typeof import('memfs')>('memfs');
return memfs.fs.promises;
});
const CWD = '/test/project';
const GIT_HEAD_PATH = `${CWD}/.git/HEAD`;
describe('useGitBranchName', () => {
beforeEach(() => {
vol.reset(); // Reset in-memory filesystem
vol.fromJSON({
[GIT_HEAD_PATH]: 'ref: refs/heads/main',
});
vi.useFakeTimers(); // Use fake timers for async operations
});
afterEach(() => {
vi.restoreAllMocks();
vi.clearAllTimers();
});
it('should return branch name', async () => {
(mockExec as MockedFunction<typeof mockExec>).mockImplementation(
(_command, _options, callback) => {
callback?.(null, 'main\n', '');
return new EventEmitter() as ChildProcess;
},
);
const { result, rerender } = renderHook(() => useGitBranchName(CWD));
await act(async () => {
vi.runAllTimers(); // Advance timers to trigger useEffect and exec callback
rerender(); // Rerender to get the updated state
});
expect(result.current).toBe('main');
});
it('should return undefined if git command fails', async () => {
(mockExec as MockedFunction<typeof mockExec>).mockImplementation(
(_command, _options, callback) => {
callback?.(new Error('Git error'), '', 'error output');
return new EventEmitter() as ChildProcess;
},
);
const { result, rerender } = renderHook(() => useGitBranchName(CWD));
expect(result.current).toBeUndefined();
await act(async () => {
vi.runAllTimers();
rerender();
});
expect(result.current).toBeUndefined();
});
it('should return short commit hash if branch is HEAD (detached state)', async () => {
(mockExec as MockedFunction<typeof mockExec>).mockImplementation(
(command, _options, callback) => {
if (command === 'git rev-parse --abbrev-ref HEAD') {
callback?.(null, 'HEAD\n', '');
} else if (command === 'git rev-parse --short HEAD') {
callback?.(null, 'a1b2c3d\n', '');
}
return new EventEmitter() as ChildProcess;
},
);
const { result, rerender } = renderHook(() => useGitBranchName(CWD));
await act(async () => {
vi.runAllTimers();
rerender();
});
expect(result.current).toBe('a1b2c3d');
});
it('should return undefined if branch is HEAD and getting commit hash fails', async () => {
(mockExec as MockedFunction<typeof mockExec>).mockImplementation(
(command, _options, callback) => {
if (command === 'git rev-parse --abbrev-ref HEAD') {
callback?.(null, 'HEAD\n', '');
} else if (command === 'git rev-parse --short HEAD') {
callback?.(new Error('Git error'), '', 'error output');
}
return new EventEmitter() as ChildProcess;
},
);
const { result, rerender } = renderHook(() => useGitBranchName(CWD));
await act(async () => {
vi.runAllTimers();
rerender();
});
expect(result.current).toBeUndefined();
});
it('should update branch name when .git/HEAD changes', async ({ skip }) => {
skip(); // TODO: fix
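    // Note: the hook watches .git/logs/HEAD while this test writes .git/HEAD,
    // so the watcher never fires; the fixture needs updating before un-skipping.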
(mockExec as MockedFunction<typeof mockExec>).mockImplementationOnce(
(_command, _options, callback) => {
callback?.(null, 'main\n', '');
return new EventEmitter() as ChildProcess;
},
);
const { result, rerender } = renderHook(() => useGitBranchName(CWD));
await act(async () => {
vi.runAllTimers();
rerender();
});
expect(result.current).toBe('main');
// Simulate a branch change
(mockExec as MockedFunction<typeof mockExec>).mockImplementationOnce(
(_command, _options, callback) => {
callback?.(null, 'develop\n', '');
return new EventEmitter() as ChildProcess;
},
);
// Simulate file change event
// Ensure the watcher is set up before triggering the change
await act(async () => {
fs.writeFileSync(GIT_HEAD_PATH, 'ref: refs/heads/develop'); // Trigger watcher
vi.runAllTimers(); // Process timers for watcher and exec
rerender();
});
expect(result.current).toBe('develop');
});
it('should handle watcher setup error silently', async () => {
// Remove .git/HEAD to cause an error in fs.watch setup
vol.unlinkSync(GIT_HEAD_PATH);
(mockExec as MockedFunction<typeof mockExec>).mockImplementation(
(_command, _options, callback) => {
callback?.(null, 'main\n', '');
return new EventEmitter() as ChildProcess;
},
);
const { result, rerender } = renderHook(() => useGitBranchName(CWD));
await act(async () => {
vi.runAllTimers();
rerender();
});
expect(result.current).toBe('main'); // Branch name should still be fetched initially
// Try to trigger a change that would normally be caught by the watcher
(mockExec as MockedFunction<typeof mockExec>).mockImplementationOnce(
(_command, _options, callback) => {
callback?.(null, 'develop\n', '');
return new EventEmitter() as ChildProcess;
},
);
    // This write would trigger the watcher if it had been set up,
    // but since setup failed, the branch name should not update.
    // Recreate the file so writeFileSync does not throw.
vol.fromJSON({
[GIT_HEAD_PATH]: 'ref: refs/heads/develop',
});
await act(async () => {
fs.writeFileSync(GIT_HEAD_PATH, 'ref: refs/heads/develop');
vi.runAllTimers();
rerender();
});
// Branch name should not change because watcher setup failed
expect(result.current).toBe('main');
});
it('should cleanup watcher on unmount', async ({ skip }) => {
skip(); // TODO: fix
const closeMock = vi.fn();
const watchMock = vi.spyOn(fs, 'watch').mockReturnValue({
close: closeMock,
} as unknown as FSWatcher);
(mockExec as MockedFunction<typeof mockExec>).mockImplementation(
(_command, _options, callback) => {
callback?.(null, 'main\n', '');
return new EventEmitter() as ChildProcess;
},
);
const { unmount, rerender } = renderHook(() => useGitBranchName(CWD));
await act(async () => {
vi.runAllTimers();
rerender();
});
unmount();
expect(watchMock).toHaveBeenCalledWith(GIT_HEAD_PATH, expect.any(Function));
expect(closeMock).toHaveBeenCalled();
});
});

View File

@@ -0,0 +1,79 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { useState, useEffect, useCallback } from 'react';
import { exec } from 'node:child_process';
import fs from 'node:fs';
import fsPromises from 'node:fs/promises';
import path from 'path';
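/**
 * Returns the current git branch name for `cwd`, or a short commit hash when
 * HEAD is detached, and refreshes it when .git/logs/HEAD changes.
 * Usage sketch: const branch = useGitBranchName(process.cwd());
 */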
export function useGitBranchName(cwd: string): string | undefined {
const [branchName, setBranchName] = useState<string | undefined>(undefined);
const fetchBranchName = useCallback(
() =>
exec(
'git rev-parse --abbrev-ref HEAD',
{ cwd },
(error, stdout, _stderr) => {
if (error) {
setBranchName(undefined);
return;
}
const branch = stdout.toString().trim();
if (branch && branch !== 'HEAD') {
setBranchName(branch);
} else {
exec(
'git rev-parse --short HEAD',
{ cwd },
(error, stdout, _stderr) => {
if (error) {
setBranchName(undefined);
return;
}
setBranchName(stdout.toString().trim());
},
);
}
},
),
[cwd, setBranchName],
);
useEffect(() => {
fetchBranchName(); // Initial fetch
const gitLogsHeadPath = path.join(cwd, '.git', 'logs', 'HEAD');
let watcher: fs.FSWatcher | undefined;
const setupWatcher = async () => {
try {
// Check if .git/logs/HEAD exists, as it might not in a new repo or orphaned head
await fsPromises.access(gitLogsHeadPath, fs.constants.F_OK);
watcher = fs.watch(gitLogsHeadPath, (eventType: string) => {
// Changes to .git/logs/HEAD (appends) indicate HEAD has likely changed
if (eventType === 'change' || eventType === 'rename') {
// Handle rename just in case
fetchBranchName();
}
});
} catch (_watchError) {
// Silently ignore watcher errors (e.g. permissions or file not existing),
// similar to how exec errors are handled.
// The branch name will simply not update automatically.
}
};
setupWatcher();
return () => {
watcher?.close();
};
}, [cwd, fetchBranchName]);
return branchName;
}

View File

@@ -0,0 +1,202 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { describe, it, expect } from 'vitest';
import { renderHook, act } from '@testing-library/react';
import { useHistory } from './useHistoryManager.js';
import { HistoryItem } from '../types.js';
describe('useHistoryManager', () => {
it('should initialize with an empty history', () => {
const { result } = renderHook(() => useHistory());
expect(result.current.history).toEqual([]);
});
it('should add an item to history with a unique ID', () => {
const { result } = renderHook(() => useHistory());
const timestamp = Date.now();
const itemData: Omit<HistoryItem, 'id'> = {
type: 'user', // Replaced HistoryItemType.User
text: 'Hello',
};
act(() => {
result.current.addItem(itemData, timestamp);
});
expect(result.current.history).toHaveLength(1);
expect(result.current.history[0]).toEqual(
expect.objectContaining({
...itemData,
id: expect.any(Number),
}),
);
// Basic check that ID incorporates timestamp
expect(result.current.history[0].id).toBeGreaterThanOrEqual(timestamp);
});
it('should generate unique IDs for items added with the same base timestamp', () => {
const { result } = renderHook(() => useHistory());
const timestamp = Date.now();
const itemData1: Omit<HistoryItem, 'id'> = {
type: 'user', // Replaced HistoryItemType.User
text: 'First',
};
const itemData2: Omit<HistoryItem, 'id'> = {
type: 'gemini', // Replaced HistoryItemType.Gemini
text: 'Second',
};
let id1!: number;
let id2!: number;
act(() => {
id1 = result.current.addItem(itemData1, timestamp);
id2 = result.current.addItem(itemData2, timestamp);
});
expect(result.current.history).toHaveLength(2);
expect(id1).not.toEqual(id2);
expect(result.current.history[0].id).toEqual(id1);
expect(result.current.history[1].id).toEqual(id2);
// IDs should be sequential based on the counter
expect(id2).toBeGreaterThan(id1);
});
it('should update an existing history item', () => {
const { result } = renderHook(() => useHistory());
const timestamp = Date.now();
const initialItem: Omit<HistoryItem, 'id'> = {
type: 'gemini', // Replaced HistoryItemType.Gemini
text: 'Initial content',
};
let itemId!: number;
act(() => {
itemId = result.current.addItem(initialItem, timestamp);
});
const updatedText = 'Updated content';
act(() => {
result.current.updateItem(itemId, { text: updatedText });
});
expect(result.current.history).toHaveLength(1);
expect(result.current.history[0]).toEqual({
...initialItem,
id: itemId,
text: updatedText,
});
});
it('should not change history if updateHistoryItem is called with a non-existent ID', () => {
const { result } = renderHook(() => useHistory());
const timestamp = Date.now();
const itemData: Omit<HistoryItem, 'id'> = {
type: 'user', // Replaced HistoryItemType.User
text: 'Hello',
};
act(() => {
result.current.addItem(itemData, timestamp);
});
const originalHistory = [...result.current.history]; // Clone before update attempt
act(() => {
result.current.updateItem(99999, { text: 'Should not apply' }); // Non-existent ID
});
expect(result.current.history).toEqual(originalHistory);
});
it('should clear the history', () => {
const { result } = renderHook(() => useHistory());
const timestamp = Date.now();
const itemData1: Omit<HistoryItem, 'id'> = {
type: 'user', // Replaced HistoryItemType.User
text: 'First',
};
const itemData2: Omit<HistoryItem, 'id'> = {
type: 'gemini', // Replaced HistoryItemType.Gemini
text: 'Second',
};
act(() => {
result.current.addItem(itemData1, timestamp);
result.current.addItem(itemData2, timestamp);
});
expect(result.current.history).toHaveLength(2);
act(() => {
result.current.clearItems();
});
expect(result.current.history).toEqual([]);
});
it('should not add consecutive duplicate user messages', () => {
const { result } = renderHook(() => useHistory());
const timestamp = Date.now();
const itemData1: Omit<HistoryItem, 'id'> = {
type: 'user', // Replaced HistoryItemType.User
text: 'Duplicate message',
};
const itemData2: Omit<HistoryItem, 'id'> = {
type: 'user', // Replaced HistoryItemType.User
text: 'Duplicate message',
};
const itemData3: Omit<HistoryItem, 'id'> = {
type: 'gemini', // Replaced HistoryItemType.Gemini
text: 'Gemini response',
};
const itemData4: Omit<HistoryItem, 'id'> = {
type: 'user', // Replaced HistoryItemType.User
text: 'Another user message',
};
act(() => {
result.current.addItem(itemData1, timestamp);
result.current.addItem(itemData2, timestamp + 1); // Same text, different timestamp
result.current.addItem(itemData3, timestamp + 2);
result.current.addItem(itemData4, timestamp + 3);
});
expect(result.current.history).toHaveLength(3);
expect(result.current.history[0].text).toBe('Duplicate message');
expect(result.current.history[1].text).toBe('Gemini response');
expect(result.current.history[2].text).toBe('Another user message');
});
it('should add duplicate user messages if they are not consecutive', () => {
const { result } = renderHook(() => useHistory());
const timestamp = Date.now();
const itemData1: Omit<HistoryItem, 'id'> = {
type: 'user', // Replaced HistoryItemType.User
text: 'Message 1',
};
const itemData2: Omit<HistoryItem, 'id'> = {
type: 'gemini', // Replaced HistoryItemType.Gemini
text: 'Gemini response',
};
const itemData3: Omit<HistoryItem, 'id'> = {
type: 'user', // Replaced HistoryItemType.User
text: 'Message 1', // Duplicate text, but not consecutive
};
act(() => {
result.current.addItem(itemData1, timestamp);
result.current.addItem(itemData2, timestamp + 1);
result.current.addItem(itemData3, timestamp + 2);
});
expect(result.current.history).toHaveLength(3);
expect(result.current.history[0].text).toBe('Message 1');
expect(result.current.history[1].text).toBe('Gemini response');
expect(result.current.history[2].text).toBe('Message 1');
});
});

View File

@@ -0,0 +1,111 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { useState, useRef, useCallback } from 'react';
import { HistoryItem } from '../types.js';
// Type for the updater function passed to updateHistoryItem
type HistoryItemUpdater = (
prevItem: HistoryItem,
) => Partial<Omit<HistoryItem, 'id'>>;
export interface UseHistoryManagerReturn {
history: HistoryItem[];
addItem: (itemData: Omit<HistoryItem, 'id'>, baseTimestamp: number) => number; // Returns the generated ID
updateItem: (
id: number,
updates: Partial<Omit<HistoryItem, 'id'>> | HistoryItemUpdater,
) => void;
clearItems: () => void;
loadHistory: (newHistory: HistoryItem[]) => void;
}
/**
* Custom hook to manage the chat history state.
*
* Encapsulates the history array, message ID generation, adding items,
* updating items, and clearing the history.
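 *
 * Usage sketch (component wiring assumed):
 *   const { history, addItem, clearItems } = useHistory();
 *   addItem({ type: 'user', text: 'Hello' }, Date.now());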
*/
export function useHistory(): UseHistoryManagerReturn {
const [history, setHistory] = useState<HistoryItem[]>([]);
const messageIdCounterRef = useRef(0);
// Generates a unique message ID based on a timestamp and a counter.
const getNextMessageId = useCallback((baseTimestamp: number): number => {
messageIdCounterRef.current += 1;
return baseTimestamp + messageIdCounterRef.current;
}, []);
const loadHistory = useCallback((newHistory: HistoryItem[]) => {
setHistory(newHistory);
}, []);
// Adds a new item to the history state with a unique ID.
const addItem = useCallback(
(itemData: Omit<HistoryItem, 'id'>, baseTimestamp: number): number => {
const id = getNextMessageId(baseTimestamp);
const newItem: HistoryItem = { ...itemData, id } as HistoryItem;
setHistory((prevHistory) => {
if (prevHistory.length > 0) {
const lastItem = prevHistory[prevHistory.length - 1];
// Prevent adding duplicate consecutive user messages
if (
lastItem.type === 'user' &&
newItem.type === 'user' &&
lastItem.text === newItem.text
) {
return prevHistory; // Don't add the duplicate
}
}
return [...prevHistory, newItem];
});
return id; // Return the generated ID (even if not added, to keep signature)
},
[getNextMessageId],
);
/**
* Updates an existing history item identified by its ID.
* @deprecated Prefer not to update history item directly as we are currently
* rendering all history items in <Static /> for performance reasons. Only use
* if ABSOLUTELY NECESSARY
*/
const updateItem = useCallback(
(
id: number,
updates: Partial<Omit<HistoryItem, 'id'>> | HistoryItemUpdater,
) => {
setHistory((prevHistory) =>
prevHistory.map((item) => {
if (item.id === id) {
// Apply updates based on whether it's an object or a function
const newUpdates =
typeof updates === 'function' ? updates(item) : updates;
return { ...item, ...newUpdates } as HistoryItem;
}
return item;
}),
);
},
[],
);
// Clears the entire history state and resets the ID counter.
const clearItems = useCallback(() => {
setHistory([]);
messageIdCounterRef.current = 0;
}, []);
return {
history,
addItem,
updateItem,
clearItems,
loadHistory,
};
}

View File

@@ -0,0 +1,261 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { describe, it, expect, vi, beforeEach } from 'vitest';
import { act, renderHook } from '@testing-library/react';
import { useInputHistory } from './useInputHistory.js';
describe('useInputHistory', () => {
const mockOnSubmit = vi.fn();
const mockOnChange = vi.fn();
beforeEach(() => {
vi.clearAllMocks();
});
const userMessages = ['message 1', 'message 2', 'message 3'];
it('should initialize with historyIndex -1 and empty originalQueryBeforeNav', () => {
const { result } = renderHook(() =>
useInputHistory({
userMessages: [],
onSubmit: mockOnSubmit,
isActive: true,
currentQuery: '',
onChange: mockOnChange,
}),
);
// Internal state is not directly testable, but we can infer from behavior.
// Attempting to navigate down should do nothing if historyIndex is -1.
act(() => {
result.current.navigateDown();
});
expect(mockOnChange).not.toHaveBeenCalled();
});
describe('handleSubmit', () => {
it('should call onSubmit with trimmed value and reset history', () => {
const { result } = renderHook(() =>
useInputHistory({
userMessages,
onSubmit: mockOnSubmit,
isActive: true,
currentQuery: ' test query ',
onChange: mockOnChange,
}),
);
act(() => {
result.current.handleSubmit(' submit value ');
});
expect(mockOnSubmit).toHaveBeenCalledWith('submit value');
// Check if history is reset (e.g., by trying to navigate down)
act(() => {
result.current.navigateDown();
});
expect(mockOnChange).not.toHaveBeenCalled();
});
it('should not call onSubmit if value is empty after trimming', () => {
const { result } = renderHook(() =>
useInputHistory({
userMessages,
onSubmit: mockOnSubmit,
isActive: true,
currentQuery: '',
onChange: mockOnChange,
}),
);
act(() => {
result.current.handleSubmit(' ');
});
expect(mockOnSubmit).not.toHaveBeenCalled();
});
});
describe('navigateUp', () => {
it('should not navigate if isActive is false', () => {
const { result } = renderHook(() =>
useInputHistory({
userMessages,
onSubmit: mockOnSubmit,
isActive: false,
currentQuery: 'current',
onChange: mockOnChange,
}),
);
act(() => {
const navigated = result.current.navigateUp();
expect(navigated).toBe(false);
});
expect(mockOnChange).not.toHaveBeenCalled();
});
it('should not navigate if userMessages is empty', () => {
const { result } = renderHook(() =>
useInputHistory({
userMessages: [],
onSubmit: mockOnSubmit,
isActive: true,
currentQuery: 'current',
onChange: mockOnChange,
}),
);
act(() => {
const navigated = result.current.navigateUp();
expect(navigated).toBe(false);
});
expect(mockOnChange).not.toHaveBeenCalled();
});
it('should call onChange with the last message when navigating up from initial state', () => {
const currentQuery = 'current query';
const { result } = renderHook(() =>
useInputHistory({
userMessages,
onSubmit: mockOnSubmit,
isActive: true,
currentQuery,
onChange: mockOnChange,
}),
);
act(() => {
result.current.navigateUp();
});
expect(mockOnChange).toHaveBeenCalledWith(userMessages[2]); // Last message
});
it('should store currentQuery as originalQueryBeforeNav on first navigateUp', () => {
const currentQuery = 'original user input';
const { result } = renderHook(() =>
useInputHistory({
userMessages,
onSubmit: mockOnSubmit,
isActive: true,
currentQuery,
onChange: mockOnChange,
}),
);
act(() => {
result.current.navigateUp(); // historyIndex becomes 0
});
expect(mockOnChange).toHaveBeenCalledWith(userMessages[2]);
// Navigate down to restore original query
act(() => {
result.current.navigateDown(); // historyIndex becomes -1
});
expect(mockOnChange).toHaveBeenCalledWith(currentQuery);
});
it('should navigate through history messages on subsequent navigateUp calls', () => {
const { result } = renderHook(() =>
useInputHistory({
userMessages,
onSubmit: mockOnSubmit,
isActive: true,
currentQuery: '',
onChange: mockOnChange,
}),
);
act(() => {
result.current.navigateUp(); // Navigates to 'message 3'
});
expect(mockOnChange).toHaveBeenCalledWith(userMessages[2]);
act(() => {
result.current.navigateUp(); // Navigates to 'message 2'
});
expect(mockOnChange).toHaveBeenCalledWith(userMessages[1]);
act(() => {
result.current.navigateUp(); // Navigates to 'message 1'
});
expect(mockOnChange).toHaveBeenCalledWith(userMessages[0]);
});
});
describe('navigateDown', () => {
it('should not navigate if isActive is false', () => {
const initialProps = {
userMessages,
onSubmit: mockOnSubmit,
isActive: true, // Start active to allow setup navigation
currentQuery: 'current',
onChange: mockOnChange,
};
const { result, rerender } = renderHook(
(props) => useInputHistory(props),
{
initialProps,
},
);
// First navigate up to have something in history
act(() => {
result.current.navigateUp();
});
mockOnChange.mockClear(); // Clear calls from setup
// Set isActive to false for the actual test
rerender({ ...initialProps, isActive: false });
act(() => {
const navigated = result.current.navigateDown();
expect(navigated).toBe(false);
});
expect(mockOnChange).not.toHaveBeenCalled();
});
it('should not navigate if historyIndex is -1 (not in history navigation)', () => {
const { result } = renderHook(() =>
useInputHistory({
userMessages,
onSubmit: mockOnSubmit,
isActive: true,
currentQuery: 'current',
onChange: mockOnChange,
}),
);
act(() => {
const navigated = result.current.navigateDown();
expect(navigated).toBe(false);
});
expect(mockOnChange).not.toHaveBeenCalled();
});
it('should restore originalQueryBeforeNav when navigating down to initial state', () => {
const originalQuery = 'my original input';
const { result } = renderHook(() =>
useInputHistory({
userMessages,
onSubmit: mockOnSubmit,
isActive: true,
currentQuery: originalQuery,
onChange: mockOnChange,
}),
);
act(() => {
result.current.navigateUp(); // Navigates to 'message 3', stores 'originalQuery'
});
expect(mockOnChange).toHaveBeenCalledWith(userMessages[2]);
mockOnChange.mockClear();
act(() => {
result.current.navigateDown(); // Navigates back to original query
});
expect(mockOnChange).toHaveBeenCalledWith(originalQuery);
});
});
});

View File

@@ -0,0 +1,111 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { useState, useCallback } from 'react';
interface UseInputHistoryProps {
userMessages: readonly string[];
onSubmit: (value: string) => void;
isActive: boolean;
currentQuery: string; // Renamed from query to avoid confusion
onChange: (value: string) => void;
}
interface UseInputHistoryReturn {
handleSubmit: (value: string) => void;
navigateUp: () => boolean;
navigateDown: () => boolean;
}
export function useInputHistory({
userMessages,
onSubmit,
isActive,
currentQuery,
onChange,
}: UseInputHistoryProps): UseInputHistoryReturn {
const [historyIndex, setHistoryIndex] = useState<number>(-1);
const [originalQueryBeforeNav, setOriginalQueryBeforeNav] =
useState<string>('');
const resetHistoryNav = useCallback(() => {
setHistoryIndex(-1);
setOriginalQueryBeforeNav('');
}, []);
const handleSubmit = useCallback(
(value: string) => {
const trimmedValue = value.trim();
if (trimmedValue) {
onSubmit(trimmedValue); // Parent handles clearing the query
}
resetHistoryNav();
},
[onSubmit, resetHistoryNav],
);
const navigateUp = useCallback(() => {
if (!isActive) return false;
if (userMessages.length === 0) return false;
let nextIndex = historyIndex;
if (historyIndex === -1) {
// Store the current query from the parent before navigating
setOriginalQueryBeforeNav(currentQuery);
nextIndex = 0;
} else if (historyIndex < userMessages.length - 1) {
nextIndex = historyIndex + 1;
} else {
return false; // Already at the oldest message
}
if (nextIndex !== historyIndex) {
setHistoryIndex(nextIndex);
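      // historyIndex counts back from the newest entry, so index 0 maps to the
      // last element of userMessages.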
const newValue = userMessages[userMessages.length - 1 - nextIndex];
onChange(newValue);
return true;
}
return false;
}, [
historyIndex,
setHistoryIndex,
onChange,
userMessages,
isActive,
currentQuery, // Use currentQuery from props
setOriginalQueryBeforeNav,
]);
const navigateDown = useCallback(() => {
if (!isActive) return false;
if (historyIndex === -1) return false; // Not currently navigating history
const nextIndex = historyIndex - 1;
setHistoryIndex(nextIndex);
if (nextIndex === -1) {
// Reached the end of history navigation, restore original query
onChange(originalQueryBeforeNav);
} else {
const newValue = userMessages[userMessages.length - 1 - nextIndex];
onChange(newValue);
}
return true;
}, [
historyIndex,
setHistoryIndex,
originalQueryBeforeNav,
onChange,
userMessages,
isActive,
]);
return {
handleSubmit,
navigateUp,
navigateDown,
};
}

View File

@@ -0,0 +1,261 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
import { renderHook, act } from '@testing-library/react';
import { useKeypress, Key } from './useKeypress.js';
import { useStdin } from 'ink';
import { EventEmitter } from 'events';
import { PassThrough } from 'stream';
// Mock the 'ink' module to control stdin
vi.mock('ink', async (importOriginal) => {
const original = await importOriginal<typeof import('ink')>();
return {
...original,
useStdin: vi.fn(),
};
});
// Mock the 'readline' module
vi.mock('readline', () => {
const mockedReadline = {
createInterface: vi.fn().mockReturnValue({ close: vi.fn() }),
// The paste workaround involves replacing stdin with a PassThrough stream.
// This mock ensures that when emitKeypressEvents is called on that
// stream, we simulate the 'keypress' events that the hook expects.
emitKeypressEvents: vi.fn((stream: EventEmitter) => {
if (stream instanceof PassThrough) {
stream.on('data', (data) => {
const str = data.toString();
for (const char of str) {
stream.emit('keypress', null, {
name: char,
sequence: char,
ctrl: false,
meta: false,
shift: false,
});
}
});
}
}),
};
return {
...mockedReadline,
default: mockedReadline,
};
});
class MockStdin extends EventEmitter {
isTTY = true;
setRawMode = vi.fn();
on = this.addListener;
removeListener = this.removeListener;
write = vi.fn();
resume = vi.fn();
private isLegacy = false;
setLegacy(isLegacy: boolean) {
this.isLegacy = isLegacy;
}
// Helper to simulate a full paste event.
paste(text: string) {
if (this.isLegacy) {
const PASTE_START = '\x1B[200~';
const PASTE_END = '\x1B[201~';
this.emit('data', Buffer.from(`${PASTE_START}${text}${PASTE_END}`));
} else {
this.emit('keypress', null, { name: 'paste-start' });
this.emit('keypress', null, { sequence: text });
this.emit('keypress', null, { name: 'paste-end' });
}
}
// Helper to simulate the start of a paste, without the end.
startPaste(text: string) {
if (this.isLegacy) {
this.emit('data', Buffer.from('\x1B[200~' + text));
} else {
this.emit('keypress', null, { name: 'paste-start' });
this.emit('keypress', null, { sequence: text });
}
}
// Helper to simulate a single keypress event.
pressKey(key: Partial<Key>) {
if (this.isLegacy) {
this.emit('data', Buffer.from(key.sequence ?? ''));
} else {
this.emit('keypress', null, key);
}
}
}
describe('useKeypress', () => {
let stdin: MockStdin;
const mockSetRawMode = vi.fn();
const onKeypress = vi.fn();
let originalNodeVersion: string;
beforeEach(() => {
vi.clearAllMocks();
stdin = new MockStdin();
    vi.mocked(useStdin).mockReturnValue({
stdin,
setRawMode: mockSetRawMode,
});
originalNodeVersion = process.versions.node;
delete process.env['PASTE_WORKAROUND'];
});
afterEach(() => {
Object.defineProperty(process.versions, 'node', {
value: originalNodeVersion,
configurable: true,
});
});
const setNodeVersion = (version: string) => {
Object.defineProperty(process.versions, 'node', {
value: version,
configurable: true,
});
};
it('should not listen if isActive is false', () => {
renderHook(() => useKeypress(onKeypress, { isActive: false }));
act(() => stdin.pressKey({ name: 'a' }));
expect(onKeypress).not.toHaveBeenCalled();
});
it('should listen for keypress when active', () => {
renderHook(() => useKeypress(onKeypress, { isActive: true }));
const key = { name: 'a', sequence: 'a' };
act(() => stdin.pressKey(key));
expect(onKeypress).toHaveBeenCalledWith(expect.objectContaining(key));
});
it('should set and release raw mode', () => {
const { unmount } = renderHook(() =>
useKeypress(onKeypress, { isActive: true }),
);
expect(mockSetRawMode).toHaveBeenCalledWith(true);
unmount();
expect(mockSetRawMode).toHaveBeenCalledWith(false);
});
it('should stop listening after being unmounted', () => {
const { unmount } = renderHook(() =>
useKeypress(onKeypress, { isActive: true }),
);
unmount();
act(() => stdin.pressKey({ name: 'a' }));
expect(onKeypress).not.toHaveBeenCalled();
});
it('should correctly identify alt+enter (meta key)', () => {
renderHook(() => useKeypress(onKeypress, { isActive: true }));
const key = { name: 'return', sequence: '\x1B\r' };
act(() => stdin.pressKey(key));
expect(onKeypress).toHaveBeenCalledWith(
expect.objectContaining({ ...key, meta: true, paste: false }),
);
});
describe.each([
{
description: 'Modern Node (>= v20)',
setup: () => setNodeVersion('20.0.0'),
isLegacy: false,
},
{
description: 'Legacy Node (< v20)',
setup: () => setNodeVersion('18.0.0'),
isLegacy: true,
},
{
description: 'Workaround Env Var',
setup: () => {
setNodeVersion('20.0.0');
process.env['PASTE_WORKAROUND'] = 'true';
},
isLegacy: true,
},
])('Paste Handling in $description', ({ setup, isLegacy }) => {
beforeEach(() => {
setup();
stdin.setLegacy(isLegacy);
});
it('should process a paste as a single event', () => {
renderHook(() => useKeypress(onKeypress, { isActive: true }));
const pasteText = 'hello world';
act(() => stdin.paste(pasteText));
expect(onKeypress).toHaveBeenCalledTimes(1);
expect(onKeypress).toHaveBeenCalledWith({
name: '',
ctrl: false,
meta: false,
shift: false,
paste: true,
sequence: pasteText,
});
});
it('should handle keypress interspersed with pastes', () => {
renderHook(() => useKeypress(onKeypress, { isActive: true }));
const keyA = { name: 'a', sequence: 'a' };
act(() => stdin.pressKey(keyA));
expect(onKeypress).toHaveBeenCalledWith(
expect.objectContaining({ ...keyA, paste: false }),
);
const pasteText = 'pasted';
act(() => stdin.paste(pasteText));
expect(onKeypress).toHaveBeenCalledWith(
expect.objectContaining({ paste: true, sequence: pasteText }),
);
const keyB = { name: 'b', sequence: 'b' };
act(() => stdin.pressKey(keyB));
expect(onKeypress).toHaveBeenCalledWith(
expect.objectContaining({ ...keyB, paste: false }),
);
expect(onKeypress).toHaveBeenCalledTimes(3);
});
it('should emit partial paste content if unmounted mid-paste', () => {
const { unmount } = renderHook(() =>
useKeypress(onKeypress, { isActive: true }),
);
const pasteText = 'incomplete paste';
act(() => stdin.startPaste(pasteText));
// No event should be fired yet.
expect(onKeypress).not.toHaveBeenCalled();
// Unmounting should trigger the flush.
unmount();
expect(onKeypress).toHaveBeenCalledTimes(1);
expect(onKeypress).toHaveBeenCalledWith({
name: '',
ctrl: false,
meta: false,
shift: false,
paste: true,
sequence: pasteText,
});
});
});
});

View File

@@ -0,0 +1,184 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { useEffect, useRef } from 'react';
import { useStdin } from 'ink';
import readline from 'readline';
import { PassThrough } from 'stream';
export interface Key {
name: string;
ctrl: boolean;
meta: boolean;
shift: boolean;
paste: boolean;
sequence: string;
}
/**
* A hook that listens for keypress events from stdin, providing a
* key object that mirrors the one from Node's `readline` module,
* adding a 'paste' flag for characters input as part of a bracketed
* paste (when enabled).
*
* Pastes are currently sent as a single key event where the full paste
* is in the sequence field.
*
* @param onKeypress - The callback function to execute on each keypress.
* @param options - Options to control the hook's behavior.
* @param options.isActive - Whether the hook should be actively listening for input.
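 *
 * @example
 * // Illustrative sketch only; `insertText` and `submit` are assumed helpers:
 * useKeypress(
 *   (key) => {
 *     if (key.paste) {
 *       insertText(key.sequence); // a paste arrives as one event
 *     } else if (key.name === 'return' && !key.meta) {
 *       submit();
 *     }
 *   },
 *   { isActive: true },
 * );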
*/
export function useKeypress(
onKeypress: (key: Key) => void,
{ isActive }: { isActive: boolean },
) {
const { stdin, setRawMode } = useStdin();
const onKeypressRef = useRef(onKeypress);
useEffect(() => {
onKeypressRef.current = onKeypress;
}, [onKeypress]);
useEffect(() => {
if (!isActive || !stdin.isTTY) {
return;
}
setRawMode(true);
const keypressStream = new PassThrough();
let usePassthrough = false;
const nodeMajorVersion = parseInt(process.versions.node.split('.')[0], 10);
if (
nodeMajorVersion < 20 ||
process.env['PASTE_WORKAROUND'] === '1' ||
process.env['PASTE_WORKAROUND'] === 'true'
) {
      // Prior to Node 20, Node's built-in readline does not support bracketed
      // paste mode, so we detect the paste markers ourselves via a PassThrough stream.
usePassthrough = true;
}
let isPaste = false;
let pasteBuffer = Buffer.alloc(0);
const handleKeypress = (_: unknown, key: Key) => {
if (key.name === 'paste-start') {
isPaste = true;
} else if (key.name === 'paste-end') {
isPaste = false;
onKeypressRef.current({
name: '',
ctrl: false,
meta: false,
shift: false,
paste: true,
sequence: pasteBuffer.toString(),
});
pasteBuffer = Buffer.alloc(0);
} else {
if (isPaste) {
pasteBuffer = Buffer.concat([pasteBuffer, Buffer.from(key.sequence)]);
} else {
// Handle special keys
if (key.name === 'return' && key.sequence === '\x1B\r') {
key.meta = true;
}
onKeypressRef.current({ ...key, paste: isPaste });
}
}
};
const handleRawKeypress = (data: Buffer) => {
const PASTE_MODE_PREFIX = Buffer.from('\x1B[200~');
const PASTE_MODE_SUFFIX = Buffer.from('\x1B[201~');
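      // Scan the raw chunk for bracketed-paste markers, forwarding plain data
      // to the keypress stream and synthesizing paste-start/paste-end events.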
let pos = 0;
while (pos < data.length) {
const prefixPos = data.indexOf(PASTE_MODE_PREFIX, pos);
const suffixPos = data.indexOf(PASTE_MODE_SUFFIX, pos);
// Determine which marker comes first, if any.
const isPrefixNext =
prefixPos !== -1 && (suffixPos === -1 || prefixPos < suffixPos);
const isSuffixNext =
suffixPos !== -1 && (prefixPos === -1 || suffixPos < prefixPos);
        let nextMarkerPos = -1;
        if (isPrefixNext) {
          nextMarkerPos = prefixPos;
        } else if (isSuffixNext) {
          nextMarkerPos = suffixPos;
        }
        // The prefix and suffix markers have the same length.
        const markerLength = PASTE_MODE_SUFFIX.length;
if (nextMarkerPos === -1) {
keypressStream.write(data.slice(pos));
return;
}
const nextData = data.slice(pos, nextMarkerPos);
if (nextData.length > 0) {
keypressStream.write(nextData);
}
const createPasteKeyEvent = (
name: 'paste-start' | 'paste-end',
): Key => ({
name,
ctrl: false,
meta: false,
shift: false,
paste: false,
sequence: '',
});
if (isPrefixNext) {
handleKeypress(undefined, createPasteKeyEvent('paste-start'));
} else if (isSuffixNext) {
handleKeypress(undefined, createPasteKeyEvent('paste-end'));
}
pos = nextMarkerPos + markerLength;
}
};
let rl: readline.Interface;
if (usePassthrough) {
rl = readline.createInterface({ input: keypressStream });
readline.emitKeypressEvents(keypressStream, rl);
keypressStream.on('keypress', handleKeypress);
stdin.on('data', handleRawKeypress);
} else {
rl = readline.createInterface({ input: stdin });
readline.emitKeypressEvents(stdin, rl);
stdin.on('keypress', handleKeypress);
}
return () => {
if (usePassthrough) {
keypressStream.removeListener('keypress', handleKeypress);
stdin.removeListener('data', handleRawKeypress);
} else {
stdin.removeListener('keypress', handleKeypress);
}
rl.close();
setRawMode(false);
// If we are in the middle of a paste, send what we have.
if (isPaste) {
onKeypressRef.current({
name: '',
ctrl: false,
meta: false,
shift: false,
paste: true,
sequence: pasteBuffer.toString(),
});
pasteBuffer = Buffer.alloc(0);
}
};
}, [isActive, stdin, setRawMode]);
}

View File

@@ -0,0 +1,139 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
import { renderHook, act } from '@testing-library/react';
import { useLoadingIndicator } from './useLoadingIndicator.js';
import { StreamingState } from '../types.js';
import {
WITTY_LOADING_PHRASES,
PHRASE_CHANGE_INTERVAL_MS,
} from './usePhraseCycler.js';
describe('useLoadingIndicator', () => {
beforeEach(() => {
vi.useFakeTimers();
});
  afterEach(() => {
    act(() => {
      vi.runOnlyPendingTimers();
    });
    vi.useRealTimers(); // Restore real timers after each test
  });
it('should initialize with default values when Idle', () => {
const { result } = renderHook(() =>
useLoadingIndicator(StreamingState.Idle),
);
expect(result.current.elapsedTime).toBe(0);
expect(result.current.currentLoadingPhrase).toBe(WITTY_LOADING_PHRASES[0]);
});
it('should reflect values when Responding', async () => {
const { result } = renderHook(() =>
useLoadingIndicator(StreamingState.Responding),
);
// Initial state before timers advance
expect(result.current.elapsedTime).toBe(0);
expect(WITTY_LOADING_PHRASES).toContain(
result.current.currentLoadingPhrase,
);
await act(async () => {
await vi.advanceTimersByTimeAsync(PHRASE_CHANGE_INTERVAL_MS + 1);
});
// Phrase should cycle if PHRASE_CHANGE_INTERVAL_MS has passed
expect(WITTY_LOADING_PHRASES).toContain(
result.current.currentLoadingPhrase,
);
});
it('should show waiting phrase and retain elapsedTime when WaitingForConfirmation', async () => {
const { result, rerender } = renderHook(
({ streamingState }) => useLoadingIndicator(streamingState),
{ initialProps: { streamingState: StreamingState.Responding } },
);
await act(async () => {
await vi.advanceTimersByTimeAsync(60000);
});
expect(result.current.elapsedTime).toBe(60);
act(() => {
rerender({ streamingState: StreamingState.WaitingForConfirmation });
});
expect(result.current.currentLoadingPhrase).toBe(
'Waiting for user confirmation...',
);
expect(result.current.elapsedTime).toBe(60); // Elapsed time should be retained
// Timer should not advance further
await act(async () => {
await vi.advanceTimersByTimeAsync(2000);
});
expect(result.current.elapsedTime).toBe(60);
});
it('should reset elapsedTime and use a witty phrase when transitioning from WaitingForConfirmation to Responding', async () => {
const { result, rerender } = renderHook(
({ streamingState }) => useLoadingIndicator(streamingState),
{ initialProps: { streamingState: StreamingState.Responding } },
);
await act(async () => {
await vi.advanceTimersByTimeAsync(5000); // 5s
});
expect(result.current.elapsedTime).toBe(5);
act(() => {
rerender({ streamingState: StreamingState.WaitingForConfirmation });
});
expect(result.current.elapsedTime).toBe(5);
expect(result.current.currentLoadingPhrase).toBe(
'Waiting for user confirmation...',
);
act(() => {
rerender({ streamingState: StreamingState.Responding });
});
expect(result.current.elapsedTime).toBe(0); // Should reset
expect(WITTY_LOADING_PHRASES).toContain(
result.current.currentLoadingPhrase,
);
await act(async () => {
await vi.advanceTimersByTimeAsync(1000);
});
expect(result.current.elapsedTime).toBe(1);
});
it('should reset timer and phrase when streamingState changes from Responding to Idle', async () => {
const { result, rerender } = renderHook(
({ streamingState }) => useLoadingIndicator(streamingState),
{ initialProps: { streamingState: StreamingState.Responding } },
);
await act(async () => {
await vi.advanceTimersByTimeAsync(10000); // 10s
});
expect(result.current.elapsedTime).toBe(10);
act(() => {
rerender({ streamingState: StreamingState.Idle });
});
expect(result.current.elapsedTime).toBe(0);
expect(result.current.currentLoadingPhrase).toBe(WITTY_LOADING_PHRASES[0]);
// Timer should not advance
await act(async () => {
await vi.advanceTimersByTimeAsync(2000);
});
expect(result.current.elapsedTime).toBe(0);
});
});

View File

@@ -0,0 +1,57 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { StreamingState } from '../types.js';
import { useTimer } from './useTimer.js';
import { usePhraseCycler } from './usePhraseCycler.js';
import { useState, useEffect, useRef } from 'react';
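// Combines the elapsed-time timer with the phrase cycler, retaining the elapsed
// time while waiting for user confirmation and resetting it on new activity.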
export const useLoadingIndicator = (streamingState: StreamingState) => {
const [timerResetKey, setTimerResetKey] = useState(0);
const isTimerActive = streamingState === StreamingState.Responding;
const elapsedTimeFromTimer = useTimer(isTimerActive, timerResetKey);
const isPhraseCyclingActive = streamingState === StreamingState.Responding;
const isWaiting = streamingState === StreamingState.WaitingForConfirmation;
const currentLoadingPhrase = usePhraseCycler(
isPhraseCyclingActive,
isWaiting,
);
const [retainedElapsedTime, setRetainedElapsedTime] = useState(0);
const prevStreamingStateRef = useRef<StreamingState | null>(null);
useEffect(() => {
if (
prevStreamingStateRef.current === StreamingState.WaitingForConfirmation &&
streamingState === StreamingState.Responding
) {
setTimerResetKey((prevKey) => prevKey + 1);
setRetainedElapsedTime(0); // Clear retained time when going back to responding
} else if (
streamingState === StreamingState.Idle &&
prevStreamingStateRef.current === StreamingState.Responding
) {
setTimerResetKey((prevKey) => prevKey + 1); // Reset timer when becoming idle from responding
setRetainedElapsedTime(0);
} else if (streamingState === StreamingState.WaitingForConfirmation) {
// Capture the time when entering WaitingForConfirmation
// elapsedTimeFromTimer will hold the last value from when isTimerActive was true.
setRetainedElapsedTime(elapsedTimeFromTimer);
}
prevStreamingStateRef.current = streamingState;
}, [streamingState, elapsedTimeFromTimer]);
return {
elapsedTime:
streamingState === StreamingState.WaitingForConfirmation
? retainedElapsedTime
: elapsedTimeFromTimer,
currentLoadingPhrase,
};
};

View File

@@ -0,0 +1,32 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { useState, useEffect } from 'react';
import { sessionId, Logger } from '@qwen/qwen-code-core';
/**
* Hook to manage the logger instance.
*/
export const useLogger = () => {
const [logger, setLogger] = useState<Logger | null>(null);
useEffect(() => {
const newLogger = new Logger(sessionId);
/**
     * Start async initialization without awaiting it. Awaiting here slows down
     * the time from launch to the gemini-cli prompt, and it is better to miss
     * saving some messages than to have the CLI hang waiting for the logger to load.
*/
newLogger
.initialize()
.then(() => {
setLogger(newLogger);
})
.catch(() => {});
}, []);
return logger;
};

View File

@@ -0,0 +1,145 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
import { renderHook, act } from '@testing-library/react';
import {
usePhraseCycler,
WITTY_LOADING_PHRASES,
PHRASE_CHANGE_INTERVAL_MS,
} from './usePhraseCycler.js';
describe('usePhraseCycler', () => {
beforeEach(() => {
vi.useFakeTimers();
});
afterEach(() => {
vi.restoreAllMocks();
});
it('should initialize with the first witty phrase when not active and not waiting', () => {
const { result } = renderHook(() => usePhraseCycler(false, false));
expect(result.current).toBe(WITTY_LOADING_PHRASES[0]);
});
it('should show "Waiting for user confirmation..." when isWaiting is true', () => {
const { result, rerender } = renderHook(
({ isActive, isWaiting }) => usePhraseCycler(isActive, isWaiting),
{ initialProps: { isActive: true, isWaiting: false } },
);
rerender({ isActive: true, isWaiting: true });
expect(result.current).toBe('Waiting for user confirmation...');
});
it('should not cycle phrases if isActive is false and not waiting', () => {
const { result } = renderHook(() => usePhraseCycler(false, false));
act(() => {
vi.advanceTimersByTime(PHRASE_CHANGE_INTERVAL_MS * 2);
});
expect(result.current).toBe(WITTY_LOADING_PHRASES[0]);
});
it('should cycle through witty phrases when isActive is true and not waiting', () => {
const { result } = renderHook(() => usePhraseCycler(true, false));
// Initial phrase should be one of the witty phrases
expect(WITTY_LOADING_PHRASES).toContain(result.current);
const _initialPhrase = result.current;
act(() => {
vi.advanceTimersByTime(PHRASE_CHANGE_INTERVAL_MS);
});
// Phrase should change and be one of the witty phrases
expect(WITTY_LOADING_PHRASES).toContain(result.current);
const _secondPhrase = result.current;
act(() => {
vi.advanceTimersByTime(PHRASE_CHANGE_INTERVAL_MS);
});
expect(WITTY_LOADING_PHRASES).toContain(result.current);
});
it('should reset to a witty phrase when isActive becomes true after being false (and not waiting)', () => {
// Ensure there are at least two phrases for this test to be meaningful.
if (WITTY_LOADING_PHRASES.length < 2) {
return;
}
// Mock Math.random to make the test deterministic.
let callCount = 0;
vi.spyOn(Math, 'random').mockImplementation(() => {
// Cycle through 0, 1, 0, 1, ...
const val = callCount % 2;
callCount++;
return val / WITTY_LOADING_PHRASES.length;
});
const { result, rerender } = renderHook(
({ isActive, isWaiting }) => usePhraseCycler(isActive, isWaiting),
{ initialProps: { isActive: false, isWaiting: false } },
);
// Activate
rerender({ isActive: true, isWaiting: false });
const firstActivePhrase = result.current;
expect(WITTY_LOADING_PHRASES).toContain(firstActivePhrase);
// With our mock, this should be the first phrase.
expect(firstActivePhrase).toBe(WITTY_LOADING_PHRASES[0]);
act(() => {
vi.advanceTimersByTime(PHRASE_CHANGE_INTERVAL_MS);
});
// Phrase should change to the second phrase.
expect(result.current).not.toBe(firstActivePhrase);
expect(result.current).toBe(WITTY_LOADING_PHRASES[1]);
// Set to inactive - should reset to the default initial phrase
rerender({ isActive: false, isWaiting: false });
expect(result.current).toBe(WITTY_LOADING_PHRASES[0]);
// Set back to active - should pick a random witty phrase (which our mock controls)
act(() => {
rerender({ isActive: true, isWaiting: false });
});
// The random mock will now return 0, so it should be the first phrase again.
expect(result.current).toBe(WITTY_LOADING_PHRASES[0]);
});
it('should clear phrase interval on unmount when active', () => {
const { unmount } = renderHook(() => usePhraseCycler(true, false));
const clearIntervalSpy = vi.spyOn(global, 'clearInterval');
unmount();
expect(clearIntervalSpy).toHaveBeenCalledOnce();
});
it('should reset to a witty phrase when transitioning from waiting to active', () => {
const { result, rerender } = renderHook(
({ isActive, isWaiting }) => usePhraseCycler(isActive, isWaiting),
{ initialProps: { isActive: true, isWaiting: false } },
);
const _initialPhrase = result.current;
expect(WITTY_LOADING_PHRASES).toContain(_initialPhrase);
// Cycle to a different phrase (potentially)
act(() => {
vi.advanceTimersByTime(PHRASE_CHANGE_INTERVAL_MS);
});
    // With random selection the phrase may or may not have changed here, so we
    // only assert below that it is still one of the witty phrases.
expect(WITTY_LOADING_PHRASES).toContain(result.current);
// Go to waiting state
rerender({ isActive: false, isWaiting: true });
expect(result.current).toBe('Waiting for user confirmation...');
// Go back to active cycling - should pick a random witty phrase
rerender({ isActive: true, isWaiting: false });
expect(WITTY_LOADING_PHRASES).toContain(result.current);
});
});

View File

@@ -0,0 +1,200 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { useState, useEffect, useRef } from 'react';
export const WITTY_LOADING_PHRASES = [
"I'm Feeling Lucky",
'Shipping awesomeness... ',
'Painting the serifs back on...',
'Navigating the slime mold...',
'Consulting the digital spirits...',
'Reticulating splines...',
'Warming up the AI hamsters...',
'Asking the magic conch shell...',
'Generating witty retort...',
'Polishing the algorithms...',
"Don't rush perfection (or my code)...",
'Brewing fresh bytes...',
'Counting electrons...',
'Engaging cognitive processors...',
'Checking for syntax errors in the universe...',
'One moment, optimizing humor...',
'Shuffling punchlines...',
'Untangling neural nets...',
'Compiling brilliance...',
'Loading wit.exe...',
'Summoning the cloud of wisdom...',
'Preparing a witty response...',
"Just a sec, I'm debugging reality...",
'Confuzzling the options...',
'Tuning the cosmic frequencies...',
'Crafting a response worthy of your patience...',
'Compiling the 1s and 0s...',
'Resolving dependencies... and existential crises...',
'Defragmenting memories... both RAM and personal...',
'Rebooting the humor module...',
'Caching the essentials (mostly cat memes)...',
'Running sudo make me a sandwich...',
'Optimizing for ludicrous speed',
"Swapping bits... don't tell the bytes...",
'Garbage collecting... be right back...',
'Assembling the interwebs...',
'Converting coffee into code...',
'Pushing to production (and hoping for the best)...',
'Updating the syntax for reality...',
'Rewiring the synapses...',
'Looking for a misplaced semicolon...',
"Greasin' the cogs of the machine...",
'Pre-heating the servers...',
'Calibrating the flux capacitor...',
'Engaging the improbability drive...',
'Channeling the Force...',
'Aligning the stars for optimal response...',
'So say we all...',
'Loading the next great idea...',
"Just a moment, I'm in the zone...",
'Preparing to dazzle you with brilliance...',
"Just a tick, I'm polishing my wit...",
"Hold tight, I'm crafting a masterpiece...",
"Just a jiffy, I'm debugging the universe...",
"Just a moment, I'm aligning the pixels...",
"Just a sec, I'm optimizing the humor...",
"Just a moment, I'm tuning the algorithms...",
'Warp speed engaged...',
'Mining for more Dilithium crystals...',
"I'm Giving Her all she's got Captain!",
"Don't panic...",
'Following the white rabbit...',
'The truth is in here... somewhere...',
'Blowing on the cartridge...',
'Looking for the princess in another castle...',
'Loading... Do a barrel roll!',
'Waiting for the respawn...',
'Finishing the Kessel Run in less than 12 parsecs...',
"The cake is not a lie, it's just still loading...",
'Fiddling with the character creation screen...',
"Just a moment, I'm finding the right meme...",
"Pressing 'A' to continue...",
'Herding digital cats...',
'Polishing the pixels...',
'Finding a suitable loading screen pun...',
'Distracting you with this witty phrase...',
'Almost there... probably...',
'Our hamsters are working as fast as they can...',
'Giving Cloudy a pat on the head...',
'Petting the cat...',
'Rickrolling my boss...',
'Never gonna give you up, never gonna let you down...',
'Slapping the bass...',
'Tasting the snozberries...',
"I'm going the distance, I'm going for speed...",
'Is this the real life? Is this just fantasy?...',
"I've got a good feeling about this...",
'Poking the bear...',
'Doing research on the latest memes...',
'Figuring out how to make this more witty...',
'Hmmm... let me think...',
'What do you call a fish with no eyes? A fsh...',
'Why did the computer go to therapy? It had too many bytes...',
"Why don't programmers like nature? It has too many bugs...",
'Why do programmers prefer dark mode? Because light attracts bugs...',
'Why did the developer go broke? Because he used up all his cache...',
"What can you do with a broken pencil? Nothing, it's pointless...",
'Applying percussive maintenance...',
'Searching for the correct USB orientation...',
'Ensuring the magic smoke stays inside the wires...',
'Rewriting in Rust for no particular reason...',
'Trying to exit Vim...',
'Spinning up the hamster wheel...',
"That's not a bug, it's an undocumented feature...",
'Engage.',
"I'll be back... with an answer.",
'My other process is a TARDIS...',
'Communing with the machine spirit...',
'Letting the thoughts marinate...',
'Just remembered where I put my keys...',
'Pondering the orb...',
"I've seen things you people wouldn't believe... like a user who reads loading messages.",
'Initiating thoughtful gaze...',
"What's a computer's favorite snack? Microchips.",
"Why do Java developers wear glasses? Because they don't C#.",
'Charging the laser... pew pew!',
'Dividing by zero... just kidding!',
'Looking for an adult superviso... I mean, processing.',
'Making it go beep boop.',
'Buffering... because even AIs need a moment.',
'Entangling quantum particles for a faster response...',
'Polishing the chrome... on the algorithms.',
'Are you not entertained? (Working on it!)',
'Summoning the code gremlins... to help, of course.',
'Just waiting for the dial-up tone to finish...',
'Recalibrating the humor-o-meter.',
'My other loading screen is even funnier.',
"Pretty sure there's a cat walking on the keyboard somewhere...",
'Enhancing... Enhancing... Still loading.',
"It's not a bug, it's a feature... of this loading screen.",
'Have you tried turning it off and on again? (The loading screen, not me.)',
];
export const PHRASE_CHANGE_INTERVAL_MS = 15000;
/**
* Custom hook to manage cycling through loading phrases.
* @param isActive Whether the phrase cycling should be active.
* @param isWaiting Whether to show a specific waiting phrase.
* @returns The current loading phrase.
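 *
 * @example
 * // Sketch of a typical call site (mirrors useLoadingIndicator):
 * const phrase = usePhraseCycler(
 *   streamingState === StreamingState.Responding,
 *   streamingState === StreamingState.WaitingForConfirmation,
 * );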
*/
export const usePhraseCycler = (isActive: boolean, isWaiting: boolean) => {
const [currentLoadingPhrase, setCurrentLoadingPhrase] = useState(
WITTY_LOADING_PHRASES[0],
);
const phraseIntervalRef = useRef<NodeJS.Timeout | null>(null);
useEffect(() => {
if (isWaiting) {
setCurrentLoadingPhrase('Waiting for user confirmation...');
if (phraseIntervalRef.current) {
clearInterval(phraseIntervalRef.current);
phraseIntervalRef.current = null;
}
} else if (isActive) {
if (phraseIntervalRef.current) {
clearInterval(phraseIntervalRef.current);
}
// Select an initial random phrase
const initialRandomIndex = Math.floor(
Math.random() * WITTY_LOADING_PHRASES.length,
);
setCurrentLoadingPhrase(WITTY_LOADING_PHRASES[initialRandomIndex]);
phraseIntervalRef.current = setInterval(() => {
// Select a new random phrase
const randomIndex = Math.floor(
Math.random() * WITTY_LOADING_PHRASES.length,
);
setCurrentLoadingPhrase(WITTY_LOADING_PHRASES[randomIndex]);
}, PHRASE_CHANGE_INTERVAL_MS);
} else {
// Idle or other states, clear the phrase interval
// and reset to the first phrase for next active state.
if (phraseIntervalRef.current) {
clearInterval(phraseIntervalRef.current);
phraseIntervalRef.current = null;
}
setCurrentLoadingPhrase(WITTY_LOADING_PHRASES[0]);
}
return () => {
if (phraseIntervalRef.current) {
clearInterval(phraseIntervalRef.current);
phraseIntervalRef.current = null;
}
};
}, [isActive, isWaiting]);
return currentLoadingPhrase;
};

View File

@@ -0,0 +1,135 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { GaxiosError } from 'gaxios';
import { useState, useEffect, useCallback } from 'react';
import { Config, CodeAssistServer, UserTierId } from '@qwen/qwen-code-core';
export interface PrivacyState {
isLoading: boolean;
error?: string;
isFreeTier?: boolean;
dataCollectionOptIn?: boolean;
}
export const usePrivacySettings = (config: Config) => {
const [privacyState, setPrivacyState] = useState<PrivacyState>({
isLoading: true,
});
useEffect(() => {
const fetchInitialState = async () => {
setPrivacyState({
isLoading: true,
});
try {
const server = getCodeAssistServer(config);
const tier = await getTier(server);
if (tier !== UserTierId.FREE) {
          // No need to fetch the opt-in setting here: data collection for
          // non-free tiers is handled by a separate mechanism.
setPrivacyState({
isLoading: false,
isFreeTier: false,
});
return;
}
const optIn = await getRemoteDataCollectionOptIn(server);
setPrivacyState({
isLoading: false,
isFreeTier: true,
dataCollectionOptIn: optIn,
});
} catch (e) {
setPrivacyState({
isLoading: false,
error: e instanceof Error ? e.message : String(e),
});
}
};
fetchInitialState();
}, [config]);
const updateDataCollectionOptIn = useCallback(
async (optIn: boolean) => {
try {
const server = getCodeAssistServer(config);
const updatedOptIn = await setRemoteDataCollectionOptIn(server, optIn);
setPrivacyState({
isLoading: false,
isFreeTier: true,
dataCollectionOptIn: updatedOptIn,
});
} catch (e) {
setPrivacyState({
isLoading: false,
error: e instanceof Error ? e.message : String(e),
});
}
},
[config],
);
return {
privacyState,
updateDataCollectionOptIn,
};
};
function getCodeAssistServer(config: Config): CodeAssistServer {
const server = config.getGeminiClient().getContentGenerator();
// Neither of these cases should ever happen.
if (!(server instanceof CodeAssistServer)) {
throw new Error('Oauth not being used');
} else if (!server.projectId) {
    throw new Error('CodeAssistServer has no project ID');
}
return server;
}
async function getTier(server: CodeAssistServer): Promise<UserTierId> {
const loadRes = await server.loadCodeAssist({
cloudaicompanionProject: server.projectId,
metadata: {
ideType: 'IDE_UNSPECIFIED',
platform: 'PLATFORM_UNSPECIFIED',
pluginType: 'GEMINI',
duetProject: server.projectId,
},
});
if (!loadRes.currentTier) {
throw new Error('User does not have a current tier');
}
return loadRes.currentTier.id;
}
async function getRemoteDataCollectionOptIn(
server: CodeAssistServer,
): Promise<boolean> {
try {
const resp = await server.getCodeAssistGlobalUserSetting();
return resp.freeTierDataCollectionOptin;
} catch (e) {
if (e instanceof GaxiosError) {
if (e.response?.status === 404) {
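        // A 404 means no setting has been stored yet; treat it as opted in.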
return true;
}
}
throw e;
}
}
async function setRemoteDataCollectionOptIn(
server: CodeAssistServer,
optIn: boolean,
): Promise<boolean> {
const resp = await server.setCodeAssistGlobalUserSetting({
cloudaicompanionProject: server.projectId,
freeTierDataCollectionOptin: optIn,
});
return resp.freeTierDataCollectionOptin;
}

View File

@@ -0,0 +1,312 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import {
Config,
ToolCallRequestInfo,
ExecutingToolCall,
ScheduledToolCall,
ValidatingToolCall,
WaitingToolCall,
CompletedToolCall,
CancelledToolCall,
CoreToolScheduler,
OutputUpdateHandler,
AllToolCallsCompleteHandler,
ToolCallsUpdateHandler,
Tool,
ToolCall,
Status as CoreStatus,
EditorType,
} from '@qwen/qwen-code-core';
import { useCallback, useState, useMemo } from 'react';
import {
HistoryItemToolGroup,
IndividualToolCallDisplay,
ToolCallStatus,
HistoryItemWithoutId,
} from '../types.js';
export type ScheduleFn = (
request: ToolCallRequestInfo | ToolCallRequestInfo[],
signal: AbortSignal,
) => void;
export type MarkToolsAsSubmittedFn = (callIds: string[]) => void;
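// Each core tool call state is extended with a flag recording whether its
// response has already been submitted back to the model.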
export type TrackedScheduledToolCall = ScheduledToolCall & {
responseSubmittedToGemini?: boolean;
};
export type TrackedValidatingToolCall = ValidatingToolCall & {
responseSubmittedToGemini?: boolean;
};
export type TrackedWaitingToolCall = WaitingToolCall & {
responseSubmittedToGemini?: boolean;
};
export type TrackedExecutingToolCall = ExecutingToolCall & {
responseSubmittedToGemini?: boolean;
};
export type TrackedCompletedToolCall = CompletedToolCall & {
responseSubmittedToGemini?: boolean;
};
export type TrackedCancelledToolCall = CancelledToolCall & {
responseSubmittedToGemini?: boolean;
};
export type TrackedToolCall =
| TrackedScheduledToolCall
| TrackedValidatingToolCall
| TrackedWaitingToolCall
| TrackedExecutingToolCall
| TrackedCompletedToolCall
| TrackedCancelledToolCall;
export function useReactToolScheduler(
onComplete: (tools: CompletedToolCall[]) => void,
config: Config,
setPendingHistoryItem: React.Dispatch<
React.SetStateAction<HistoryItemWithoutId | null>
>,
getPreferredEditor: () => EditorType | undefined,
): [TrackedToolCall[], ScheduleFn, MarkToolsAsSubmittedFn] {
const [toolCallsForDisplay, setToolCallsForDisplay] = useState<
TrackedToolCall[]
>([]);
const outputUpdateHandler: OutputUpdateHandler = useCallback(
(toolCallId, outputChunk) => {
setPendingHistoryItem((prevItem) => {
if (prevItem?.type === 'tool_group') {
return {
...prevItem,
tools: prevItem.tools.map((toolDisplay) =>
toolDisplay.callId === toolCallId &&
toolDisplay.status === ToolCallStatus.Executing
? { ...toolDisplay, resultDisplay: outputChunk }
: toolDisplay,
),
};
}
return prevItem;
});
setToolCallsForDisplay((prevCalls) =>
prevCalls.map((tc) => {
if (tc.request.callId === toolCallId && tc.status === 'executing') {
const executingTc = tc as TrackedExecutingToolCall;
return { ...executingTc, liveOutput: outputChunk };
}
return tc;
}),
);
},
[setPendingHistoryItem],
);
const allToolCallsCompleteHandler: AllToolCallsCompleteHandler = useCallback(
(completedToolCalls) => {
onComplete(completedToolCalls);
},
[onComplete],
);
const toolCallsUpdateHandler: ToolCallsUpdateHandler = useCallback(
(updatedCoreToolCalls: ToolCall[]) => {
setToolCallsForDisplay((prevTrackedCalls) =>
updatedCoreToolCalls.map((coreTc) => {
const existingTrackedCall = prevTrackedCalls.find(
(ptc) => ptc.request.callId === coreTc.request.callId,
);
const newTrackedCall: TrackedToolCall = {
...coreTc,
responseSubmittedToGemini:
existingTrackedCall?.responseSubmittedToGemini ?? false,
} as TrackedToolCall;
return newTrackedCall;
}),
);
},
[setToolCallsForDisplay],
);
const scheduler = useMemo(
() =>
new CoreToolScheduler({
toolRegistry: config.getToolRegistry(),
outputUpdateHandler,
onAllToolCallsComplete: allToolCallsCompleteHandler,
onToolCallsUpdate: toolCallsUpdateHandler,
approvalMode: config.getApprovalMode(),
getPreferredEditor,
config,
}),
[
config,
outputUpdateHandler,
allToolCallsCompleteHandler,
toolCallsUpdateHandler,
getPreferredEditor,
],
);
const schedule: ScheduleFn = useCallback(
(
request: ToolCallRequestInfo | ToolCallRequestInfo[],
signal: AbortSignal,
) => {
scheduler.schedule(request, signal);
},
[scheduler],
);
const markToolsAsSubmitted: MarkToolsAsSubmittedFn = useCallback(
(callIdsToMark: string[]) => {
setToolCallsForDisplay((prevCalls) =>
prevCalls.map((tc) =>
callIdsToMark.includes(tc.request.callId)
? { ...tc, responseSubmittedToGemini: true }
: tc,
),
);
},
[],
);
return [toolCallsForDisplay, schedule, markToolsAsSubmitted];
}
/**
* Maps a CoreToolScheduler status to the UI's ToolCallStatus enum.
*/
function mapCoreStatusToDisplayStatus(coreStatus: CoreStatus): ToolCallStatus {
switch (coreStatus) {
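    // 'validating' has no dedicated display status, so it is surfaced as Executing.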
case 'validating':
return ToolCallStatus.Executing;
case 'awaiting_approval':
return ToolCallStatus.Confirming;
case 'executing':
return ToolCallStatus.Executing;
case 'success':
return ToolCallStatus.Success;
case 'cancelled':
return ToolCallStatus.Canceled;
case 'error':
return ToolCallStatus.Error;
case 'scheduled':
return ToolCallStatus.Pending;
default: {
const exhaustiveCheck: never = coreStatus;
console.warn(`Unknown core status encountered: ${exhaustiveCheck}`);
return ToolCallStatus.Error;
}
}
}
/**
* Transforms `TrackedToolCall` objects into `HistoryItemToolGroup` objects for UI display.
*/
export function mapToDisplay(
toolOrTools: TrackedToolCall[] | TrackedToolCall,
): HistoryItemToolGroup {
const toolCalls = Array.isArray(toolOrTools) ? toolOrTools : [toolOrTools];
const toolDisplays = toolCalls.map(
(trackedCall): IndividualToolCallDisplay => {
let displayName = trackedCall.request.name;
let description = '';
let renderOutputAsMarkdown = false;
const currentToolInstance =
'tool' in trackedCall && trackedCall.tool
? (trackedCall as { tool: Tool }).tool
: undefined;
if (currentToolInstance) {
displayName = currentToolInstance.displayName;
description = currentToolInstance.getDescription(
trackedCall.request.args,
);
renderOutputAsMarkdown = currentToolInstance.isOutputMarkdown;
} else if ('request' in trackedCall && 'args' in trackedCall.request) {
description = JSON.stringify(trackedCall.request.args);
}
const baseDisplayProperties: Omit<
IndividualToolCallDisplay,
'status' | 'resultDisplay' | 'confirmationDetails'
> = {
callId: trackedCall.request.callId,
name: displayName,
description,
renderOutputAsMarkdown,
};
switch (trackedCall.status) {
case 'success':
return {
...baseDisplayProperties,
status: mapCoreStatusToDisplayStatus(trackedCall.status),
resultDisplay: trackedCall.response.resultDisplay,
confirmationDetails: undefined,
};
case 'error':
return {
...baseDisplayProperties,
name: currentToolInstance?.displayName ?? trackedCall.request.name,
status: mapCoreStatusToDisplayStatus(trackedCall.status),
resultDisplay: trackedCall.response.resultDisplay,
confirmationDetails: undefined,
};
case 'cancelled':
return {
...baseDisplayProperties,
status: mapCoreStatusToDisplayStatus(trackedCall.status),
resultDisplay: trackedCall.response.resultDisplay,
confirmationDetails: undefined,
};
case 'awaiting_approval':
return {
...baseDisplayProperties,
status: mapCoreStatusToDisplayStatus(trackedCall.status),
resultDisplay: undefined,
confirmationDetails: trackedCall.confirmationDetails,
};
case 'executing':
return {
...baseDisplayProperties,
status: mapCoreStatusToDisplayStatus(trackedCall.status),
resultDisplay:
(trackedCall as TrackedExecutingToolCall).liveOutput ?? undefined,
confirmationDetails: undefined,
};
case 'validating': // Fallthrough
case 'scheduled':
return {
...baseDisplayProperties,
status: mapCoreStatusToDisplayStatus(trackedCall.status),
resultDisplay: undefined,
confirmationDetails: undefined,
};
default: {
const exhaustiveCheck: never = trackedCall;
return {
callId: (exhaustiveCheck as TrackedToolCall).request.callId,
name: 'Unknown Tool',
description: 'Encountered an unknown tool call state.',
status: ToolCallStatus.Error,
resultDisplay: 'Unknown tool call state',
confirmationDetails: undefined,
renderOutputAsMarkdown: false,
};
}
}
},
);
return {
type: 'tool_group',
tools: toolDisplays,
};
}

View File

@@ -0,0 +1,7 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
export const REFRESH_MEMORY_COMMAND_NAME = '/refreshmemory';

View File

@@ -0,0 +1,219 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { describe, it, expect, vi, beforeEach } from 'vitest';
import { renderHook, act, waitFor } from '@testing-library/react';
import { useShellHistory } from './useShellHistory.js';
import * as fs from 'fs/promises';
import * as path from 'path';
import * as os from 'os';
import * as crypto from 'crypto';
vi.mock('fs/promises');
vi.mock('os');
vi.mock('crypto');
const MOCKED_PROJECT_ROOT = '/test/project';
const MOCKED_HOME_DIR = '/test/home';
const MOCKED_PROJECT_HASH = 'mocked_hash';
const MOCKED_HISTORY_DIR = path.join(
MOCKED_HOME_DIR,
'.qwen',
'tmp',
MOCKED_PROJECT_HASH,
);
const MOCKED_HISTORY_FILE = path.join(MOCKED_HISTORY_DIR, 'shell_history');
describe('useShellHistory', () => {
const mockedFs = vi.mocked(fs);
const mockedOs = vi.mocked(os);
const mockedCrypto = vi.mocked(crypto);
beforeEach(() => {
vi.resetAllMocks();
mockedFs.readFile.mockResolvedValue('');
mockedFs.writeFile.mockResolvedValue(undefined);
mockedFs.mkdir.mockResolvedValue(undefined);
mockedOs.homedir.mockReturnValue(MOCKED_HOME_DIR);
const hashMock = {
update: vi.fn().mockReturnThis(),
digest: vi.fn().mockReturnValue(MOCKED_PROJECT_HASH),
};
mockedCrypto.createHash.mockReturnValue(hashMock as never);
});
it('should initialize and read the history file from the correct path', async () => {
mockedFs.readFile.mockResolvedValue('cmd1\ncmd2');
const { result } = renderHook(() => useShellHistory(MOCKED_PROJECT_ROOT));
await waitFor(() => {
expect(mockedFs.readFile).toHaveBeenCalledWith(
MOCKED_HISTORY_FILE,
'utf-8',
);
});
let command: string | null = null;
act(() => {
command = result.current.getPreviousCommand();
});
// History is loaded newest-first: ['cmd2', 'cmd1']
expect(command).toBe('cmd2');
});
it('should handle a non-existent history file gracefully', async () => {
const error = new Error('File not found') as NodeJS.ErrnoException;
error.code = 'ENOENT';
mockedFs.readFile.mockRejectedValue(error);
const { result } = renderHook(() => useShellHistory(MOCKED_PROJECT_ROOT));
await waitFor(() => {
expect(mockedFs.readFile).toHaveBeenCalled();
});
let command: string | null = null;
act(() => {
command = result.current.getPreviousCommand();
});
expect(command).toBe(null);
});
it('should add a command and write to the history file', async () => {
const { result } = renderHook(() => useShellHistory(MOCKED_PROJECT_ROOT));
await waitFor(() => expect(mockedFs.readFile).toHaveBeenCalled());
act(() => {
result.current.addCommandToHistory('new_command');
});
await waitFor(() => {
expect(mockedFs.mkdir).toHaveBeenCalledWith(MOCKED_HISTORY_DIR, {
recursive: true,
});
expect(mockedFs.writeFile).toHaveBeenCalledWith(
MOCKED_HISTORY_FILE,
'new_command', // Written to file oldest-first.
);
});
let command: string | null = null;
act(() => {
command = result.current.getPreviousCommand();
});
expect(command).toBe('new_command');
});
it('should navigate history correctly with previous/next commands', async () => {
mockedFs.readFile.mockResolvedValue('cmd1\ncmd2\ncmd3');
const { result } = renderHook(() => useShellHistory(MOCKED_PROJECT_ROOT));
// Wait for history to be loaded: ['cmd3', 'cmd2', 'cmd1']
await waitFor(() => expect(mockedFs.readFile).toHaveBeenCalled());
let command: string | null = null;
act(() => {
command = result.current.getPreviousCommand();
});
expect(command).toBe('cmd3');
act(() => {
command = result.current.getPreviousCommand();
});
expect(command).toBe('cmd2');
act(() => {
command = result.current.getPreviousCommand();
});
expect(command).toBe('cmd1');
// Should stay at the oldest command
act(() => {
command = result.current.getPreviousCommand();
});
expect(command).toBe('cmd1');
act(() => {
command = result.current.getNextCommand();
});
expect(command).toBe('cmd2');
act(() => {
command = result.current.getNextCommand();
});
expect(command).toBe('cmd3');
// Should return to the "new command" line (represented as empty string)
act(() => {
command = result.current.getNextCommand();
});
expect(command).toBe('');
});
it('should not add empty or whitespace-only commands to history', async () => {
const { result } = renderHook(() => useShellHistory(MOCKED_PROJECT_ROOT));
await waitFor(() => expect(mockedFs.readFile).toHaveBeenCalled());
act(() => {
result.current.addCommandToHistory(' ');
});
expect(mockedFs.writeFile).not.toHaveBeenCalled();
});
it('should truncate history to MAX_HISTORY_LENGTH (100)', async () => {
const oldCommands = Array.from({ length: 120 }, (_, i) => `old_cmd_${i}`);
mockedFs.readFile.mockResolvedValue(oldCommands.join('\n'));
const { result } = renderHook(() => useShellHistory(MOCKED_PROJECT_ROOT));
await waitFor(() => expect(mockedFs.readFile).toHaveBeenCalled());
act(() => {
result.current.addCommandToHistory('new_cmd');
});
// Wait for the async write to happen and then inspect the arguments.
await waitFor(() => expect(mockedFs.writeFile).toHaveBeenCalled());
// The hook stores history newest-first.
// Initial state: ['old_cmd_119', ..., 'old_cmd_0']
// After adding 'new_cmd': ['new_cmd', 'old_cmd_119', ..., 'old_cmd_21'] (100 items)
// Written to file (reversed): ['old_cmd_21', ..., 'old_cmd_119', 'new_cmd']
const writtenContent = mockedFs.writeFile.mock.calls[0][1] as string;
const writtenLines = writtenContent.split('\n');
expect(writtenLines.length).toBe(100);
expect(writtenLines[0]).toBe('old_cmd_21'); // New oldest command
expect(writtenLines[99]).toBe('new_cmd'); // Newest command
});
it('should move an existing command to the top when re-added', async () => {
mockedFs.readFile.mockResolvedValue('cmd1\ncmd2\ncmd3');
const { result } = renderHook(() => useShellHistory(MOCKED_PROJECT_ROOT));
// Initial state: ['cmd3', 'cmd2', 'cmd1']
await waitFor(() => expect(mockedFs.readFile).toHaveBeenCalled());
act(() => {
result.current.addCommandToHistory('cmd1');
});
// After re-adding 'cmd1': ['cmd1', 'cmd3', 'cmd2']
// Written to file (reversed): ['cmd2', 'cmd3', 'cmd1']
await waitFor(() => expect(mockedFs.writeFile).toHaveBeenCalled());
const writtenContent = mockedFs.writeFile.mock.calls[0][1] as string;
const writtenLines = writtenContent.split('\n');
expect(writtenLines).toEqual(['cmd2', 'cmd3', 'cmd1']);
});
});

View File

@@ -0,0 +1,103 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { useState, useEffect, useCallback } from 'react';
import * as fs from 'fs/promises';
import * as path from 'path';
import { isNodeError, getProjectTempDir } from '@qwen/qwen-code-core';
const HISTORY_FILE = 'shell_history';
const MAX_HISTORY_LENGTH = 100;
async function getHistoryFilePath(projectRoot: string): Promise<string> {
const historyDir = getProjectTempDir(projectRoot);
return path.join(historyDir, HISTORY_FILE);
}
async function readHistoryFile(filePath: string): Promise<string[]> {
try {
const content = await fs.readFile(filePath, 'utf-8');
return content.split('\n').filter(Boolean);
} catch (error) {
if (isNodeError(error) && error.code === 'ENOENT') {
return [];
}
console.error('Error reading shell history:', error);
return [];
}
}
async function writeHistoryFile(
filePath: string,
history: string[],
): Promise<void> {
try {
await fs.mkdir(path.dirname(filePath), { recursive: true });
await fs.writeFile(filePath, history.join('\n'));
} catch (error) {
console.error('Error writing shell history:', error);
}
}
export function useShellHistory(projectRoot: string) {
const [history, setHistory] = useState<string[]>([]);
const [historyIndex, setHistoryIndex] = useState(-1);
const [historyFilePath, setHistoryFilePath] = useState<string | null>(null);
useEffect(() => {
async function loadHistory() {
const filePath = await getHistoryFilePath(projectRoot);
setHistoryFilePath(filePath);
const loadedHistory = await readHistoryFile(filePath);
setHistory(loadedHistory.reverse()); // Newest first
}
loadHistory();
}, [projectRoot]);
const addCommandToHistory = useCallback(
(command: string) => {
if (!command.trim() || !historyFilePath) {
return;
}
const newHistory = [command, ...history.filter((c) => c !== command)]
.slice(0, MAX_HISTORY_LENGTH)
.filter(Boolean);
setHistory(newHistory);
// Write to file in reverse order (oldest first)
writeHistoryFile(historyFilePath, [...newHistory].reverse());
setHistoryIndex(-1);
},
[history, historyFilePath],
);
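  // History is kept newest-first, so "previous" walks forward through the array.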
const getPreviousCommand = useCallback(() => {
if (history.length === 0) {
return null;
}
const newIndex = Math.min(historyIndex + 1, history.length - 1);
setHistoryIndex(newIndex);
return history[newIndex] ?? null;
}, [history, historyIndex]);
const getNextCommand = useCallback(() => {
if (historyIndex < 0) {
return null;
}
const newIndex = historyIndex - 1;
setHistoryIndex(newIndex);
if (newIndex < 0) {
return '';
}
return history[newIndex] ?? null;
}, [history, historyIndex]);
return {
addCommandToHistory,
getPreviousCommand,
getNextCommand,
resetHistoryPosition: () => setHistoryIndex(-1),
};
}

View File

@@ -0,0 +1,75 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { Message, MessageType } from '../types.js';
import { Config } from '@qwen/qwen-code-core';
import { LoadedSettings } from '../../config/settings.js';
export function createShowMemoryAction(
config: Config | null,
settings: LoadedSettings,
addMessage: (message: Message) => void,
) {
return async () => {
if (!config) {
addMessage({
type: MessageType.ERROR,
content: 'Configuration not available. Cannot show memory.',
timestamp: new Date(),
});
return;
}
const debugMode = config.getDebugMode();
if (debugMode) {
console.log('[DEBUG] Show Memory command invoked.');
}
const currentMemory = config.getUserMemory();
const fileCount = config.getGeminiMdFileCount();
const contextFileName = settings.merged.contextFileName;
const contextFileNames = Array.isArray(contextFileName)
? contextFileName
: [contextFileName];
if (debugMode) {
console.log(
`[DEBUG] Showing memory. Content from config.getUserMemory() (first 200 chars): ${currentMemory.substring(0, 200)}...`,
);
console.log(`[DEBUG] Number of context files loaded: ${fileCount}`);
}
if (fileCount > 0) {
const allNamesTheSame = new Set(contextFileNames).size < 2;
const name = allNamesTheSame ? contextFileNames[0] : 'context';
addMessage({
type: MessageType.INFO,
content: `Loaded memory from ${fileCount} ${name} file${
fileCount > 1 ? 's' : ''
}.`,
timestamp: new Date(),
});
}
if (currentMemory && currentMemory.trim().length > 0) {
addMessage({
type: MessageType.INFO,
content: `Current combined memory content:\n\`\`\`markdown\n${currentMemory}\n\`\`\``,
timestamp: new Date(),
});
} else {
addMessage({
type: MessageType.INFO,
content:
fileCount > 0
? 'Hierarchical memory (GEMINI.md or other context files) is loaded but content is empty.'
: 'No hierarchical memory (GEMINI.md or other context files) is currently loaded.',
timestamp: new Date(),
});
}
};
}
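A small harness sketch, assuming the factory lives in './showMemoryCommand.js' and that collecting messages in an array is an acceptable stand-in for rendering them; only the factory's signature comes from the code above.
// Hypothetical harness (not part of this commit): run the action and collect
// the INFO/ERROR messages it emits instead of rendering them in the UI.
import { createShowMemoryAction } from './showMemoryCommand.js';
import { Message } from '../types.js';
import { Config } from '@qwen/qwen-code-core';
import { LoadedSettings } from '../../config/settings.js';
export async function runShowMemory(config: Config, settings: LoadedSettings) {
  const emitted: Message[] = [];
  const addMessage = (message: Message) => emitted.push(message);
  const showMemory = createShowMemoryAction(config, settings, addMessage);
  await showMemory();
  // e.g. a "Loaded memory from N ... file(s)." notice followed by the content block.
  return emitted;
}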

View File

@@ -0,0 +1,36 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import React from 'react';
// Hook that returns state, a state setter, and a ref that always holds the
// most up-to-date value of the state. We need this so a function can call
// setState and still read the updated value several times before React re-renders.
export const useStateAndRef = <
// Everything but function.
T extends object | null | undefined | number | string,
>(
initialValue: T,
) => {
const [_, setState] = React.useState<T>(initialValue);
const ref = React.useRef<T>(initialValue);
const setStateInternal = React.useCallback<typeof setState>(
(newStateOrCallback) => {
let newValue: T;
if (typeof newStateOrCallback === 'function') {
newValue = newStateOrCallback(ref.current);
} else {
newValue = newStateOrCallback;
}
setState(newValue);
ref.current = newValue;
},
[],
);
return [ref, setStateInternal] as const;
};
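A short sketch of the pattern this enables: writing state and then reading the fresh value later in the same function, before React re-renders. The useSubmissionCounter wrapper and the './useStateAndRef.js' import path are illustrative assumptions.
// Hypothetical illustration (not part of this commit) of why the ref matters:
// a plain useState value captured in this closure would still be stale here.
import { useCallback } from 'react';
import { useStateAndRef } from './useStateAndRef.js';
export function useSubmissionCounter() {
  const [countRef, setCount] = useStateAndRef<number>(0);
  const submitTwice = useCallback(async () => {
    setCount((prev) => prev + 1);
    // countRef.current already reflects the update above, even though the
    // component has not re-rendered yet.
    setCount(countRef.current + 1);
    return countRef.current; // 2 on the first call
  }, [countRef, setCount]);
  return submitTwice;
}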

View File

@@ -0,0 +1,32 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { useEffect, useState } from 'react';
const TERMINAL_PADDING_X = 8;
export function useTerminalSize(): { columns: number; rows: number } {
const [size, setSize] = useState({
columns: (process.stdout.columns || 60) - TERMINAL_PADDING_X,
rows: process.stdout.rows || 20,
});
useEffect(() => {
function updateSize() {
setSize({
columns: (process.stdout.columns || 60) - TERMINAL_PADDING_X,
rows: process.stdout.rows || 20,
});
}
process.stdout.on('resize', updateSize);
return () => {
process.stdout.off('resize', updateSize);
};
}, []);
return size;
}
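A brief usage sketch, assuming a './useTerminalSize.js' import path; it shows that the reported columns value is already padded, so callers compare against usable width.
// Hypothetical usage (not part of this commit): pick a layout based on the
// padded column count this hook reports.
import { useTerminalSize } from './useTerminalSize.js';
export function useIsNarrowTerminal(threshold = 80): boolean {
  const { columns } = useTerminalSize();
  // `columns` already has TERMINAL_PADDING_X subtracted, so this compares
  // against usable width rather than the raw process.stdout.columns value.
  return columns < threshold;
}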

View File

@@ -0,0 +1,116 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { useState, useCallback, useEffect } from 'react';
import { themeManager } from '../themes/theme-manager.js';
import { LoadedSettings, SettingScope } from '../../config/settings.js';
import { type HistoryItem, MessageType } from '../types.js';
import process from 'node:process';
interface UseThemeCommandReturn {
isThemeDialogOpen: boolean;
openThemeDialog: () => void;
handleThemeSelect: (
themeName: string | undefined,
scope: SettingScope,
  ) => void; // scope selects whether the choice persists to user or workspace settings
handleThemeHighlight: (themeName: string | undefined) => void;
}
export const useThemeCommand = (
loadedSettings: LoadedSettings,
setThemeError: (error: string | null) => void,
addItem: (item: Omit<HistoryItem, 'id'>, timestamp: number) => void,
): UseThemeCommandReturn => {
// Determine the effective theme
const effectiveTheme = loadedSettings.merged.theme;
// Initial state: Open dialog if no theme is set in either user or workspace settings
const [isThemeDialogOpen, setIsThemeDialogOpen] = useState(
effectiveTheme === undefined && !process.env.NO_COLOR,
);
  // TODO: refactor how themes are accessed to avoid requiring a forced render.
const [, setForceRender] = useState(0);
// Apply initial theme on component mount
useEffect(() => {
if (effectiveTheme === undefined) {
if (process.env.NO_COLOR) {
addItem(
{
type: MessageType.INFO,
text: 'Theme configuration unavailable due to NO_COLOR env variable.',
},
Date.now(),
);
}
// If no theme is set and NO_COLOR is not set, the dialog is already open.
return;
}
if (!themeManager.setActiveTheme(effectiveTheme)) {
setIsThemeDialogOpen(true);
setThemeError(`Theme "${effectiveTheme}" not found.`);
} else {
setThemeError(null);
}
  }, [effectiveTheme, setThemeError, addItem]); // Re-run if effectiveTheme, setThemeError, or addItem changes
const openThemeDialog = useCallback(() => {
if (process.env.NO_COLOR) {
addItem(
{
type: MessageType.INFO,
text: 'Theme configuration unavailable due to NO_COLOR env variable.',
},
Date.now(),
);
return;
}
setIsThemeDialogOpen(true);
}, [addItem]);
const applyTheme = useCallback(
(themeName: string | undefined) => {
if (!themeManager.setActiveTheme(themeName)) {
// If theme is not found, open the theme selection dialog and set error message
setIsThemeDialogOpen(true);
setThemeError(`Theme "${themeName}" not found.`);
} else {
setForceRender((v) => v + 1); // Trigger potential re-render
setThemeError(null); // Clear any previous theme error on success
}
},
[setForceRender, setThemeError],
);
const handleThemeHighlight = useCallback(
(themeName: string | undefined) => {
applyTheme(themeName);
},
[applyTheme],
);
const handleThemeSelect = useCallback(
(themeName: string | undefined, scope: SettingScope) => {
      // Persist the chosen theme to the given scope, then apply the merged result.
try {
loadedSettings.setValue(scope, 'theme', themeName); // Update the merged settings
applyTheme(loadedSettings.merged.theme); // Apply the current theme
} finally {
setIsThemeDialogOpen(false); // Close the dialog
}
},
[applyTheme, loadedSettings],
);
return {
isThemeDialogOpen,
openThemeDialog,
handleThemeSelect,
handleThemeHighlight,
};
};
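A sketch of a consumer, assuming the hook is exported from './useThemeCommand.js' and that SettingScope exposes a User member; everything else mirrors the hook's return contract above.
// Hypothetical consumer (not part of this commit). The wrapper name and the
// SettingScope.User member are assumptions for illustration.
import { useThemeCommand } from './useThemeCommand.js';
import { LoadedSettings, SettingScope } from '../../config/settings.js';
import { type HistoryItem } from '../types.js';
export function useThemePicker(
  settings: LoadedSettings,
  setThemeError: (error: string | null) => void,
  addItem: (item: Omit<HistoryItem, 'id'>, timestamp: number) => void,
) {
  const {
    isThemeDialogOpen,
    openThemeDialog,
    handleThemeSelect,
    handleThemeHighlight,
  } = useThemeCommand(settings, setThemeError, addItem);
  // Highlighting previews a theme immediately; selecting persists it to the
  // chosen scope and closes the dialog.
  const selectForUser = (name: string | undefined) =>
    handleThemeSelect(name, SettingScope.User);
  return { isThemeDialogOpen, openThemeDialog, handleThemeHighlight, selectForUser };
}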

View File

@@ -0,0 +1,120 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
import { renderHook, act } from '@testing-library/react';
import { useTimer } from './useTimer.js';
describe('useTimer', () => {
beforeEach(() => {
vi.useFakeTimers();
});
afterEach(() => {
vi.restoreAllMocks();
});
it('should initialize with 0', () => {
const { result } = renderHook(() => useTimer(false, 0));
expect(result.current).toBe(0);
});
it('should not increment time if isActive is false', () => {
const { result } = renderHook(() => useTimer(false, 0));
act(() => {
vi.advanceTimersByTime(5000);
});
expect(result.current).toBe(0);
});
it('should increment time every second if isActive is true', () => {
const { result } = renderHook(() => useTimer(true, 0));
act(() => {
vi.advanceTimersByTime(1000);
});
expect(result.current).toBe(1);
act(() => {
vi.advanceTimersByTime(2000);
});
expect(result.current).toBe(3);
});
it('should reset to 0 and start incrementing when isActive becomes true from false', () => {
const { result, rerender } = renderHook(
({ isActive, resetKey }) => useTimer(isActive, resetKey),
{ initialProps: { isActive: false, resetKey: 0 } },
);
expect(result.current).toBe(0);
rerender({ isActive: true, resetKey: 0 });
expect(result.current).toBe(0); // Should reset to 0 upon becoming active
act(() => {
vi.advanceTimersByTime(1000);
});
expect(result.current).toBe(1);
});
it('should reset to 0 when resetKey changes while active', () => {
const { result, rerender } = renderHook(
({ isActive, resetKey }) => useTimer(isActive, resetKey),
{ initialProps: { isActive: true, resetKey: 0 } },
);
act(() => {
vi.advanceTimersByTime(3000); // 3s
});
expect(result.current).toBe(3);
rerender({ isActive: true, resetKey: 1 }); // Change resetKey
expect(result.current).toBe(0); // Should reset to 0
act(() => {
vi.advanceTimersByTime(1000);
});
expect(result.current).toBe(1); // Starts incrementing from 0
});
it('should be 0 if isActive is false, regardless of resetKey changes', () => {
const { result, rerender } = renderHook(
({ isActive, resetKey }) => useTimer(isActive, resetKey),
{ initialProps: { isActive: false, resetKey: 0 } },
);
expect(result.current).toBe(0);
rerender({ isActive: false, resetKey: 1 });
expect(result.current).toBe(0);
});
it('should clear timer on unmount', () => {
const { unmount } = renderHook(() => useTimer(true, 0));
const clearIntervalSpy = vi.spyOn(global, 'clearInterval');
unmount();
expect(clearIntervalSpy).toHaveBeenCalledOnce();
});
it('should preserve elapsedTime when isActive becomes false, and reset to 0 when it becomes active again', () => {
const { result, rerender } = renderHook(
({ isActive, resetKey }) => useTimer(isActive, resetKey),
{ initialProps: { isActive: true, resetKey: 0 } },
);
act(() => {
vi.advanceTimersByTime(3000); // Advance to 3 seconds
});
expect(result.current).toBe(3);
rerender({ isActive: false, resetKey: 0 });
expect(result.current).toBe(3); // Time should be preserved when timer becomes inactive
// Now make it active again, it should reset to 0
rerender({ isActive: true, resetKey: 0 });
expect(result.current).toBe(0);
act(() => {
vi.advanceTimersByTime(1000);
});
expect(result.current).toBe(1);
});
});

View File

@@ -0,0 +1,65 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { useState, useEffect, useRef } from 'react';
/**
* Custom hook to manage a timer that increments every second.
* @param isActive Whether the timer should be running.
* @param resetKey A key that, when changed, will reset the timer to 0 and restart the interval.
* @returns The elapsed time in seconds.
*/
export const useTimer = (isActive: boolean, resetKey: unknown) => {
const [elapsedTime, setElapsedTime] = useState(0);
const timerRef = useRef<NodeJS.Timeout | null>(null);
const prevResetKeyRef = useRef(resetKey);
const prevIsActiveRef = useRef(isActive);
useEffect(() => {
let shouldResetTime = false;
if (prevResetKeyRef.current !== resetKey) {
shouldResetTime = true;
prevResetKeyRef.current = resetKey;
}
if (prevIsActiveRef.current === false && isActive) {
// Transitioned from inactive to active
shouldResetTime = true;
}
if (shouldResetTime) {
setElapsedTime(0);
}
prevIsActiveRef.current = isActive;
// Manage interval
if (isActive) {
// Clear previous interval unconditionally before starting a new one
// This handles resetKey changes while active, ensuring a fresh interval start.
if (timerRef.current) {
clearInterval(timerRef.current);
}
timerRef.current = setInterval(() => {
setElapsedTime((prev) => prev + 1);
}, 1000);
} else {
if (timerRef.current) {
clearInterval(timerRef.current);
timerRef.current = null;
}
}
return () => {
if (timerRef.current) {
clearInterval(timerRef.current);
timerRef.current = null;
}
};
}, [isActive, resetKey]);
return elapsedTime;
};
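A minimal usage sketch, with an assumed './useTimer.js' import path and illustrative parameter names: a pending flag drives isActive, and a request id doubles as the resetKey.
// Hypothetical usage (not part of this commit): show elapsed seconds while a
// request is in flight, resetting whenever a new request (resetKey) starts.
import { useTimer } from './useTimer.js';
export function useRequestElapsedSeconds(isPending: boolean, requestId: number): string {
  const elapsed = useTimer(isPending, requestId);
  // Changing requestId restarts the count even if isPending stays true.
  return `${elapsed}s`;
}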

File diff suppressed because it is too large