/**
 * @license
 * Copyright 2025 Google LLC
 * SPDX-License-Identifier: Apache-2.0
 */

import type {
  Config,
  ToolRegistry,
  ServerGeminiStreamEvent,
  SessionMetrics,
} from '@qwen-code/qwen-code-core';
import {
  executeToolCall,
  ToolErrorType,
  shutdownTelemetry,
  GeminiEventType,
  OutputFormat,
  uiTelemetryService,
  FatalInputError,
} from '@qwen-code/qwen-code-core';
import type { Part } from '@google/genai';
import { runNonInteractive } from './nonInteractiveCli.js';
import { vi } from 'vitest';
import type { LoadedSettings } from './config/settings.js';

// Mock core modules
vi.mock('./ui/hooks/atCommandProcessor.js');
vi.mock('@qwen-code/qwen-code-core', async (importOriginal) => {
  const original =
    await importOriginal<typeof import('@qwen-code/qwen-code-core')>();

  class MockChatRecordingService {
    initialize = vi.fn();
    recordMessage = vi.fn();
    recordMessageTokens = vi.fn();
    recordToolCalls = vi.fn();
  }

  return {
    ...original,
    executeToolCall: vi.fn(),
    shutdownTelemetry: vi.fn(),
    isTelemetrySdkInitialized: vi.fn().mockReturnValue(true),
    ChatRecordingService: MockChatRecordingService,
    uiTelemetryService: {
      getMetrics: vi.fn(),
    },
  };
});

const mockGetCommands = vi.hoisted(() => vi.fn());
const mockCommandServiceCreate = vi.hoisted(() => vi.fn());
vi.mock('./services/CommandService.js', () => ({
  CommandService: {
    create: mockCommandServiceCreate,
  },
}));

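// The suite below drives runNonInteractive end-to-end against fully mocked
// collaborators: a stubbed Gemini client, tool registry, Config, and
// LoadedSettings, with process.stdout and process.exit spied on so output and
// exit codes can be asserted directly.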
describe('runNonInteractive', () => {
  let mockConfig: Config;
  let mockSettings: LoadedSettings;
  let mockToolRegistry: ToolRegistry;
  let mockCoreExecuteToolCall: vi.Mock;
  let mockShutdownTelemetry: vi.Mock;
  let consoleErrorSpy: vi.SpyInstance;
  let processStdoutSpy: vi.SpyInstance;
  let mockGeminiClient: {
    sendMessageStream: vi.Mock;
    getChatRecordingService: vi.Mock;
    getChat: vi.Mock;
  };
  let mockGetDebugResponses: vi.Mock;

  beforeEach(async () => {
    mockCoreExecuteToolCall = vi.mocked(executeToolCall);
    mockShutdownTelemetry = vi.mocked(shutdownTelemetry);
    mockCommandServiceCreate.mockResolvedValue({
      getCommands: mockGetCommands,
    });

    consoleErrorSpy = vi.spyOn(console, 'error').mockImplementation(() => {});
    processStdoutSpy = vi
      .spyOn(process.stdout, 'write')
      .mockImplementation(() => true);
    vi.spyOn(process, 'exit').mockImplementation((code) => {
      throw new Error(`process.exit(${code}) called`);
    });

    mockToolRegistry = {
      getTool: vi.fn(),
      getFunctionDeclarations: vi.fn().mockReturnValue([]),
    } as unknown as ToolRegistry;

    mockGetDebugResponses = vi.fn(() => []);

    mockGeminiClient = {
      sendMessageStream: vi.fn(),
      getChatRecordingService: vi.fn(() => ({
        initialize: vi.fn(),
        recordMessage: vi.fn(),
        recordMessageTokens: vi.fn(),
        recordToolCalls: vi.fn(),
      })),
      getChat: vi.fn(() => ({
        getDebugResponses: mockGetDebugResponses,
      })),
    };

    let currentModel = 'test-model';

    mockConfig = {
      initialize: vi.fn().mockResolvedValue(undefined),
      getGeminiClient: vi.fn().mockReturnValue(mockGeminiClient),
      getToolRegistry: vi.fn().mockReturnValue(mockToolRegistry),
      getMaxSessionTurns: vi.fn().mockReturnValue(10),
      getProjectRoot: vi.fn().mockReturnValue('/test/project'),
      storage: {
        getProjectTempDir: vi.fn().mockReturnValue('/test/project/.gemini/tmp'),
      },
      getIdeMode: vi.fn().mockReturnValue(false),
      getFullContext: vi.fn().mockReturnValue(false),
      getContentGeneratorConfig: vi.fn().mockReturnValue({}),
      getDebugMode: vi.fn().mockReturnValue(false),
      getOutputFormat: vi.fn().mockReturnValue('text'),
      getFolderTrustFeature: vi.fn().mockReturnValue(false),
      getFolderTrust: vi.fn().mockReturnValue(false),
      getIncludePartialMessages: vi.fn().mockReturnValue(false),
      getSessionId: vi.fn().mockReturnValue('test-session-id'),
      getModel: vi.fn(() => currentModel),
      setModel: vi.fn(async (model: string) => {
        currentModel = model;
      }),
    } as unknown as Config;

    mockSettings = {
      system: { path: '', settings: {} },
      systemDefaults: { path: '', settings: {} },
      user: { path: '', settings: {} },
      workspace: { path: '', settings: {} },
      errors: [],
      setValue: vi.fn(),
      merged: {
        security: {
          auth: {
            enforcedType: undefined,
          },
        },
      },
      isTrusted: true,
      migratedInMemorScopes: new Set(),
      forScope: vi.fn(),
      computeMergedSettings: vi.fn(),
    } as unknown as LoadedSettings;

    const { handleAtCommand } = await import(
      './ui/hooks/atCommandProcessor.js'
    );
    vi.mocked(handleAtCommand).mockImplementation(async ({ query }) => ({
      processedQuery: [{ text: query }],
      shouldProceed: true,
    }));
  });

  afterEach(() => {
    vi.restoreAllMocks();
  });

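  // Helper that wraps a fixed list of stream events in an async generator,
  // matching the shape sendMessageStream is mocked to return.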
  async function* createStreamFromEvents(
    events: ServerGeminiStreamEvent[],
  ): AsyncGenerator<ServerGeminiStreamEvent> {
    for (const event of events) {
      yield event;
    }
  }

  it('should process input and write text output', async () => {
    const events: ServerGeminiStreamEvent[] = [
      { type: GeminiEventType.Content, value: 'Hello' },
      { type: GeminiEventType.Content, value: ' World' },
      {
        type: GeminiEventType.Finished,
        value: { reason: undefined, usageMetadata: { totalTokenCount: 10 } },
      },
    ];
    mockGeminiClient.sendMessageStream.mockReturnValue(
      createStreamFromEvents(events),
    );

    await runNonInteractive(
      mockConfig,
      mockSettings,
      'Test input',
      'prompt-id-1',
    );

    expect(mockGeminiClient.sendMessageStream).toHaveBeenCalledWith(
      [{ text: 'Test input' }],
      expect.any(AbortSignal),
      'prompt-id-1',
    );
    expect(processStdoutSpy).toHaveBeenCalledWith('Hello');
    expect(processStdoutSpy).toHaveBeenCalledWith(' World');
    expect(processStdoutSpy).toHaveBeenCalledWith('\n');
    expect(mockShutdownTelemetry).toHaveBeenCalled();
  });

  it('should handle a single tool call and respond', async () => {
    const toolCallEvent: ServerGeminiStreamEvent = {
      type: GeminiEventType.ToolCallRequest,
      value: {
        callId: 'tool-1',
        name: 'testTool',
        args: { arg1: 'value1' },
        isClientInitiated: false,
        prompt_id: 'prompt-id-2',
      },
    };
    const toolResponse: Part[] = [{ text: 'Tool response' }];
    mockCoreExecuteToolCall.mockResolvedValue({ responseParts: toolResponse });

    const firstCallEvents: ServerGeminiStreamEvent[] = [toolCallEvent];
    const secondCallEvents: ServerGeminiStreamEvent[] = [
      { type: GeminiEventType.Content, value: 'Final answer' },
      {
        type: GeminiEventType.Finished,
        value: { reason: undefined, usageMetadata: { totalTokenCount: 10 } },
      },
    ];

    mockGeminiClient.sendMessageStream
      .mockReturnValueOnce(createStreamFromEvents(firstCallEvents))
      .mockReturnValueOnce(createStreamFromEvents(secondCallEvents));

    await runNonInteractive(
      mockConfig,
      mockSettings,
      'Use a tool',
      'prompt-id-2',
    );

    expect(mockGeminiClient.sendMessageStream).toHaveBeenCalledTimes(2);
    expect(mockCoreExecuteToolCall).toHaveBeenCalledWith(
      mockConfig,
      expect.objectContaining({ name: 'testTool' }),
      expect.any(AbortSignal),
    );
    expect(mockGeminiClient.sendMessageStream).toHaveBeenNthCalledWith(
      2,
      [{ text: 'Tool response' }],
      expect.any(AbortSignal),
      'prompt-id-2',
    );
    expect(processStdoutSpy).toHaveBeenCalledWith('Final answer');
    expect(processStdoutSpy).toHaveBeenCalledWith('\n');
  });

  it('should handle error during tool execution and should send error back to the model', async () => {
    const toolCallEvent: ServerGeminiStreamEvent = {
      type: GeminiEventType.ToolCallRequest,
      value: {
        callId: 'tool-1',
        name: 'errorTool',
        args: {},
        isClientInitiated: false,
        prompt_id: 'prompt-id-3',
      },
    };
    mockCoreExecuteToolCall.mockResolvedValue({
      error: new Error('Execution failed'),
      errorType: ToolErrorType.EXECUTION_FAILED,
      responseParts: [
        {
          functionResponse: {
            name: 'errorTool',
            response: {
              output: 'Error: Execution failed',
            },
          },
        },
      ],
      resultDisplay: 'Execution failed',
    });
    const finalResponse: ServerGeminiStreamEvent[] = [
      {
        type: GeminiEventType.Content,
        value: 'Sorry, let me try again.',
      },
      {
        type: GeminiEventType.Finished,
        value: { reason: undefined, usageMetadata: { totalTokenCount: 10 } },
      },
    ];
    mockGeminiClient.sendMessageStream
      .mockReturnValueOnce(createStreamFromEvents([toolCallEvent]))
      .mockReturnValueOnce(createStreamFromEvents(finalResponse));

    await runNonInteractive(
      mockConfig,
      mockSettings,
      'Trigger tool error',
      'prompt-id-3',
    );

    expect(mockCoreExecuteToolCall).toHaveBeenCalled();
    expect(consoleErrorSpy).toHaveBeenCalledWith(
      'Error executing tool errorTool: Execution failed',
    );
    expect(mockGeminiClient.sendMessageStream).toHaveBeenCalledTimes(2);
    expect(mockGeminiClient.sendMessageStream).toHaveBeenNthCalledWith(
      2,
      [
        {
          functionResponse: {
            name: 'errorTool',
            response: {
              output: 'Error: Execution failed',
            },
          },
        },
      ],
      expect.any(AbortSignal),
      'prompt-id-3',
    );
    expect(processStdoutSpy).toHaveBeenCalledWith('Sorry, let me try again.');
  });

  it('should exit with error if sendMessageStream throws initially', async () => {
    const apiError = new Error('API connection failed');
    mockGeminiClient.sendMessageStream.mockImplementation(() => {
      throw apiError;
    });

    await expect(
      runNonInteractive(
        mockConfig,
        mockSettings,
        'Initial fail',
        'prompt-id-4',
      ),
    ).rejects.toThrow(apiError);
  });

  it('should not exit if a tool is not found, and should send error back to model', async () => {
    const toolCallEvent: ServerGeminiStreamEvent = {
      type: GeminiEventType.ToolCallRequest,
      value: {
        callId: 'tool-1',
        name: 'nonexistentTool',
        args: {},
        isClientInitiated: false,
        prompt_id: 'prompt-id-5',
      },
    };
    mockCoreExecuteToolCall.mockResolvedValue({
      error: new Error('Tool "nonexistentTool" not found in registry.'),
      resultDisplay: 'Tool "nonexistentTool" not found in registry.',
      responseParts: [],
    });
    const finalResponse: ServerGeminiStreamEvent[] = [
      {
        type: GeminiEventType.Content,
        value: "Sorry, I can't find that tool.",
      },
      {
        type: GeminiEventType.Finished,
        value: { reason: undefined, usageMetadata: { totalTokenCount: 10 } },
      },
    ];

    mockGeminiClient.sendMessageStream
      .mockReturnValueOnce(createStreamFromEvents([toolCallEvent]))
      .mockReturnValueOnce(createStreamFromEvents(finalResponse));

    await runNonInteractive(
      mockConfig,
      mockSettings,
      'Trigger tool not found',
      'prompt-id-5',
    );

    expect(mockCoreExecuteToolCall).toHaveBeenCalled();
    expect(consoleErrorSpy).toHaveBeenCalledWith(
      'Error executing tool nonexistentTool: Tool "nonexistentTool" not found in registry.',
    );
    expect(mockGeminiClient.sendMessageStream).toHaveBeenCalledTimes(2);
    expect(processStdoutSpy).toHaveBeenCalledWith(
      "Sorry, I can't find that tool.",
    );
  });

  it('should exit when max session turns are exceeded', async () => {
    vi.mocked(mockConfig.getMaxSessionTurns).mockReturnValue(0);
    await expect(
      runNonInteractive(
        mockConfig,
        mockSettings,
        'Trigger loop',
        'prompt-id-6',
      ),
    ).rejects.toThrow('process.exit(53) called');
  });

  it('should preprocess @include commands before sending to the model', async () => {
    // 1. Mock the imported atCommandProcessor
    const { handleAtCommand } = await import(
      './ui/hooks/atCommandProcessor.js'
    );
    const mockHandleAtCommand = vi.mocked(handleAtCommand);

    // 2. Define the raw input and the expected processed output
    const rawInput = 'Summarize @file.txt';
    const processedParts: Part[] = [
      { text: 'Summarize @file.txt' },
      { text: '\n--- Content from referenced files ---\n' },
      { text: 'This is the content of the file.' },
      { text: '\n--- End of content ---' },
    ];

    // 3. Set up the mock to return the processed parts
    mockHandleAtCommand.mockResolvedValue({
      processedQuery: processedParts,
      shouldProceed: true,
    });

    // Mock a simple stream response from the Gemini client
    const events: ServerGeminiStreamEvent[] = [
      { type: GeminiEventType.Content, value: 'Summary complete.' },
      {
        type: GeminiEventType.Finished,
        value: { reason: undefined, usageMetadata: { totalTokenCount: 10 } },
      },
    ];
    mockGeminiClient.sendMessageStream.mockReturnValue(
      createStreamFromEvents(events),
    );

    // 4. Run non-interactive mode with the raw input
    await runNonInteractive(mockConfig, mockSettings, rawInput, 'prompt-id-7');

    // 5. Assert that sendMessageStream was called with the PROCESSED parts, not the raw input
    expect(mockGeminiClient.sendMessageStream).toHaveBeenCalledWith(
      processedParts,
      expect.any(AbortSignal),
      'prompt-id-7',
    );

    // 6. Assert the final output is correct
    expect(processStdoutSpy).toHaveBeenCalledWith('Summary complete.');
  });

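  // JSON output mode: the tests below expect runNonInteractive to print a
  // single JSON.stringify({ response, stats }) payload, with stats taken from
  // uiTelemetryService.getMetrics(), instead of streaming plain text.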
  it('should process input and write JSON output with stats', async () => {
    const events: ServerGeminiStreamEvent[] = [
      { type: GeminiEventType.Content, value: 'Hello World' },
      {
        type: GeminiEventType.Finished,
        value: { reason: undefined, usageMetadata: { totalTokenCount: 10 } },
      },
    ];
    mockGeminiClient.sendMessageStream.mockReturnValue(
      createStreamFromEvents(events),
    );
    vi.mocked(mockConfig.getOutputFormat).mockReturnValue(OutputFormat.JSON);
    const mockMetrics: SessionMetrics = {
      models: {},
      tools: {
        totalCalls: 0,
        totalSuccess: 0,
        totalFail: 0,
        totalDurationMs: 0,
        totalDecisions: {
          accept: 0,
          reject: 0,
          modify: 0,
          auto_accept: 0,
        },
        byName: {},
      },
      files: {
        totalLinesAdded: 0,
        totalLinesRemoved: 0,
      },
    };
    vi.mocked(uiTelemetryService.getMetrics).mockReturnValue(mockMetrics);

    await runNonInteractive(
      mockConfig,
      mockSettings,
      'Test input',
      'prompt-id-1',
    );

    expect(mockGeminiClient.sendMessageStream).toHaveBeenCalledWith(
      [{ text: 'Test input' }],
      expect.any(AbortSignal),
      'prompt-id-1',
    );
    expect(processStdoutSpy).toHaveBeenCalledWith(
      JSON.stringify({ response: 'Hello World', stats: mockMetrics }, null, 2),
    );
  });

  it('should write JSON output with stats for tool-only commands (no text response)', async () => {
    // Test the scenario where a command completes successfully with only tool calls
    // but no text response - this would have caught the original bug
    const toolCallEvent: ServerGeminiStreamEvent = {
      type: GeminiEventType.ToolCallRequest,
      value: {
        callId: 'tool-1',
        name: 'testTool',
        args: { arg1: 'value1' },
        isClientInitiated: false,
        prompt_id: 'prompt-id-tool-only',
      },
    };
    const toolResponse: Part[] = [{ text: 'Tool executed successfully' }];
    mockCoreExecuteToolCall.mockResolvedValue({ responseParts: toolResponse });

    // First call returns only tool call, no content
    const firstCallEvents: ServerGeminiStreamEvent[] = [
      toolCallEvent,
      {
        type: GeminiEventType.Finished,
        value: { reason: undefined, usageMetadata: { totalTokenCount: 5 } },
      },
    ];

    // Second call returns no content (tool-only completion)
    const secondCallEvents: ServerGeminiStreamEvent[] = [
      {
        type: GeminiEventType.Finished,
        value: { reason: undefined, usageMetadata: { totalTokenCount: 3 } },
      },
    ];

    mockGeminiClient.sendMessageStream
      .mockReturnValueOnce(createStreamFromEvents(firstCallEvents))
      .mockReturnValueOnce(createStreamFromEvents(secondCallEvents));

    vi.mocked(mockConfig.getOutputFormat).mockReturnValue(OutputFormat.JSON);
    const mockMetrics: SessionMetrics = {
      models: {},
      tools: {
        totalCalls: 1,
        totalSuccess: 1,
        totalFail: 0,
        totalDurationMs: 100,
        totalDecisions: {
          accept: 1,
          reject: 0,
          modify: 0,
          auto_accept: 0,
        },
        byName: {
          testTool: {
            count: 1,
            success: 1,
            fail: 0,
            durationMs: 100,
            decisions: {
              accept: 1,
              reject: 0,
              modify: 0,
              auto_accept: 0,
            },
          },
        },
      },
      files: {
        totalLinesAdded: 0,
        totalLinesRemoved: 0,
      },
    };
    vi.mocked(uiTelemetryService.getMetrics).mockReturnValue(mockMetrics);

    await runNonInteractive(
      mockConfig,
      mockSettings,
      'Execute tool only',
      'prompt-id-tool-only',
    );

    expect(mockGeminiClient.sendMessageStream).toHaveBeenCalledTimes(2);
    expect(mockCoreExecuteToolCall).toHaveBeenCalledWith(
      mockConfig,
      expect.objectContaining({ name: 'testTool' }),
      expect.any(AbortSignal),
    );

    // This should output JSON with empty response but include stats
    expect(processStdoutSpy).toHaveBeenCalledWith(
      JSON.stringify({ response: '', stats: mockMetrics }, null, 2),
    );
  });

  it('should write JSON output with stats for empty response commands', async () => {
    // Test the scenario where a command completes but produces no content at all
    const events: ServerGeminiStreamEvent[] = [
      {
        type: GeminiEventType.Finished,
        value: { reason: undefined, usageMetadata: { totalTokenCount: 1 } },
      },
    ];
    mockGeminiClient.sendMessageStream.mockReturnValue(
      createStreamFromEvents(events),
    );
    vi.mocked(mockConfig.getOutputFormat).mockReturnValue(OutputFormat.JSON);
    const mockMetrics: SessionMetrics = {
      models: {},
      tools: {
        totalCalls: 0,
        totalSuccess: 0,
        totalFail: 0,
        totalDurationMs: 0,
        totalDecisions: {
          accept: 0,
          reject: 0,
          modify: 0,
          auto_accept: 0,
        },
        byName: {},
      },
      files: {
        totalLinesAdded: 0,
        totalLinesRemoved: 0,
      },
    };
    vi.mocked(uiTelemetryService.getMetrics).mockReturnValue(mockMetrics);

    await runNonInteractive(
      mockConfig,
      mockSettings,
      'Empty response test',
      'prompt-id-empty',
    );

    expect(mockGeminiClient.sendMessageStream).toHaveBeenCalledWith(
      [{ text: 'Empty response test' }],
      expect.any(AbortSignal),
      'prompt-id-empty',
    );

    // This should output JSON with empty response but include stats
    expect(processStdoutSpy).toHaveBeenCalledWith(
      JSON.stringify({ response: '', stats: mockMetrics }, null, 2),
    );
  });

  it('should handle errors in JSON format', async () => {
    vi.mocked(mockConfig.getOutputFormat).mockReturnValue(OutputFormat.JSON);
    const testError = new Error('Invalid input provided');

    mockGeminiClient.sendMessageStream.mockImplementation(() => {
      throw testError;
    });

    // Mock console.error to capture JSON error output
    const consoleErrorJsonSpy = vi
      .spyOn(console, 'error')
      .mockImplementation(() => {});

    let thrownError: Error | null = null;
    try {
      await runNonInteractive(
        mockConfig,
        mockSettings,
        'Test input',
        'prompt-id-error',
      );
      // Should not reach here
      expect.fail('Expected process.exit to be called');
    } catch (error) {
      thrownError = error as Error;
    }

    // Should throw because of mocked process.exit
    expect(thrownError?.message).toBe('process.exit(1) called');

    expect(consoleErrorJsonSpy).toHaveBeenCalledWith(
      JSON.stringify(
        {
          error: {
            type: 'Error',
            message: 'Invalid input provided',
            code: 1,
          },
        },
        null,
        2,
      ),
    );
  });

  it('should handle FatalInputError with custom exit code in JSON format', async () => {
    vi.mocked(mockConfig.getOutputFormat).mockReturnValue(OutputFormat.JSON);
    const fatalError = new FatalInputError('Invalid command syntax provided');

    mockGeminiClient.sendMessageStream.mockImplementation(() => {
      throw fatalError;
    });

    // Mock console.error to capture JSON error output
    const consoleErrorJsonSpy = vi
      .spyOn(console, 'error')
      .mockImplementation(() => {});

    let thrownError: Error | null = null;
    try {
      await runNonInteractive(
        mockConfig,
        mockSettings,
        'Invalid syntax',
        'prompt-id-fatal',
      );
      // Should not reach here
      expect.fail('Expected process.exit to be called');
    } catch (error) {
      thrownError = error as Error;
    }

    // Should throw because of mocked process.exit with custom exit code
    expect(thrownError?.message).toBe('process.exit(42) called');

    expect(consoleErrorJsonSpy).toHaveBeenCalledWith(
      JSON.stringify(
        {
          error: {
            type: 'FatalInputError',
            message: 'Invalid command syntax provided',
            code: 42,
          },
        },
        null,
        2,
      ),
    );
  });

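  // Slash commands are resolved through the mocked CommandService: commands
  // returning a submit_prompt result feed their content to the model, unknown
  // commands fall back to the raw prompt, and unsupported results reject.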
  it('should execute a slash command that returns a prompt', async () => {
    const mockCommand = {
      name: 'testcommand',
      description: 'a test command',
      action: vi.fn().mockResolvedValue({
        type: 'submit_prompt',
        content: [{ text: 'Prompt from command' }],
      }),
    };
    mockGetCommands.mockReturnValue([mockCommand]);

    const events: ServerGeminiStreamEvent[] = [
      { type: GeminiEventType.Content, value: 'Response from command' },
      {
        type: GeminiEventType.Finished,
        value: { reason: undefined, usageMetadata: { totalTokenCount: 5 } },
      },
    ];
    mockGeminiClient.sendMessageStream.mockReturnValue(
      createStreamFromEvents(events),
    );

    await runNonInteractive(
      mockConfig,
      mockSettings,
      '/testcommand',
      'prompt-id-slash',
    );

    // Ensure the prompt sent to the model is from the command, not the raw input
    expect(mockGeminiClient.sendMessageStream).toHaveBeenCalledWith(
      [{ text: 'Prompt from command' }],
      expect.any(AbortSignal),
      'prompt-id-slash',
    );

    expect(processStdoutSpy).toHaveBeenCalledWith('Response from command');
  });

  it('should throw FatalInputError if a command requires confirmation', async () => {
    const mockCommand = {
      name: 'confirm',
      description: 'a command that needs confirmation',
      action: vi.fn().mockResolvedValue({
        type: 'confirm_shell_commands',
        commands: ['rm -rf /'],
      }),
    };
    mockGetCommands.mockReturnValue([mockCommand]);

    await expect(
      runNonInteractive(
        mockConfig,
        mockSettings,
        '/confirm',
        'prompt-id-confirm',
      ),
    ).rejects.toThrow(
      'Exiting due to a confirmation prompt requested by the command.',
    );
  });

  it('should treat an unknown slash command as a regular prompt', async () => {
    // No commands are mocked, so any slash command is "unknown"
    mockGetCommands.mockReturnValue([]);

    const events: ServerGeminiStreamEvent[] = [
      { type: GeminiEventType.Content, value: 'Response to unknown' },
      {
        type: GeminiEventType.Finished,
        value: { reason: undefined, usageMetadata: { totalTokenCount: 5 } },
      },
    ];
    mockGeminiClient.sendMessageStream.mockReturnValue(
      createStreamFromEvents(events),
    );

    await runNonInteractive(
      mockConfig,
      mockSettings,
      '/unknowncommand',
      'prompt-id-unknown',
    );

    // Ensure the raw input is sent to the model
    expect(mockGeminiClient.sendMessageStream).toHaveBeenCalledWith(
      [{ text: '/unknowncommand' }],
      expect.any(AbortSignal),
      'prompt-id-unknown',
    );

    expect(processStdoutSpy).toHaveBeenCalledWith('Response to unknown');
  });

  it('should throw for unhandled command result types', async () => {
    const mockCommand = {
      name: 'noaction',
      description: 'unhandled type',
      action: vi.fn().mockResolvedValue({
        type: 'unhandled',
      }),
    };
    mockGetCommands.mockReturnValue([mockCommand]);

    await expect(
      runNonInteractive(
        mockConfig,
        mockSettings,
        '/noaction',
        'prompt-id-unhandled',
      ),
    ).rejects.toThrow(
      'Exiting due to command result that is not supported in non-interactive mode.',
    );
  });

  it('should pass arguments to the slash command action', async () => {
    const mockAction = vi.fn().mockResolvedValue({
      type: 'submit_prompt',
      content: [{ text: 'Prompt from command' }],
    });
    const mockCommand = {
      name: 'testargs',
      description: 'a test command',
      action: mockAction,
    };
    mockGetCommands.mockReturnValue([mockCommand]);

    const events: ServerGeminiStreamEvent[] = [
      { type: GeminiEventType.Content, value: 'Acknowledged' },
      {
        type: GeminiEventType.Finished,
        value: { reason: undefined, usageMetadata: { totalTokenCount: 1 } },
      },
    ];
    mockGeminiClient.sendMessageStream.mockReturnValue(
      createStreamFromEvents(events),
    );

    await runNonInteractive(
      mockConfig,
      mockSettings,
      '/testargs arg1 arg2',
      'prompt-id-args',
    );

    expect(mockAction).toHaveBeenCalledWith(expect.any(Object), 'arg1 arg2');

    expect(processStdoutSpy).toHaveBeenCalledWith('Acknowledged');
  });

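  // stream-json output mode emits newline-delimited JSON envelopes (user,
  // assistant, result); the tests below collect raw stdout writes, parse them
  // back into objects, and assert on envelope shape, usage, and API duration.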
  it('should emit stream-json envelopes when output format is stream-json', async () => {
    (mockConfig.getOutputFormat as vi.Mock).mockReturnValue('stream-json');
    (mockConfig.getIncludePartialMessages as vi.Mock).mockReturnValue(false);

    const writes: string[] = [];
    processStdoutSpy.mockImplementation((chunk: string | Uint8Array) => {
      if (typeof chunk === 'string') {
        writes.push(chunk);
      } else {
        writes.push(Buffer.from(chunk).toString('utf8'));
      }
      return true;
    });

    const events: ServerGeminiStreamEvent[] = [
      { type: GeminiEventType.Content, value: 'Hello stream' },
      {
        type: GeminiEventType.Finished,
        value: { reason: undefined, usageMetadata: { totalTokenCount: 4 } },
      },
    ];
    mockGeminiClient.sendMessageStream.mockReturnValue(
      createStreamFromEvents(events),
    );

    await runNonInteractive(
      mockConfig,
      mockSettings,
      'Stream input',
      'prompt-stream',
    );

    const envelopes = writes
      .join('')
      .split('\n')
      .filter((line) => line.trim().length > 0)
      .map((line) => JSON.parse(line));

    expect(envelopes[0]).toMatchObject({
      type: 'user',
      message: { content: 'Stream input' },
    });
    const assistantEnvelope = envelopes.find((env) => env.type === 'assistant');
    expect(assistantEnvelope).toBeTruthy();
    expect(assistantEnvelope?.message?.content?.[0]).toMatchObject({
      type: 'text',
      text: 'Hello stream',
    });
    const resultEnvelope = envelopes.at(-1);
    expect(resultEnvelope).toMatchObject({
      type: 'result',
      is_error: false,
      num_turns: 1,
    });
  });

  it('should include usage metadata and API duration in stream-json result', async () => {
    (mockConfig.getOutputFormat as vi.Mock).mockReturnValue('stream-json');
    (mockConfig.getIncludePartialMessages as vi.Mock).mockReturnValue(false);

    const writes: string[] = [];
    processStdoutSpy.mockImplementation((chunk: string | Uint8Array) => {
      if (typeof chunk === 'string') {
        writes.push(chunk);
      } else {
        writes.push(Buffer.from(chunk).toString('utf8'));
      }
      return true;
    });

    const usageMetadata = {
      promptTokenCount: 11,
      candidatesTokenCount: 5,
      totalTokenCount: 16,
      cachedContentTokenCount: 3,
    };
    mockGetDebugResponses.mockReturnValue([{ usageMetadata }]);

    const nowSpy = vi.spyOn(Date, 'now');
    let current = 0;
    nowSpy.mockImplementation(() => {
      current += 500;
      return current;
    });

    mockGeminiClient.sendMessageStream.mockReturnValue(
      createStreamFromEvents([
        { type: GeminiEventType.Content, value: 'All done' },
      ]),
    );

    await runNonInteractive(
      mockConfig,
      mockSettings,
      'usage test',
      'prompt-usage',
    );

    const envelopes = writes
      .join('')
      .split('\n')
      .filter((line) => line.trim().length > 0)
      .map((line) => JSON.parse(line));
    const resultEnvelope = envelopes.at(-1);
    expect(resultEnvelope?.type).toBe('result');
    expect(resultEnvelope?.duration_api_ms).toBeGreaterThan(0);
    expect(resultEnvelope?.usage).toEqual({
      input_tokens: 11,
      output_tokens: 5,
      total_tokens: 16,
      cache_read_input_tokens: 3,
    });

    nowSpy.mockRestore();
  });
});