Compare commits


4 Commits

Author        SHA1        Message                                                               Date
mingholy.lmh  dda03aaf53  fix: unable to quit when auth dialog is opened                        2025-10-13 14:00:24 +08:00
tanzhenxin    270dda4aa7  fix: invalid tool_calls request due to improper cancellation (#790)   2025-10-13 09:25:31 +08:00
Fan           d4fa15dd53  remove topp default value 0.0 (#785)                                  2025-10-09 15:41:57 +08:00
tanzhenxin    0922437bd5  chore: pump version to 0.0.14                                         2025-09-29 14:31:14 +08:00
9 changed files with 119 additions and 27 deletions

View File

@@ -1,5 +1,13 @@
# Changelog
## 0.0.14
- Added plan mode support for task planning
- Fixed unreliable editCorrector that injects extra escape characters
- Fixed task tool dynamic updates
- Added Qwen3-VL-Plus token limits (256K input, 32K output) and highres support
- Enhanced dashScope cache control
## 0.0.13
- Added YOLO mode support for automatic vision model switching with CLI arguments and environment variables.

View File

@@ -913,7 +913,21 @@ const App = ({ config, settings, startupWarnings = [], version }: AppProps) => {
return;
}
// 1. Close other dialogs (highest priority)
/**
* The AuthDialog requires the user to complete authentication before
* proceeding, so quitting from it should use the normal two-press quit
* behavior rather than the quit-confirm dialog.
*/
if (isAuthDialogOpen) {
setPressedOnce(true);
timerRef.current = setTimeout(() => {
setPressedOnce(false);
}, 500);
return;
}
//1. Close other dialogs (highest priority)
if (closeAnyOpenDialog()) {
return; // Dialog closed, end processing
}
@@ -934,6 +948,7 @@ const App = ({ config, settings, startupWarnings = [], version }: AppProps) => {
handleSlashCommand('/quit-confirm');
},
[
isAuthDialogOpen,
handleSlashCommand,
quitConfirmationRequest,
closeAnyOpenDialog,
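
The guard above reuses the app's existing two-press quit machinery. As a standalone illustration, here is a minimal sketch of that pattern (the hook name and props are hypothetical, not the actual App.tsx internals): the first Ctrl+C arms a short window, and only a second press inside that window actually quits.

```typescript
import { useRef, useState, useCallback } from 'react';

// Hypothetical sketch of the double-press pattern used above: the first
// press arms a 500ms window; a second press inside the window quits.
function useDoublePressQuit(onQuit: () => void, windowMs = 500) {
  const [pressedOnce, setPressedOnce] = useState(false);
  const timerRef = useRef<ReturnType<typeof setTimeout> | null>(null);

  const handlePress = useCallback(() => {
    if (pressedOnce) {
      if (timerRef.current) clearTimeout(timerRef.current);
      onQuit(); // second press within the window: quit for real
      return;
    }
    setPressedOnce(true); // first press: arm the window
    timerRef.current = setTimeout(() => setPressedOnce(false), windowMs);
  }, [pressedOnce, onQuit, windowMs]);

  return { pressedOnce, handlePress };
}
```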

View File

@@ -81,7 +81,7 @@ describe('QwenOAuthProgress', () => {
const output = lastFrame();
expect(output).toContain('MockSpinner(dots)');
expect(output).toContain('Waiting for Qwen OAuth authentication...');
expect(output).toContain('(Press ESC to cancel)');
expect(output).toContain('(Press ESC or CTRL+C to cancel)');
});
it('should render loading state with gray border', () => {
@@ -105,7 +105,7 @@ describe('QwenOAuthProgress', () => {
expect(output).toContain('MockSpinner(dots)');
expect(output).toContain('Waiting for authorization');
expect(output).toContain('Time remaining: 5:00');
expect(output).toContain('(Press ESC to cancel)');
expect(output).toContain('(Press ESC or CTRL+C to cancel)');
});
it('should display correct URL in Static component when QR code is generated', async () => {

View File

@@ -110,7 +110,7 @@ function StatusDisplay({
<Text color={Colors.Gray}>
Time remaining: {formatTime(timeRemaining)}
</Text>
<Text color={Colors.AccentPurple}>(Press ESC to cancel)</Text>
<Text color={Colors.AccentPurple}>(Press ESC or CTRL+C to cancel)</Text>
</Box>
</Box>
);
@@ -132,7 +132,7 @@ export function QwenOAuthProgress({
if (authStatus === 'timeout') {
// Any key press in timeout state should trigger cancel to return to auth dialog
onCancel();
} else if (key.escape) {
} else if (key.escape || (key.ctrl && input === 'c')) {
onCancel();
}
});
@@ -250,7 +250,9 @@ export function QwenOAuthProgress({
Time remaining: {Math.floor(timeRemaining / 60)}:
{(timeRemaining % 60).toString().padStart(2, '0')}
</Text>
<Text color={Colors.AccentPurple}>(Press ESC to cancel)</Text>
<Text color={Colors.AccentPurple}>
(Press ESC or CTRL+C to cancel)
</Text>
</Box>
</Box>
);
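
The behavioral change here is a single extra predicate in Ink's `useInput` callback. A condensed, self-contained sketch (assuming the `ink` package, and that Ink's default `exitOnCtrlC` handling is disabled so Ctrl+C actually reaches the callback; `onCancel` is a stand-in prop):

```typescript
import { useInput } from 'ink';

// Cancel on ESC or Ctrl+C. Ink reports Ctrl+C as key.ctrl === true with
// input === 'c'; it only reaches this handler if exitOnCtrlC is disabled.
export function useCancelKeys(onCancel: () => void): void {
  useInput((input, key) => {
    if (key.escape || (key.ctrl && input === 'c')) {
      onCancel();
    }
  });
}
```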

View File

@@ -61,16 +61,6 @@ export function useDialogClose(options: DialogCloseOptions) {
return true;
}
if (options.isAuthDialogOpen) {
// Mimic ESC behavior: only close if already authenticated (same as AuthDialog ESC logic)
if (options.selectedAuthType !== undefined) {
// Note: We don't await this since we want non-blocking behavior like ESC
void options.handleAuthSelect(undefined, SettingScope.User);
}
// Note: AuthDialog prevents ESC exit if not authenticated, we follow same logic
return true;
}
if (options.isEditorDialogOpen) {
// Mimic ESC behavior: call onExit() directly
options.exitEditorDialog();
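
With those lines removed, `useDialogClose` no longer special-cases the auth dialog: Ctrl+C on an open AuthDialog falls through to the two-press quit handling added in App.tsx above, instead of silently closing it. A condensed sketch of the remaining shape (other dialog branches elided; option names as in the hunk):

```typescript
// Condensed sketch, not the full hook: the auth-dialog branch is gone, so
// an open AuthDialog is no longer closed here.
function closeAnyOpenDialog(options: {
  isEditorDialogOpen: boolean;
  exitEditorDialog: () => void;
}): boolean {
  if (options.isEditorDialogOpen) {
    // Mimic ESC behavior: call onExit() directly
    options.exitEditorDialog();
    return true;
  }
  return false; // nothing handled; caller proceeds to quit handling
}
```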

View File

@@ -434,8 +434,6 @@ describe('Gemini Client (client.ts)', () => {
config: {
abortSignal,
systemInstruction: getCoreSystemPrompt(''),
temperature: 0,
topP: 1,
tools: [
{
functionDeclarations: [
@@ -486,7 +484,6 @@ describe('Gemini Client (client.ts)', () => {
abortSignal,
systemInstruction: getCoreSystemPrompt(''),
temperature: 0.9,
topP: 1, // from default
topK: 20,
tools: [
{
@@ -2461,7 +2458,6 @@ ${JSON.stringify(
abortSignal,
systemInstruction: getCoreSystemPrompt(''),
temperature: 0.5,
topP: 1,
},
contents,
},

View File

@@ -115,10 +115,7 @@ export class GeminiClient {
private chat?: GeminiChat;
private contentGenerator?: ContentGenerator;
private readonly embeddingModel: string;
private readonly generateContentConfig: GenerateContentConfig = {
temperature: 0,
topP: 1,
};
private readonly generateContentConfig: GenerateContentConfig = {};
private sessionTurnCount = 0;
private readonly loopDetector: LoopDetectionService;
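
The practical effect of emptying this initializer: requests no longer silently inherit `temperature: 0` and `topP: 1`. Only caller-supplied values are sent, and the model provider's own defaults apply otherwise. A minimal sketch of the merge semantics (assuming the `GenerateContentConfig` type from `@google/genai`; the real merge logic lives elsewhere in `GeminiClient`):

```typescript
import type { GenerateContentConfig } from '@google/genai';

// Before: every request inherited temperature 0 and topP 1 from the base
// config. After: the base is empty, so absent values are simply omitted.
const baseConfig: GenerateContentConfig = {};

function mergeConfig(override?: GenerateContentConfig): GenerateContentConfig {
  return { ...baseConfig, ...override };
}

mergeConfig();                     // {} — provider defaults win
mergeConfig({ temperature: 0.9 }); // { temperature: 0.9 } — only what was asked for
```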

View File

@@ -161,6 +161,9 @@ describe('ContentGenerationPipeline', () => {
top_p: 0.9,
max_tokens: 1000,
}),
expect.objectContaining({
signal: undefined,
}),
);
expect(mockConverter.convertOpenAIResponseToGemini).toHaveBeenCalledWith(
mockOpenAIResponse,
@@ -238,6 +241,9 @@ describe('ContentGenerationPipeline', () => {
expect.objectContaining({
tools: mockTools,
}),
expect.objectContaining({
signal: undefined,
}),
);
});
@@ -274,6 +280,30 @@ describe('ContentGenerationPipeline', () => {
request,
);
});
it('should pass abort signal to OpenAI client when provided', async () => {
const abortController = new AbortController();
const request: GenerateContentParameters = {
model: 'test-model',
contents: [{ parts: [{ text: 'Hello' }], role: 'user' }],
config: { abortSignal: abortController.signal },
};
(mockConverter.convertGeminiRequestToOpenAI as Mock).mockReturnValue([]);
(mockConverter.convertOpenAIResponseToGemini as Mock).mockReturnValue(
new GenerateContentResponse(),
);
(mockClient.chat.completions.create as Mock).mockResolvedValue({
choices: [{ message: { content: 'response' } }],
});
await pipeline.execute(request, 'test-id');
expect(mockClient.chat.completions.create).toHaveBeenCalledWith(
expect.any(Object),
expect.objectContaining({ signal: abortController.signal }),
);
});
});
describe('executeStream', () => {
@@ -338,6 +368,9 @@ describe('ContentGenerationPipeline', () => {
stream: true,
stream_options: { include_usage: true },
}),
expect.objectContaining({
signal: undefined,
}),
);
expect(mockTelemetryService.logStreamingSuccess).toHaveBeenCalledWith(
expect.objectContaining({
@@ -470,6 +503,42 @@ describe('ContentGenerationPipeline', () => {
);
});
it('should pass abort signal to OpenAI client for streaming requests', async () => {
const abortController = new AbortController();
const request: GenerateContentParameters = {
model: 'test-model',
contents: [{ parts: [{ text: 'Hello' }], role: 'user' }],
config: { abortSignal: abortController.signal },
};
const mockStream = {
async *[Symbol.asyncIterator]() {
yield {
id: 'chunk-1',
choices: [{ delta: { content: 'Hello' }, finish_reason: 'stop' }],
};
},
};
(mockConverter.convertGeminiRequestToOpenAI as Mock).mockReturnValue([]);
(mockConverter.convertOpenAIChunkToGemini as Mock).mockReturnValue(
new GenerateContentResponse(),
);
(mockClient.chat.completions.create as Mock).mockResolvedValue(
mockStream,
);
const resultGenerator = await pipeline.executeStream(request, 'test-id');
for await (const _result of resultGenerator) {
// Consume stream
}
expect(mockClient.chat.completions.create).toHaveBeenCalledWith(
expect.any(Object),
expect.objectContaining({ signal: abortController.signal }),
);
});
it('should merge finishReason and usageMetadata from separate chunks', async () => {
// Arrange
const request: GenerateContentParameters = {
@@ -924,6 +993,9 @@ describe('ContentGenerationPipeline', () => {
top_p: 0.9, // Config parameter used since request overrides are not being applied in current implementation
max_tokens: 1000, // Config parameter used since request overrides are not being applied in current implementation
}),
expect.objectContaining({
signal: undefined,
}),
);
});
@@ -960,6 +1032,9 @@ describe('ContentGenerationPipeline', () => {
top_p: 0.9, // From config
max_tokens: 1000, // From config
}),
expect.objectContaining({
signal: undefined,
}),
);
});
@@ -1009,6 +1084,9 @@ describe('ContentGenerationPipeline', () => {
expect.objectContaining({
metadata: { promptId: userPromptId },
}),
expect.objectContaining({
signal: undefined,
}),
);
});
});
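
On the caller side, cancellation is now the standard `AbortController` idiom. A sketch against the OpenAI Node SDK directly (the model name is a placeholder), since the SDK's per-request options bag accepts a `signal`:

```typescript
import OpenAI from 'openai';

const client = new OpenAI(); // assumes OPENAI_API_KEY in the environment
const controller = new AbortController();

// The second argument to create() is the SDK's per-request options; its
// `signal` aborts the underlying HTTP request when controller.abort() fires.
const pending = client.chat.completions.create(
  { model: 'placeholder-model', messages: [{ role: 'user', content: 'Hello' }] },
  { signal: controller.signal },
);

// e.g. when the user presses ESC or Ctrl+C:
controller.abort();
pending.catch((err) => {
  // The SDK rejects aborted requests (APIUserAbortError in current versions).
  console.error('request cancelled:', err.name);
});
```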

View File

@@ -48,6 +48,9 @@ export class ContentGenerationPipeline {
async (openaiRequest, context) => {
const openaiResponse = (await this.client.chat.completions.create(
openaiRequest,
{
signal: request.config?.abortSignal,
},
)) as OpenAI.Chat.ChatCompletion;
const geminiResponse =
@@ -78,6 +81,9 @@ export class ContentGenerationPipeline {
// Stage 1: Create OpenAI stream
const stream = (await this.client.chat.completions.create(
openaiRequest,
{
signal: request.config?.abortSignal,
},
)) as AsyncIterable<OpenAI.Chat.ChatCompletionChunk>;
// Stage 2: Process stream with conversion and logging
@@ -302,9 +308,9 @@ export class ContentGenerationPipeline {
};
const params = {
// Parameters with request fallback and defaults
temperature: getParameterValue('temperature', 'temperature', 0.0),
top_p: getParameterValue('top_p', 'topP', 1.0),
// Parameters with request fallback but no defaults
...addParameterIfDefined('temperature', 'temperature', 'temperature'),
...addParameterIfDefined('top_p', 'top_p', 'topP'),
// Max tokens (special case: different property names)
...addParameterIfDefined('max_tokens', 'max_tokens', 'maxOutputTokens'),
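
`addParameterIfDefined` is referenced but not shown in this hunk. A plausible, hypothetical sketch of its contract (`makeAddParameterIfDefined` and both config objects are illustrative names, not the real pipeline internals): emit `{ [paramName]: value }` only when the sampling-params config, or the request config as a fallback, actually defines a value, so absent parameters are omitted instead of defaulted to `0.0` / `1.0`.

```typescript
// Hypothetical sketch of the helper used above (the real one is presumably
// a closure inside the request builder). Omitting undefined parameters lets
// the provider apply its own defaults.
function makeAddParameterIfDefined(
  samplingConfig: Record<string, unknown>,
  requestConfig: Record<string, unknown>,
) {
  return (paramName: string, configKey: string, requestKey: string) => {
    const value = samplingConfig[configKey] ?? requestConfig[requestKey];
    return value !== undefined ? { [paramName]: value } : {};
  };
}

// Usage mirroring the diff:
const addParameterIfDefined = makeAddParameterIfDefined(
  { top_p: 0.9 },           // e.g. user-supplied sampling params
  { maxOutputTokens: 1000 }, // e.g. per-request Gemini config
);
const params = {
  ...addParameterIfDefined('temperature', 'temperature', 'temperature'), // {}
  ...addParameterIfDefined('top_p', 'top_p', 'topP'),                    // { top_p: 0.9 }
  ...addParameterIfDefined('max_tokens', 'max_tokens', 'maxOutputTokens'), // { max_tokens: 1000 }
};
```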