Compare commits

..

1 Commit

Author SHA1 Message Date
github-actions[bot]
b2316e3eb0 chore(release): v0.0.15-nightly.2 2025-10-05 00:10:53 +00:00
14 changed files with 40 additions and 124 deletions

12
package-lock.json generated
View File

@@ -1,12 +1,12 @@
{
"name": "@qwen-code/qwen-code",
"version": "0.0.14",
"version": "0.0.15-nightly.2",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "@qwen-code/qwen-code",
"version": "0.0.14",
"version": "0.0.15-nightly.2",
"workspaces": [
"packages/*"
],
@@ -13454,7 +13454,7 @@
},
"packages/cli": {
"name": "@qwen-code/qwen-code",
"version": "0.0.14",
"version": "0.0.15-nightly.2",
"dependencies": {
"@google/genai": "1.9.0",
"@iarna/toml": "^2.2.5",
@@ -13662,7 +13662,7 @@
},
"packages/core": {
"name": "@qwen-code/qwen-code-core",
"version": "0.0.14",
"version": "0.0.15-nightly.2",
"dependencies": {
"@google/genai": "1.13.0",
"@lvce-editor/ripgrep": "^1.6.0",
@@ -13788,7 +13788,7 @@
},
"packages/test-utils": {
"name": "@qwen-code/qwen-code-test-utils",
"version": "0.0.14",
"version": "0.0.15-nightly.2",
"dev": true,
"license": "Apache-2.0",
"devDependencies": {
@@ -13800,7 +13800,7 @@
},
"packages/vscode-ide-companion": {
"name": "qwen-code-vscode-ide-companion",
"version": "0.0.14",
"version": "0.0.15-nightly.2",
"license": "LICENSE",
"dependencies": {
"@modelcontextprotocol/sdk": "^1.15.1",

View File

@@ -1,6 +1,6 @@
{
"name": "@qwen-code/qwen-code",
"version": "0.0.14",
"version": "0.0.15-nightly.2",
"engines": {
"node": ">=20.0.0"
},
@@ -13,7 +13,7 @@
"url": "git+https://github.com/QwenLM/qwen-code.git"
},
"config": {
"sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.0.14"
"sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.0.15-nightly.2"
},
"scripts": {
"start": "node scripts/start.js",

View File

@@ -1,6 +1,6 @@
{
"name": "@qwen-code/qwen-code",
"version": "0.0.14",
"version": "0.0.15-nightly.2",
"description": "Qwen Code",
"repository": {
"type": "git",
@@ -25,7 +25,7 @@
"dist"
],
"config": {
"sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.0.14"
"sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.0.15-nightly.2"
},
"dependencies": {
"@google/genai": "1.9.0",

View File

@@ -913,21 +913,7 @@ const App = ({ config, settings, startupWarnings = [], version }: AppProps) => {
return;
}
/**
* For AuthDialog it is required to complete the authentication process,
* otherwise user cannot proceed to the next step.
* So a quit on AuthDialog should go with normal two press quit
* and without quit-confirm dialog.
*/
if (isAuthDialogOpen) {
setPressedOnce(true);
timerRef.current = setTimeout(() => {
setPressedOnce(false);
}, 500);
return;
}
//1. Close other dialogs (highest priority)
// 1. Close other dialogs (highest priority)
if (closeAnyOpenDialog()) {
return; // Dialog closed, end processing
}
@@ -948,7 +934,6 @@ const App = ({ config, settings, startupWarnings = [], version }: AppProps) => {
handleSlashCommand('/quit-confirm');
},
[
isAuthDialogOpen,
handleSlashCommand,
quitConfirmationRequest,
closeAnyOpenDialog,

View File

@@ -81,7 +81,7 @@ describe('QwenOAuthProgress', () => {
const output = lastFrame();
expect(output).toContain('MockSpinner(dots)');
expect(output).toContain('Waiting for Qwen OAuth authentication...');
expect(output).toContain('(Press ESC or CTRL+C to cancel)');
expect(output).toContain('(Press ESC to cancel)');
});
it('should render loading state with gray border', () => {
@@ -105,7 +105,7 @@ describe('QwenOAuthProgress', () => {
expect(output).toContain('MockSpinner(dots)');
expect(output).toContain('Waiting for authorization');
expect(output).toContain('Time remaining: 5:00');
expect(output).toContain('(Press ESC or CTRL+C to cancel)');
expect(output).toContain('(Press ESC to cancel)');
});
it('should display correct URL in Static component when QR code is generated', async () => {

View File

@@ -110,7 +110,7 @@ function StatusDisplay({
<Text color={Colors.Gray}>
Time remaining: {formatTime(timeRemaining)}
</Text>
<Text color={Colors.AccentPurple}>(Press ESC or CTRL+C to cancel)</Text>
<Text color={Colors.AccentPurple}>(Press ESC to cancel)</Text>
</Box>
</Box>
);
@@ -132,7 +132,7 @@ export function QwenOAuthProgress({
if (authStatus === 'timeout') {
// Any key press in timeout state should trigger cancel to return to auth dialog
onCancel();
} else if (key.escape || (key.ctrl && input === 'c')) {
} else if (key.escape) {
onCancel();
}
});
@@ -250,9 +250,7 @@ export function QwenOAuthProgress({
Time remaining: {Math.floor(timeRemaining / 60)}:
{(timeRemaining % 60).toString().padStart(2, '0')}
</Text>
<Text color={Colors.AccentPurple}>
(Press ESC or CTRL+C to cancel)
</Text>
<Text color={Colors.AccentPurple}>(Press ESC to cancel)</Text>
</Box>
</Box>
);

View File

@@ -61,6 +61,16 @@ export function useDialogClose(options: DialogCloseOptions) {
return true;
}
if (options.isAuthDialogOpen) {
// Mimic ESC behavior: only close if already authenticated (same as AuthDialog ESC logic)
if (options.selectedAuthType !== undefined) {
// Note: We don't await this since we want non-blocking behavior like ESC
void options.handleAuthSelect(undefined, SettingScope.User);
}
// Note: AuthDialog prevents ESC exit if not authenticated, we follow same logic
return true;
}
if (options.isEditorDialogOpen) {
// Mimic ESC behavior: call onExit() directly
options.exitEditorDialog();

View File

@@ -1,6 +1,6 @@
{
"name": "@qwen-code/qwen-code-core",
"version": "0.0.14",
"version": "0.0.15-nightly.2",
"description": "Qwen Code Core",
"repository": {
"type": "git",

View File

@@ -434,6 +434,8 @@ describe('Gemini Client (client.ts)', () => {
config: {
abortSignal,
systemInstruction: getCoreSystemPrompt(''),
temperature: 0,
topP: 1,
tools: [
{
functionDeclarations: [
@@ -484,6 +486,7 @@ describe('Gemini Client (client.ts)', () => {
abortSignal,
systemInstruction: getCoreSystemPrompt(''),
temperature: 0.9,
topP: 1, // from default
topK: 20,
tools: [
{
@@ -2458,6 +2461,7 @@ ${JSON.stringify(
abortSignal,
systemInstruction: getCoreSystemPrompt(''),
temperature: 0.5,
topP: 1,
},
contents,
},

View File

@@ -115,7 +115,10 @@ export class GeminiClient {
private chat?: GeminiChat;
private contentGenerator?: ContentGenerator;
private readonly embeddingModel: string;
private readonly generateContentConfig: GenerateContentConfig = {};
private readonly generateContentConfig: GenerateContentConfig = {
temperature: 0,
topP: 1,
};
private sessionTurnCount = 0;
private readonly loopDetector: LoopDetectionService;

View File

@@ -161,9 +161,6 @@ describe('ContentGenerationPipeline', () => {
top_p: 0.9,
max_tokens: 1000,
}),
expect.objectContaining({
signal: undefined,
}),
);
expect(mockConverter.convertOpenAIResponseToGemini).toHaveBeenCalledWith(
mockOpenAIResponse,
@@ -241,9 +238,6 @@ describe('ContentGenerationPipeline', () => {
expect.objectContaining({
tools: mockTools,
}),
expect.objectContaining({
signal: undefined,
}),
);
});
@@ -280,30 +274,6 @@ describe('ContentGenerationPipeline', () => {
request,
);
});
it('should pass abort signal to OpenAI client when provided', async () => {
const abortController = new AbortController();
const request: GenerateContentParameters = {
model: 'test-model',
contents: [{ parts: [{ text: 'Hello' }], role: 'user' }],
config: { abortSignal: abortController.signal },
};
(mockConverter.convertGeminiRequestToOpenAI as Mock).mockReturnValue([]);
(mockConverter.convertOpenAIResponseToGemini as Mock).mockReturnValue(
new GenerateContentResponse(),
);
(mockClient.chat.completions.create as Mock).mockResolvedValue({
choices: [{ message: { content: 'response' } }],
});
await pipeline.execute(request, 'test-id');
expect(mockClient.chat.completions.create).toHaveBeenCalledWith(
expect.any(Object),
expect.objectContaining({ signal: abortController.signal }),
);
});
});
describe('executeStream', () => {
@@ -368,9 +338,6 @@ describe('ContentGenerationPipeline', () => {
stream: true,
stream_options: { include_usage: true },
}),
expect.objectContaining({
signal: undefined,
}),
);
expect(mockTelemetryService.logStreamingSuccess).toHaveBeenCalledWith(
expect.objectContaining({
@@ -503,42 +470,6 @@ describe('ContentGenerationPipeline', () => {
);
});
it('should pass abort signal to OpenAI client for streaming requests', async () => {
const abortController = new AbortController();
const request: GenerateContentParameters = {
model: 'test-model',
contents: [{ parts: [{ text: 'Hello' }], role: 'user' }],
config: { abortSignal: abortController.signal },
};
const mockStream = {
async *[Symbol.asyncIterator]() {
yield {
id: 'chunk-1',
choices: [{ delta: { content: 'Hello' }, finish_reason: 'stop' }],
};
},
};
(mockConverter.convertGeminiRequestToOpenAI as Mock).mockReturnValue([]);
(mockConverter.convertOpenAIChunkToGemini as Mock).mockReturnValue(
new GenerateContentResponse(),
);
(mockClient.chat.completions.create as Mock).mockResolvedValue(
mockStream,
);
const resultGenerator = await pipeline.executeStream(request, 'test-id');
for await (const _result of resultGenerator) {
// Consume stream
}
expect(mockClient.chat.completions.create).toHaveBeenCalledWith(
expect.any(Object),
expect.objectContaining({ signal: abortController.signal }),
);
});
it('should merge finishReason and usageMetadata from separate chunks', async () => {
// Arrange
const request: GenerateContentParameters = {
@@ -993,9 +924,6 @@ describe('ContentGenerationPipeline', () => {
top_p: 0.9, // Config parameter used since request overrides are not being applied in current implementation
max_tokens: 1000, // Config parameter used since request overrides are not being applied in current implementation
}),
expect.objectContaining({
signal: undefined,
}),
);
});
@@ -1032,9 +960,6 @@ describe('ContentGenerationPipeline', () => {
top_p: 0.9, // From config
max_tokens: 1000, // From config
}),
expect.objectContaining({
signal: undefined,
}),
);
});
@@ -1084,9 +1009,6 @@ describe('ContentGenerationPipeline', () => {
expect.objectContaining({
metadata: { promptId: userPromptId },
}),
expect.objectContaining({
signal: undefined,
}),
);
});
});

View File

@@ -48,9 +48,6 @@ export class ContentGenerationPipeline {
async (openaiRequest, context) => {
const openaiResponse = (await this.client.chat.completions.create(
openaiRequest,
{
signal: request.config?.abortSignal,
},
)) as OpenAI.Chat.ChatCompletion;
const geminiResponse =
@@ -81,9 +78,6 @@ export class ContentGenerationPipeline {
// Stage 1: Create OpenAI stream
const stream = (await this.client.chat.completions.create(
openaiRequest,
{
signal: request.config?.abortSignal,
},
)) as AsyncIterable<OpenAI.Chat.ChatCompletionChunk>;
// Stage 2: Process stream with conversion and logging
@@ -308,9 +302,9 @@ export class ContentGenerationPipeline {
};
const params = {
// Parameters with request fallback but no defaults
...addParameterIfDefined('temperature', 'temperature', 'temperature'),
...addParameterIfDefined('top_p', 'top_p', 'topP'),
// Parameters with request fallback and defaults
temperature: getParameterValue('temperature', 'temperature', 0.0),
top_p: getParameterValue('top_p', 'topP', 1.0),
// Max tokens (special case: different property names)
...addParameterIfDefined('max_tokens', 'max_tokens', 'maxOutputTokens'),

View File

@@ -1,6 +1,6 @@
{
"name": "@qwen-code/qwen-code-test-utils",
"version": "0.0.14",
"version": "0.0.15-nightly.2",
"private": true,
"main": "src/index.ts",
"license": "Apache-2.0",

View File

@@ -2,7 +2,7 @@
"name": "qwen-code-vscode-ide-companion",
"displayName": "Qwen Code Companion",
"description": "Enable Qwen Code with direct access to your VS Code workspace.",
"version": "0.0.14",
"version": "0.0.15-nightly.2",
"publisher": "qwenlm",
"icon": "assets/icon.png",
"repository": {