Compare commits


3 Commits

Author               SHA1        Message                                                   Date
github-actions[bot]  2f46d2d017  chore(release): v0.0.12                                   2025-09-19 11:57:53 +00:00
Mingholy             9a56560eb4  fix: arrow keys on windows (#661)                         2025-09-19 19:44:57 +08:00
Mingholy             da0863b943  fix: missing tool call chunks for openai logging (#657)  2025-09-19 15:19:30 +08:00
10 changed files with 273 additions and 56 deletions

package-lock.json generated

@@ -1,12 +1,12 @@
 {
   "name": "@qwen-code/qwen-code",
-  "version": "0.0.11",
+  "version": "0.0.12",
   "lockfileVersion": 3,
   "requires": true,
   "packages": {
     "": {
       "name": "@qwen-code/qwen-code",
-      "version": "0.0.11",
+      "version": "0.0.12",
       "workspaces": [
         "packages/*"
       ],
@@ -13454,7 +13454,7 @@
     },
     "packages/cli": {
       "name": "@qwen-code/qwen-code",
-      "version": "0.0.11",
+      "version": "0.0.12",
       "dependencies": {
         "@google/genai": "1.9.0",
         "@iarna/toml": "^2.2.5",
@@ -13662,7 +13662,7 @@
     },
     "packages/core": {
       "name": "@qwen-code/qwen-code-core",
-      "version": "0.0.11",
+      "version": "0.0.12",
       "dependencies": {
         "@google/genai": "1.13.0",
         "@lvce-editor/ripgrep": "^1.6.0",
@@ -13788,7 +13788,7 @@
     },
     "packages/test-utils": {
       "name": "@qwen-code/qwen-code-test-utils",
-      "version": "0.0.11",
+      "version": "0.0.12",
       "dev": true,
       "license": "Apache-2.0",
       "devDependencies": {
@@ -13800,7 +13800,7 @@
     },
     "packages/vscode-ide-companion": {
       "name": "qwen-code-vscode-ide-companion",
-      "version": "0.0.11",
+      "version": "0.0.12",
       "license": "LICENSE",
       "dependencies": {
         "@modelcontextprotocol/sdk": "^1.15.1",


@@ -1,6 +1,6 @@
 {
   "name": "@qwen-code/qwen-code",
-  "version": "0.0.11",
+  "version": "0.0.12",
   "engines": {
     "node": ">=20.0.0"
   },
@@ -13,7 +13,7 @@
     "url": "git+https://github.com/QwenLM/qwen-code.git"
   },
   "config": {
-    "sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.0.11"
+    "sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.0.12"
   },
   "scripts": {
     "start": "node scripts/start.js",


@@ -1,6 +1,6 @@
 {
   "name": "@qwen-code/qwen-code",
-  "version": "0.0.11",
+  "version": "0.0.12",
   "description": "Qwen Code",
   "repository": {
     "type": "git",
@@ -25,7 +25,7 @@
     "dist"
   ],
   "config": {
-    "sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.0.11"
+    "sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.0.12"
   },
   "dependencies": {
     "@google/genai": "1.9.0",


@@ -526,7 +526,7 @@ describe('KeypressContext - Kitty Protocol', () => {
       });

       await waitFor(() => {
-        expect(keyHandler).toHaveBeenCalledTimes(2); // 1 paste event + 1 paste event for 'after'
+        expect(keyHandler).toHaveBeenCalledTimes(6); // 1 paste event + 5 individual chars for 'after'
       });

       // Should emit paste event first
@@ -538,12 +538,40 @@ describe('KeypressContext - Kitty Protocol', () => {
         }),
       );

-      // Then process 'after' as a paste event (since it's > 2 chars)
+      // Then process 'after' as individual characters (since it doesn't contain return)
       expect(keyHandler).toHaveBeenNthCalledWith(
         2,
         expect.objectContaining({
-          paste: true,
-          sequence: 'after',
+          name: 'a',
+          paste: false,
         }),
       );
+      expect(keyHandler).toHaveBeenNthCalledWith(
+        3,
+        expect.objectContaining({
+          name: 'f',
+          paste: false,
+        }),
+      );
+      expect(keyHandler).toHaveBeenNthCalledWith(
+        4,
+        expect.objectContaining({
+          name: 't',
+          paste: false,
+        }),
+      );
+      expect(keyHandler).toHaveBeenNthCalledWith(
+        5,
+        expect.objectContaining({
+          name: 'e',
+          paste: false,
+        }),
+      );
+      expect(keyHandler).toHaveBeenNthCalledWith(
+        6,
+        expect.objectContaining({
+          name: 'r',
+          paste: false,
+        }),
+      );
     });
@@ -571,7 +599,7 @@ describe('KeypressContext - Kitty Protocol', () => {
       });

       await waitFor(() => {
-        expect(keyHandler).toHaveBeenCalledTimes(14); // Adjusted based on actual behavior
+        expect(keyHandler).toHaveBeenCalledTimes(16); // 5 + 1 + 6 + 1 + 3 = 16 calls
       });

       // Check the sequence: 'start' (5 chars) + paste1 + 'middle' (6 chars) + paste2 + 'end' (3 chars as paste)
@@ -643,13 +671,18 @@ describe('KeypressContext - Kitty Protocol', () => {
         }),
       );

-      // 'end' as paste event (since it's > 2 chars)
+      // 'end' as individual characters (since it doesn't contain return)
       expect(keyHandler).toHaveBeenNthCalledWith(
         callIndex++,
-        expect.objectContaining({
-          paste: true,
-          sequence: 'end',
-        }),
+        expect.objectContaining({ name: 'e' }),
       );
+      expect(keyHandler).toHaveBeenNthCalledWith(
+        callIndex++,
+        expect.objectContaining({ name: 'n' }),
+      );
+      expect(keyHandler).toHaveBeenNthCalledWith(
+        callIndex++,
+        expect.objectContaining({ name: 'd' }),
+      );
     });
@@ -738,16 +771,18 @@ describe('KeypressContext - Kitty Protocol', () => {
       });

       await waitFor(() => {
-        // With the current implementation, fragmented data gets processed differently
-        // The first fragment '\x1b[20' gets processed as individual characters
-        // The second fragment '0~content\x1b[2' gets processed as paste + individual chars
-        // The third fragment '01~' gets processed as individual characters
-        expect(keyHandler).toHaveBeenCalled();
+        // With the current implementation, fragmented paste markers get reconstructed
+        // into a single paste event for 'content'
+        expect(keyHandler).toHaveBeenCalledTimes(1);
       });

-      // The current implementation processes fragmented paste markers as separate events
-      // rather than reconstructing them into a single paste event
-      expect(keyHandler.mock.calls.length).toBeGreaterThan(1);
+      // Should reconstruct the fragmented paste markers into a single paste event
+      expect(keyHandler).toHaveBeenCalledWith(
+        expect.objectContaining({
+          paste: true,
+          sequence: 'content',
+        }),
+      );
     });
   });
@@ -851,19 +886,38 @@ describe('KeypressContext - Kitty Protocol', () => {
           stdin.emit('data', Buffer.from('lo'));
         });

-        // With the current implementation, data is processed as it arrives
-        // First chunk 'hel' is treated as paste (multi-character)
+        // With the current implementation, data is processed as individual characters
+        // since 'hel' doesn't contain return (0x0d)
         expect(keyHandler).toHaveBeenNthCalledWith(
           1,
           expect.objectContaining({
-            paste: true,
-            sequence: 'hel',
+            name: 'h',
+            sequence: 'h',
+            paste: false,
           }),
         );
-        // Second chunk 'lo' is processed as individual characters
         expect(keyHandler).toHaveBeenNthCalledWith(
           2,
           expect.objectContaining({
+            name: 'e',
+            sequence: 'e',
+            paste: false,
+          }),
+        );
+        expect(keyHandler).toHaveBeenNthCalledWith(
+          3,
+          expect.objectContaining({
+            name: 'l',
+            sequence: 'l',
+            paste: false,
+          }),
+        );
+        // Second chunk 'lo' is also processed as individual characters
+        expect(keyHandler).toHaveBeenNthCalledWith(
+          4,
+          expect.objectContaining({
             name: 'l',
             sequence: 'l',
@@ -872,7 +926,7 @@ describe('KeypressContext - Kitty Protocol', () => {
         );

         expect(keyHandler).toHaveBeenNthCalledWith(
-          3,
+          5,
           expect.objectContaining({
             name: 'o',
             sequence: 'o',
@@ -880,7 +934,7 @@ describe('KeypressContext - Kitty Protocol', () => {
           }),
         );

-        expect(keyHandler).toHaveBeenCalledTimes(3);
+        expect(keyHandler).toHaveBeenCalledTimes(5);
       } finally {
         vi.useRealTimers();
       }
@@ -907,14 +961,20 @@ describe('KeypressContext - Kitty Protocol', () => {
       });

       // Should flush immediately without waiting for timeout
-      // Large data gets treated as paste event
-      expect(keyHandler).toHaveBeenCalledTimes(1);
-      expect(keyHandler).toHaveBeenCalledWith(
-        expect.objectContaining({
-          paste: true,
-          sequence: largeData,
-        }),
-      );
+      // Large data without return gets treated as individual characters
+      expect(keyHandler).toHaveBeenCalledTimes(65);
+      // Each character should be processed individually
+      for (let i = 0; i < 65; i++) {
+        expect(keyHandler).toHaveBeenNthCalledWith(
+          i + 1,
+          expect.objectContaining({
+            name: 'x',
+            sequence: 'x',
+            paste: false,
+          }),
+        );
+      }

       // Advancing timer should not cause additional calls
       const callCountBefore = keyHandler.mock.calls.length;
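
Note on the escape sequences the fragmented-marker test above splits across chunks ('\x1b[20', '0~content\x1b[2', '01~'): terminals frame bracketed pastes with the standard xterm markers ESC[200~ and ESC[201~, and the fix reassembles those markers across chunk boundaries into a single paste event. A minimal TypeScript sketch of that framing, with an illustrative helper name that is not qwen-code's actual API:

// Standard xterm bracketed-paste delimiters.
const PASTE_START = '\x1b[200~';
const PASTE_END = '\x1b[201~';

// Illustrative only: given re-assembled stdin data such as
// '\x1b[200~content\x1b[201~', extract the pasted payloads.
function extractPastes(data: string): string[] {
  const pastes: string[] = [];
  let from = 0;
  for (;;) {
    const start = data.indexOf(PASTE_START, from);
    if (start === -1) break;
    const end = data.indexOf(PASTE_END, start + PASTE_START.length);
    if (end === -1) break; // unterminated paste: wait for more data
    pastes.push(data.slice(start + PASTE_START.length, end));
    from = end + PASTE_END.length;
  }
  return pastes;
}

Applied to the test's three fragments concatenated, this yields exactly one payload, 'content', matching the single paste event the updated assertions expect.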


@@ -407,7 +407,11 @@ export function KeypressProvider({
         return;
       }

-      if (rawDataBuffer.length <= 2 || isPaste) {
+      if (
+        (rawDataBuffer.length <= 2 && rawDataBuffer.includes(0x0d)) ||
+        !rawDataBuffer.includes(0x0d) ||
+        isPaste
+      ) {
         keypressStream.write(rawDataBuffer);
       } else {
         // Flush raw data buffer as a paste event
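
This condition is the heart of the arrow-keys fix (#661). Previously, any buffered chunk longer than two bytes was flushed as a paste event, so a multi-byte arrow-key escape sequence such as '\x1b[A' could be misclassified as a paste. Now a buffer is only flushed as a paste when it is longer than two bytes and actually contains a carriage return. A standalone sketch of the decision (the helper name and shape are hypothetical, for illustration only):

const CR = 0x0d; // carriage return, the byte a pasted newline carries

// Hypothetical helper mirroring the patched condition: true means "write the
// buffer to the keypress stream" (ordinary key handling), false means "flush
// the buffer as one synthetic paste event".
function flushAsKeypresses(rawDataBuffer: Buffer, isPaste: boolean): boolean {
  if (isPaste) return true; // bracketed-paste data is handed to the stream unchanged
  if (!rawDataBuffer.includes(CR)) return true; // no return anywhere: typing or escape sequences
  return rawDataBuffer.length <= 2; // a return in a tiny buffer is just the Enter key
}

This is also what the updated tests assert: 'hel', 'after', and 65 bytes of 'x' contain no return and are now emitted as individual keypresses rather than paste events.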


@@ -1,6 +1,6 @@
 {
   "name": "@qwen-code/qwen-code-core",
-  "version": "0.0.11",
+  "version": "0.0.12",
   "description": "Qwen Code Core",
   "repository": {
     "type": "git",


@@ -1105,5 +1105,164 @@ describe('ContentGenerationPipeline', () => {
       expect.any(Array),
     );
   });
+
+  it('should collect all OpenAI chunks for logging even when Gemini responses are filtered', async () => {
+    // Create chunks that would produce empty Gemini responses (partial tool calls)
+    const partialToolCallChunk1: OpenAI.Chat.ChatCompletionChunk = {
+      id: 'chunk-1',
+      object: 'chat.completion.chunk',
+      created: Date.now(),
+      model: 'test-model',
+      choices: [
+        {
+          index: 0,
+          delta: {
+            tool_calls: [
+              {
+                index: 0,
+                id: 'call_123',
+                type: 'function',
+                function: { name: 'test_function', arguments: '{"par' },
+              },
+            ],
+          },
+          finish_reason: null,
+        },
+      ],
+    };
+
+    const partialToolCallChunk2: OpenAI.Chat.ChatCompletionChunk = {
+      id: 'chunk-2',
+      object: 'chat.completion.chunk',
+      created: Date.now(),
+      model: 'test-model',
+      choices: [
+        {
+          index: 0,
+          delta: {
+            tool_calls: [
+              {
+                index: 0,
+                function: { arguments: 'am": "value"}' },
+              },
+            ],
+          },
+          finish_reason: null,
+        },
+      ],
+    };
+
+    const finishChunk: OpenAI.Chat.ChatCompletionChunk = {
+      id: 'chunk-3',
+      object: 'chat.completion.chunk',
+      created: Date.now(),
+      model: 'test-model',
+      choices: [
+        {
+          index: 0,
+          delta: {},
+          finish_reason: 'tool_calls',
+        },
+      ],
+    };
+
+    // Mock empty Gemini responses for partial chunks (they get filtered)
+    const emptyGeminiResponse1 = new GenerateContentResponse();
+    emptyGeminiResponse1.candidates = [
+      {
+        content: { parts: [], role: 'model' },
+        index: 0,
+        safetyRatings: [],
+      },
+    ];
+
+    const emptyGeminiResponse2 = new GenerateContentResponse();
+    emptyGeminiResponse2.candidates = [
+      {
+        content: { parts: [], role: 'model' },
+        index: 0,
+        safetyRatings: [],
+      },
+    ];
+
+    // Mock final Gemini response with tool call
+    const finalGeminiResponse = new GenerateContentResponse();
+    finalGeminiResponse.candidates = [
+      {
+        content: {
+          parts: [
+            {
+              functionCall: {
+                id: 'call_123',
+                name: 'test_function',
+                args: { param: 'value' },
+              },
+            },
+          ],
+          role: 'model',
+        },
+        finishReason: FinishReason.STOP,
+        index: 0,
+        safetyRatings: [],
+      },
+    ];
+
+    // Setup converter mocks
+    (mockConverter.convertGeminiRequestToOpenAI as Mock).mockReturnValue([
+      { role: 'user', content: 'test' },
+    ]);
+    (mockConverter.convertOpenAIChunkToGemini as Mock)
+      .mockReturnValueOnce(emptyGeminiResponse1) // First partial chunk -> empty response
+      .mockReturnValueOnce(emptyGeminiResponse2) // Second partial chunk -> empty response
+      .mockReturnValueOnce(finalGeminiResponse); // Finish chunk -> complete response
+
+    // Mock stream
+    const mockStream = {
+      async *[Symbol.asyncIterator]() {
+        yield partialToolCallChunk1;
+        yield partialToolCallChunk2;
+        yield finishChunk;
+      },
+    };
+    (mockClient.chat.completions.create as Mock).mockResolvedValue(
+      mockStream,
+    );
+
+    const request: GenerateContentParameters = {
+      model: 'test-model',
+      contents: [{ role: 'user', parts: [{ text: 'test' }] }],
+    };
+
+    // Collect responses
+    const responses: GenerateContentResponse[] = [];
+    const resultGenerator = await pipeline.executeStream(
+      request,
+      'test-prompt-id',
+    );
+    for await (const response of resultGenerator) {
+      responses.push(response);
+    }
+
+    // Should only yield the final response (empty ones are filtered)
+    expect(responses).toHaveLength(1);
+    expect(responses[0]).toBe(finalGeminiResponse);
+
+    // Verify telemetry was called with ALL OpenAI chunks, including the filtered ones
+    expect(mockTelemetryService.logStreamingSuccess).toHaveBeenCalledWith(
+      expect.objectContaining({
+        model: 'test-model',
+        duration: expect.any(Number),
+        userPromptId: 'test-prompt-id',
+        authType: 'openai',
+      }),
+      [finalGeminiResponse], // Only the non-empty Gemini response
+      expect.objectContaining({
+        model: 'test-model',
+        messages: [{ role: 'user', content: 'test' }],
+      }),
+      [partialToolCallChunk1, partialToolCallChunk2, finishChunk], // ALL OpenAI chunks
+    );
+  });
 });
});


@@ -118,6 +118,9 @@ export class ContentGenerationPipeline {
     try {
       // Stage 2a: Convert and yield each chunk while preserving original
       for await (const chunk of stream) {
+        // Always collect OpenAI chunks for logging, regardless of Gemini conversion result
+        collectedOpenAIChunks.push(chunk);
+
         const response = this.converter.convertOpenAIChunkToGemini(chunk);

         // Stage 2b: Filter empty responses to avoid downstream issues
@@ -132,9 +135,7 @@
         // Stage 2c: Handle chunk merging for providers that send finishReason and usageMetadata separately
         const shouldYield = this.handleChunkMerging(
           response,
-          chunk,
           collectedGeminiResponses,
-          collectedOpenAIChunks,
           (mergedResponse) => {
             pendingFinishResponse = mergedResponse;
           },
@@ -182,17 +183,13 @@
    * finishReason and the most up-to-date usage information from any provider pattern.
    *
    * @param response Current Gemini response
-   * @param chunk Current OpenAI chunk
    * @param collectedGeminiResponses Array to collect responses for logging
-   * @param collectedOpenAIChunks Array to collect chunks for logging
    * @param setPendingFinish Callback to set pending finish response
    * @returns true if the response should be yielded, false if it should be held for merging
    */
   private handleChunkMerging(
     response: GenerateContentResponse,
-    chunk: OpenAI.Chat.ChatCompletionChunk,
     collectedGeminiResponses: GenerateContentResponse[],
-    collectedOpenAIChunks: OpenAI.Chat.ChatCompletionChunk[],
     setPendingFinish: (response: GenerateContentResponse) => void,
   ): boolean {
     const isFinishChunk = response.candidates?.[0]?.finishReason;
@@ -206,7 +203,6 @@
     if (isFinishChunk) {
       // This is a finish reason chunk
       collectedGeminiResponses.push(response);
-      collectedOpenAIChunks.push(chunk);
       setPendingFinish(response);
       return false; // Don't yield yet, wait for potential subsequent chunks to merge
     } else if (hasPendingFinish) {
@@ -228,7 +224,6 @@
       // Update the collected responses with the merged response
       collectedGeminiResponses[collectedGeminiResponses.length - 1] =
         mergedResponse;
-      collectedOpenAIChunks.push(chunk);
       setPendingFinish(mergedResponse);

       return true; // Yield the merged response
@@ -236,7 +231,6 @@
       // Normal chunk - collect and yield
       collectedGeminiResponses.push(response);
-      collectedOpenAIChunks.push(chunk);
       return true;
     }
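
Net effect of the change above (#657): collection of raw OpenAI chunks moves out of handleChunkMerging and up to the top of the stream loop, so chunks whose converted Gemini responses are empty (for example, partial tool-call argument deltas) are still captured for logging. A simplified sketch with stand-in types, not the real ContentGenerationPipeline signatures:

// Stand-ins for OpenAI.Chat.ChatCompletionChunk and GenerateContentResponse.
type Chunk = { id: string };
type Response = { parts: unknown[] };

const isEmpty = (r: Response): boolean => r.parts.length === 0;

// Collect first, then convert and filter: the telemetry log (collectedChunks)
// sees every raw chunk, while empty converted responses never reach the caller.
async function* streamWithLogging(
  stream: AsyncIterable<Chunk>,
  convert: (c: Chunk) => Response,
  collectedChunks: Chunk[],
): AsyncGenerator<Response> {
  for await (const chunk of stream) {
    collectedChunks.push(chunk); // unconditional, fixing the missing-chunk logging bug
    const response = convert(chunk);
    if (isEmpty(response)) continue; // filtered from output, but already logged
    yield response;
  }
}

The new test above exercises exactly this path: two partial tool-call chunks convert to empty responses and are dropped from the yielded stream, yet logStreamingSuccess still receives all three OpenAI chunks.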


@@ -1,6 +1,6 @@
 {
   "name": "@qwen-code/qwen-code-test-utils",
-  "version": "0.0.11",
+  "version": "0.0.12",
   "private": true,
   "main": "src/index.ts",
   "license": "Apache-2.0",


@@ -2,7 +2,7 @@
   "name": "qwen-code-vscode-ide-companion",
   "displayName": "Qwen Code Companion",
   "description": "Enable Qwen Code with direct access to your VS Code workspace.",
-  "version": "0.0.11",
+  "version": "0.0.12",
   "publisher": "qwenlm",
   "icon": "assets/icon.png",
   "repository": {