Mirror of https://github.com/QwenLM/qwen-code.git, synced 2025-12-28 04:29:15 +00:00

Compare commits: mingholy/f ... dev/topp-f (3 commits)
| Author | SHA1 | Date |
| --- | --- | --- |
|  | 77bae3ffc0 |  |
|  | 0922437bd5 |  |
|  | 9a0cb64a34 |  |
CHANGELOG.md

@@ -1,5 +1,13 @@
 # Changelog
 
+## 0.0.14
+
+- Added plan mode support for task planning
+- Fixed unreliable editCorrector that injects extra escape characters
+- Fixed task tool dynamic updates
+- Added Qwen3-VL-Plus token limits (256K input, 32K output) and highres support
+- Enhanced dashScope cache control
+
 ## 0.0.13
 
 - Added YOLO mode support for automatic vision model switching with CLI arguments and environment variables.
package-lock.json (generated, 12 lines changed)
@@ -1,12 +1,12 @@
 {
   "name": "@qwen-code/qwen-code",
-  "version": "0.0.13",
+  "version": "0.0.14",
   "lockfileVersion": 3,
   "requires": true,
   "packages": {
     "": {
       "name": "@qwen-code/qwen-code",
-      "version": "0.0.13",
+      "version": "0.0.14",
       "workspaces": [
         "packages/*"
       ],
@@ -13454,7 +13454,7 @@
     },
     "packages/cli": {
       "name": "@qwen-code/qwen-code",
-      "version": "0.0.13",
+      "version": "0.0.14",
       "dependencies": {
         "@google/genai": "1.9.0",
         "@iarna/toml": "^2.2.5",
@@ -13662,7 +13662,7 @@
     },
     "packages/core": {
       "name": "@qwen-code/qwen-code-core",
-      "version": "0.0.13",
+      "version": "0.0.14",
       "dependencies": {
         "@google/genai": "1.13.0",
         "@lvce-editor/ripgrep": "^1.6.0",
@@ -13788,7 +13788,7 @@
     },
     "packages/test-utils": {
       "name": "@qwen-code/qwen-code-test-utils",
-      "version": "0.0.13",
+      "version": "0.0.14",
       "dev": true,
       "license": "Apache-2.0",
       "devDependencies": {
@@ -13800,7 +13800,7 @@
     },
     "packages/vscode-ide-companion": {
       "name": "qwen-code-vscode-ide-companion",
-      "version": "0.0.13",
+      "version": "0.0.14",
       "license": "LICENSE",
       "dependencies": {
         "@modelcontextprotocol/sdk": "^1.15.1",
package.json

@@ -1,6 +1,6 @@
 {
   "name": "@qwen-code/qwen-code",
-  "version": "0.0.13",
+  "version": "0.0.14",
   "engines": {
     "node": ">=20.0.0"
   },
@@ -13,7 +13,7 @@
     "url": "git+https://github.com/QwenLM/qwen-code.git"
   },
   "config": {
-    "sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.0.13"
+    "sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.0.14"
   },
   "scripts": {
     "start": "node scripts/start.js",
packages/cli/package.json

@@ -1,6 +1,6 @@
 {
   "name": "@qwen-code/qwen-code",
-  "version": "0.0.13",
+  "version": "0.0.14",
   "description": "Qwen Code",
   "repository": {
     "type": "git",
@@ -25,7 +25,7 @@
     "dist"
   ],
   "config": {
-    "sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.0.13"
+    "sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.0.14"
   },
   "dependencies": {
     "@google/genai": "1.9.0",
useGeminiStream.test.tsx

@@ -54,11 +54,7 @@ const MockedGeminiClientClass = vi.hoisted(() =>
 const MockedUserPromptEvent = vi.hoisted(() =>
   vi.fn().mockImplementation(() => {}),
 );
-const MockedApiCancelEvent = vi.hoisted(() =>
-  vi.fn().mockImplementation(() => {}),
-);
 const mockParseAndFormatApiError = vi.hoisted(() => vi.fn());
-const mockLogApiCancel = vi.hoisted(() => vi.fn());
 
 // Vision auto-switch mocks (hoisted)
 const mockHandleVisionSwitch = vi.hoisted(() =>
@@ -75,9 +71,7 @@ vi.mock('@qwen-code/qwen-code-core', async (importOriginal) => {
     GitService: vi.fn(),
     GeminiClient: MockedGeminiClientClass,
     UserPromptEvent: MockedUserPromptEvent,
-    ApiCancelEvent: MockedApiCancelEvent,
     parseAndFormatApiError: mockParseAndFormatApiError,
-    logApiCancel: mockLogApiCancel,
   };
 });
useGeminiStream.ts

@@ -31,8 +31,6 @@ import {
   ConversationFinishedEvent,
   ApprovalMode,
   parseAndFormatApiError,
-  logApiCancel,
-  ApiCancelEvent,
 } from '@qwen-code/qwen-code-core';
 import { type Part, type PartListUnion, FinishReason } from '@google/genai';
 import type {
@@ -225,16 +223,6 @@ export const useGeminiStream = (
     turnCancelledRef.current = true;
     isSubmittingQueryRef.current = false;
     abortControllerRef.current?.abort();
-
-    // Log API cancellation
-    const prompt_id = config.getSessionId() + '########' + getPromptCount();
-    const cancellationEvent = new ApiCancelEvent(
-      config.getModel(),
-      prompt_id,
-      config.getContentGeneratorConfig()?.authType,
-    );
-    logApiCancel(config, cancellationEvent);
-
     if (pendingHistoryItemRef.current) {
       addItem(pendingHistoryItemRef.current, Date.now());
     }
@@ -254,8 +242,6 @@ export const useGeminiStream = (
     setPendingHistoryItem,
     onCancelSubmit,
     pendingHistoryItemRef,
-    config,
-    getPromptCount,
   ]);
 
   useKeypress(
@@ -462,7 +448,6 @@ export const useGeminiStream = (
     if (turnCancelledRef.current) {
       return;
     }
-
     if (pendingHistoryItemRef.current) {
       if (pendingHistoryItemRef.current.type === 'tool_group') {
         const updatedTools = pendingHistoryItemRef.current.tools.map(
@@ -365,7 +365,6 @@ class Session {
   function_name: fc.name ?? '',
   function_args: args,
   duration_ms: durationMs,
-  status: 'error',
   success: false,
   error: error.message,
   tool_type:
@@ -484,7 +483,6 @@ class Session {
   function_name: fc.name,
   function_args: args,
   duration_ms: durationMs,
-  status: 'success',
   success: true,
   prompt_id: promptId,
   tool_type:
packages/core/package.json

@@ -1,6 +1,6 @@
 {
   "name": "@qwen-code/qwen-code-core",
-  "version": "0.0.13",
+  "version": "0.0.14",
   "description": "Qwen Code Core",
   "repository": {
     "type": "git",
client.test.ts

@@ -434,8 +434,6 @@ describe('Gemini Client (client.ts)', () => {
   config: {
     abortSignal,
     systemInstruction: getCoreSystemPrompt(''),
-    temperature: 0,
-    topP: 1,
     tools: [
       {
         functionDeclarations: [
@@ -486,7 +484,6 @@ describe('Gemini Client (client.ts)', () => {
     abortSignal,
     systemInstruction: getCoreSystemPrompt(''),
     temperature: 0.9,
-    topP: 1, // from default
     topK: 20,
     tools: [
       {
@@ -2461,7 +2458,6 @@ ${JSON.stringify(
     abortSignal,
     systemInstruction: getCoreSystemPrompt(''),
     temperature: 0.5,
-    topP: 1,
   },
   contents,
 },
client.ts

@@ -115,10 +115,7 @@ export class GeminiClient {
   private chat?: GeminiChat;
   private contentGenerator?: ContentGenerator;
   private readonly embeddingModel: string;
-  private readonly generateContentConfig: GenerateContentConfig = {
-    temperature: 0,
-    topP: 1,
-  };
+  private readonly generateContentConfig: GenerateContentConfig = {};
   private sessionTurnCount = 0;
 
   private readonly loopDetector: LoopDetectionService;
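With the default generateContentConfig now empty, requests no longer pin temperature to 0 and topP to 1; a sampling parameter is sent only when something actually configured it, which is why the fixed topP expectations disappear from client.test.ts above. A hedged sketch of the resulting resolution rule (function and parameter names here are illustrative, not the repo's):

// Illustrative only: a parameter is included when defined, otherwise omitted
// so the backend's own defaults apply (the old code forced 0 and 1).
function resolveSamplingParams(configured: {
  temperature?: number;
  topP?: number;
}): Record<string, number> {
  const params: Record<string, number> = {};
  if (configured.temperature !== undefined) {
    params['temperature'] = configured.temperature;
  }
  if (configured.topP !== undefined) {
    params['top_p'] = configured.topP;
  }
  return params;
}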
coreToolScheduler.ts

@@ -401,7 +401,7 @@ export class CoreToolScheduler {
         }
       }
 
-      const cancelledCall = {
+      return {
         request: currentCall.request,
         tool: toolInstance,
         invocation,
@@ -426,8 +426,6 @@ export class CoreToolScheduler {
         durationMs,
         outcome,
       } as CancelledToolCall;
-
-      return cancelledCall;
     }
     case 'validating':
       return {
openaiContentGenerator/index.ts

@@ -12,6 +12,7 @@ import type { Config } from '../../config/config.js';
 import { OpenAIContentGenerator } from './openaiContentGenerator.js';
 import {
   DashScopeOpenAICompatibleProvider,
+  DeepSeekOpenAICompatibleProvider,
   OpenRouterOpenAICompatibleProvider,
   type OpenAICompatibleProvider,
   DefaultOpenAICompatibleProvider,
@@ -23,6 +24,7 @@ export { ContentGenerationPipeline, type PipelineConfig } from './pipeline.js';
 export {
   type OpenAICompatibleProvider,
   DashScopeOpenAICompatibleProvider,
+  DeepSeekOpenAICompatibleProvider,
   OpenRouterOpenAICompatibleProvider,
 } from './provider/index.js';
 
@@ -61,6 +63,13 @@ export function determineProvider(
     );
   }
 
+  if (DeepSeekOpenAICompatibleProvider.isDeepSeekProvider(config)) {
+    return new DeepSeekOpenAICompatibleProvider(
+      contentGeneratorConfig,
+      cliConfig,
+    );
+  }
+
   // Check for OpenRouter provider
   if (OpenRouterOpenAICompatibleProvider.isOpenRouterProvider(config)) {
     return new OpenRouterOpenAICompatibleProvider(
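The factory routes on the configured base URL, and with this change DeepSeek endpoints get their own provider ahead of the OpenRouter and default fallbacks. A minimal sketch of that selection order (the DeepSeek check matches isDeepSeekProvider in deepseek.ts below; the DashScope and OpenRouter predicates are assumed here, not quoted from the repo):

// Sketch of the provider-selection order; returns a label instead of a class.
function selectProvider(baseUrl: string | undefined): string {
  const url = (baseUrl ?? '').toLowerCase();
  if (url.includes('dashscope')) return 'dashscope'; // assumed predicate
  if (url.includes('api.deepseek.com')) return 'deepseek'; // per isDeepSeekProvider
  if (url.includes('openrouter')) return 'openrouter'; // assumed predicate
  return 'default';
}

// selectProvider('https://api.deepseek.com/v1') === 'deepseek'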
openaiContentGenerator/pipeline.ts

@@ -221,12 +221,6 @@ export class ContentGenerationPipeline {
       mergedResponse.usageMetadata = lastResponse.usageMetadata;
     }
 
-    // Copy other essential properties from the current response
-    mergedResponse.responseId = response.responseId;
-    mergedResponse.createTime = response.createTime;
-    mergedResponse.modelVersion = response.modelVersion;
-    mergedResponse.promptFeedback = response.promptFeedback;
-
     // Update the collected responses with the merged response
     collectedGeminiResponses[collectedGeminiResponses.length - 1] =
       mergedResponse;
@@ -254,26 +248,23 @@ export class ContentGenerationPipeline {
       ...this.buildSamplingParameters(request),
     };
 
-    // Let provider enhance the request (e.g., add metadata, cache control)
-    const enhancedRequest = this.config.provider.buildRequest(
-      baseRequest,
-      userPromptId,
-    );
+    // Add streaming options if present
+    if (streaming) {
+      (
+        baseRequest as unknown as OpenAI.Chat.ChatCompletionCreateParamsStreaming
+      ).stream = true;
+      baseRequest.stream_options = { include_usage: true };
+    }
 
     // Add tools if present
     if (request.config?.tools) {
-      enhancedRequest.tools = await this.converter.convertGeminiToolsToOpenAI(
+      baseRequest.tools = await this.converter.convertGeminiToolsToOpenAI(
         request.config.tools,
       );
     }
 
-    // Add streaming options if needed
-    if (streaming) {
-      enhancedRequest.stream = true;
-      enhancedRequest.stream_options = { include_usage: true };
-    }
-
-    return enhancedRequest;
+    // Let provider enhance the request (e.g., add metadata, cache control)
+    return this.config.provider.buildRequest(baseRequest, userPromptId);
   }
 
   private buildSamplingParameters(
@@ -311,9 +302,9 @@ export class ContentGenerationPipeline {
     };
 
     const params = {
-      // Parameters with request fallback and defaults
-      temperature: getParameterValue('temperature', 'temperature', 0.0),
-      top_p: getParameterValue('top_p', 'topP', 1.0),
+      // Parameters with request fallback but no defaults
+      ...addParameterIfDefined('temperature', 'temperature', 'temperature'),
+      ...addParameterIfDefined('top_p', 'top_p', 'topP'),
 
       // Max tokens (special case: different property names)
       ...addParameterIfDefined('max_tokens', 'max_tokens', 'maxOutputTokens'),
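The net effect of the buildRequest rework above: streaming flags and converted tools are applied to baseRequest first, and the provider hook runs last on the finished request, where it previously ran first and had its result patched afterwards. That ordering is what lets a provider such as DashScope see, and annotate, the final tool list. A hedged sketch of the new order, with stand-in names for the real methods:

// Stand-ins: convertTools / providerBuildRequest are not the repo's names.
type ToolDef = { type: 'function'; function: { name: string } };
interface ChatRequest {
  messages: unknown[];
  stream?: boolean;
  stream_options?: { include_usage: boolean };
  tools?: ToolDef[];
}

async function assembleRequest(
  base: ChatRequest,
  streaming: boolean,
  convertTools: () => Promise<ToolDef[] | undefined>,
  providerBuildRequest: (req: ChatRequest) => ChatRequest,
): Promise<ChatRequest> {
  if (streaming) {
    base.stream = true;
    base.stream_options = { include_usage: true };
  }
  const tools = await convertTools();
  if (tools) {
    base.tools = tools;
  }
  // The provider hook runs last and therefore sees stream flags and tools.
  return providerBuildRequest(base);
}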
openaiContentGenerator/provider/dashscope.test.ts

@@ -17,6 +17,7 @@ import { DashScopeOpenAICompatibleProvider } from './dashscope.js';
 import type { Config } from '../../../config/config.js';
 import type { ContentGeneratorConfig } from '../../contentGenerator.js';
 import { AuthType } from '../../contentGenerator.js';
+import type { ChatCompletionToolWithCache } from './types.js';
 import { DEFAULT_TIMEOUT, DEFAULT_MAX_RETRIES } from '../constants.js';
 
 // Mock OpenAI
@@ -253,17 +254,110 @@ describe('DashScopeOpenAICompatibleProvider', () => {
         },
       ]);
 
-      // Last message should NOT have cache control for non-streaming
+      // Last message should NOT have cache control for non-streaming requests
       const lastMessage = result.messages[1];
       expect(lastMessage.role).toBe('user');
       expect(lastMessage.content).toBe('Hello!');
     });
 
-    it('should add cache control to both system and last messages for streaming requests', () => {
-      const request = { ...baseRequest, stream: true };
-      const result = provider.buildRequest(request, 'test-prompt-id');
-
-      expect(result.messages).toHaveLength(2);
-
+    it('should add cache control to system message only for non-streaming requests with tools', () => {
+      const requestWithTool: OpenAI.Chat.ChatCompletionCreateParams = {
+        ...baseRequest,
+        messages: [
+          { role: 'system', content: 'You are a helpful assistant.' },
+          {
+            role: 'tool',
+            content: 'First tool output',
+            tool_call_id: 'call_1',
+          },
+          {
+            role: 'tool',
+            content: 'Second tool output',
+            tool_call_id: 'call_2',
+          },
+          { role: 'user', content: 'Hello!' },
+        ],
+        tools: [
+          {
+            type: 'function',
+            function: {
+              name: 'mockTool',
+              parameters: { type: 'object', properties: {} },
+            },
+          },
+        ],
+        stream: false,
+      };
+
+      const result = provider.buildRequest(requestWithTool, 'test-prompt-id');
+
+      expect(result.messages).toHaveLength(4);
+
+      const systemMessage = result.messages[0];
+      expect(systemMessage.content).toEqual([
+        {
+          type: 'text',
+          text: 'You are a helpful assistant.',
+          cache_control: { type: 'ephemeral' },
+        },
+      ]);
+
+      // Tool messages should remain unchanged
+      const firstToolMessage = result.messages[1];
+      expect(firstToolMessage.role).toBe('tool');
+      expect(firstToolMessage.content).toBe('First tool output');
+
+      const secondToolMessage = result.messages[2];
+      expect(secondToolMessage.role).toBe('tool');
+      expect(secondToolMessage.content).toBe('Second tool output');
+
+      // Last message should NOT have cache control for non-streaming requests
+      const lastMessage = result.messages[3];
+      expect(lastMessage.role).toBe('user');
+      expect(lastMessage.content).toBe('Hello!');
+
+      // Tools should NOT have cache control for non-streaming requests
+      const tools = result.tools as ChatCompletionToolWithCache[];
+      expect(tools).toBeDefined();
+      expect(tools).toHaveLength(1);
+      expect(tools[0].cache_control).toBeUndefined();
+    });
+
+    it('should add cache control to system, last history message, and last tool definition for streaming requests', () => {
+      const request = { ...baseRequest, stream: true };
+      const requestWithToolMessage: OpenAI.Chat.ChatCompletionCreateParams = {
+        ...request,
+        messages: [
+          { role: 'system', content: 'You are a helpful assistant.' },
+          {
+            role: 'tool',
+            content: 'First tool output',
+            tool_call_id: 'call_1',
+          },
+          {
+            role: 'tool',
+            content: 'Second tool output',
+            tool_call_id: 'call_2',
+          },
+          { role: 'user', content: 'Hello!' },
+        ],
+        tools: [
+          {
+            type: 'function',
+            function: {
+              name: 'mockTool',
+              parameters: { type: 'object', properties: {} },
+            },
+          },
+        ],
+      };
+
+      const result = provider.buildRequest(
+        requestWithToolMessage,
+        'test-prompt-id',
+      );
+
+      expect(result.messages).toHaveLength(4);
+
       // System message should have cache control
       const systemMessage = result.messages[0];
@@ -275,8 +369,17 @@ describe('DashScopeOpenAICompatibleProvider', () => {
         },
       ]);
 
-      // Last message should also have cache control for streaming
-      const lastMessage = result.messages[1];
+      // Tool messages should remain unchanged
+      const firstToolMessage = result.messages[1];
+      expect(firstToolMessage.role).toBe('tool');
+      expect(firstToolMessage.content).toBe('First tool output');
+
+      const secondToolMessage = result.messages[2];
+      expect(secondToolMessage.role).toBe('tool');
+      expect(secondToolMessage.content).toBe('Second tool output');
+
+      // Last message should also have cache control
+      const lastMessage = result.messages[3];
       expect(lastMessage.content).toEqual([
         {
           type: 'text',
@@ -284,6 +387,40 @@ describe('DashScopeOpenAICompatibleProvider', () => {
           cache_control: { type: 'ephemeral' },
         },
       ]);
+
+      const tools = result.tools as ChatCompletionToolWithCache[];
+      expect(tools).toBeDefined();
+      expect(tools).toHaveLength(1);
+      expect(tools[0].cache_control).toEqual({ type: 'ephemeral' });
     });
 
+    it('should not add cache control to tool messages when request.tools is undefined', () => {
+      const requestWithoutConfiguredTools: OpenAI.Chat.ChatCompletionCreateParams =
+        {
+          ...baseRequest,
+          messages: [
+            { role: 'system', content: 'You are a helpful assistant.' },
+            {
+              role: 'tool',
+              content: 'Tool output',
+              tool_call_id: 'call_1',
+            },
+            { role: 'user', content: 'Hello!' },
+          ],
+        };
+
+      const result = provider.buildRequest(
+        requestWithoutConfiguredTools,
+        'test-prompt-id',
+      );
+
+      expect(result.messages).toHaveLength(3);
+
+      const toolMessage = result.messages[1];
+      expect(toolMessage.role).toBe('tool');
+      expect(toolMessage.content).toBe('Tool output');
+
+      expect(result.tools).toBeUndefined();
+    });
+
     it('should include metadata in the request', () => {
openaiContentGenerator/provider/dashscope.ts

@@ -9,6 +9,7 @@ import type {
   DashScopeRequestMetadata,
   ChatCompletionContentPartTextWithCache,
   ChatCompletionContentPartWithCache,
+  ChatCompletionToolWithCache,
 } from './types.js';
 
 export class DashScopeOpenAICompatibleProvider
@@ -70,7 +71,8 @@
  * Build and configure the request for DashScope API.
  *
  * This method applies DashScope-specific configurations including:
- * - Cache control for system and user messages
+ * - Cache control for the system message, last tool message (when tools are configured),
+ *   and the latest history message
  * - Output token limits based on model capabilities
  * - Vision model specific parameters (vl_high_resolution_images)
  * - Request metadata for session tracking
@@ -84,13 +86,17 @@
     userPromptId: string,
   ): OpenAI.Chat.ChatCompletionCreateParams {
     let messages = request.messages;
+    let tools = request.tools;
 
     // Apply DashScope cache control only if not disabled
     if (!this.shouldDisableCacheControl()) {
-      // Add cache control to system and last messages for DashScope providers
-      // Only add cache control to system message for non-streaming requests
-      const cacheTarget = request.stream ? 'both' : 'system';
-      messages = this.addDashScopeCacheControl(messages, cacheTarget);
+      const { messages: updatedMessages, tools: updatedTools } =
+        this.addDashScopeCacheControl(
+          request,
+          request.stream ? 'all' : 'system_only',
+        );
+      messages = updatedMessages;
+      tools = updatedTools;
     }
 
     // Apply output token limits based on model capabilities
@@ -104,6 +110,7 @@
       return {
         ...requestWithTokenLimits,
         messages,
+        ...(tools ? { tools } : {}),
         ...(this.buildMetadata(userPromptId) || {}),
         /* @ts-expect-error dashscope exclusive */
         vl_high_resolution_images: true,
@@ -113,6 +120,7 @@
     return {
       ...requestWithTokenLimits, // Preserve all original parameters including sampling params and adjusted max_tokens
       messages,
+      ...(tools ? { tools } : {}),
       ...(this.buildMetadata(userPromptId) || {}),
     } as OpenAI.Chat.ChatCompletionCreateParams;
   }
@@ -130,75 +138,67 @@
    * Add cache control flag to specified message(s) for DashScope providers
    */
   private addDashScopeCacheControl(
-    messages: OpenAI.Chat.ChatCompletionMessageParam[],
-    target: 'system' | 'last' | 'both' = 'both',
-  ): OpenAI.Chat.ChatCompletionMessageParam[] {
-    if (messages.length === 0) {
-      return messages;
-    }
+    request: OpenAI.Chat.ChatCompletionCreateParams,
+    cacheControl: 'system_only' | 'all',
+  ): {
+    messages: OpenAI.Chat.ChatCompletionMessageParam[];
+    tools?: ChatCompletionToolWithCache[];
+  } {
+    const messages = request.messages;
 
-    let updatedMessages = [...messages];
+    const systemIndex = messages.findIndex((msg) => msg.role === 'system');
+    const lastIndex = messages.length - 1;
 
-    // Add cache control to system message if requested
-    if (target === 'system' || target === 'both') {
-      updatedMessages = this.addCacheControlToMessage(
-        updatedMessages,
-        'system',
-      );
-    }
+    const updatedMessages =
+      messages.length === 0
+        ? messages
+        : messages.map((message, index) => {
+            const shouldAddCacheControl = Boolean(
+              (index === systemIndex && systemIndex !== -1) ||
+                (index === lastIndex && cacheControl === 'all'),
+            );
 
-    // Add cache control to last message if requested
-    if (target === 'last' || target === 'both') {
-      updatedMessages = this.addCacheControlToMessage(updatedMessages, 'last');
-    }
+            if (
+              !shouldAddCacheControl ||
+              !('content' in message) ||
+              message.content === null ||
+              message.content === undefined
+            ) {
+              return message;
+            }
 
-    return updatedMessages;
+            return {
+              ...message,
+              content: this.addCacheControlToContent(message.content),
+            } as OpenAI.Chat.ChatCompletionMessageParam;
+          });
+
+    const updatedTools =
+      cacheControl === 'all' && request.tools?.length
+        ? this.addCacheControlToTools(request.tools)
+        : (request.tools as ChatCompletionToolWithCache[] | undefined);
+
+    return {
+      messages: updatedMessages,
+      tools: updatedTools,
+    };
   }
 
   /**
    * Helper method to add cache control to a specific message
    */
-  private addCacheControlToMessage(
-    messages: OpenAI.Chat.ChatCompletionMessageParam[],
-    target: 'system' | 'last',
-  ): OpenAI.Chat.ChatCompletionMessageParam[] {
-    const updatedMessages = [...messages];
-    const messageIndex = this.findTargetMessageIndex(messages, target);
-
-    if (messageIndex === -1) {
-      return updatedMessages;
+  private addCacheControlToTools(
+    tools: OpenAI.Chat.ChatCompletionTool[],
+  ): ChatCompletionToolWithCache[] {
+    if (tools.length === 0) {
+      return tools as ChatCompletionToolWithCache[];
     }
 
-    const message = updatedMessages[messageIndex];
+    const updatedTools = [...tools] as ChatCompletionToolWithCache[];
+    const lastToolIndex = tools.length - 1;
+    updatedTools[lastToolIndex] = {
+      ...updatedTools[lastToolIndex],
+      cache_control: { type: 'ephemeral' },
+    };
 
-    // Only process messages that have content
-    if (
-      'content' in message &&
-      message.content !== null &&
-      message.content !== undefined
-    ) {
-      const updatedContent = this.addCacheControlToContent(message.content);
-      updatedMessages[messageIndex] = {
-        ...message,
-        content: updatedContent,
-      } as OpenAI.Chat.ChatCompletionMessageParam;
-    }
-
-    return updatedMessages;
-  }
-
-  /**
-   * Find the index of the target message (system or last)
-   */
-  private findTargetMessageIndex(
-    messages: OpenAI.Chat.ChatCompletionMessageParam[],
-    target: 'system' | 'last',
-  ): number {
-    if (target === 'system') {
-      return messages.findIndex((msg) => msg.role === 'system');
-    } else {
-      return messages.length - 1;
-    }
-  }
+    return updatedTools;
+  }
 
   /**
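Read together with the tests above: streaming requests mark the system message, the latest history message, and the last tool definition with an ephemeral cache flag, while non-streaming requests mark only the system message. A sketch of the annotated shape DashScope receives for a streaming call (illustrative data assembled from the test assertions, not repo code):

const annotatedStreamingRequest = {
  stream: true,
  messages: [
    {
      role: 'system',
      content: [
        {
          type: 'text',
          text: 'You are a helpful assistant.',
          cache_control: { type: 'ephemeral' }, // system message: always marked
        },
      ],
    },
    { role: 'tool', content: 'First tool output', tool_call_id: 'call_1' }, // untouched
    {
      role: 'user',
      content: [
        { type: 'text', text: 'Hello!', cache_control: { type: 'ephemeral' } }, // last message: streaming only
      ],
    },
  ],
  tools: [
    {
      type: 'function',
      function: { name: 'mockTool', parameters: { type: 'object', properties: {} } },
      cache_control: { type: 'ephemeral' }, // last tool definition: streaming only
    },
  ],
};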
openaiContentGenerator/provider/deepseek.test.ts (new file)

@@ -0,0 +1,132 @@
/**
 * @license
 * Copyright 2025 Qwen
 * SPDX-License-Identifier: Apache-2.0
 */

import { describe, it, expect, vi, beforeEach } from 'vitest';
import type OpenAI from 'openai';
import { DeepSeekOpenAICompatibleProvider } from './deepseek.js';
import type { ContentGeneratorConfig } from '../../contentGenerator.js';
import type { Config } from '../../../config/config.js';

// Mock OpenAI client to avoid real network calls
vi.mock('openai', () => ({
  default: vi.fn().mockImplementation((config) => ({
    config,
  })),
}));

describe('DeepSeekOpenAICompatibleProvider', () => {
  let provider: DeepSeekOpenAICompatibleProvider;
  let mockContentGeneratorConfig: ContentGeneratorConfig;
  let mockCliConfig: Config;

  beforeEach(() => {
    vi.clearAllMocks();

    mockContentGeneratorConfig = {
      apiKey: 'test-api-key',
      baseUrl: 'https://api.deepseek.com/v1',
      model: 'deepseek-chat',
    } as ContentGeneratorConfig;

    mockCliConfig = {
      getCliVersion: vi.fn().mockReturnValue('1.0.0'),
    } as unknown as Config;

    provider = new DeepSeekOpenAICompatibleProvider(
      mockContentGeneratorConfig,
      mockCliConfig,
    );
  });

  describe('isDeepSeekProvider', () => {
    it('returns true when baseUrl includes deepseek', () => {
      const result = DeepSeekOpenAICompatibleProvider.isDeepSeekProvider(
        mockContentGeneratorConfig,
      );
      expect(result).toBe(true);
    });

    it('returns false for non deepseek baseUrl', () => {
      const config = {
        ...mockContentGeneratorConfig,
        baseUrl: 'https://api.example.com/v1',
      } as ContentGeneratorConfig;

      const result =
        DeepSeekOpenAICompatibleProvider.isDeepSeekProvider(config);
      expect(result).toBe(false);
    });
  });

  describe('buildRequest', () => {
    const userPromptId = 'prompt-123';

    it('converts array content into a string', () => {
      const originalRequest: OpenAI.Chat.ChatCompletionCreateParams = {
        model: 'deepseek-chat',
        messages: [
          {
            role: 'user',
            content: [
              { type: 'text', text: 'Hello' },
              { type: 'text', text: ' world' },
            ],
          },
        ],
      };

      const result = provider.buildRequest(originalRequest, userPromptId);

      expect(result.messages).toHaveLength(1);
      expect(result.messages?.[0]).toEqual({
        role: 'user',
        content: 'Hello world',
      });
      expect(originalRequest.messages?.[0].content).toEqual([
        { type: 'text', text: 'Hello' },
        { type: 'text', text: ' world' },
      ]);
    });

    it('leaves string content unchanged', () => {
      const originalRequest: OpenAI.Chat.ChatCompletionCreateParams = {
        model: 'deepseek-chat',
        messages: [
          {
            role: 'user',
            content: 'Hello world',
          },
        ],
      };

      const result = provider.buildRequest(originalRequest, userPromptId);

      expect(result.messages?.[0].content).toBe('Hello world');
    });

    it('throws when encountering non-text multimodal parts', () => {
      const originalRequest: OpenAI.Chat.ChatCompletionCreateParams = {
        model: 'deepseek-chat',
        messages: [
          {
            role: 'user',
            content: [
              { type: 'text', text: 'Hello' },
              {
                type: 'image_url',
                image_url: { url: 'https://example.com/image.png' },
              },
            ],
          },
        ],
      };

      expect(() =>
        provider.buildRequest(originalRequest, userPromptId),
      ).toThrow(/only supports text content/i);
    });
  });
});
openaiContentGenerator/provider/deepseek.ts (new file)

@@ -0,0 +1,79 @@
/**
 * @license
 * Copyright 2025 Qwen
 * SPDX-License-Identifier: Apache-2.0
 */

import type OpenAI from 'openai';
import type { Config } from '../../../config/config.js';
import type { ContentGeneratorConfig } from '../../contentGenerator.js';
import { DefaultOpenAICompatibleProvider } from './default.js';

export class DeepSeekOpenAICompatibleProvider extends DefaultOpenAICompatibleProvider {
  constructor(
    contentGeneratorConfig: ContentGeneratorConfig,
    cliConfig: Config,
  ) {
    super(contentGeneratorConfig, cliConfig);
  }

  static isDeepSeekProvider(
    contentGeneratorConfig: ContentGeneratorConfig,
  ): boolean {
    const baseUrl = contentGeneratorConfig.baseUrl ?? '';

    return baseUrl.toLowerCase().includes('api.deepseek.com');
  }

  override buildRequest(
    request: OpenAI.Chat.ChatCompletionCreateParams,
    userPromptId: string,
  ): OpenAI.Chat.ChatCompletionCreateParams {
    const baseRequest = super.buildRequest(request, userPromptId);
    if (!baseRequest.messages?.length) {
      return baseRequest;
    }

    const messages = baseRequest.messages.map((message) => {
      if (!('content' in message)) {
        return message;
      }

      const { content } = message;

      if (
        typeof content === 'string' ||
        content === null ||
        content === undefined
      ) {
        return message;
      }

      if (!Array.isArray(content)) {
        return message;
      }

      const text = content
        .map((part) => {
          if (part.type !== 'text') {
            throw new Error(
              `DeepSeek provider only supports text content. Found non-text part of type '${part.type}' in message with role '${message.role}'.`,
            );
          }

          return part.text ?? '';
        })
        .join('');

      return {
        ...message,
        content: text,
      } as OpenAI.Chat.ChatCompletionMessageParam;
    });

    return {
      ...baseRequest,
      messages,
    };
  }
}
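The provider is a thin content-flattening layer over DefaultOpenAICompatibleProvider: array-of-text content is joined into a single string, and any non-text part throws rather than being silently dropped. A minimal usage sketch mirroring the tests above (the config literals are the tests' placeholders):

declare const cliConfig: Config; // assumed to be in scope

const deepseek = new DeepSeekOpenAICompatibleProvider(
  {
    apiKey: 'test-api-key',
    baseUrl: 'https://api.deepseek.com/v1',
    model: 'deepseek-chat',
  } as ContentGeneratorConfig,
  cliConfig,
);

const flattened = deepseek.buildRequest(
  {
    model: 'deepseek-chat',
    messages: [
      {
        role: 'user',
        content: [
          { type: 'text', text: 'Hello' },
          { type: 'text', text: ' world' },
        ],
      },
    ],
  },
  'prompt-123',
);
// flattened.messages[0].content === 'Hello world'
// An image_url part in the same position would throw 'only supports text content'.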
openaiContentGenerator/provider/index.ts

@@ -1,4 +1,5 @@
 export { DashScopeOpenAICompatibleProvider } from './dashscope.js';
+export { DeepSeekOpenAICompatibleProvider } from './deepseek.js';
 export { OpenRouterOpenAICompatibleProvider } from './openrouter.js';
 export { DefaultOpenAICompatibleProvider } from './default.js';
 export type {
openaiContentGenerator/provider/types.ts

@@ -11,6 +11,10 @@ export type ChatCompletionContentPartWithCache =
   | OpenAI.Chat.ChatCompletionContentPartImage
   | OpenAI.Chat.ChatCompletionContentPartRefusal;
 
+export type ChatCompletionToolWithCache = OpenAI.Chat.ChatCompletionTool & {
+  cache_control?: { type: 'ephemeral' };
+};
+
 export interface OpenAICompatibleProvider {
   buildHeaders(): Record<string, string | undefined>;
   buildClient(): OpenAI;
turn.ts

@@ -84,7 +84,6 @@ export interface ToolCallRequestInfo {
   args: Record<string, unknown>;
   isClientInitiated: boolean;
   prompt_id: string;
-  response_id?: string;
 }
 
 export interface ToolCallResponseInfo {
@@ -203,7 +202,6 @@ export class Turn {
   readonly pendingToolCalls: ToolCallRequestInfo[];
   private debugResponses: GenerateContentResponse[];
   finishReason: FinishReason | undefined;
-  private currentResponseId?: string;
 
   constructor(
     private readonly chat: GeminiChat,
@@ -249,11 +247,6 @@ export class Turn {
 
       this.debugResponses.push(resp);
 
-      // Track the current response ID for tool call correlation
-      if (resp.responseId) {
-        this.currentResponseId = resp.responseId;
-      }
-
       const thoughtPart = resp.candidates?.[0]?.content?.parts?.[0];
       if (thoughtPart?.thought) {
         // Thought always has a bold "subject" part enclosed in double asterisks
@@ -353,7 +346,6 @@ export class Turn {
       args,
       isClientInitiated: false,
       prompt_id: this.prompt_id,
-      response_id: this.currentResponseId,
     };
 
     this.pendingToolCalls.push(toolCallRequest);
subagent.ts

@@ -381,7 +381,6 @@ export class SubAgentScope {
     let roundText = '';
     let lastUsage: GenerateContentResponseUsageMetadata | undefined =
       undefined;
-    let currentResponseId: string | undefined = undefined;
     for await (const streamEvent of responseStream) {
       if (abortController.signal.aborted) {
         this.terminateMode = SubagentTerminateMode.CANCELLED;
@@ -396,10 +395,6 @@ export class SubAgentScope {
       // Handle chunk events
       if (streamEvent.type === 'chunk') {
         const resp = streamEvent.value;
-        // Track the response ID for tool call correlation
-        if (resp.responseId) {
-          currentResponseId = resp.responseId;
-        }
         if (resp.functionCalls) functionCalls.push(...resp.functionCalls);
         const content = resp.candidates?.[0]?.content;
         const parts = content?.parts || [];
@@ -460,7 +455,6 @@ export class SubAgentScope {
           abortController,
           promptId,
           turnCounter,
-          currentResponseId,
         );
       } else {
         // No tool calls — treat this as the model's final answer.
@@ -549,7 +543,6 @@ export class SubAgentScope {
   * @param {FunctionCall[]} functionCalls - An array of `FunctionCall` objects to process.
   * @param {ToolRegistry} toolRegistry - The tool registry to look up and execute tools.
   * @param {AbortController} abortController - An `AbortController` to signal cancellation of tool executions.
-  * @param {string} responseId - Optional API response ID for correlation with tool calls.
   * @returns {Promise<Content[]>} A promise that resolves to an array of `Content` parts representing the tool responses,
   *   which are then used to update the chat history.
   */
@@ -558,7 +551,6 @@ export class SubAgentScope {
     abortController: AbortController,
     promptId: string,
     currentRound: number,
-    responseId?: string,
   ): Promise<Content[]> {
     const toolResponseParts: Part[] = [];
 
@@ -712,7 +704,6 @@ export class SubAgentScope {
       args,
       isClientInitiated: true,
       prompt_id: promptId,
-      response_id: responseId,
     };
 
     const description = this.getToolDescription(toolName, args);
telemetry/constants.ts

@@ -10,7 +10,6 @@ export const EVENT_USER_PROMPT = 'qwen-code.user_prompt';
 export const EVENT_TOOL_CALL = 'qwen-code.tool_call';
 export const EVENT_API_REQUEST = 'qwen-code.api_request';
 export const EVENT_API_ERROR = 'qwen-code.api_error';
-export const EVENT_API_CANCEL = 'qwen-code.api_cancel';
 export const EVENT_API_RESPONSE = 'qwen-code.api_response';
 export const EVENT_CLI_CONFIG = 'qwen-code.config';
 export const EVENT_FLASH_FALLBACK = 'qwen-code.flash_fallback';
telemetry/index.ts

@@ -17,7 +17,6 @@ export { SpanStatusCode, ValueType } from '@opentelemetry/api';
 export { SemanticAttributes } from '@opentelemetry/semantic-conventions';
 export {
   logApiError,
-  logApiCancel,
   logApiRequest,
   logApiResponse,
   logChatCompression,
@@ -36,7 +35,6 @@
 } from './sdk.js';
 export {
   ApiErrorEvent,
-  ApiCancelEvent,
   ApiRequestEvent,
   ApiResponseEvent,
   ConversationFinishedEvent,
@@ -56,5 +54,4 @@
   TelemetryEvent,
 } from './types.js';
 export * from './uiTelemetry.js';
-export { QwenLogger } from './qwen-logger/qwen-logger.js';
 export { DEFAULT_OTLP_ENDPOINT, DEFAULT_TELEMETRY_TARGET };
telemetry/loggers.test.ts

@@ -550,7 +550,6 @@ describe('loggers', () => {
     2,
   ),
   duration_ms: 100,
-  status: 'success',
   success: true,
   decision: ToolCallDecision.ACCEPT,
   prompt_id: 'prompt-id-1',
@@ -620,7 +619,6 @@ describe('loggers', () => {
     2,
   ),
   duration_ms: 100,
-  status: 'error',
   success: false,
   decision: ToolCallDecision.REJECT,
   prompt_id: 'prompt-id-2',
@@ -693,7 +691,6 @@ describe('loggers', () => {
     2,
   ),
   duration_ms: 100,
-  status: 'success',
   success: true,
   decision: ToolCallDecision.MODIFY,
   prompt_id: 'prompt-id-3',
@@ -765,7 +762,6 @@ describe('loggers', () => {
     2,
   ),
   duration_ms: 100,
-  status: 'success',
   success: true,
   prompt_id: 'prompt-id-4',
   tool_type: 'native',
@@ -838,7 +834,6 @@ describe('loggers', () => {
     2,
   ),
   duration_ms: 100,
-  status: 'error',
   success: false,
   error: 'test-error',
   'error.message': 'test-error',
telemetry/loggers.ts

@@ -12,7 +12,6 @@ import { safeJsonStringify } from '../utils/safeJsonStringify.js';
 import { UserAccountManager } from '../utils/userAccountManager.js';
 import {
   EVENT_API_ERROR,
-  EVENT_API_CANCEL,
   EVENT_API_REQUEST,
   EVENT_API_RESPONSE,
   EVENT_CHAT_COMPRESSION,
@@ -46,7 +45,6 @@ import { QwenLogger } from './qwen-logger/qwen-logger.js';
 import { isTelemetrySdkInitialized } from './sdk.js';
 import type {
   ApiErrorEvent,
-  ApiCancelEvent,
   ApiRequestEvent,
   ApiResponseEvent,
   ChatCompressionEvent,
@@ -284,32 +282,6 @@ export function logApiError(config: Config, event: ApiErrorEvent): void {
   );
 }
 
-export function logApiCancel(config: Config, event: ApiCancelEvent): void {
-  const uiEvent = {
-    ...event,
-    'event.name': EVENT_API_CANCEL,
-    'event.timestamp': new Date().toISOString(),
-  } as UiEvent;
-  uiTelemetryService.addEvent(uiEvent);
-  QwenLogger.getInstance(config)?.logApiCancelEvent(event);
-  if (!isTelemetrySdkInitialized()) return;
-
-  const attributes: LogAttributes = {
-    ...getCommonAttributes(config),
-    ...event,
-    'event.name': EVENT_API_CANCEL,
-    'event.timestamp': new Date().toISOString(),
-    model_name: event.model,
-  };
-
-  const logger = logs.getLogger(SERVICE_NAME);
-  const logRecord: LogRecord = {
-    body: `API request cancelled for ${event.model}.`,
-    attributes,
-  };
-  logger.emit(logRecord);
-}
-
 export function logApiResponse(config: Config, event: ApiResponseEvent): void {
   const uiEvent = {
     ...event,
telemetry/qwen-logger/qwen-logger.ts

@@ -15,7 +15,6 @@ import type {
   ApiRequestEvent,
   ApiResponseEvent,
   ApiErrorEvent,
-  ApiCancelEvent,
   FileOperationEvent,
   FlashFallbackEvent,
   LoopDetectedEvent,
@@ -412,7 +411,6 @@ export class QwenLogger {
   {
     properties: {
       prompt_id: event.prompt_id,
-      response_id: event.response_id,
     },
     snapshots: JSON.stringify({
       function_name: event.function_name,
@@ -429,19 +427,6 @@ export class QwenLogger {
     this.flushIfNeeded();
   }
 
-  logApiCancelEvent(event: ApiCancelEvent): void {
-    const rumEvent = this.createActionEvent('api', 'api_cancel', {
-      properties: {
-        model: event.model,
-        prompt_id: event.prompt_id,
-        auth_type: event.auth_type,
-      },
-    });
-
-    this.enqueueLogEvent(rumEvent);
-    this.flushIfNeeded();
-  }
-
   logFileOperationEvent(event: FileOperationEvent): void {
     const rumEvent = this.createActionEvent(
       'file_operation',
telemetry/types.ts

@@ -127,13 +127,11 @@ export class ToolCallEvent implements BaseTelemetryEvent {
   function_name: string;
   function_args: Record<string, unknown>;
   duration_ms: number;
-  status: 'success' | 'error' | 'cancelled';
-  success: boolean; // Keep for backward compatibility
+  success: boolean;
   decision?: ToolCallDecision;
   error?: string;
   error_type?: string;
   prompt_id: string;
-  response_id?: string;
   tool_type: 'native' | 'mcp';
   // eslint-disable-next-line @typescript-eslint/no-explicit-any
   metadata?: { [key: string]: any };
@@ -144,15 +142,13 @@ export class ToolCallEvent implements BaseTelemetryEvent {
     this.function_name = call.request.name;
     this.function_args = call.request.args;
     this.duration_ms = call.durationMs ?? 0;
-    this.status = call.status;
-    this.success = call.status === 'success'; // Keep for backward compatibility
+    this.success = call.status === 'success';
     this.decision = call.outcome
       ? getDecisionFromOutcome(call.outcome)
       : undefined;
     this.error = call.response.error?.message;
    this.error_type = call.response.errorType;
     this.prompt_id = call.request.prompt_id;
-    this.response_id = call.request.response_id;
     this.tool_type =
       typeof call.tool !== 'undefined' && call.tool instanceof DiscoveredMCPTool
         ? 'mcp'
@@ -228,22 +224,6 @@ export class ApiErrorEvent implements BaseTelemetryEvent {
   }
 }
 
-export class ApiCancelEvent implements BaseTelemetryEvent {
-  'event.name': 'api_cancel';
-  'event.timestamp': string;
-  model: string;
-  prompt_id: string;
-  auth_type?: string;
-
-  constructor(model: string, prompt_id: string, auth_type?: string) {
-    this['event.name'] = 'api_cancel';
-    this['event.timestamp'] = new Date().toISOString();
-    this.model = model;
-    this.prompt_id = prompt_id;
-    this.auth_type = auth_type;
-  }
-}
-
 export class ApiResponseEvent implements BaseTelemetryEvent {
   'event.name': 'api_response';
   'event.timestamp': string; // ISO 8601
@@ -562,7 +542,6 @@ export type TelemetryEvent =
   | ToolCallEvent
   | ApiRequestEvent
   | ApiErrorEvent
-  | ApiCancelEvent
   | ApiResponseEvent
   | FlashFallbackEvent
   | LoopDetectedEvent
telemetry/uiTelemetry.test.ts

@@ -15,7 +15,6 @@ import {
   EVENT_TOOL_CALL,
 } from './constants.js';
 import type {
-  CancelledToolCall,
   CompletedToolCall,
   ErroredToolCall,
   SuccessfulToolCall,
@@ -26,7 +25,7 @@ import { MockTool } from '../test-utils/tools.js';
 
 const createFakeCompletedToolCall = (
   name: string,
-  success: boolean | 'cancelled',
+  success: boolean,
   duration = 100,
   outcome?: ToolConfirmationOutcome,
   error?: Error,
@@ -40,7 +39,7 @@
   };
   const tool = new MockTool(name);
 
-  if (success === true) {
+  if (success) {
     return {
       status: 'success',
       request,
@@ -64,30 +63,6 @@
       durationMs: duration,
       outcome,
     } as SuccessfulToolCall;
-  } else if (success === 'cancelled') {
-    return {
-      status: 'cancelled',
-      request,
-      tool,
-      invocation: tool.build({ param: 'test' }),
-      response: {
-        callId: request.callId,
-        responseParts: [
-          {
-            functionResponse: {
-              id: request.callId,
-              name,
-              response: { error: 'Tool cancelled' },
-            },
-          },
-        ],
-        error: new Error('Tool cancelled'),
-        errorType: ToolErrorType.UNKNOWN,
-        resultDisplay: 'Cancelled!',
-      },
-      durationMs: duration,
-      outcome,
-    } as CancelledToolCall;
   } else {
     return {
       status: 'error',
@@ -436,40 +411,6 @@ describe('UiTelemetryService', () => {
     });
   });
 
-  it('should process a single cancelled ToolCallEvent', () => {
-    const toolCall = createFakeCompletedToolCall(
-      'test_tool',
-      'cancelled',
-      180,
-      ToolConfirmationOutcome.Cancel,
-    );
-    service.addEvent({
-      ...structuredClone(new ToolCallEvent(toolCall)),
-      'event.name': EVENT_TOOL_CALL,
-    } as ToolCallEvent & { 'event.name': typeof EVENT_TOOL_CALL });
-
-    const metrics = service.getMetrics();
-    const { tools } = metrics;
-
-    expect(tools.totalCalls).toBe(1);
-    expect(tools.totalSuccess).toBe(0);
-    expect(tools.totalFail).toBe(1);
-    expect(tools.totalDurationMs).toBe(180);
-    expect(tools.totalDecisions[ToolCallDecision.REJECT]).toBe(1);
-    expect(tools.byName['test_tool']).toEqual({
-      count: 1,
-      success: 0,
-      fail: 1,
-      durationMs: 180,
-      decisions: {
-        [ToolCallDecision.ACCEPT]: 0,
-        [ToolCallDecision.REJECT]: 1,
-        [ToolCallDecision.MODIFY]: 0,
-        [ToolCallDecision.AUTO_ACCEPT]: 0,
-      },
-    });
-  });
-
   it('should process a ToolCallEvent with modify decision', () => {
     const toolCall = createFakeCompletedToolCall(
       'test_tool',
@@ -696,34 +637,6 @@ describe('UiTelemetryService', () => {
     expect(service.getLastPromptTokenCount()).toBe(0);
     expect(spy).toHaveBeenCalledOnce();
   });
-
-  it('should correctly set status field for success/error/cancelled calls', () => {
-    const successCall = createFakeCompletedToolCall(
-      'success_tool',
-      true,
-      100,
-    );
-    const errorCall = createFakeCompletedToolCall('error_tool', false, 150);
-    const cancelledCall = createFakeCompletedToolCall(
-      'cancelled_tool',
-      'cancelled',
-      200,
-    );
-
-    const successEvent = new ToolCallEvent(successCall);
-    const errorEvent = new ToolCallEvent(errorCall);
-    const cancelledEvent = new ToolCallEvent(cancelledCall);
-
-    // Verify status field is correctly set
-    expect(successEvent.status).toBe('success');
-    expect(errorEvent.status).toBe('error');
-    expect(cancelledEvent.status).toBe('cancelled');
-
-    // Verify backward compatibility with success field
-    expect(successEvent.success).toBe(true);
-    expect(errorEvent.success).toBe(false);
-    expect(cancelledEvent.success).toBe(false);
-  });
 });
 
 describe('Tool Call Event with Line Count Metadata', () => {
packages/test-utils/package.json

@@ -1,6 +1,6 @@
 {
   "name": "@qwen-code/qwen-code-test-utils",
-  "version": "0.0.13",
+  "version": "0.0.14",
   "private": true,
   "main": "src/index.ts",
   "license": "Apache-2.0",
packages/vscode-ide-companion/package.json

@@ -2,7 +2,7 @@
   "name": "qwen-code-vscode-ide-companion",
   "displayName": "Qwen Code Companion",
   "description": "Enable Qwen Code with direct access to your VS Code workspace.",
-  "version": "0.0.13",
+  "version": "0.0.14",
   "publisher": "qwenlm",
   "icon": "assets/icon.png",
   "repository": {