Mirror of https://github.com/QwenLM/qwen-code.git (synced 2025-12-24 02:29:13 +00:00)

Compare commits: v0.0.14-ni… → release/v0 (5 commits)
| Author | SHA1 | Date |
|---|---|---|
|  | 56f394e46d |  |
|  | 9a0cb64a34 |  |
|  | 9fce177bd8 |  |
|  | f7841338c4 |  |
|  | c405434c41 |  |

package-lock.json (generated)
@@ -1,12 +1,12 @@
 {
   "name": "@qwen-code/qwen-code",
-  "version": "0.0.14-nightly.0",
+  "version": "0.0.14",
   "lockfileVersion": 3,
   "requires": true,
   "packages": {
     "": {
       "name": "@qwen-code/qwen-code",
-      "version": "0.0.14-nightly.0",
+      "version": "0.0.14",
       "workspaces": [
         "packages/*"
       ],
@@ -13454,7 +13454,7 @@
     },
     "packages/cli": {
       "name": "@qwen-code/qwen-code",
-      "version": "0.0.14-nightly.0",
+      "version": "0.0.14",
       "dependencies": {
         "@google/genai": "1.9.0",
         "@iarna/toml": "^2.2.5",
@@ -13662,7 +13662,7 @@
     },
     "packages/core": {
       "name": "@qwen-code/qwen-code-core",
-      "version": "0.0.14-nightly.0",
+      "version": "0.0.14",
       "dependencies": {
         "@google/genai": "1.13.0",
         "@lvce-editor/ripgrep": "^1.6.0",
@@ -13788,7 +13788,7 @@
     },
     "packages/test-utils": {
       "name": "@qwen-code/qwen-code-test-utils",
-      "version": "0.0.14-nightly.0",
+      "version": "0.0.14",
       "dev": true,
       "license": "Apache-2.0",
       "devDependencies": {
@@ -13800,7 +13800,7 @@
     },
     "packages/vscode-ide-companion": {
       "name": "qwen-code-vscode-ide-companion",
-      "version": "0.0.14-nightly.0",
+      "version": "0.0.14",
       "license": "LICENSE",
       "dependencies": {
         "@modelcontextprotocol/sdk": "^1.15.1",

package.json

@@ -1,6 +1,6 @@
 {
   "name": "@qwen-code/qwen-code",
-  "version": "0.0.14-nightly.0",
+  "version": "0.0.14",
   "engines": {
     "node": ">=20.0.0"
   },
@@ -13,7 +13,7 @@
     "url": "git+https://github.com/QwenLM/qwen-code.git"
   },
   "config": {
-    "sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.0.14-nightly.0"
+    "sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.0.14"
   },
   "scripts": {
     "start": "node scripts/start.js",

packages/cli/package.json

@@ -1,6 +1,6 @@
 {
   "name": "@qwen-code/qwen-code",
-  "version": "0.0.14-nightly.0",
+  "version": "0.0.14",
   "description": "Qwen Code",
   "repository": {
     "type": "git",
@@ -25,7 +25,7 @@
     "dist"
   ],
   "config": {
-    "sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.0.14-nightly.0"
+    "sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.0.14"
   },
   "dependencies": {
     "@google/genai": "1.9.0",

packages/core/package.json

@@ -1,6 +1,6 @@
 {
   "name": "@qwen-code/qwen-code-core",
-  "version": "0.0.14-nightly.0",
+  "version": "0.0.14",
   "description": "Qwen Code Core",
   "repository": {
     "type": "git",

client.test.ts

@@ -239,6 +239,7 @@ describe('Gemini Client (client.ts)', () => {
     };
     const mockSubagentManager = {
       listSubagents: vi.fn().mockResolvedValue([]),
+      addChangeListener: vi.fn().mockReturnValue(() => {}),
     };
     mockConfigObject = {
       getContentGeneratorConfig: vi

openaiContentGenerator/index.ts

@@ -12,6 +12,7 @@ import type { Config } from '../../config/config.js';
 import { OpenAIContentGenerator } from './openaiContentGenerator.js';
 import {
   DashScopeOpenAICompatibleProvider,
+  DeepSeekOpenAICompatibleProvider,
   OpenRouterOpenAICompatibleProvider,
   type OpenAICompatibleProvider,
   DefaultOpenAICompatibleProvider,
@@ -23,6 +24,7 @@ export { ContentGenerationPipeline, type PipelineConfig } from './pipeline.js';
 export {
   type OpenAICompatibleProvider,
   DashScopeOpenAICompatibleProvider,
+  DeepSeekOpenAICompatibleProvider,
   OpenRouterOpenAICompatibleProvider,
 } from './provider/index.js';
 
@@ -61,6 +63,13 @@ export function determineProvider(
     );
   }
 
+  if (DeepSeekOpenAICompatibleProvider.isDeepSeekProvider(config)) {
+    return new DeepSeekOpenAICompatibleProvider(
+      contentGeneratorConfig,
+      cliConfig,
+    );
+  }
+
   // Check for OpenRouter provider
   if (OpenRouterOpenAICompatibleProvider.isOpenRouterProvider(config)) {
     return new OpenRouterOpenAICompatibleProvider(
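
The new DeepSeek branch in determineProvider follows the shape of the existing checks: a static predicate on the configured base URL picks the provider class. A minimal sketch of that dispatch pattern follows. GeneratorConfig is a simplified stand-in for the repo's ContentGeneratorConfig, and the OpenRouter predicate body is an assumption (only its existence appears in this diff); the DeepSeek predicate matches the new file below.

// Minimal sketch of baseUrl-based provider dispatch.
interface GeneratorConfig {
  baseUrl?: string;
  model: string;
}

class DefaultProvider {
  constructor(readonly config: GeneratorConfig) {}
}

class DeepSeekProvider extends DefaultProvider {
  // Mirrors DeepSeekOpenAICompatibleProvider.isDeepSeekProvider: a
  // case-insensitive substring match on the configured base URL.
  static isDeepSeekProvider(config: GeneratorConfig): boolean {
    return (config.baseUrl ?? '').toLowerCase().includes('api.deepseek.com');
  }
}

class OpenRouterProvider extends DefaultProvider {
  // Assumed predicate for the sketch; the real body is not in this diff.
  static isOpenRouterProvider(config: GeneratorConfig): boolean {
    return (config.baseUrl ?? '').toLowerCase().includes('openrouter');
  }
}

// First matching predicate wins; DefaultProvider is the fallback.
function determineProvider(config: GeneratorConfig): DefaultProvider {
  if (DeepSeekProvider.isDeepSeekProvider(config)) {
    return new DeepSeekProvider(config);
  }
  if (OpenRouterProvider.isOpenRouterProvider(config)) {
    return new OpenRouterProvider(config);
  }
  return new DefaultProvider(config);
}

const provider = determineProvider({
  baseUrl: 'https://api.deepseek.com/v1',
  model: 'deepseek-chat',
});
console.log(provider.constructor.name); // "DeepSeekProvider"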

pipeline.ts

@@ -248,26 +248,23 @@ export class ContentGenerationPipeline {
       ...this.buildSamplingParameters(request),
     };
 
-    // Let provider enhance the request (e.g., add metadata, cache control)
-    const enhancedRequest = this.config.provider.buildRequest(
-      baseRequest,
-      userPromptId,
-    );
+    // Add streaming options if present
+    if (streaming) {
+      (
+        baseRequest as unknown as OpenAI.Chat.ChatCompletionCreateParamsStreaming
+      ).stream = true;
+      baseRequest.stream_options = { include_usage: true };
+    }
 
     // Add tools if present
     if (request.config?.tools) {
-      enhancedRequest.tools = await this.converter.convertGeminiToolsToOpenAI(
+      baseRequest.tools = await this.converter.convertGeminiToolsToOpenAI(
         request.config.tools,
       );
     }
 
-    // Add streaming options if needed
-    if (streaming) {
-      enhancedRequest.stream = true;
-      enhancedRequest.stream_options = { include_usage: true };
-    }
-
-    return enhancedRequest;
+    // Let provider enhance the request (e.g., add metadata, cache control)
+    return this.config.provider.buildRequest(baseRequest, userPromptId);
   }
 
   private buildSamplingParameters(
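
The pipeline change reorders buildRequest so that stream flags and converted tools are attached to the base request before the provider hook runs; that is what lets a provider such as DashScope see and annotate the final tool list. A minimal sketch of the ordering, with a hypothetical Provider interface standing in for the repo's provider type:

import type OpenAI from 'openai';

type Params = OpenAI.Chat.ChatCompletionCreateParams;

// Hypothetical provider hook: receives the fully assembled request.
interface Provider {
  buildRequest(req: Params, userPromptId: string): Params;
}

async function assembleRequest(
  base: Params,
  provider: Provider,
  opts: { streaming: boolean; tools?: OpenAI.Chat.ChatCompletionTool[] },
  userPromptId: string,
): Promise<Params> {
  // 1. Streaming flags go onto the base request first.
  if (opts.streaming) {
    (base as OpenAI.Chat.ChatCompletionCreateParamsStreaming).stream = true;
    base.stream_options = { include_usage: true };
  }
  // 2. Tools are attached next (in the real pipeline they come from an
  //    async Gemini-to-OpenAI converter).
  if (opts.tools) {
    base.tools = opts.tools;
  }
  // 3. Only now does the provider enhance the request, so cache_control
  //    and metadata can cover the complete message and tool lists.
  return provider.buildRequest(base, userPromptId);
}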

dashscope.test.ts

@@ -17,6 +17,7 @@ import { DashScopeOpenAICompatibleProvider } from './dashscope.js';
 import type { Config } from '../../../config/config.js';
 import type { ContentGeneratorConfig } from '../../contentGenerator.js';
 import { AuthType } from '../../contentGenerator.js';
+import type { ChatCompletionToolWithCache } from './types.js';
 import { DEFAULT_TIMEOUT, DEFAULT_MAX_RETRIES } from '../constants.js';
 
 // Mock OpenAI
@@ -253,17 +254,110 @@ describe('DashScopeOpenAICompatibleProvider', () => {
       },
     ]);
 
-    // Last message should NOT have cache control for non-streaming
+    // Last message should NOT have cache control for non-streaming requests
     const lastMessage = result.messages[1];
     expect(lastMessage.role).toBe('user');
     expect(lastMessage.content).toBe('Hello!');
   });
 
-  it('should add cache control to both system and last messages for streaming requests', () => {
-    const request = { ...baseRequest, stream: true };
-    const result = provider.buildRequest(request, 'test-prompt-id');
+  it('should add cache control to system message only for non-streaming requests with tools', () => {
+    const requestWithTool: OpenAI.Chat.ChatCompletionCreateParams = {
+      ...baseRequest,
+      messages: [
+        { role: 'system', content: 'You are a helpful assistant.' },
+        {
+          role: 'tool',
+          content: 'First tool output',
+          tool_call_id: 'call_1',
+        },
+        {
+          role: 'tool',
+          content: 'Second tool output',
+          tool_call_id: 'call_2',
+        },
+        { role: 'user', content: 'Hello!' },
+      ],
+      tools: [
+        {
+          type: 'function',
+          function: {
+            name: 'mockTool',
+            parameters: { type: 'object', properties: {} },
+          },
+        },
+      ],
+      stream: false,
+    };
 
-    expect(result.messages).toHaveLength(2);
+    const result = provider.buildRequest(requestWithTool, 'test-prompt-id');
+
+    expect(result.messages).toHaveLength(4);
+
+    const systemMessage = result.messages[0];
+    expect(systemMessage.content).toEqual([
+      {
+        type: 'text',
+        text: 'You are a helpful assistant.',
+        cache_control: { type: 'ephemeral' },
+      },
+    ]);
+
+    // Tool messages should remain unchanged
+    const firstToolMessage = result.messages[1];
+    expect(firstToolMessage.role).toBe('tool');
+    expect(firstToolMessage.content).toBe('First tool output');
+
+    const secondToolMessage = result.messages[2];
+    expect(secondToolMessage.role).toBe('tool');
+    expect(secondToolMessage.content).toBe('Second tool output');
+
+    // Last message should NOT have cache control for non-streaming requests
+    const lastMessage = result.messages[3];
+    expect(lastMessage.role).toBe('user');
+    expect(lastMessage.content).toBe('Hello!');
+
+    // Tools should NOT have cache control for non-streaming requests
+    const tools = result.tools as ChatCompletionToolWithCache[];
+    expect(tools).toBeDefined();
+    expect(tools).toHaveLength(1);
+    expect(tools[0].cache_control).toBeUndefined();
+  });
+
+  it('should add cache control to system, last history message, and last tool definition for streaming requests', () => {
+    const request = { ...baseRequest, stream: true };
+    const requestWithToolMessage: OpenAI.Chat.ChatCompletionCreateParams = {
+      ...request,
+      messages: [
+        { role: 'system', content: 'You are a helpful assistant.' },
+        {
+          role: 'tool',
+          content: 'First tool output',
+          tool_call_id: 'call_1',
+        },
+        {
+          role: 'tool',
+          content: 'Second tool output',
+          tool_call_id: 'call_2',
+        },
+        { role: 'user', content: 'Hello!' },
+      ],
+      tools: [
+        {
+          type: 'function',
+          function: {
+            name: 'mockTool',
+            parameters: { type: 'object', properties: {} },
+          },
+        },
+      ],
+    };
+
+    const result = provider.buildRequest(
+      requestWithToolMessage,
+      'test-prompt-id',
+    );
+
+    expect(result.messages).toHaveLength(4);
 
+    // System message should have cache control
     const systemMessage = result.messages[0];
@@ -275,8 +369,17 @@ describe('DashScopeOpenAICompatibleProvider', () => {
       },
     ]);
 
-    // Last message should also have cache control for streaming
-    const lastMessage = result.messages[1];
+    // Tool messages should remain unchanged
+    const firstToolMessage = result.messages[1];
+    expect(firstToolMessage.role).toBe('tool');
+    expect(firstToolMessage.content).toBe('First tool output');
+
+    const secondToolMessage = result.messages[2];
+    expect(secondToolMessage.role).toBe('tool');
+    expect(secondToolMessage.content).toBe('Second tool output');
+
+    // Last message should also have cache control
+    const lastMessage = result.messages[3];
     expect(lastMessage.content).toEqual([
       {
         type: 'text',
@@ -284,6 +387,40 @@ describe('DashScopeOpenAICompatibleProvider', () => {
         cache_control: { type: 'ephemeral' },
       },
     ]);
+
+    const tools = result.tools as ChatCompletionToolWithCache[];
+    expect(tools).toBeDefined();
+    expect(tools).toHaveLength(1);
+    expect(tools[0].cache_control).toEqual({ type: 'ephemeral' });
   });
 
+  it('should not add cache control to tool messages when request.tools is undefined', () => {
+    const requestWithoutConfiguredTools: OpenAI.Chat.ChatCompletionCreateParams =
+      {
+        ...baseRequest,
+        messages: [
+          { role: 'system', content: 'You are a helpful assistant.' },
+          {
+            role: 'tool',
+            content: 'Tool output',
+            tool_call_id: 'call_1',
+          },
+          { role: 'user', content: 'Hello!' },
+        ],
+      };
+
+    const result = provider.buildRequest(
+      requestWithoutConfiguredTools,
+      'test-prompt-id',
+    );
+
+    expect(result.messages).toHaveLength(3);
+
+    const toolMessage = result.messages[1];
+    expect(toolMessage.role).toBe('tool');
+    expect(toolMessage.content).toBe('Tool output');
+
+    expect(result.tools).toBeUndefined();
+  });
+
   it('should include metadata in the request', () => {
@@ -688,6 +825,60 @@ describe('DashScopeOpenAICompatibleProvider', () => {
     ).toBe(true); // Vision-specific parameter should be preserved
   });
 
+  it('should set high resolution flag for qwen3-vl-plus', () => {
+    const request: OpenAI.Chat.ChatCompletionCreateParams = {
+      model: 'qwen3-vl-plus',
+      messages: [
+        {
+          role: 'user',
+          content: [
+            { type: 'text', text: 'Please inspect the image.' },
+            {
+              type: 'image_url',
+              image_url: { url: 'https://example.com/vl.jpg' },
+            },
+          ],
+        },
+      ],
+      max_tokens: 50000,
+    };
+
+    const result = provider.buildRequest(request, 'test-prompt-id');
+
+    expect(result.max_tokens).toBe(32768);
+    expect(
+      (result as { vl_high_resolution_images?: boolean })
+        .vl_high_resolution_images,
+    ).toBe(true);
+  });
+
+  it('should set high resolution flag for the vision-model alias', () => {
+    const request: OpenAI.Chat.ChatCompletionCreateParams = {
+      model: 'vision-model',
+      messages: [
+        {
+          role: 'user',
+          content: [
+            { type: 'text', text: 'Alias payload' },
+            {
+              type: 'image_url',
+              image_url: { url: 'https://example.com/alias.png' },
+            },
+          ],
+        },
+      ],
+      max_tokens: 9000,
+    };
+
+    const result = provider.buildRequest(request, 'test-prompt-id');
+
+    expect(result.max_tokens).toBe(8192);
+    expect(
+      (result as { vl_high_resolution_images?: boolean })
+        .vl_high_resolution_images,
+    ).toBe(true);
+  });
+
   it('should handle streaming requests with output token limits', () => {
     const request: OpenAI.Chat.ChatCompletionCreateParams = {
       model: 'qwen3-coder-plus',

dashscope.ts

@@ -9,6 +9,7 @@ import type {
   DashScopeRequestMetadata,
   ChatCompletionContentPartTextWithCache,
   ChatCompletionContentPartWithCache,
+  ChatCompletionToolWithCache,
 } from './types.js';
 
 export class DashScopeOpenAICompatibleProvider
@@ -70,7 +71,8 @@ export class DashScopeOpenAICompatibleProvider
    * Build and configure the request for DashScope API.
    *
    * This method applies DashScope-specific configurations including:
-   * - Cache control for system and user messages
+   * - Cache control for the system message, last tool message (when tools are configured),
+   *   and the latest history message
    * - Output token limits based on model capabilities
    * - Vision model specific parameters (vl_high_resolution_images)
    * - Request metadata for session tracking
@@ -84,13 +86,17 @@ export class DashScopeOpenAICompatibleProvider
     userPromptId: string,
   ): OpenAI.Chat.ChatCompletionCreateParams {
     let messages = request.messages;
+    let tools = request.tools;
 
     // Apply DashScope cache control only if not disabled
     if (!this.shouldDisableCacheControl()) {
-      // Add cache control to system and last messages for DashScope providers
-      // Only add cache control to system message for non-streaming requests
-      const cacheTarget = request.stream ? 'both' : 'system';
-      messages = this.addDashScopeCacheControl(messages, cacheTarget);
+      const { messages: updatedMessages, tools: updatedTools } =
+        this.addDashScopeCacheControl(
+          request,
+          request.stream ? 'all' : 'system_only',
+        );
+      messages = updatedMessages;
+      tools = updatedTools;
     }
 
     // Apply output token limits based on model capabilities
@@ -100,10 +106,11 @@ export class DashScopeOpenAICompatibleProvider
       request.model,
     );
 
-    if (request.model.startsWith('qwen-vl')) {
+    if (this.isVisionModel(request.model)) {
       return {
         ...requestWithTokenLimits,
         messages,
+        ...(tools ? { tools } : {}),
         ...(this.buildMetadata(userPromptId) || {}),
         /* @ts-expect-error dashscope exclusive */
         vl_high_resolution_images: true,
@@ -113,6 +120,7 @@ export class DashScopeOpenAICompatibleProvider
     return {
       ...requestWithTokenLimits, // Preserve all original parameters including sampling params and adjusted max_tokens
       messages,
+      ...(tools ? { tools } : {}),
       ...(this.buildMetadata(userPromptId) || {}),
     } as OpenAI.Chat.ChatCompletionCreateParams;
   }
@@ -130,75 +138,67 @@ export class DashScopeOpenAICompatibleProvider
    * Add cache control flag to specified message(s) for DashScope providers
    */
   private addDashScopeCacheControl(
-    messages: OpenAI.Chat.ChatCompletionMessageParam[],
-    target: 'system' | 'last' | 'both' = 'both',
-  ): OpenAI.Chat.ChatCompletionMessageParam[] {
-    if (messages.length === 0) {
-      return messages;
-    }
+    request: OpenAI.Chat.ChatCompletionCreateParams,
+    cacheControl: 'system_only' | 'all',
+  ): {
+    messages: OpenAI.Chat.ChatCompletionMessageParam[];
+    tools?: ChatCompletionToolWithCache[];
+  } {
+    const messages = request.messages;
 
-    let updatedMessages = [...messages];
+    const systemIndex = messages.findIndex((msg) => msg.role === 'system');
+    const lastIndex = messages.length - 1;
 
-    // Add cache control to system message if requested
-    if (target === 'system' || target === 'both') {
-      updatedMessages = this.addCacheControlToMessage(
-        updatedMessages,
-        'system',
-      );
-    }
+    const updatedMessages =
+      messages.length === 0
+        ? messages
+        : messages.map((message, index) => {
+            const shouldAddCacheControl = Boolean(
+              (index === systemIndex && systemIndex !== -1) ||
+                (index === lastIndex && cacheControl === 'all'),
+            );
 
-    // Add cache control to last message if requested
-    if (target === 'last' || target === 'both') {
-      updatedMessages = this.addCacheControlToMessage(updatedMessages, 'last');
-    }
+            if (
+              !shouldAddCacheControl ||
+              !('content' in message) ||
+              message.content === null ||
+              message.content === undefined
+            ) {
+              return message;
+            }
 
-    return updatedMessages;
+            return {
+              ...message,
+              content: this.addCacheControlToContent(message.content),
+            } as OpenAI.Chat.ChatCompletionMessageParam;
+          });
+
+    const updatedTools =
+      cacheControl === 'all' && request.tools?.length
+        ? this.addCacheControlToTools(request.tools)
+        : (request.tools as ChatCompletionToolWithCache[] | undefined);
+
+    return {
+      messages: updatedMessages,
+      tools: updatedTools,
+    };
   }
 
   /**
    * Helper method to add cache control to a specific message
    */
-  private addCacheControlToMessage(
-    messages: OpenAI.Chat.ChatCompletionMessageParam[],
-    target: 'system' | 'last',
-  ): OpenAI.Chat.ChatCompletionMessageParam[] {
-    const updatedMessages = [...messages];
-    const messageIndex = this.findTargetMessageIndex(messages, target);
-
-    if (messageIndex === -1) {
-      return updatedMessages;
+  private addCacheControlToTools(
+    tools: OpenAI.Chat.ChatCompletionTool[],
+  ): ChatCompletionToolWithCache[] {
+    if (tools.length === 0) {
+      return tools as ChatCompletionToolWithCache[];
     }
 
-    const message = updatedMessages[messageIndex];
+    const updatedTools = [...tools] as ChatCompletionToolWithCache[];
+    const lastToolIndex = tools.length - 1;
+    updatedTools[lastToolIndex] = {
+      ...updatedTools[lastToolIndex],
+      cache_control: { type: 'ephemeral' },
+    };
 
-    // Only process messages that have content
-    if (
-      'content' in message &&
-      message.content !== null &&
-      message.content !== undefined
-    ) {
-      const updatedContent = this.addCacheControlToContent(message.content);
-      updatedMessages[messageIndex] = {
-        ...message,
-        content: updatedContent,
-      } as OpenAI.Chat.ChatCompletionMessageParam;
-    }
-
-    return updatedMessages;
-  }
-
-  /**
-   * Find the index of the target message (system or last)
-   */
-  private findTargetMessageIndex(
-    messages: OpenAI.Chat.ChatCompletionMessageParam[],
-    target: 'system' | 'last',
-  ): number {
-    if (target === 'system') {
-      return messages.findIndex((msg) => msg.role === 'system');
-    } else {
-      return messages.length - 1;
-    }
+    return updatedTools;
   }
 
   /**
@@ -267,6 +267,28 @@ export class DashScopeOpenAICompatibleProvider
     return contentArray;
   }
 
+  private isVisionModel(model: string | undefined): boolean {
+    if (!model) {
+      return false;
+    }
+
+    const normalized = model.toLowerCase();
+
+    if (normalized === 'vision-model') {
+      return true;
+    }
+
+    if (normalized.startsWith('qwen-vl')) {
+      return true;
+    }
+
+    if (normalized.startsWith('qwen3-vl-plus')) {
+      return true;
+    }
+
+    return false;
+  }
+
   /**
    * Apply output token limit to a request's max_tokens parameter.
    *
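
Taken together, the DashScope change replaces per-target helper methods with a single pass that may mark up to three places with ephemeral cache control: the system message always, plus the last history message and the last tool definition when the request streams ('all' mode). A minimal sketch of that placement rule, using plain local types rather than the repo's:

type CacheControl = { type: 'ephemeral' };

interface Msg {
  role: 'system' | 'user' | 'assistant' | 'tool';
  content: string;
  cache_control?: CacheControl;
}

interface Tool {
  name: string;
  cache_control?: CacheControl;
}

// 'system_only' for non-streaming requests, 'all' for streaming ones.
function addCacheControl(
  messages: Msg[],
  tools: Tool[] | undefined,
  mode: 'system_only' | 'all',
): { messages: Msg[]; tools?: Tool[] } {
  const systemIndex = messages.findIndex((m) => m.role === 'system');
  const lastIndex = messages.length - 1;

  // Mark the system message always; mark the last message only in 'all' mode.
  const updatedMessages = messages.map((m, i) => {
    const mark =
      (i === systemIndex && systemIndex !== -1) ||
      (i === lastIndex && mode === 'all');
    return mark ? { ...m, cache_control: { type: 'ephemeral' as const } } : m;
  });

  // Only the last tool definition is marked, and only in 'all' mode.
  const updatedTools =
    mode === 'all' && tools?.length
      ? tools.map((t, i) =>
          i === tools.length - 1
            ? { ...t, cache_control: { type: 'ephemeral' as const } }
            : t,
        )
      : tools;

  return { messages: updatedMessages, tools: updatedTools };
}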

deepseek.test.ts (new file)

@@ -0,0 +1,132 @@
+/**
+ * @license
+ * Copyright 2025 Qwen
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+import { describe, it, expect, vi, beforeEach } from 'vitest';
+import type OpenAI from 'openai';
+import { DeepSeekOpenAICompatibleProvider } from './deepseek.js';
+import type { ContentGeneratorConfig } from '../../contentGenerator.js';
+import type { Config } from '../../../config/config.js';
+
+// Mock OpenAI client to avoid real network calls
+vi.mock('openai', () => ({
+  default: vi.fn().mockImplementation((config) => ({
+    config,
+  })),
+}));
+
+describe('DeepSeekOpenAICompatibleProvider', () => {
+  let provider: DeepSeekOpenAICompatibleProvider;
+  let mockContentGeneratorConfig: ContentGeneratorConfig;
+  let mockCliConfig: Config;
+
+  beforeEach(() => {
+    vi.clearAllMocks();
+
+    mockContentGeneratorConfig = {
+      apiKey: 'test-api-key',
+      baseUrl: 'https://api.deepseek.com/v1',
+      model: 'deepseek-chat',
+    } as ContentGeneratorConfig;
+
+    mockCliConfig = {
+      getCliVersion: vi.fn().mockReturnValue('1.0.0'),
+    } as unknown as Config;
+
+    provider = new DeepSeekOpenAICompatibleProvider(
+      mockContentGeneratorConfig,
+      mockCliConfig,
+    );
+  });
+
+  describe('isDeepSeekProvider', () => {
+    it('returns true when baseUrl includes deepseek', () => {
+      const result = DeepSeekOpenAICompatibleProvider.isDeepSeekProvider(
+        mockContentGeneratorConfig,
+      );
+      expect(result).toBe(true);
+    });
+
+    it('returns false for non deepseek baseUrl', () => {
+      const config = {
+        ...mockContentGeneratorConfig,
+        baseUrl: 'https://api.example.com/v1',
+      } as ContentGeneratorConfig;
+
+      const result =
+        DeepSeekOpenAICompatibleProvider.isDeepSeekProvider(config);
+      expect(result).toBe(false);
+    });
+  });
+
+  describe('buildRequest', () => {
+    const userPromptId = 'prompt-123';
+
+    it('converts array content into a string', () => {
+      const originalRequest: OpenAI.Chat.ChatCompletionCreateParams = {
+        model: 'deepseek-chat',
+        messages: [
+          {
+            role: 'user',
+            content: [
+              { type: 'text', text: 'Hello' },
+              { type: 'text', text: ' world' },
+            ],
+          },
+        ],
+      };
+
+      const result = provider.buildRequest(originalRequest, userPromptId);
+
+      expect(result.messages).toHaveLength(1);
+      expect(result.messages?.[0]).toEqual({
+        role: 'user',
+        content: 'Hello world',
+      });
+      expect(originalRequest.messages?.[0].content).toEqual([
+        { type: 'text', text: 'Hello' },
+        { type: 'text', text: ' world' },
+      ]);
+    });
+
+    it('leaves string content unchanged', () => {
+      const originalRequest: OpenAI.Chat.ChatCompletionCreateParams = {
+        model: 'deepseek-chat',
+        messages: [
+          {
+            role: 'user',
+            content: 'Hello world',
+          },
+        ],
+      };
+
+      const result = provider.buildRequest(originalRequest, userPromptId);
+
+      expect(result.messages?.[0].content).toBe('Hello world');
+    });
+
+    it('throws when encountering non-text multimodal parts', () => {
+      const originalRequest: OpenAI.Chat.ChatCompletionCreateParams = {
+        model: 'deepseek-chat',
+        messages: [
+          {
+            role: 'user',
+            content: [
+              { type: 'text', text: 'Hello' },
+              {
+                type: 'image_url',
+                image_url: { url: 'https://example.com/image.png' },
+              },
+            ],
+          },
+        ],
+      };
+
+      expect(() =>
+        provider.buildRequest(originalRequest, userPromptId),
+      ).toThrow(/only supports text content/i);
+    });
+  });
+});

deepseek.ts (new file)

@@ -0,0 +1,79 @@
+/**
+ * @license
+ * Copyright 2025 Qwen
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+import type OpenAI from 'openai';
+import type { Config } from '../../../config/config.js';
+import type { ContentGeneratorConfig } from '../../contentGenerator.js';
+import { DefaultOpenAICompatibleProvider } from './default.js';
+
+export class DeepSeekOpenAICompatibleProvider extends DefaultOpenAICompatibleProvider {
+  constructor(
+    contentGeneratorConfig: ContentGeneratorConfig,
+    cliConfig: Config,
+  ) {
+    super(contentGeneratorConfig, cliConfig);
+  }
+
+  static isDeepSeekProvider(
+    contentGeneratorConfig: ContentGeneratorConfig,
+  ): boolean {
+    const baseUrl = contentGeneratorConfig.baseUrl ?? '';
+
+    return baseUrl.toLowerCase().includes('api.deepseek.com');
+  }
+
+  override buildRequest(
+    request: OpenAI.Chat.ChatCompletionCreateParams,
+    userPromptId: string,
+  ): OpenAI.Chat.ChatCompletionCreateParams {
+    const baseRequest = super.buildRequest(request, userPromptId);
+    if (!baseRequest.messages?.length) {
+      return baseRequest;
+    }
+
+    const messages = baseRequest.messages.map((message) => {
+      if (!('content' in message)) {
+        return message;
+      }
+
+      const { content } = message;
+
+      if (
+        typeof content === 'string' ||
+        content === null ||
+        content === undefined
+      ) {
+        return message;
+      }
+
+      if (!Array.isArray(content)) {
+        return message;
+      }
+
+      const text = content
+        .map((part) => {
+          if (part.type !== 'text') {
+            throw new Error(
+              `DeepSeek provider only supports text content. Found non-text part of type '${part.type}' in message with role '${message.role}'.`,
+            );
+          }
+
+          return part.text ?? '';
+        })
+        .join('');
+
+      return {
+        ...message,
+        content: text,
+      } as OpenAI.Chat.ChatCompletionMessageParam;
+    });
+
+    return {
+      ...baseRequest,
+      messages,
+    };
+  }
+}
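
The new provider's only transformation is flattening array-form message content into a single string, throwing on non-text parts. A small standalone sketch of that flattening rule (the helper name flattenContent is introduced here for illustration; the real logic lives inline in buildRequest):

import type OpenAI from 'openai';

type Part = OpenAI.Chat.ChatCompletionContentPart;

// Mirrors the provider's rule: text parts are concatenated, anything
// else (image_url, audio, ...) is rejected with a descriptive error.
function flattenContent(parts: Part[], role: string): string {
  return parts
    .map((part) => {
      if (part.type !== 'text') {
        throw new Error(
          `DeepSeek provider only supports text content. Found non-text part of type '${part.type}' in message with role '${role}'.`,
        );
      }
      return part.text ?? '';
    })
    .join('');
}

console.log(
  flattenContent(
    [
      { type: 'text', text: 'Hello' },
      { type: 'text', text: ' world' },
    ],
    'user',
  ),
); // "Hello world"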

provider/index.ts

@@ -1,4 +1,5 @@
 export { DashScopeOpenAICompatibleProvider } from './dashscope.js';
+export { DeepSeekOpenAICompatibleProvider } from './deepseek.js';
 export { OpenRouterOpenAICompatibleProvider } from './openrouter.js';
 export { DefaultOpenAICompatibleProvider } from './default.js';
 export type {

provider/types.ts

@@ -11,6 +11,10 @@ export type ChatCompletionContentPartWithCache =
   | OpenAI.Chat.ChatCompletionContentPartImage
   | OpenAI.Chat.ChatCompletionContentPartRefusal;
 
+export type ChatCompletionToolWithCache = OpenAI.Chat.ChatCompletionTool & {
+  cache_control?: { type: 'ephemeral' };
+};
+
 export interface OpenAICompatibleProvider {
   buildHeaders(): Record<string, string | undefined>;
   buildClient(): OpenAI;

tokenLimits.test.ts

@@ -278,6 +278,11 @@ describe('tokenLimit with output type', () => {
     expect(tokenLimit('qwen-vl-max-latest', 'output')).toBe(8192); // 8K output
   });
 
+  it('should return different limits for input vs output for qwen3-vl-plus', () => {
+    expect(tokenLimit('qwen3-vl-plus', 'input')).toBe(262144); // 256K input
+    expect(tokenLimit('qwen3-vl-plus', 'output')).toBe(32768); // 32K output
+  });
+
   it('should return same default limits for unknown models', () => {
     expect(tokenLimit('unknown-model', 'input')).toBe(DEFAULT_TOKEN_LIMIT); // 128K input
     expect(tokenLimit('unknown-model', 'output')).toBe(

tokenLimits.ts

@@ -135,6 +135,7 @@ const PATTERNS: Array<[RegExp, TokenCount]> = [
   [/^qwen-turbo.*$/, LIMITS['128k']],
 
   // Qwen Vision Models
+  [/^qwen3-vl-plus$/, LIMITS['256k']], // Qwen3-VL-Plus: 256K input
   [/^qwen-vl-max.*$/, LIMITS['128k']],
 
   // Generic vision-model: same as qwen-vl-max (128K token context)
@@ -187,8 +188,8 @@ const OUTPUT_PATTERNS: Array<[RegExp, TokenCount]> = [
   // Generic vision-model: same as qwen-vl-max-latest (8K max output tokens)
   [/^vision-model$/, LIMITS['8k']],
 
-  // Qwen3-VL-Plus: 8,192 max output tokens
-  [/^qwen3-vl-plus$/, LIMITS['8k']],
+  // Qwen3-VL-Plus: 32K max output tokens
+  [/^qwen3-vl-plus$/, LIMITS['32k']],
 ];
 
 /**
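
The limit tables are scanned with first-match-wins semantics over anchored regexes, so adding the exact pattern /^qwen3-vl-plus$/ routes that model away from the broader vision fallbacks. A minimal sketch under that assumption (the LIMITS values are the usual powers of two and match the test expectations above; the default fallback here is an assumption for the sketch):

// First-match lookup over anchored regex patterns.
const LIMITS = {
  '8k': 8_192,
  '32k': 32_768,
  '128k': 131_072,
  '256k': 262_144,
} as const;

const OUTPUT_PATTERNS: Array<[RegExp, number]> = [
  [/^vision-model$/, LIMITS['8k']],
  // Qwen3-VL-Plus output limit raised from 8K to 32K in this change.
  [/^qwen3-vl-plus$/, LIMITS['32k']],
];

const DEFAULT_OUTPUT_LIMIT = LIMITS['8k']; // assumed fallback for the sketch

function outputTokenLimit(model: string): number {
  for (const [pattern, limit] of OUTPUT_PATTERNS) {
    if (pattern.test(model)) {
      return limit;
    }
  }
  return DEFAULT_OUTPUT_LIMIT;
}

console.log(outputTokenLimit('qwen3-vl-plus')); // 32768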

SubagentManager

@@ -40,11 +40,29 @@ const AGENT_CONFIG_DIR = 'agents';
 export class SubagentManager {
   private readonly validator: SubagentValidator;
   private subagentsCache: Map<SubagentLevel, SubagentConfig[]> | null = null;
+  private readonly changeListeners: Set<() => void> = new Set();
 
   constructor(private readonly config: Config) {
     this.validator = new SubagentValidator();
   }
 
+  addChangeListener(listener: () => void): () => void {
+    this.changeListeners.add(listener);
+    return () => {
+      this.changeListeners.delete(listener);
+    };
+  }
+
+  private notifyChangeListeners(): void {
+    for (const listener of this.changeListeners) {
+      try {
+        listener();
+      } catch (error) {
+        console.warn('Subagent change listener threw an error:', error);
+      }
+    }
+  }
+
   /**
    * Creates a new subagent configuration.
    *
@@ -93,8 +111,8 @@
 
     try {
       await fs.writeFile(filePath, content, 'utf8');
-      // Clear cache after successful creation
-      this.clearCache();
+      // Refresh cache after successful creation
+      await this.refreshCache();
     } catch (error) {
       throw new SubagentError(
         `Failed to write subagent file: ${error instanceof Error ? error.message : 'Unknown error'}`,
@@ -183,8 +201,8 @@
 
     try {
       await fs.writeFile(existing.filePath, content, 'utf8');
-      // Clear cache after successful update
-      this.clearCache();
+      // Refresh cache after successful update
+      await this.refreshCache();
     } catch (error) {
       throw new SubagentError(
        `Failed to update subagent file: ${error instanceof Error ? error.message : 'Unknown error'}`,
@@ -242,8 +260,8 @@
       );
     }
 
-    // Clear cache after successful deletion
-    this.clearCache();
+    // Refresh cache after successful deletion
+    await this.refreshCache();
   }
 
   /**
@@ -327,21 +345,17 @@
    * @private
    */
   private async refreshCache(): Promise<void> {
-    this.subagentsCache = new Map();
+    const subagentsCache = new Map();
 
     const levels: SubagentLevel[] = ['project', 'user', 'builtin'];
 
     for (const level of levels) {
       const levelSubagents = await this.listSubagentsAtLevel(level);
-      this.subagentsCache.set(level, levelSubagents);
+      subagentsCache.set(level, levelSubagents);
     }
-  }
-
-  /**
-   * Clears the subagents cache, forcing the next listSubagents call to reload from disk.
-   */
-  clearCache(): void {
-    this.subagentsCache = null;
+
+    this.subagentsCache = subagentsCache;
+    this.notifyChangeListeners();
   }
 
   /**
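
The cache change replaces lazy invalidation (clearCache) with an eager rebuild (refreshCache) followed by a change notification, and addChangeListener returns an unsubscribe function in the usual Set-based style. A condensed, self-contained sketch of both pieces, with hypothetical ChangeNotifier/CacheOwner names:

class ChangeNotifier {
  private readonly listeners = new Set<() => void>();

  // Returns an unsubscribe function, as SubagentManager.addChangeListener does.
  addChangeListener(listener: () => void): () => void {
    this.listeners.add(listener);
    return () => {
      this.listeners.delete(listener);
    };
  }

  protected notify(): void {
    for (const listener of this.listeners) {
      try {
        listener();
      } catch (error) {
        // A throwing listener must not break the others.
        console.warn('change listener threw an error:', error);
      }
    }
  }
}

class CacheOwner extends ChangeNotifier {
  private cache: string[] | null = null;

  async refresh(loader: () => Promise<string[]>): Promise<void> {
    // Build into a local first so readers never observe a half-built cache,
    // mirroring the refreshCache change in the diff above.
    const next = await loader();
    this.cache = next;
    this.notify();
  }
}

void (async () => {
  const owner = new CacheOwner();
  const unsubscribe = owner.addChangeListener(() => console.log('changed'));
  await owner.refresh(async () => ['agent-a']); // logs "changed"
  unsubscribe();
})();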

subagent.test.ts

@@ -41,12 +41,14 @@ import type {
   ToolConfig,
 } from './types.js';
 import { SubagentTerminateMode } from './types.js';
+import { GeminiClient } from '../core/client.js';
 
 vi.mock('../core/geminiChat.js');
 vi.mock('../core/contentGenerator.js');
 vi.mock('../utils/environmentContext.js');
 vi.mock('../core/nonInteractiveToolExecutor.js');
 vi.mock('../ide/ide-client.js');
+vi.mock('../core/client.js');
 
 async function createMockConfig(
   toolRegistryMocks = {},
@@ -194,6 +196,28 @@ describe('subagent.ts', () => {
     }) as unknown as GeminiChat,
   );
 
+  // Mock GeminiClient constructor to return a properly mocked client
+  const mockGeminiChat = {
+    setTools: vi.fn(),
+    getHistory: vi.fn().mockReturnValue([]),
+    setHistory: vi.fn(),
+    sendMessageStream: vi.fn(),
+  };
+
+  const mockGeminiClient = {
+    getChat: vi.fn().mockReturnValue(mockGeminiChat),
+    setTools: vi.fn().mockResolvedValue(undefined),
+    isInitialized: vi.fn().mockReturnValue(true),
+    getHistory: vi.fn().mockReturnValue([]),
+    initialize: vi.fn().mockResolvedValue(undefined),
+    setHistory: vi.fn(),
+  };
+
+  // Mock the GeminiClient constructor
+  vi.mocked(GeminiClient).mockImplementation(
+    () => mockGeminiClient as unknown as GeminiClient,
+  );
+
   // Default mock for executeToolCall
   vi.mocked(executeToolCall).mockResolvedValue({
     callId: 'default-call',

task.test.ts

@@ -43,6 +43,7 @@ describe('TaskTool', () => {
   let config: Config;
   let taskTool: TaskTool;
   let mockSubagentManager: SubagentManager;
+  let changeListeners: Array<() => void>;
 
   const mockSubagents: SubagentConfig[] = [
     {
@@ -70,13 +71,25 @@
       getProjectRoot: vi.fn().mockReturnValue('/test/project'),
       getSessionId: vi.fn().mockReturnValue('test-session-id'),
       getSubagentManager: vi.fn(),
+      getGeminiClient: vi.fn().mockReturnValue(undefined),
     } as unknown as Config;
 
+    changeListeners = [];
+
     // Setup SubagentManager mock
     mockSubagentManager = {
       listSubagents: vi.fn().mockResolvedValue(mockSubagents),
       loadSubagent: vi.fn(),
       createSubagentScope: vi.fn(),
+      addChangeListener: vi.fn((listener: () => void) => {
+        changeListeners.push(listener);
+        return () => {
+          const index = changeListeners.indexOf(listener);
+          if (index >= 0) {
+            changeListeners.splice(index, 1);
+          }
+        };
+      }),
     } as unknown as SubagentManager;
 
     MockedSubagentManager.mockImplementation(() => mockSubagentManager);
@@ -106,6 +119,10 @@
     expect(mockSubagentManager.listSubagents).toHaveBeenCalled();
   });
 
+  it('should subscribe to subagent manager changes', () => {
+    expect(mockSubagentManager.addChangeListener).toHaveBeenCalledTimes(1);
+  });
+
   it('should update description with available subagents', () => {
     expect(taskTool.description).toContain('file-search');
     expect(taskTool.description).toContain(
@@ -232,6 +249,31 @@
   });
 
   describe('refreshSubagents', () => {
+    it('should refresh when change listener fires', async () => {
+      const newSubagents: SubagentConfig[] = [
+        {
+          name: 'new-agent',
+          description: 'A brand new agent',
+          systemPrompt: 'Do new things.',
+          level: 'project',
+          filePath: '/project/.qwen/agents/new-agent.md',
+        },
+      ];
+
+      vi.mocked(mockSubagentManager.listSubagents).mockResolvedValueOnce(
+        newSubagents,
+      );
+
+      const listener = changeListeners[0];
+      expect(listener).toBeDefined();
+
+      listener?.();
+      await vi.runAllTimersAsync();
+
+      expect(taskTool.description).toContain('new-agent');
+      expect(taskTool.description).toContain('A brand new agent');
+    });
+
     it('should refresh available subagents and update description', async () => {
       const newSubagents: SubagentConfig[] = [
         {

task.ts

@@ -86,16 +86,19 @@ export class TaskTool extends BaseDeclarativeTool<TaskParams, ToolResult> {
     );
 
     this.subagentManager = config.getSubagentManager();
+    this.subagentManager.addChangeListener(() => {
+      void this.refreshSubagents();
+    });
 
-    // Initialize the tool asynchronously
-    this.initializeAsync();
+    this.refreshSubagents();
   }
 
   /**
    * Asynchronously initializes the tool by loading available subagents
    * and updating the description and schema.
    */
-  private async initializeAsync(): Promise<void> {
+  async refreshSubagents(): Promise<void> {
     try {
       this.availableSubagents = await this.subagentManager.listSubagents();
       this.updateDescriptionAndSchema();
@@ -103,6 +106,12 @@ export class TaskTool extends BaseDeclarativeTool<TaskParams, ToolResult> {
       console.warn('Failed to load subagents for Task tool:', error);
       this.availableSubagents = [];
       this.updateDescriptionAndSchema();
+    } finally {
+      // Update the client with the new tools
+      const geminiClient = this.config.getGeminiClient();
+      if (geminiClient) {
+        await geminiClient.setTools();
+      }
     }
   }
 
@@ -201,14 +210,6 @@ assistant: "I'm going to use the Task tool to launch the with the greeting-respo
     }
   }
 
-  /**
-   * Refreshes the available subagents and updates the tool description.
-   * This can be called when subagents are added or removed.
-   */
-  async refreshSubagents(): Promise<void> {
-    await this.initializeAsync();
-  }
-
   override validateToolParams(params: TaskParams): string | null {
     // Validate required fields
     if (
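
With SubagentManager now emitting change events, TaskTool no longer needs a separate refreshSubagents wrapper around initializeAsync: the constructor subscribes and triggers a fire-and-forget refresh, and the same method runs again whenever a subagent is created, updated, or deleted. A minimal sketch of that wiring, against a hypothetical Manager interface:

// Hypothetical minimal manager surface for the sketch.
interface Manager {
  addChangeListener(listener: () => void): () => void;
  listSubagents(): Promise<Array<{ name: string; description: string }>>;
}

class Tool {
  description = '';

  constructor(private readonly manager: Manager) {
    // Subscribe first, then do the initial load; `void` marks the
    // fire-and-forget async call, as in the TaskTool constructor.
    this.manager.addChangeListener(() => {
      void this.refreshSubagents();
    });
    void this.refreshSubagents();
  }

  async refreshSubagents(): Promise<void> {
    try {
      const agents = await this.manager.listSubagents();
      this.description = agents
        .map((a) => `${a.name}: ${a.description}`)
        .join('\n');
    } catch (error) {
      // A failed refresh degrades to an empty list instead of throwing.
      console.warn('Failed to load subagents:', error);
      this.description = '';
    }
  }
}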

packages/test-utils/package.json

@@ -1,6 +1,6 @@
 {
   "name": "@qwen-code/qwen-code-test-utils",
-  "version": "0.0.14-nightly.0",
+  "version": "0.0.14",
   "private": true,
   "main": "src/index.ts",
   "license": "Apache-2.0",

packages/vscode-ide-companion/package.json

@@ -2,7 +2,7 @@
   "name": "qwen-code-vscode-ide-companion",
   "displayName": "Qwen Code Companion",
   "description": "Enable Qwen Code with direct access to your VS Code workspace.",
-  "version": "0.0.14-nightly.0",
+  "version": "0.0.14",
   "publisher": "qwenlm",
   "icon": "assets/icon.png",
   "repository": {