Mirror of https://github.com/QwenLM/qwen-code.git (synced 2026-01-05 16:39:14 +00:00)

Compare commits: v0.0.14-ni ... chore/igno (5 commits)
| Author | SHA1 | Date |
|---|---|---|
| | aba5d33630 | |
| | 0922437bd5 | |
| | 9a0cb64a34 | |
| | 9fce177bd8 | |
| | f7841338c4 | |
**CHANGELOG.md**

```diff
@@ -1,5 +1,13 @@
 # Changelog
 
+## 0.0.14
+
+- Added plan mode support for task planning
+- Fixed unreliable editCorrector that injects extra escape characters
+- Fixed task tool dynamic updates
+- Added Qwen3-VL-Plus token limits (256K input, 32K output) and highres support
+- Enhanced dashScope cache control
+
 ## 0.0.13
 
 - Added YOLO mode support for automatic vision model switching with CLI arguments and environment variables.
```
**package-lock.json** (generated; 12 lines changed)

```diff
@@ -1,12 +1,12 @@
 {
   "name": "@qwen-code/qwen-code",
-  "version": "0.0.14-nightly.1",
+  "version": "0.0.14",
   "lockfileVersion": 3,
   "requires": true,
   "packages": {
     "": {
       "name": "@qwen-code/qwen-code",
-      "version": "0.0.14-nightly.1",
+      "version": "0.0.14",
       "workspaces": [
         "packages/*"
       ],
@@ -13454,7 +13454,7 @@
     },
     "packages/cli": {
       "name": "@qwen-code/qwen-code",
-      "version": "0.0.14-nightly.1",
+      "version": "0.0.14",
       "dependencies": {
         "@google/genai": "1.9.0",
         "@iarna/toml": "^2.2.5",
@@ -13662,7 +13662,7 @@
     },
     "packages/core": {
       "name": "@qwen-code/qwen-code-core",
-      "version": "0.0.14-nightly.1",
+      "version": "0.0.14",
       "dependencies": {
         "@google/genai": "1.13.0",
         "@lvce-editor/ripgrep": "^1.6.0",
@@ -13788,7 +13788,7 @@
     },
     "packages/test-utils": {
      "name": "@qwen-code/qwen-code-test-utils",
-      "version": "0.0.14-nightly.1",
+      "version": "0.0.14",
       "dev": true,
       "license": "Apache-2.0",
       "devDependencies": {
@@ -13800,7 +13800,7 @@
     },
     "packages/vscode-ide-companion": {
       "name": "qwen-code-vscode-ide-companion",
-      "version": "0.0.14-nightly.1",
+      "version": "0.0.14",
       "license": "LICENSE",
       "dependencies": {
         "@modelcontextprotocol/sdk": "^1.15.1",
```
**package.json** (repository root)

```diff
@@ -1,6 +1,6 @@
 {
   "name": "@qwen-code/qwen-code",
-  "version": "0.0.14-nightly.1",
+  "version": "0.0.14",
   "engines": {
     "node": ">=20.0.0"
   },
@@ -13,7 +13,7 @@
     "url": "git+https://github.com/QwenLM/qwen-code.git"
   },
   "config": {
-    "sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.0.14-nightly.1"
+    "sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.0.14"
   },
   "scripts": {
     "start": "node scripts/start.js",
```
**packages/cli/package.json**

```diff
@@ -1,6 +1,6 @@
 {
   "name": "@qwen-code/qwen-code",
-  "version": "0.0.14-nightly.1",
+  "version": "0.0.14",
   "description": "Qwen Code",
   "repository": {
     "type": "git",
@@ -25,7 +25,7 @@
     "dist"
   ],
   "config": {
-    "sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.0.14-nightly.1"
+    "sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.0.14"
   },
   "dependencies": {
     "@google/genai": "1.9.0",
```
**packages/core/package.json**

```diff
@@ -1,6 +1,6 @@
 {
   "name": "@qwen-code/qwen-code-core",
-  "version": "0.0.14-nightly.1",
+  "version": "0.0.14",
   "description": "Qwen Code Core",
   "repository": {
     "type": "git",
```
**OpenAI content generator index** (provider exports and `determineProvider`)

```diff
@@ -12,6 +12,7 @@ import type { Config } from '../../config/config.js';
 import { OpenAIContentGenerator } from './openaiContentGenerator.js';
 import {
   DashScopeOpenAICompatibleProvider,
+  DeepSeekOpenAICompatibleProvider,
   OpenRouterOpenAICompatibleProvider,
   type OpenAICompatibleProvider,
   DefaultOpenAICompatibleProvider,
@@ -23,6 +24,7 @@ export { ContentGenerationPipeline, type PipelineConfig } from './pipeline.js';
 export {
   type OpenAICompatibleProvider,
   DashScopeOpenAICompatibleProvider,
+  DeepSeekOpenAICompatibleProvider,
   OpenRouterOpenAICompatibleProvider,
 } from './provider/index.js';
 
@@ -61,6 +63,13 @@ export function determineProvider(
     );
   }
 
+  if (DeepSeekOpenAICompatibleProvider.isDeepSeekProvider(config)) {
+    return new DeepSeekOpenAICompatibleProvider(
+      contentGeneratorConfig,
+      cliConfig,
+    );
+  }
+
   // Check for OpenRouter provider
   if (OpenRouterOpenAICompatibleProvider.isOpenRouterProvider(config)) {
     return new OpenRouterOpenAICompatibleProvider(
```
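For orientation, a minimal sketch of the selection chain `determineProvider` now implements: each provider class exposes a static predicate, and the first match wins, so specific providers must be checked before the default. The DeepSeek predicate matching on `api.deepseek.com` is confirmed by `deepseek.ts` later in this diff; the DashScope and OpenRouter match strings below, and `pickProviderName` itself, are illustrative stand-ins.

```typescript
interface ProviderConfigLike {
  baseUrl?: string;
}

// Hypothetical condensed version of the chain; real code constructs
// provider instances instead of returning names.
function pickProviderName(config: ProviderConfigLike): string {
  const baseUrl = (config.baseUrl ?? '').toLowerCase();
  if (baseUrl.includes('dashscope')) return 'DashScopeOpenAICompatibleProvider'; // assumed match rule
  if (baseUrl.includes('api.deepseek.com')) return 'DeepSeekOpenAICompatibleProvider'; // confirmed below
  if (baseUrl.includes('openrouter')) return 'OpenRouterOpenAICompatibleProvider'; // assumed match rule
  return 'DefaultOpenAICompatibleProvider';
}

console.log(pickProviderName({ baseUrl: 'https://api.deepseek.com/v1' }));
// -> 'DeepSeekOpenAICompatibleProvider'
```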
**ContentGenerationPipeline** (request assembly reordering)

```diff
@@ -248,26 +248,23 @@ export class ContentGenerationPipeline {
       ...this.buildSamplingParameters(request),
     };
 
-    // Let provider enhance the request (e.g., add metadata, cache control)
-    const enhancedRequest = this.config.provider.buildRequest(
-      baseRequest,
-      userPromptId,
-    );
+    // Add streaming options if present
+    if (streaming) {
+      (
+        baseRequest as unknown as OpenAI.Chat.ChatCompletionCreateParamsStreaming
+      ).stream = true;
+      baseRequest.stream_options = { include_usage: true };
+    }
 
     // Add tools if present
     if (request.config?.tools) {
-      enhancedRequest.tools = await this.converter.convertGeminiToolsToOpenAI(
+      baseRequest.tools = await this.converter.convertGeminiToolsToOpenAI(
         request.config.tools,
       );
     }
 
-    // Add streaming options if needed
-    if (streaming) {
-      enhancedRequest.stream = true;
-      enhancedRequest.stream_options = { include_usage: true };
-    }
-
-    return enhancedRequest;
+    // Let provider enhance the request (e.g., add metadata, cache control)
+    return this.config.provider.buildRequest(baseRequest, userPromptId);
   }
 
   private buildSamplingParameters(
```
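The point of the reordering: the request is now fully assembled (stream flag, `stream_options`, converted tools) before the provider hook runs, so a provider like DashScope can see the final stream and tools state when deciding where to attach `cache_control`. A minimal sketch, with `assembleRequest` and `ProviderHook` as hypothetical stand-ins for the pipeline's internals:

```typescript
import type OpenAI from 'openai';

type Params = OpenAI.Chat.ChatCompletionCreateParams;

interface ProviderHook {
  buildRequest(req: Params, promptId: string): Params;
}

async function assembleRequest(
  base: Params,
  opts: { streaming: boolean; tools?: OpenAI.Chat.ChatCompletionTool[] },
  provider: ProviderHook,
  promptId: string,
): Promise<Params> {
  // Stream flags first, mirroring the new ordering in the diff above.
  if (opts.streaming) {
    (base as OpenAI.Chat.ChatCompletionCreateParamsStreaming).stream = true;
    base.stream_options = { include_usage: true };
  }
  if (opts.tools) {
    base.tools = opts.tools;
  }
  // The provider hook runs last and therefore observes stream/tools,
  // e.g. to attach DashScope cache_control to the last tool definition.
  return provider.buildRequest(base, promptId);
}
```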
**DashScope provider tests** (imports `./dashscope.js`)

```diff
@@ -17,6 +17,7 @@ import { DashScopeOpenAICompatibleProvider } from './dashscope.js';
 import type { Config } from '../../../config/config.js';
 import type { ContentGeneratorConfig } from '../../contentGenerator.js';
 import { AuthType } from '../../contentGenerator.js';
+import type { ChatCompletionToolWithCache } from './types.js';
 import { DEFAULT_TIMEOUT, DEFAULT_MAX_RETRIES } from '../constants.js';
 
 // Mock OpenAI
@@ -253,17 +254,110 @@ describe('DashScopeOpenAICompatibleProvider', () => {
       },
     ]);
 
-    // Last message should NOT have cache control for non-streaming
+    // Last message should NOT have cache control for non-streaming requests
     const lastMessage = result.messages[1];
     expect(lastMessage.role).toBe('user');
     expect(lastMessage.content).toBe('Hello!');
   });
 
-  it('should add cache control to both system and last messages for streaming requests', () => {
-    const request = { ...baseRequest, stream: true };
-    const result = provider.buildRequest(request, 'test-prompt-id');
+  it('should add cache control to system message only for non-streaming requests with tools', () => {
+    const requestWithTool: OpenAI.Chat.ChatCompletionCreateParams = {
+      ...baseRequest,
+      messages: [
+        { role: 'system', content: 'You are a helpful assistant.' },
+        {
+          role: 'tool',
+          content: 'First tool output',
+          tool_call_id: 'call_1',
+        },
+        {
+          role: 'tool',
+          content: 'Second tool output',
+          tool_call_id: 'call_2',
+        },
+        { role: 'user', content: 'Hello!' },
+      ],
+      tools: [
+        {
+          type: 'function',
+          function: {
+            name: 'mockTool',
+            parameters: { type: 'object', properties: {} },
+          },
+        },
+      ],
+      stream: false,
+    };
 
-    expect(result.messages).toHaveLength(2);
+    const result = provider.buildRequest(requestWithTool, 'test-prompt-id');
+
+    expect(result.messages).toHaveLength(4);
 
     const systemMessage = result.messages[0];
     expect(systemMessage.content).toEqual([
       {
         type: 'text',
         text: 'You are a helpful assistant.',
         cache_control: { type: 'ephemeral' },
       },
     ]);
+
+    // Tool messages should remain unchanged
+    const firstToolMessage = result.messages[1];
+    expect(firstToolMessage.role).toBe('tool');
+    expect(firstToolMessage.content).toBe('First tool output');
+
+    const secondToolMessage = result.messages[2];
+    expect(secondToolMessage.role).toBe('tool');
+    expect(secondToolMessage.content).toBe('Second tool output');
+
+    // Last message should NOT have cache control for non-streaming requests
+    const lastMessage = result.messages[3];
+    expect(lastMessage.role).toBe('user');
+    expect(lastMessage.content).toBe('Hello!');
+
+    // Tools should NOT have cache control for non-streaming requests
+    const tools = result.tools as ChatCompletionToolWithCache[];
+    expect(tools).toBeDefined();
+    expect(tools).toHaveLength(1);
+    expect(tools[0].cache_control).toBeUndefined();
+  });
+
+  it('should add cache control to system, last history message, and last tool definition for streaming requests', () => {
+    const request = { ...baseRequest, stream: true };
+    const requestWithToolMessage: OpenAI.Chat.ChatCompletionCreateParams = {
+      ...request,
+      messages: [
+        { role: 'system', content: 'You are a helpful assistant.' },
+        {
+          role: 'tool',
+          content: 'First tool output',
+          tool_call_id: 'call_1',
+        },
+        {
+          role: 'tool',
+          content: 'Second tool output',
+          tool_call_id: 'call_2',
+        },
+        { role: 'user', content: 'Hello!' },
+      ],
+      tools: [
+        {
+          type: 'function',
+          function: {
+            name: 'mockTool',
+            parameters: { type: 'object', properties: {} },
+          },
+        },
+      ],
+    };
+
+    const result = provider.buildRequest(
+      requestWithToolMessage,
+      'test-prompt-id',
+    );
+
+    expect(result.messages).toHaveLength(4);
 
     // System message should have cache control
     const systemMessage = result.messages[0];
@@ -275,8 +369,17 @@ describe('DashScopeOpenAICompatibleProvider', () => {
       },
     ]);
 
-    // Last message should also have cache control for streaming
-    const lastMessage = result.messages[1];
+    // Tool messages should remain unchanged
+    const firstToolMessage = result.messages[1];
+    expect(firstToolMessage.role).toBe('tool');
+    expect(firstToolMessage.content).toBe('First tool output');
+
+    const secondToolMessage = result.messages[2];
+    expect(secondToolMessage.role).toBe('tool');
+    expect(secondToolMessage.content).toBe('Second tool output');
+
+    // Last message should also have cache control
+    const lastMessage = result.messages[3];
     expect(lastMessage.content).toEqual([
       {
         type: 'text',
@@ -284,6 +387,40 @@ describe('DashScopeOpenAICompatibleProvider', () => {
         cache_control: { type: 'ephemeral' },
       },
     ]);
+
+    const tools = result.tools as ChatCompletionToolWithCache[];
+    expect(tools).toBeDefined();
+    expect(tools).toHaveLength(1);
+    expect(tools[0].cache_control).toEqual({ type: 'ephemeral' });
   });
 
+  it('should not add cache control to tool messages when request.tools is undefined', () => {
+    const requestWithoutConfiguredTools: OpenAI.Chat.ChatCompletionCreateParams =
+      {
+        ...baseRequest,
+        messages: [
+          { role: 'system', content: 'You are a helpful assistant.' },
+          {
+            role: 'tool',
+            content: 'Tool output',
+            tool_call_id: 'call_1',
+          },
+          { role: 'user', content: 'Hello!' },
+        ],
+      };
+
+    const result = provider.buildRequest(
+      requestWithoutConfiguredTools,
+      'test-prompt-id',
+    );
+
+    expect(result.messages).toHaveLength(3);
+
+    const toolMessage = result.messages[1];
+    expect(toolMessage.role).toBe('tool');
+    expect(toolMessage.content).toBe('Tool output');
+
+    expect(result.tools).toBeUndefined();
+  });
+
   it('should include metadata in the request', () => {
@@ -688,6 +825,60 @@ describe('DashScopeOpenAICompatibleProvider', () => {
     ).toBe(true); // Vision-specific parameter should be preserved
   });
 
+  it('should set high resolution flag for qwen3-vl-plus', () => {
+    const request: OpenAI.Chat.ChatCompletionCreateParams = {
+      model: 'qwen3-vl-plus',
+      messages: [
+        {
+          role: 'user',
+          content: [
+            { type: 'text', text: 'Please inspect the image.' },
+            {
+              type: 'image_url',
+              image_url: { url: 'https://example.com/vl.jpg' },
+            },
+          ],
+        },
+      ],
+      max_tokens: 50000,
+    };
+
+    const result = provider.buildRequest(request, 'test-prompt-id');
+
+    expect(result.max_tokens).toBe(32768);
+    expect(
+      (result as { vl_high_resolution_images?: boolean })
+        .vl_high_resolution_images,
+    ).toBe(true);
+  });
+
+  it('should set high resolution flag for the vision-model alias', () => {
+    const request: OpenAI.Chat.ChatCompletionCreateParams = {
+      model: 'vision-model',
+      messages: [
+        {
+          role: 'user',
+          content: [
+            { type: 'text', text: 'Alias payload' },
+            {
+              type: 'image_url',
+              image_url: { url: 'https://example.com/alias.png' },
+            },
+          ],
+        },
+      ],
+      max_tokens: 9000,
+    };
+
+    const result = provider.buildRequest(request, 'test-prompt-id');
+
+    expect(result.max_tokens).toBe(8192);
+    expect(
+      (result as { vl_high_resolution_images?: boolean })
+        .vl_high_resolution_images,
+    ).toBe(true);
+  });
+
   it('should handle streaming requests with output token limits', () => {
     const request: OpenAI.Chat.ChatCompletionCreateParams = {
       model: 'qwen3-coder-plus',
```
**DashScope provider** (`dashscope.ts`)

```diff
@@ -9,6 +9,7 @@ import type {
   DashScopeRequestMetadata,
   ChatCompletionContentPartTextWithCache,
   ChatCompletionContentPartWithCache,
+  ChatCompletionToolWithCache,
 } from './types.js';
 
 export class DashScopeOpenAICompatibleProvider
@@ -70,7 +71,8 @@ export class DashScopeOpenAICompatibleProvider
    * Build and configure the request for DashScope API.
    *
    * This method applies DashScope-specific configurations including:
-   * - Cache control for system and user messages
+   * - Cache control for the system message, last tool message (when tools are configured),
+   *   and the latest history message
    * - Output token limits based on model capabilities
    * - Vision model specific parameters (vl_high_resolution_images)
    * - Request metadata for session tracking
@@ -84,13 +86,17 @@ export class DashScopeOpenAICompatibleProvider
     userPromptId: string,
   ): OpenAI.Chat.ChatCompletionCreateParams {
     let messages = request.messages;
+    let tools = request.tools;
 
     // Apply DashScope cache control only if not disabled
     if (!this.shouldDisableCacheControl()) {
-      // Add cache control to system and last messages for DashScope providers
-      // Only add cache control to system message for non-streaming requests
-      const cacheTarget = request.stream ? 'both' : 'system';
-      messages = this.addDashScopeCacheControl(messages, cacheTarget);
+      const { messages: updatedMessages, tools: updatedTools } =
+        this.addDashScopeCacheControl(
+          request,
+          request.stream ? 'all' : 'system_only',
+        );
+      messages = updatedMessages;
+      tools = updatedTools;
     }
 
     // Apply output token limits based on model capabilities
@@ -100,10 +106,11 @@ export class DashScopeOpenAICompatibleProvider
       request.model,
     );
 
-    if (request.model.startsWith('qwen-vl')) {
+    if (this.isVisionModel(request.model)) {
       return {
         ...requestWithTokenLimits,
         messages,
+        ...(tools ? { tools } : {}),
         ...(this.buildMetadata(userPromptId) || {}),
         /* @ts-expect-error dashscope exclusive */
         vl_high_resolution_images: true,
@@ -113,6 +120,7 @@ export class DashScopeOpenAICompatibleProvider
     return {
       ...requestWithTokenLimits, // Preserve all original parameters including sampling params and adjusted max_tokens
       messages,
+      ...(tools ? { tools } : {}),
      ...(this.buildMetadata(userPromptId) || {}),
     } as OpenAI.Chat.ChatCompletionCreateParams;
   }
@@ -130,75 +138,67 @@ export class DashScopeOpenAICompatibleProvider
    * Add cache control flag to specified message(s) for DashScope providers
    */
   private addDashScopeCacheControl(
-    messages: OpenAI.Chat.ChatCompletionMessageParam[],
-    target: 'system' | 'last' | 'both' = 'both',
-  ): OpenAI.Chat.ChatCompletionMessageParam[] {
-    if (messages.length === 0) {
-      return messages;
-    }
+    request: OpenAI.Chat.ChatCompletionCreateParams,
+    cacheControl: 'system_only' | 'all',
+  ): {
+    messages: OpenAI.Chat.ChatCompletionMessageParam[];
+    tools?: ChatCompletionToolWithCache[];
+  } {
+    const messages = request.messages;
 
-    let updatedMessages = [...messages];
+    const systemIndex = messages.findIndex((msg) => msg.role === 'system');
+    const lastIndex = messages.length - 1;
 
-    // Add cache control to system message if requested
-    if (target === 'system' || target === 'both') {
-      updatedMessages = this.addCacheControlToMessage(
-        updatedMessages,
-        'system',
-      );
-    }
+    const updatedMessages =
+      messages.length === 0
+        ? messages
+        : messages.map((message, index) => {
+            const shouldAddCacheControl = Boolean(
+              (index === systemIndex && systemIndex !== -1) ||
+                (index === lastIndex && cacheControl === 'all'),
+            );
 
-    // Add cache control to last message if requested
-    if (target === 'last' || target === 'both') {
-      updatedMessages = this.addCacheControlToMessage(updatedMessages, 'last');
-    }
+            if (
+              !shouldAddCacheControl ||
+              !('content' in message) ||
+              message.content === null ||
+              message.content === undefined
+            ) {
+              return message;
+            }
 
-    return updatedMessages;
+            return {
+              ...message,
+              content: this.addCacheControlToContent(message.content),
+            } as OpenAI.Chat.ChatCompletionMessageParam;
+          });
+
+    const updatedTools =
+      cacheControl === 'all' && request.tools?.length
+        ? this.addCacheControlToTools(request.tools)
+        : (request.tools as ChatCompletionToolWithCache[] | undefined);
+
+    return {
+      messages: updatedMessages,
+      tools: updatedTools,
+    };
   }
 
   /**
    * Helper method to add cache control to a specific message
    */
-  private addCacheControlToMessage(
-    messages: OpenAI.Chat.ChatCompletionMessageParam[],
-    target: 'system' | 'last',
-  ): OpenAI.Chat.ChatCompletionMessageParam[] {
-    const updatedMessages = [...messages];
-    const messageIndex = this.findTargetMessageIndex(messages, target);
-
-    if (messageIndex === -1) {
-      return updatedMessages;
+  private addCacheControlToTools(
+    tools: OpenAI.Chat.ChatCompletionTool[],
+  ): ChatCompletionToolWithCache[] {
+    if (tools.length === 0) {
+      return tools as ChatCompletionToolWithCache[];
     }
 
-    const message = updatedMessages[messageIndex];
+    const updatedTools = [...tools] as ChatCompletionToolWithCache[];
+    const lastToolIndex = tools.length - 1;
+    updatedTools[lastToolIndex] = {
+      ...updatedTools[lastToolIndex],
+      cache_control: { type: 'ephemeral' },
+    };
 
-    // Only process messages that have content
-    if (
-      'content' in message &&
-      message.content !== null &&
-      message.content !== undefined
-    ) {
-      const updatedContent = this.addCacheControlToContent(message.content);
-      updatedMessages[messageIndex] = {
-        ...message,
-        content: updatedContent,
-      } as OpenAI.Chat.ChatCompletionMessageParam;
-    }
-
-    return updatedMessages;
-  }
-
-  /**
-   * Find the index of the target message (system or last)
-   */
-  private findTargetMessageIndex(
-    messages: OpenAI.Chat.ChatCompletionMessageParam[],
-    target: 'system' | 'last',
-  ): number {
-    if (target === 'system') {
-      return messages.findIndex((msg) => msg.role === 'system');
-    } else {
-      return messages.length - 1;
-    }
+    return updatedTools;
   }
 
   /**
@@ -267,6 +267,28 @@ export class DashScopeOpenAICompatibleProvider
     return contentArray;
   }
 
+  private isVisionModel(model: string | undefined): boolean {
+    if (!model) {
+      return false;
+    }
+
+    const normalized = model.toLowerCase();
+
+    if (normalized === 'vision-model') {
+      return true;
+    }
+
+    if (normalized.startsWith('qwen-vl')) {
+      return true;
+    }
+
+    if (normalized.startsWith('qwen3-vl-plus')) {
+      return true;
+    }
+
+    return false;
+  }
+
   /**
    * Apply output token limit to a request's max_tokens parameter.
    *
```
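To make the cache-control rules concrete, here is the approximate shape of a streaming request after `buildRequest` runs, reconstructed from the tests above. Illustrative only: field placement follows those tests, and `cache_control` is a DashScope extension, not part of the standard OpenAI request schema.

```typescript
const cachedRequest = {
  model: 'qwen3-coder-plus',
  stream: true,
  messages: [
    {
      role: 'system',
      // String content is expanded to a part array so the flag can attach.
      content: [
        {
          type: 'text',
          text: 'You are a helpful assistant.',
          cache_control: { type: 'ephemeral' },
        },
      ],
    },
    // Tool messages pass through unchanged.
    { role: 'tool', content: 'First tool output', tool_call_id: 'call_1' },
    {
      role: 'user',
      // The latest history message is flagged only for streaming requests.
      content: [
        { type: 'text', text: 'Hello!', cache_control: { type: 'ephemeral' } },
      ],
    },
  ],
  tools: [
    {
      type: 'function',
      function: { name: 'mockTool', parameters: { type: 'object', properties: {} } },
      // Only the last tool definition gets the flag, and only when streaming.
      cache_control: { type: 'ephemeral' },
    },
  ],
};
```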
**DeepSeek provider tests** (new file, +132 lines)

```typescript
/**
 * @license
 * Copyright 2025 Qwen
 * SPDX-License-Identifier: Apache-2.0
 */

import { describe, it, expect, vi, beforeEach } from 'vitest';
import type OpenAI from 'openai';
import { DeepSeekOpenAICompatibleProvider } from './deepseek.js';
import type { ContentGeneratorConfig } from '../../contentGenerator.js';
import type { Config } from '../../../config/config.js';

// Mock OpenAI client to avoid real network calls
vi.mock('openai', () => ({
  default: vi.fn().mockImplementation((config) => ({
    config,
  })),
}));

describe('DeepSeekOpenAICompatibleProvider', () => {
  let provider: DeepSeekOpenAICompatibleProvider;
  let mockContentGeneratorConfig: ContentGeneratorConfig;
  let mockCliConfig: Config;

  beforeEach(() => {
    vi.clearAllMocks();

    mockContentGeneratorConfig = {
      apiKey: 'test-api-key',
      baseUrl: 'https://api.deepseek.com/v1',
      model: 'deepseek-chat',
    } as ContentGeneratorConfig;

    mockCliConfig = {
      getCliVersion: vi.fn().mockReturnValue('1.0.0'),
    } as unknown as Config;

    provider = new DeepSeekOpenAICompatibleProvider(
      mockContentGeneratorConfig,
      mockCliConfig,
    );
  });

  describe('isDeepSeekProvider', () => {
    it('returns true when baseUrl includes deepseek', () => {
      const result = DeepSeekOpenAICompatibleProvider.isDeepSeekProvider(
        mockContentGeneratorConfig,
      );
      expect(result).toBe(true);
    });

    it('returns false for non deepseek baseUrl', () => {
      const config = {
        ...mockContentGeneratorConfig,
        baseUrl: 'https://api.example.com/v1',
      } as ContentGeneratorConfig;

      const result =
        DeepSeekOpenAICompatibleProvider.isDeepSeekProvider(config);
      expect(result).toBe(false);
    });
  });

  describe('buildRequest', () => {
    const userPromptId = 'prompt-123';

    it('converts array content into a string', () => {
      const originalRequest: OpenAI.Chat.ChatCompletionCreateParams = {
        model: 'deepseek-chat',
        messages: [
          {
            role: 'user',
            content: [
              { type: 'text', text: 'Hello' },
              { type: 'text', text: ' world' },
            ],
          },
        ],
      };

      const result = provider.buildRequest(originalRequest, userPromptId);

      expect(result.messages).toHaveLength(1);
      expect(result.messages?.[0]).toEqual({
        role: 'user',
        content: 'Hello world',
      });
      expect(originalRequest.messages?.[0].content).toEqual([
        { type: 'text', text: 'Hello' },
        { type: 'text', text: ' world' },
      ]);
    });

    it('leaves string content unchanged', () => {
      const originalRequest: OpenAI.Chat.ChatCompletionCreateParams = {
        model: 'deepseek-chat',
        messages: [
          {
            role: 'user',
            content: 'Hello world',
          },
        ],
      };

      const result = provider.buildRequest(originalRequest, userPromptId);

      expect(result.messages?.[0].content).toBe('Hello world');
    });

    it('throws when encountering non-text multimodal parts', () => {
      const originalRequest: OpenAI.Chat.ChatCompletionCreateParams = {
        model: 'deepseek-chat',
        messages: [
          {
            role: 'user',
            content: [
              { type: 'text', text: 'Hello' },
              {
                type: 'image_url',
                image_url: { url: 'https://example.com/image.png' },
              },
            ],
          },
        ],
      };

      expect(() =>
        provider.buildRequest(originalRequest, userPromptId),
      ).toThrow(/only supports text content/i);
    });
  });
});
```
**DeepSeek provider** (`deepseek.ts`, new file, +79 lines)

```typescript
/**
 * @license
 * Copyright 2025 Qwen
 * SPDX-License-Identifier: Apache-2.0
 */

import type OpenAI from 'openai';
import type { Config } from '../../../config/config.js';
import type { ContentGeneratorConfig } from '../../contentGenerator.js';
import { DefaultOpenAICompatibleProvider } from './default.js';

export class DeepSeekOpenAICompatibleProvider extends DefaultOpenAICompatibleProvider {
  constructor(
    contentGeneratorConfig: ContentGeneratorConfig,
    cliConfig: Config,
  ) {
    super(contentGeneratorConfig, cliConfig);
  }

  static isDeepSeekProvider(
    contentGeneratorConfig: ContentGeneratorConfig,
  ): boolean {
    const baseUrl = contentGeneratorConfig.baseUrl ?? '';

    return baseUrl.toLowerCase().includes('api.deepseek.com');
  }

  override buildRequest(
    request: OpenAI.Chat.ChatCompletionCreateParams,
    userPromptId: string,
  ): OpenAI.Chat.ChatCompletionCreateParams {
    const baseRequest = super.buildRequest(request, userPromptId);
    if (!baseRequest.messages?.length) {
      return baseRequest;
    }

    const messages = baseRequest.messages.map((message) => {
      if (!('content' in message)) {
        return message;
      }

      const { content } = message;

      if (
        typeof content === 'string' ||
        content === null ||
        content === undefined
      ) {
        return message;
      }

      if (!Array.isArray(content)) {
        return message;
      }

      const text = content
        .map((part) => {
          if (part.type !== 'text') {
            throw new Error(
              `DeepSeek provider only supports text content. Found non-text part of type '${part.type}' in message with role '${message.role}'.`,
            );
          }

          return part.text ?? '';
        })
        .join('');

      return {
        ...message,
        content: text,
      } as OpenAI.Chat.ChatCompletionMessageParam;
    });

    return {
      ...baseRequest,
      messages,
    };
  }
}
```
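Note the design choice: flattening is non-destructive. `buildRequest` returns a copy with string content while the original request keeps its part array, which is exactly what the first test above asserts. A standalone sketch of the flattening rule itself (local `Part` type and `flattenTextParts` are stand-ins for illustration):

```typescript
type Part =
  | { type: 'text'; text?: string }
  | { type: 'image_url'; image_url: { url: string } };

// Text parts are concatenated in order; anything else is rejected outright.
function flattenTextParts(parts: Part[], role: string): string {
  return parts
    .map((part) => {
      if (part.type !== 'text') {
        throw new Error(
          `DeepSeek provider only supports text content. Found non-text part of type '${part.type}' in message with role '${role}'.`,
        );
      }
      return part.text ?? '';
    })
    .join('');
}

console.log(
  flattenTextParts(
    [{ type: 'text', text: 'Hello' }, { type: 'text', text: ' world' }],
    'user',
  ),
); // -> 'Hello world'
```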
**provider/index.ts**

```diff
@@ -1,4 +1,5 @@
 export { DashScopeOpenAICompatibleProvider } from './dashscope.js';
+export { DeepSeekOpenAICompatibleProvider } from './deepseek.js';
 export { OpenRouterOpenAICompatibleProvider } from './openrouter.js';
 export { DefaultOpenAICompatibleProvider } from './default.js';
 export type {
```
**provider/types.ts**

```diff
@@ -11,6 +11,10 @@ export type ChatCompletionContentPartWithCache =
   | OpenAI.Chat.ChatCompletionContentPartImage
   | OpenAI.Chat.ChatCompletionContentPartRefusal;
 
+export type ChatCompletionToolWithCache = OpenAI.Chat.ChatCompletionTool & {
+  cache_control?: { type: 'ephemeral' };
+};
+
 export interface OpenAICompatibleProvider {
   buildHeaders(): Record<string, string | undefined>;
   buildClient(): OpenAI;
```
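A quick illustration of why an intersection type is used here: a standard OpenAI tool definition stays assignable unchanged, and the optional DashScope-only field can be layered on without widening the base SDK type.

```typescript
import type OpenAI from 'openai';

type ChatCompletionToolWithCache = OpenAI.Chat.ChatCompletionTool & {
  cache_control?: { type: 'ephemeral' };
};

// A plain SDK tool is already a valid ChatCompletionToolWithCache...
const plainTool: ChatCompletionToolWithCache = {
  type: 'function',
  function: { name: 'mockTool', parameters: { type: 'object', properties: {} } },
};

// ...and the DashScope flag can be added where needed.
const cachedTool: ChatCompletionToolWithCache = {
  ...plainTool,
  cache_control: { type: 'ephemeral' },
};

console.log(cachedTool.cache_control?.type); // 'ephemeral'
```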
**tokenLimit tests**

```diff
@@ -278,6 +278,11 @@ describe('tokenLimit with output type', () => {
     expect(tokenLimit('qwen-vl-max-latest', 'output')).toBe(8192); // 8K output
   });
 
+  it('should return different limits for input vs output for qwen3-vl-plus', () => {
+    expect(tokenLimit('qwen3-vl-plus', 'input')).toBe(262144); // 256K input
+    expect(tokenLimit('qwen3-vl-plus', 'output')).toBe(32768); // 32K output
+  });
+
   it('should return same default limits for unknown models', () => {
     expect(tokenLimit('unknown-model', 'input')).toBe(DEFAULT_TOKEN_LIMIT); // 128K input
     expect(tokenLimit('unknown-model', 'output')).toBe(
```
**tokenLimit patterns**

```diff
@@ -135,6 +135,7 @@ const PATTERNS: Array<[RegExp, TokenCount]> = [
   [/^qwen-turbo.*$/, LIMITS['128k']],
 
   // Qwen Vision Models
+  [/^qwen3-vl-plus$/, LIMITS['256k']], // Qwen3-VL-Plus: 256K input
   [/^qwen-vl-max.*$/, LIMITS['128k']],
 
   // Generic vision-model: same as qwen-vl-max (128K token context)
@@ -187,8 +188,8 @@ const OUTPUT_PATTERNS: Array<[RegExp, TokenCount]> = [
   // Generic vision-model: same as qwen-vl-max-latest (8K max output tokens)
   [/^vision-model$/, LIMITS['8k']],
 
-  // Qwen3-VL-Plus: 8,192 max output tokens
-  [/^qwen3-vl-plus$/, LIMITS['8k']],
+  // Qwen3-VL-Plus: 32K max output tokens
+  [/^qwen3-vl-plus$/, LIMITS['32k']],
 ];
 
 /**
```
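A minimal sketch of how a pattern table like `PATTERNS`/`OUTPUT_PATTERNS` resolves a model name. First-match-wins scanning is an assumption inferred from the table shape, and `lookupLimit` is a hypothetical helper; the limit values come from this diff (256K input, 32K output for qwen3-vl-plus).

```typescript
const LIMITS = {
  '8k': 8192,
  '32k': 32768,
  '128k': 131072,
  '256k': 262144,
} as const;

const OUTPUT_PATTERNS: Array<[RegExp, number]> = [
  [/^vision-model$/, LIMITS['8k']],
  [/^qwen3-vl-plus$/, LIMITS['32k']],
];

// Scan the table top to bottom; fall back to a default when nothing matches.
function lookupLimit(
  model: string,
  patterns: Array<[RegExp, number]>,
  fallback: number,
): number {
  for (const [pattern, limit] of patterns) {
    if (pattern.test(model)) {
      return limit;
    }
  }
  return fallback;
}

console.log(lookupLimit('qwen3-vl-plus', OUTPUT_PATTERNS, LIMITS['8k'])); // 32768
```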
**RipGrepTool tests**

```diff
@@ -14,6 +14,7 @@ import type { Config } from '../config/config.js';
 import { createMockWorkspaceContext } from '../test-utils/mockWorkspaceContext.js';
 import type { ChildProcess } from 'node:child_process';
 import { spawn } from 'node:child_process';
+import { FileDiscoveryService } from '../services/fileDiscoveryService.js';
 
 // Mock @lvce-editor/ripgrep for testing
 vi.mock('@lvce-editor/ripgrep', () => ({
@@ -75,21 +76,35 @@ function createMockSpawn(
   };
 }
 
+function createTestConfig(
+  rootDir: string,
+  extraDirectories: string[] = [],
+): Config {
+  const fileService = new FileDiscoveryService(rootDir);
+  return {
+    getTargetDir: () => rootDir,
+    getWorkspaceContext: () =>
+      createMockWorkspaceContext(rootDir, extraDirectories),
+    getDebugMode: () => false,
+    getFileService: () => fileService,
+    getFileFilteringOptions: () => ({
+      respectGitIgnore: true,
+      respectGeminiIgnore: true,
+    }),
+  } as unknown as Config;
+}
+
 describe('RipGrepTool', () => {
   let tempRootDir: string;
   let grepTool: RipGrepTool;
+  let mockConfig: Config;
   const abortSignal = new AbortController().signal;
 
-  const mockConfig = {
-    getTargetDir: () => tempRootDir,
-    getWorkspaceContext: () => createMockWorkspaceContext(tempRootDir),
-    getDebugMode: () => false,
-  } as unknown as Config;
-
   beforeEach(async () => {
     vi.clearAllMocks();
     mockSpawn.mockClear();
     tempRootDir = await fs.mkdtemp(path.join(os.tmpdir(), 'grep-tool-root-'));
+    mockConfig = createTestConfig(tempRootDir);
     grepTool = new RipGrepTool(mockConfig);
 
     // Create some test files and directories
@@ -293,6 +308,42 @@ describe('RipGrepTool', () => {
     expect(result.returnDisplay).toBe('Found 1 match');
   });
 
+  it('should filter out matches ignored by .qwenignore', async () => {
+    await fs.writeFile(
+      path.join(tempRootDir, '.qwenignore'),
+      'logs/\n',
+      'utf8',
+    );
+    await fs.mkdir(path.join(tempRootDir, 'logs'), { recursive: true });
+    await fs.writeFile(
+      path.join(tempRootDir, 'logs', 'ignored.txt'),
+      'Got it. Thanks for the context!',
+      'utf8',
+    );
+
+    mockSpawn.mockImplementationOnce(
+      createMockSpawn({
+        outputData: `logs/ignored.txt:1:Got it. Thanks for the context!${EOL}`,
+        exitCode: 0,
+      }),
+    );
+
+    const params: RipGrepToolParams = {
+      pattern: 'Got it\\. Thanks for the context!',
+    };
+    const invocation = grepTool.build(params);
+    const result = await invocation.execute(abortSignal);
+
+    expect(result.returnDisplay).toBe('No matches found');
+    expect(result.llmContent).toContain(
+      'No matches found for pattern "Got it\\. Thanks for the context!" in the workspace directory',
+    );
+
+    const spawnArgs = mockSpawn.mock.calls[0]?.[1] ?? [];
+    expect(spawnArgs).toContain('--ignore-file');
+    expect(spawnArgs).toContain(path.join(tempRootDir, '.qwenignore'));
+  });
+
   it('should return "No matches found" when pattern does not exist', async () => {
     // Setup specific mock for no matches
     mockSpawn.mockImplementationOnce(
@@ -452,12 +503,7 @@ describe('RipGrepTool', () => {
     );
 
     // Create a mock config with multiple directories
-    const multiDirConfig = {
-      getTargetDir: () => tempRootDir,
-      getWorkspaceContext: () =>
-        createMockWorkspaceContext(tempRootDir, [secondDir]),
-      getDebugMode: () => false,
-    } as unknown as Config;
+    const multiDirConfig = createTestConfig(tempRootDir, [secondDir]);
 
     // Setup specific mock for this test - multi-directory search for 'world'
     // Mock will be called twice - once for each directory
@@ -557,12 +603,7 @@ describe('RipGrepTool', () => {
     );
 
     // Create a mock config with multiple directories
-    const multiDirConfig = {
-      getTargetDir: () => tempRootDir,
-      getWorkspaceContext: () =>
-        createMockWorkspaceContext(tempRootDir, [secondDir]),
-      getDebugMode: () => false,
-    } as unknown as Config;
+    const multiDirConfig = createTestConfig(tempRootDir, [secondDir]);
 
     // Setup specific mock for this test - searching in 'sub' should only return matches from that directory
     mockSpawn.mockImplementationOnce(() => {
@@ -1187,12 +1228,7 @@ describe('RipGrepTool', () => {
 
   it('should indicate searching across all workspace directories when no path specified', () => {
     // Create a mock config with multiple directories
-    const multiDirConfig = {
-      getTargetDir: () => tempRootDir,
-      getWorkspaceContext: () =>
-        createMockWorkspaceContext(tempRootDir, ['/another/dir']),
-      getDebugMode: () => false,
-    } as unknown as Config;
+    const multiDirConfig = createTestConfig(tempRootDir, ['/another/dir']);
 
     const multiDirGrepTool = new RipGrepTool(multiDirConfig);
     const params: RipGrepToolParams = { pattern: 'testPattern' };
```
**RipGrepTool implementation**

```diff
@@ -6,7 +6,7 @@
 
 import fs from 'node:fs';
 import path from 'node:path';
-import { EOL } from 'node:os';
+import { EOL, tmpdir } from 'node:os';
 import { spawn } from 'node:child_process';
 import type { ToolInvocation, ToolResult } from './tools.js';
 import { BaseDeclarativeTool, BaseToolInvocation, Kind } from './tools.js';
@@ -58,6 +58,8 @@ class GrepToolInvocation extends BaseToolInvocation<
   RipGrepToolParams,
   ToolResult
 > {
+  private readonly tempIgnoreFiles: string[] = [];
+
   constructor(
     private readonly config: Config,
     params: RipGrepToolParams,
@@ -109,9 +111,28 @@ class GrepToolInvocation extends BaseToolInvocation<
   async execute(signal: AbortSignal): Promise<ToolResult> {
     try {
       const workspaceContext = this.config.getWorkspaceContext();
+      const fileService = this.config.getFileService();
+      const fileFilteringOptions = this.config.getFileFilteringOptions();
+      const qwenIgnoreFiles =
+        fileFilteringOptions.respectGeminiIgnore === false
+          ? []
+          : this.getQwenIgnoreFilePaths();
       const searchDirAbs = this.resolveAndValidatePath(this.params.path);
       const searchDirDisplay = this.params.path || '.';
 
+      // if (this.config.getDebugMode()) {
+      console.log(
+        `[GrepTool] Using qwenignore files: ${
+          qwenIgnoreFiles.length > 0
+            ? qwenIgnoreFiles.join(', ')
+            : 'none (qwenignore disabled or file missing)'
+        }`,
+      );
+      console.log(
+        `[GrepTool] File filtering: respectGitIgnore=${fileFilteringOptions.respectGitIgnore ?? true}, respectQwenIgnore=${fileFilteringOptions.respectGeminiIgnore ?? true}`,
+      );
+      // }
+
       // Determine which directories to search
       let searchDirectories: readonly string[];
       if (searchDirAbs === null) {
@@ -125,26 +146,37 @@ class GrepToolInvocation extends BaseToolInvocation<
       let allMatches: GrepMatch[] = [];
       const totalMaxMatches = DEFAULT_TOTAL_MAX_MATCHES;
 
+      if (this.config.getDebugMode()) {
+        console.log(`[GrepTool] Total result limit: ${totalMaxMatches}`);
+      }
+
       for (const searchDir of searchDirectories) {
         const searchResult = await this.performRipgrepSearch({
           pattern: this.params.pattern,
           path: searchDir,
           include: this.params.include,
           signal,
+          ignoreFiles: qwenIgnoreFiles,
         });
 
+        let filteredMatches = searchResult;
+        if (
+          fileFilteringOptions.respectGitIgnore ||
+          fileFilteringOptions.respectGeminiIgnore
+        ) {
+          filteredMatches = searchResult.filter((match) => {
+            const absoluteMatchPath = path.resolve(searchDir, match.filePath);
+            return !fileService.shouldIgnoreFile(
+              absoluteMatchPath,
+              fileFilteringOptions,
+            );
+          });
+        }
+
         if (searchDirectories.length > 1) {
           const dirName = path.basename(searchDir);
-          searchResult.forEach((match) => {
+          filteredMatches.forEach((match) => {
             match.filePath = path.join(dirName, match.filePath);
           });
         }
 
-        allMatches = allMatches.concat(searchResult);
+        allMatches = allMatches.concat(filteredMatches);
 
         if (allMatches.length >= totalMaxMatches) {
           allMatches = allMatches.slice(0, totalMaxMatches);
@@ -219,6 +251,8 @@ class GrepToolInvocation extends BaseToolInvocation<
         llmContent: `Error during grep search operation: ${errorMessage}`,
         returnDisplay: `Error: ${errorMessage}`,
       };
+    } finally {
+      this.cleanupTemporaryIgnoreFiles();
     }
   }
 
@@ -265,8 +299,9 @@ class GrepToolInvocation extends BaseToolInvocation<
     path: string;
     include?: string;
     signal: AbortSignal;
+    ignoreFiles?: string[];
   }): Promise<GrepMatch[]> {
-    const { pattern, path: absolutePath, include } = options;
+    const { pattern, path: absolutePath, include, ignoreFiles } = options;
 
     const rgArgs = [
       '--line-number',
@@ -281,6 +316,12 @@ class GrepToolInvocation extends BaseToolInvocation<
       rgArgs.push('--glob', include);
     }
 
+    if (ignoreFiles && ignoreFiles.length > 0) {
+      for (const ignoreFile of ignoreFiles) {
+        rgArgs.push('--ignore-file', ignoreFile);
+      }
+    }
+
     const excludes = [
       '.git',
       'node_modules',
@@ -389,6 +430,43 @@ class GrepToolInvocation extends BaseToolInvocation<
     }
     return description;
   }
+
+  private getQwenIgnoreFilePaths(): string[] {
+    const patterns = this.config.getFileService().getGeminiIgnorePatterns();
+    if (patterns.length === 0) {
+      return [];
+    }
+
+    const tempFilePath = path.join(
+      tmpdir(),
+      `qwen-ignore-${process.pid}-${Date.now()}-${Math.random()
+        .toString(16)
+        .slice(2)}.rgignore`,
+    );
+
+    try {
+      const fileContents = `${patterns.join(EOL)}${EOL}`;
+      fs.writeFileSync(tempFilePath, fileContents, 'utf8');
+      this.tempIgnoreFiles.push(tempFilePath);
+      return [tempFilePath];
+    } catch (error: unknown) {
+      console.warn(
+        `Failed to create temporary .qwenignore for ripgrep: ${getErrorMessage(error)}`,
+      );
+      return [];
+    }
+  }
+
+  private cleanupTemporaryIgnoreFiles(): void {
+    for (const filePath of this.tempIgnoreFiles) {
+      try {
+        fs.unlinkSync(filePath);
+      } catch {
+        // ignore cleanup errors
+      }
+    }
+    this.tempIgnoreFiles.length = 0;
+  }
 }
 
 /**
```
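In short, the `.qwenignore` patterns are written to a throwaway `.rgignore` file in the temp directory and handed to ripgrep via `--ignore-file` (a real rg flag), with the file deleted in the `finally` block. A sketch of the resulting argument list; the temp path and the surrounding flags are illustrative, not the tool's exact command line:

```typescript
const ignoreFiles = ['/tmp/qwen-ignore-1234.rgignore']; // hypothetical temp path

const rgArgs = ['--line-number'];
for (const ignoreFile of ignoreFiles) {
  // One '--ignore-file <path>' pair per derived ignore file; rg applies
  // these patterns in addition to .gitignore handling.
  rgArgs.push('--ignore-file', ignoreFile);
}

console.log(['rg', ...rgArgs, '<pattern>', '<searchDir>'].join(' '));
// -> rg --line-number --ignore-file /tmp/qwen-ignore-1234.rgignore <pattern> <searchDir>
```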
**GitIgnoreParser**

```diff
@@ -52,8 +52,38 @@ export class GitIgnoreParser implements GitIgnoreFilter {
   }
 
   private addPatterns(patterns: string[]) {
-    this.ig.add(patterns);
-    this.patterns.push(...patterns);
+    const normalizedPatterns = patterns.map((pattern) => {
+      if (!pattern) {
+        return pattern;
+      }
+
+      if (path.isAbsolute(pattern)) {
+        const relativePattern = path.relative(this.projectRoot, pattern);
+
+        if (relativePattern === '' || relativePattern === '.') {
+          return '/';
+        }
+
+        if (!relativePattern.startsWith('..')) {
+          let normalized = relativePattern.replace(/\\/g, '/');
+
+          if (pattern.endsWith('/') && !normalized.endsWith('/')) {
+            normalized += '/';
+          }
+
+          if (!normalized.startsWith('/')) {
+            normalized = `/${normalized}`;
+          }
+
+          return normalized;
+        }
+      }
+
+      return pattern;
+    });
+
+    this.ig.add(normalizedPatterns);
+    this.patterns.push(...normalizedPatterns);
   }
 
   isIgnored(filePath: string): boolean {
```
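A worked example of the normalization, extracted from the diff above as a free function for illustration (assuming a POSIX-style `projectRoot = '/repo'`): an absolute pattern inside the root becomes a root-anchored relative pattern, while anything outside the root, or already relative, passes through untouched.

```typescript
import path from 'node:path';

const projectRoot = '/repo';

function normalizePattern(pattern: string): string {
  if (path.isAbsolute(pattern)) {
    const relativePattern = path.relative(projectRoot, pattern);
    if (relativePattern === '' || relativePattern === '.') return '/';
    if (!relativePattern.startsWith('..')) {
      // Convert backslashes, restore a trailing slash, anchor at the root.
      let normalized = relativePattern.replace(/\\/g, '/');
      if (pattern.endsWith('/') && !normalized.endsWith('/')) normalized += '/';
      if (!normalized.startsWith('/')) normalized = `/${normalized}`;
      return normalized;
    }
  }
  return pattern;
}

console.log(normalizePattern('/repo/logs/'));  // '/logs/'
console.log(normalizePattern('/elsewhere/x')); // '/elsewhere/x' (outside root, unchanged)
console.log(normalizePattern('logs/'));        // 'logs/' (already relative, unchanged)
```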
**packages/test-utils/package.json**

```diff
@@ -1,6 +1,6 @@
 {
   "name": "@qwen-code/qwen-code-test-utils",
-  "version": "0.0.14-nightly.1",
+  "version": "0.0.14",
   "private": true,
   "main": "src/index.ts",
   "license": "Apache-2.0",
```
**packages/vscode-ide-companion/package.json**

```diff
@@ -2,7 +2,7 @@
   "name": "qwen-code-vscode-ide-companion",
   "displayName": "Qwen Code Companion",
   "description": "Enable Qwen Code with direct access to your VS Code workspace.",
-  "version": "0.0.14-nightly.1",
+  "version": "0.0.14",
   "publisher": "qwenlm",
   "icon": "assets/icon.png",
   "repository": {
```