mirror of https://github.com/QwenLM/qwen-code.git
synced 2025-12-19 09:33:53 +00:00
Merge branch 'main' into web-search
packages/core/package.json
@@ -1,6 +1,6 @@
 {
   "name": "@qwen-code/qwen-code-core",
-  "version": "0.0.14",
+  "version": "0.1.2",
   "description": "Qwen Code Core",
   "repository": {
     "type": "git",
@@ -14,14 +14,16 @@
     "format": "prettier --write .",
     "test": "vitest run",
     "test:ci": "vitest run",
-    "typecheck": "tsc --noEmit"
+    "typecheck": "tsc --noEmit",
+    "postinstall": "node scripts/postinstall.js"
   },
   "files": [
-    "dist"
+    "dist",
+    "vendor",
+    "scripts/postinstall.js"
   ],
   "dependencies": {
     "@google/genai": "1.16.0",
-    "@joshua.litt/get-ripgrep": "^0.0.2",
     "@modelcontextprotocol/sdk": "^1.11.0",
     "@opentelemetry/api": "^1.9.0",
     "@opentelemetry/exporter-logs-otlp-grpc": "^0.203.0",

packages/core/scripts/postinstall.js (85 lines, new file)
@@ -0,0 +1,85 @@
+#!/usr/bin/env node
+
+/**
+ * @license
+ * Copyright 2025 Qwen
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+import { execSync } from 'node:child_process';
+import { fileURLToPath } from 'node:url';
+import path from 'node:path';
+import fs from 'node:fs';
+
+const __filename = fileURLToPath(import.meta.url);
+const __dirname = path.dirname(__filename);
+
+// Get the package root directory
+const packageRoot = path.join(__dirname, '..');
+const vendorDir = path.join(packageRoot, 'vendor', 'ripgrep');
+
+/**
+ * Remove quarantine attribute and set executable permissions on macOS/Linux
+ */
+function setupRipgrepBinaries() {
+  if (!fs.existsSync(vendorDir)) {
+    console.log('Vendor directory not found, skipping ripgrep setup');
+    return;
+  }
+
+  const platform = process.platform;
+  const arch = process.arch;
+
+  // Determine the binary directory based on platform and architecture
+  let binaryDir;
+  if (platform === 'darwin' || platform === 'linux') {
+    const archStr = arch === 'x64' || arch === 'arm64' ? arch : null;
+    if (archStr) {
+      binaryDir = path.join(vendorDir, `${archStr}-${platform}`);
+    }
+  } else if (platform === 'win32') {
+    // Windows doesn't need these fixes
+    return;
+  }
+
+  if (!binaryDir || !fs.existsSync(binaryDir)) {
+    console.log(
+      `Binary directory not found for ${platform}-${arch}, skipping ripgrep setup`,
+    );
+    return;
+  }
+
+  const rgBinary = path.join(binaryDir, 'rg');
+
+  if (!fs.existsSync(rgBinary)) {
+    console.log(`Ripgrep binary not found at ${rgBinary}`);
+    return;
+  }
+
+  try {
+    // Set executable permissions
+    fs.chmodSync(rgBinary, 0o755);
+    console.log(`✓ Set executable permissions on ${rgBinary}`);
+
+    // On macOS, remove quarantine attribute
+    if (platform === 'darwin') {
+      try {
+        execSync(`xattr -d com.apple.quarantine "${rgBinary}"`, {
+          stdio: 'pipe',
+        });
+        console.log(`✓ Removed quarantine attribute from ${rgBinary}`);
+      } catch (error) {
+        // Quarantine attribute might not exist, which is fine
+        if (error.message && !error.message.includes('No such xattr')) {
+          console.warn(
+            `Warning: Could not remove quarantine attribute: ${error.message}`,
+          );
+        }
+      }
+    }
+  } catch (error) {
+    console.error(`Error setting up ripgrep binary: ${error.message}`);
+  }
+}
+
+setupRipgrepBinaries();
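
Note on the macOS step above: files written by a quarantine-aware process carry the com.apple.quarantine extended attribute, and Gatekeeper blocks executing unsigned binaries that still have it; hence the chmod 0o755 plus xattr -d pair. A small sketch for verifying the attribute is gone after install (hypothetical helper, not part of this commit; the package path is illustrative):

// check-quarantine.ts: hypothetical helper, not part of this commit.
import { execSync } from 'node:child_process';

function hasQuarantine(file: string): boolean {
  try {
    // `xattr -p` prints the attribute value and exits non-zero when it is absent.
    execSync(`xattr -p com.apple.quarantine "${file}"`, { stdio: 'pipe' });
    return true;
  } catch {
    return false;
  }
}

console.log(hasQuarantine('node_modules/@qwen-code/qwen-code-core/vendor/ripgrep/arm64-darwin/rg'));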

packages/core/src/config/config.test.ts
@@ -16,6 +16,7 @@ import {
   QwenLogger,
 } from '../telemetry/index.js';
 import type { ContentGeneratorConfig } from '../core/contentGenerator.js';
+import { DEFAULT_DASHSCOPE_BASE_URL } from '../core/openaiContentGenerator/constants.js';
 import {
   AuthType,
   createContentGeneratorConfig,
@@ -38,7 +39,8 @@ vi.mock('fs', async (importOriginal) => {
 import { ShellTool } from '../tools/shell.js';
 import { ReadFileTool } from '../tools/read-file.js';
 import { GrepTool } from '../tools/grep.js';
-import { RipGrepTool, canUseRipgrep } from '../tools/ripGrep.js';
+import { canUseRipgrep } from '../utils/ripgrepUtils.js';
+import { RipGrepTool } from '../tools/ripGrep.js';
 import { logRipgrepFallback } from '../telemetry/loggers.js';
 import { RipgrepFallbackEvent } from '../telemetry/types.js';
 import { ToolRegistry } from '../tools/tool-registry.js';
@@ -75,9 +77,11 @@ vi.mock('../tools/ls');
 vi.mock('../tools/read-file');
 vi.mock('../tools/grep.js');
 vi.mock('../tools/ripGrep.js', () => ({
-  canUseRipgrep: vi.fn(),
   RipGrepTool: class MockRipGrepTool {},
 }));
+vi.mock('../utils/ripgrepUtils.js', () => ({
+  canUseRipgrep: vi.fn(),
+}));
 vi.mock('../tools/glob');
 vi.mock('../tools/edit');
 vi.mock('../tools/shell');
@@ -247,6 +251,7 @@ describe('Server Config (config.ts)', () => {
         authType,
         {
           model: MODEL,
+          baseUrl: DEFAULT_DASHSCOPE_BASE_URL,
         },
       );
       // Verify that contentGeneratorConfig is updated

packages/core/src/config/config.ts
@@ -49,7 +49,8 @@ import { LSTool } from '../tools/ls.js';
 import { MemoryTool, setGeminiMdFilename } from '../tools/memoryTool.js';
 import { ReadFileTool } from '../tools/read-file.js';
 import { ReadManyFilesTool } from '../tools/read-many-files.js';
-import { canUseRipgrep, RipGrepTool } from '../tools/ripGrep.js';
+import { canUseRipgrep } from '../utils/ripgrepUtils.js';
+import { RipGrepTool } from '../tools/ripGrep.js';
 import { ShellTool } from '../tools/shell.js';
 import { SmartEditTool } from '../tools/smart-edit.js';
 import { TaskTool } from '../tools/task.js';
@@ -87,8 +88,9 @@ import {
   DEFAULT_FILE_FILTERING_OPTIONS,
   DEFAULT_MEMORY_FILE_FILTERING_OPTIONS,
 } from './constants.js';
-import { DEFAULT_QWEN_EMBEDDING_MODEL } from './models.js';
+import { DEFAULT_QWEN_EMBEDDING_MODEL, DEFAULT_QWEN_MODEL } from './models.js';
 import { Storage } from './storage.js';
+import { DEFAULT_DASHSCOPE_BASE_URL } from '../core/openaiContentGenerator/constants.js';

 // Re-export types
 export type { AnyToolInvocation, FileFilteringOptions, MCPOAuthConfig };
@@ -242,7 +244,7 @@ export interface ConfigParameters {
   fileDiscoveryService?: FileDiscoveryService;
   includeDirectories?: string[];
   bugCommand?: BugCommandSettings;
-  model: string;
+  model?: string;
   extensionContextFilePaths?: string[];
   maxSessionTurns?: number;
   sessionTokenLimit?: number;
@@ -295,7 +297,7 @@ export class Config {
   private fileSystemService: FileSystemService;
   private contentGeneratorConfig!: ContentGeneratorConfig;
   private contentGenerator!: ContentGenerator;
-  private readonly _generationConfig: ContentGeneratorConfig;
+  private _generationConfig: Partial<ContentGeneratorConfig>;
   private readonly embeddingModel: string;
   private readonly sandbox: SandboxConfig | undefined;
   private readonly targetDir: string;
@@ -453,8 +455,10 @@ export class Config {
     this._generationConfig = {
      model: params.model,
       ...(params.generationConfig || {}),
+      baseUrl: params.generationConfig?.baseUrl || DEFAULT_DASHSCOPE_BASE_URL,
     };
-    this.contentGeneratorConfig = this._generationConfig;
+    this.contentGeneratorConfig = this
+      ._generationConfig as ContentGeneratorConfig;
     this.cliVersion = params.cliVersion;

     this.loadMemoryFromIncludeDirectories =
@@ -533,6 +537,26 @@ export class Config {
     return this.contentGenerator;
   }

+  /**
+   * Updates the credentials in the generation config.
+   * This is needed when credentials are set after Config construction.
+   */
+  updateCredentials(credentials: {
+    apiKey?: string;
+    baseUrl?: string;
+    model?: string;
+  }): void {
+    if (credentials.apiKey) {
+      this._generationConfig.apiKey = credentials.apiKey;
+    }
+    if (credentials.baseUrl) {
+      this._generationConfig.baseUrl = credentials.baseUrl;
+    }
+    if (credentials.model) {
+      this._generationConfig.model = credentials.model;
+    }
+  }
+
   async refreshAuth(authMethod: AuthType) {
     // Vertex and Genai have incompatible encryption and sending history with
     // throughtSignature from Genai to Vertex will fail, we need to strip them
@@ -600,7 +624,7 @@ export class Config {
   }

   getModel(): string {
-    return this.contentGeneratorConfig.model;
+    return this.contentGeneratorConfig?.model || DEFAULT_QWEN_MODEL;
   }

   async setModel(
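
Note: updateCredentials exists because OAuth-style flows only learn the API key after Config is constructed. A minimal wiring sketch (config and token are stand-ins, not code from this commit):

// Hypothetical call site: push late-arriving credentials into the config.
declare const config: Config;
declare const token: { access_token: string; resource_url?: string };

config.updateCredentials({
  apiKey: token.access_token,
  baseUrl: token.resource_url, // skipped when undefined: only truthy fields are copied
});

// getModel() now falls back to DEFAULT_QWEN_MODEL if no model was ever configured.
const model = config.getModel();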

packages/core/src/core/contentGenerator.test.ts
@@ -4,13 +4,9 @@
  * SPDX-License-Identifier: Apache-2.0
  */

-import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
+import { describe, it, expect, vi } from 'vitest';
 import type { ContentGenerator } from './contentGenerator.js';
-import {
-  createContentGenerator,
-  AuthType,
-  createContentGeneratorConfig,
-} from './contentGenerator.js';
+import { createContentGenerator, AuthType } from './contentGenerator.js';
 import { createCodeAssistContentGenerator } from '../code_assist/codeAssist.js';
 import { GoogleGenAI } from '@google/genai';
 import type { Config } from '../config/config.js';
@@ -110,83 +106,3 @@ describe('createContentGenerator', () => {
     );
   });
 });
-
-describe('createContentGeneratorConfig', () => {
-  const mockConfig = {
-    getModel: vi.fn().mockReturnValue('gemini-pro'),
-    setModel: vi.fn(),
-    flashFallbackHandler: vi.fn(),
-    getProxy: vi.fn(),
-    getEnableOpenAILogging: vi.fn().mockReturnValue(false),
-    getSamplingParams: vi.fn().mockReturnValue(undefined),
-    getContentGeneratorTimeout: vi.fn().mockReturnValue(undefined),
-    getContentGeneratorMaxRetries: vi.fn().mockReturnValue(undefined),
-    getContentGeneratorDisableCacheControl: vi.fn().mockReturnValue(undefined),
-    getContentGeneratorSamplingParams: vi.fn().mockReturnValue(undefined),
-    getCliVersion: vi.fn().mockReturnValue('1.0.0'),
-  } as unknown as Config;
-
-  beforeEach(() => {
-    // Reset modules to re-evaluate imports and environment variables
-    vi.resetModules();
-    vi.clearAllMocks();
-  });
-
-  afterEach(() => {
-    vi.unstubAllEnvs();
-  });
-
-  it('should configure for Gemini using GEMINI_API_KEY when set', async () => {
-    vi.stubEnv('GEMINI_API_KEY', 'env-gemini-key');
-    const config = await createContentGeneratorConfig(
-      mockConfig,
-      AuthType.USE_GEMINI,
-    );
-    expect(config.apiKey).toBe('env-gemini-key');
-    expect(config.vertexai).toBe(false);
-  });
-
-  it('should not configure for Gemini if GEMINI_API_KEY is empty', async () => {
-    vi.stubEnv('GEMINI_API_KEY', '');
-    const config = await createContentGeneratorConfig(
-      mockConfig,
-      AuthType.USE_GEMINI,
-    );
-    expect(config.apiKey).toBeUndefined();
-    expect(config.vertexai).toBeUndefined();
-  });
-
-  it('should configure for Vertex AI using GOOGLE_API_KEY when set', async () => {
-    vi.stubEnv('GOOGLE_API_KEY', 'env-google-key');
-    const config = await createContentGeneratorConfig(
-      mockConfig,
-      AuthType.USE_VERTEX_AI,
-    );
-    expect(config.apiKey).toBe('env-google-key');
-    expect(config.vertexai).toBe(true);
-  });
-
-  it('should configure for Vertex AI using GCP project and location when set', async () => {
-    vi.stubEnv('GOOGLE_API_KEY', undefined);
-    vi.stubEnv('GOOGLE_CLOUD_PROJECT', 'env-gcp-project');
-    vi.stubEnv('GOOGLE_CLOUD_LOCATION', 'env-gcp-location');
-    const config = await createContentGeneratorConfig(
-      mockConfig,
-      AuthType.USE_VERTEX_AI,
-    );
-    expect(config.vertexai).toBe(true);
-    expect(config.apiKey).toBeUndefined();
-  });
-
-  it('should not configure for Vertex AI if required env vars are empty', async () => {
-    vi.stubEnv('GOOGLE_API_KEY', '');
-    vi.stubEnv('GOOGLE_CLOUD_PROJECT', '');
-    vi.stubEnv('GOOGLE_CLOUD_LOCATION', '');
-    const config = await createContentGeneratorConfig(
-      mockConfig,
-      AuthType.USE_VERTEX_AI,
-    );
-    expect(config.apiKey).toBeUndefined();
-    expect(config.vertexai).toBeUndefined();
-  });
-});

packages/core/src/core/contentGenerator.ts
@@ -14,8 +14,8 @@ import type {
 } from '@google/genai';
 import { GoogleGenAI } from '@google/genai';
 import { createCodeAssistContentGenerator } from '../code_assist/codeAssist.js';
-import type { Config } from '../config/config.js';
 import { DEFAULT_QWEN_MODEL } from '../config/models.js';
+import type { Config } from '../config/config.js';

 import type { UserTierId } from '../code_assist/types.js';
 import { InstallationManager } from '../utils/installationManager.js';
@@ -82,53 +82,37 @@ export function createContentGeneratorConfig(
   authType: AuthType | undefined,
   generationConfig?: Partial<ContentGeneratorConfig>,
 ): ContentGeneratorConfig {
-  const geminiApiKey = process.env['GEMINI_API_KEY'] || undefined;
-  const googleApiKey = process.env['GOOGLE_API_KEY'] || undefined;
-  const googleCloudProject = process.env['GOOGLE_CLOUD_PROJECT'] || undefined;
-  const googleCloudLocation = process.env['GOOGLE_CLOUD_LOCATION'] || undefined;
-
-  const newContentGeneratorConfig: ContentGeneratorConfig = {
+  const newContentGeneratorConfig: Partial<ContentGeneratorConfig> = {
     ...(generationConfig || {}),
-    model: generationConfig?.model || DEFAULT_QWEN_MODEL,
     authType,
     proxy: config?.getProxy(),
   };

-  // If we are using Google auth or we are in Cloud Shell, there is nothing else to validate for now
-  if (
-    authType === AuthType.LOGIN_WITH_GOOGLE ||
-    authType === AuthType.CLOUD_SHELL
-  ) {
-    return newContentGeneratorConfig;
-  }
-
-  if (authType === AuthType.USE_GEMINI && geminiApiKey) {
-    newContentGeneratorConfig.apiKey = geminiApiKey;
-    newContentGeneratorConfig.vertexai = false;
-
-    return newContentGeneratorConfig;
-  }
-
-  if (
-    authType === AuthType.USE_VERTEX_AI &&
-    (googleApiKey || (googleCloudProject && googleCloudLocation))
-  ) {
-    newContentGeneratorConfig.apiKey = googleApiKey;
-    newContentGeneratorConfig.vertexai = true;
-
-    return newContentGeneratorConfig;
-  }
-
   if (authType === AuthType.QWEN_OAUTH) {
-    // For Qwen OAuth, we'll handle the API key dynamically in createContentGenerator
-    // Set a special marker to indicate this is Qwen OAuth
-    newContentGeneratorConfig.apiKey = 'QWEN_OAUTH_DYNAMIC_TOKEN';
-    newContentGeneratorConfig.model = DEFAULT_QWEN_MODEL;
-
-    return newContentGeneratorConfig;
+    return {
+      ...newContentGeneratorConfig,
+      model: DEFAULT_QWEN_MODEL,
+      apiKey: 'QWEN_OAUTH_DYNAMIC_TOKEN',
+    } as ContentGeneratorConfig;
   }

-  return newContentGeneratorConfig;
+  if (authType === AuthType.USE_OPENAI) {
+    if (!newContentGeneratorConfig.apiKey) {
+      throw new Error('OpenAI API key is required');
+    }
+
+    return {
+      ...newContentGeneratorConfig,
+      model: newContentGeneratorConfig?.model || 'qwen3-coder-plus',
+    } as ContentGeneratorConfig;
+  }
+
+  return {
+    ...newContentGeneratorConfig,
+    model: newContentGeneratorConfig?.model || DEFAULT_QWEN_MODEL,
+  } as ContentGeneratorConfig;
 }

 export async function createContentGenerator(
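
Note: after this rewrite, createContentGeneratorConfig no longer reads GEMINI_API_KEY or GOOGLE_* environment variables; credentials travel in via the generationConfig argument and the model is defaulted per auth type. A sketch of the resulting behavior (illustrative calls, not tests from this commit):

// USE_OPENAI now requires an apiKey up front and defaults the model.
declare const config: Config;
const openaiCfg = createContentGeneratorConfig(config, AuthType.USE_OPENAI, {
  apiKey: 'sk-example', // omitting this now throws 'OpenAI API key is required'
});
// openaiCfg.model === 'qwen3-coder-plus' unless generationConfig.model was set.

// QWEN_OAUTH ignores any configured model and stores a placeholder key that
// createContentGenerator swaps for a live OAuth token at request time.
const oauthCfg = createContentGeneratorConfig(config, AuthType.QWEN_OAUTH);
// oauthCfg.apiKey === 'QWEN_OAUTH_DYNAMIC_TOKEN'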

packages/core/src/core/openaiContentGenerator/constants.ts
@@ -1,2 +1,8 @@
 export const DEFAULT_TIMEOUT = 120000;
 export const DEFAULT_MAX_RETRIES = 3;
+
+export const DEFAULT_OPENAI_BASE_URL = 'https://api.openai.com/v1';
+export const DEFAULT_DASHSCOPE_BASE_URL =
+  'https://dashscope.aliyuncs.com/compatible-mode/v1';
+export const DEFAULT_DEEPSEEK_BASE_URL = 'https://api.deepseek.com/v1';
+export const DEFAULT_OPEN_ROUTER_BASE_URL = 'https://openrouter.ai/api/v1';

packages/core/src/core/openaiContentGenerator/converter.test.ts
@@ -7,6 +7,7 @@
 import { describe, it, expect, beforeEach } from 'vitest';
 import { OpenAIContentConverter } from './converter.js';
 import type { StreamingToolCallParser } from './streamingToolCallParser.js';
+import type { GenerateContentParameters, Content } from '@google/genai';

 describe('OpenAIContentConverter', () => {
   let converter: OpenAIContentConverter;
@@ -68,4 +69,77 @@ describe('OpenAIContentConverter', () => {
     expect(parser.getBuffer(0)).toBe('');
   });
+
+  describe('convertGeminiRequestToOpenAI', () => {
+    const createRequestWithFunctionResponse = (
+      response: Record<string, unknown>,
+    ): GenerateContentParameters => {
+      const contents: Content[] = [
+        {
+          role: 'model',
+          parts: [
+            {
+              functionCall: {
+                id: 'call_1',
+                name: 'shell',
+                args: {},
+              },
+            },
+          ],
+        },
+        {
+          role: 'user',
+          parts: [
+            {
+              functionResponse: {
+                id: 'call_1',
+                name: 'shell',
+                response,
+              },
+            },
+          ],
+        },
+      ];
+      return {
+        model: 'models/test',
+        contents,
+      };
+    };
+
+    it('should extract raw output from function response objects', () => {
+      const request = createRequestWithFunctionResponse({
+        output: 'Raw output text',
+      });
+
+      const messages = converter.convertGeminiRequestToOpenAI(request);
+      const toolMessage = messages.find((message) => message.role === 'tool');
+
+      expect(toolMessage).toBeDefined();
+      expect(toolMessage?.content).toBe('Raw output text');
+    });
+
+    it('should prioritize error field when present', () => {
+      const request = createRequestWithFunctionResponse({
+        error: 'Command failed',
+      });
+
+      const messages = converter.convertGeminiRequestToOpenAI(request);
+      const toolMessage = messages.find((message) => message.role === 'tool');
+
+      expect(toolMessage).toBeDefined();
+      expect(toolMessage?.content).toBe('Command failed');
+    });
+
+    it('should stringify non-string responses', () => {
+      const request = createRequestWithFunctionResponse({
+        data: { value: 42 },
+      });
+
+      const messages = converter.convertGeminiRequestToOpenAI(request);
+      const toolMessage = messages.find((message) => message.role === 'tool');
+
+      expect(toolMessage).toBeDefined();
+      expect(toolMessage?.content).toBe('{"data":{"value":42}}');
+    });
+  });
 });

packages/core/src/core/openaiContentGenerator/converter.ts
@@ -276,10 +276,7 @@ export class OpenAIContentConverter {
         messages.push({
           role: 'tool' as const,
           tool_call_id: funcResponse.id || '',
-          content:
-            typeof funcResponse.response === 'string'
-              ? funcResponse.response
-              : JSON.stringify(funcResponse.response),
+          content: this.extractFunctionResponseContent(funcResponse.response),
         });
       }
       return;
@@ -359,6 +356,36 @@ export class OpenAIContentConverter {
     return { textParts, functionCalls, functionResponses, mediaParts };
   }

+  private extractFunctionResponseContent(response: unknown): string {
+    if (response === null || response === undefined) {
+      return '';
+    }
+
+    if (typeof response === 'string') {
+      return response;
+    }
+
+    if (typeof response === 'object') {
+      const responseObject = response as Record<string, unknown>;
+      const output = responseObject['output'];
+      if (typeof output === 'string') {
+        return output;
+      }
+
+      const error = responseObject['error'];
+      if (typeof error === 'string') {
+        return error;
+      }
+    }
+
+    try {
+      const serialized = JSON.stringify(response);
+      return serialized ?? String(response);
+    } catch {
+      return String(response);
+    }
+  }
+
   /**
    * Determine media type from MIME type
    */
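
Note: the helper flattens a Gemini functionResponse payload into the plain string an OpenAI-style tool message carries. Expected mappings, matching the branches above and the new converter tests:

// extractFunctionResponseContent(response) results:
//   'plain text'                  -> 'plain text'            (string passthrough)
//   { output: 'Raw output text' } -> 'Raw output text'       (string `output` wins)
//   { error: 'Command failed' }   -> 'Command failed'        (string `error` next)
//   { data: { value: 42 } }       -> '{"data":{"value":42}}' (JSON fallback)
//   undefined or null             -> ''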

packages/core/src/core/openaiContentGenerator/provider/dashscope.ts
@@ -2,7 +2,11 @@ import OpenAI from 'openai';
 import type { Config } from '../../../config/config.js';
 import type { ContentGeneratorConfig } from '../../contentGenerator.js';
 import { AuthType } from '../../contentGenerator.js';
-import { DEFAULT_TIMEOUT, DEFAULT_MAX_RETRIES } from '../constants.js';
+import {
+  DEFAULT_TIMEOUT,
+  DEFAULT_MAX_RETRIES,
+  DEFAULT_DASHSCOPE_BASE_URL,
+} from '../constants.js';
 import { tokenLimit } from '../../tokenLimits.js';
 import type {
   OpenAICompatibleProvider,
@@ -53,7 +57,7 @@ export class DashScopeOpenAICompatibleProvider
   buildClient(): OpenAI {
     const {
       apiKey,
-      baseUrl,
+      baseUrl = DEFAULT_DASHSCOPE_BASE_URL,
       timeout = DEFAULT_TIMEOUT,
       maxRetries = DEFAULT_MAX_RETRIES,
     } = this.contentGeneratorConfig;

packages/core/src/qwen/qwenContentGenerator.ts
@@ -8,7 +8,7 @@ import { OpenAIContentGenerator } from '../core/openaiContentGenerator/index.js'
 import { DashScopeOpenAICompatibleProvider } from '../core/openaiContentGenerator/provider/dashscope.js';
 import type { IQwenOAuth2Client } from './qwenOAuth2.js';
 import { SharedTokenManager } from './sharedTokenManager.js';
-import type { Config } from '../config/config.js';
+import { type Config } from '../config/config.js';
 import type {
   GenerateContentParameters,
   GenerateContentResponse,
@@ -18,10 +18,7 @@ import type {
   EmbedContentResponse,
 } from '@google/genai';
 import type { ContentGeneratorConfig } from '../core/contentGenerator.js';
-
-// Default fallback base URL if no endpoint is provided
-const DEFAULT_QWEN_BASE_URL =
-  'https://dashscope.aliyuncs.com/compatible-mode/v1';
+import { DEFAULT_DASHSCOPE_BASE_URL } from '../core/openaiContentGenerator/constants.js';

 /**
  * Qwen Content Generator that uses Qwen OAuth tokens with automatic refresh
@@ -58,7 +55,7 @@ export class QwenContentGenerator extends OpenAIContentGenerator {
    * Get the current endpoint URL with proper protocol and /v1 suffix
    */
   private getCurrentEndpoint(resourceUrl?: string): string {
-    const baseEndpoint = resourceUrl || DEFAULT_QWEN_BASE_URL;
+    const baseEndpoint = resourceUrl || DEFAULT_DASHSCOPE_BASE_URL;
     const suffix = '/v1';

     // Normalize the URL: add protocol if missing, ensure /v1 suffix
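
Note: getCurrentEndpoint normalizes whatever resource URL the OAuth token carries: default to DashScope, add a protocol when missing, and guarantee a single /v1 suffix. The diff cuts off before the normalization body, so the following is a sketch of the documented behavior, not the shipped code:

// Assumed shape of the normalization described by the comment above.
function normalizeEndpoint(resourceUrl: string | undefined, fallback: string): string {
  const base = resourceUrl || fallback;
  const withProtocol = /^https?:\/\//.test(base) ? base : `https://${base}`; // add protocol if missing
  const trimmed = withProtocol.replace(/\/+$/, '');
  return trimmed.endsWith('/v1') ? trimmed : `${trimmed}/v1`; // ensure /v1 exactly once
}

// normalizeEndpoint('dashscope.aliyuncs.com/compatible-mode', DEFAULT_DASHSCOPE_BASE_URL)
//   -> 'https://dashscope.aliyuncs.com/compatible-mode/v1'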

packages/core/src/tools/exitPlanMode.test.ts
@@ -131,16 +131,14 @@ describe('ExitPlanModeTool', () => {
       }

       const result = await invocation.execute(signal);
-      const expectedLlmMessage =
-        'User has approved your plan. You can now start coding. Start with updating your todo list if applicable.';
-
-      expect(result).toEqual({
-        llmContent: expectedLlmMessage,
-        returnDisplay: {
-          type: 'plan_summary',
-          message: 'User approved the plan.',
-          plan: params.plan,
-        },
+      expect(result.llmContent).toContain(
+        'User has approved your plan. You can now start coding',
+      );
+      expect(result.returnDisplay).toEqual({
+        type: 'plan_summary',
+        message: 'User approved the plan.',
+        plan: params.plan,
       });

       expect(mockConfig.setApprovalMode).toHaveBeenCalledWith(
@@ -188,15 +186,12 @@ describe('ExitPlanModeTool', () => {

       const result = await invocation.execute(signal);

-      expect(result).toEqual({
-        llmContent: JSON.stringify({
-          success: false,
-          plan: params.plan,
-          error: 'Plan execution was not approved. Remaining in plan mode.',
-        }),
-        returnDisplay:
-          'Plan execution was not approved. Remaining in plan mode.',
-      });
+      expect(result.llmContent).toBe(
+        'Plan execution was not approved. Remaining in plan mode.',
+      );
+      expect(result.returnDisplay).toBe(
+        'Plan execution was not approved. Remaining in plan mode.',
+      );

       expect(mockConfig.setApprovalMode).toHaveBeenCalledWith(
         ApprovalMode.PLAN,
@@ -215,50 +210,6 @@ describe('ExitPlanModeTool', () => {
       );
     });

-    it('should handle execution errors gracefully', async () => {
-      const params: ExitPlanModeParams = {
-        plan: 'Test plan',
-      };
-
-      const invocation = tool.build(params);
-      const confirmation = await invocation.shouldConfirmExecute(
-        new AbortController().signal,
-      );
-      if (confirmation) {
-        // Don't approve the plan so we go through the rejection path
-        await confirmation.onConfirm(ToolConfirmationOutcome.Cancel);
-      }
-
-      // Create a spy to simulate an error during the execution
-      const consoleSpy = vi
-        .spyOn(console, 'error')
-        .mockImplementation(() => {});
-
-      // Mock JSON.stringify to throw an error in the rejection path
-      const originalStringify = JSON.stringify;
-      vi.spyOn(JSON, 'stringify').mockImplementationOnce(() => {
-        throw new Error('JSON stringify error');
-      });
-
-      const result = await invocation.execute(new AbortController().signal);
-
-      expect(result).toEqual({
-        llmContent: JSON.stringify({
-          success: false,
-          error: 'Failed to present plan. Detail: JSON stringify error',
-        }),
-        returnDisplay: 'Error presenting plan: JSON stringify error',
-      });
-
-      expect(consoleSpy).toHaveBeenCalledWith(
-        '[ExitPlanModeTool] Error executing exit_plan_mode: JSON stringify error',
-      );
-
-      // Restore original JSON.stringify
-      JSON.stringify = originalStringify;
-      consoleSpy.mockRestore();
-    });
-
     it('should return empty tool locations', () => {
       const params: ExitPlanModeParams = {
         plan: 'Test plan',

packages/core/src/tools/exitPlanMode.ts
@@ -115,17 +115,12 @@ class ExitPlanModeToolInvocation extends BaseToolInvocation<
       const rejectionMessage =
         'Plan execution was not approved. Remaining in plan mode.';
       return {
-        llmContent: JSON.stringify({
-          success: false,
-          plan,
-          error: rejectionMessage,
-        }),
+        llmContent: rejectionMessage,
         returnDisplay: rejectionMessage,
       };
     }

-    const llmMessage =
-      'User has approved your plan. You can now start coding. Start with updating your todo list if applicable.';
+    const llmMessage = `User has approved your plan. You can now start coding. Start with updating your todo list if applicable.`;
     const displayMessage = 'User approved the plan.';

     return {
@@ -142,11 +137,11 @@ class ExitPlanModeToolInvocation extends BaseToolInvocation<
       console.error(
         `[ExitPlanModeTool] Error executing exit_plan_mode: ${errorMessage}`,
       );

+      const errorLlmContent = `Failed to present plan: ${errorMessage}`;
+
       return {
-        llmContent: JSON.stringify({
-          success: false,
-          error: `Failed to present plan. Detail: ${errorMessage}`,
-        }),
+        llmContent: errorLlmContent,
         returnDisplay: `Error presenting plan: ${errorMessage}`,
       };
     }

packages/core/src/tools/memoryTool.test.ts
@@ -241,9 +241,7 @@ describe('MemoryTool', () => {
         expectedFsArgument,
       );
       const successMessage = `Okay, I've remembered that in global memory: "${params.fact}"`;
-      expect(result.llmContent).toBe(
-        JSON.stringify({ success: true, message: successMessage }),
-      );
+      expect(result.llmContent).toBe(successMessage);
       expect(result.returnDisplay).toBe(successMessage);
     });
@@ -271,9 +269,7 @@ describe('MemoryTool', () => {
         expectedFsArgument,
       );
       const successMessage = `Okay, I've remembered that in project memory: "${params.fact}"`;
-      expect(result.llmContent).toBe(
-        JSON.stringify({ success: true, message: successMessage }),
-      );
+      expect(result.llmContent).toBe(successMessage);
       expect(result.returnDisplay).toBe(successMessage);
     });
@@ -298,10 +294,7 @@ describe('MemoryTool', () => {
       const result = await invocation.execute(mockAbortSignal);

       expect(result.llmContent).toBe(
-        JSON.stringify({
-          success: false,
-          error: `Failed to save memory. Detail: ${underlyingError.message}`,
-        }),
+        `Error saving memory: ${underlyingError.message}`,
       );
       expect(result.returnDisplay).toBe(
         `Error saving memory: ${underlyingError.message}`,
@@ -319,6 +312,8 @@ describe('MemoryTool', () => {
       expect(result.llmContent).toContain(
         'Please specify where to save this memory',
       );
+      expect(result.llmContent).toContain('Global:');
+      expect(result.llmContent).toContain('Project:');
       expect(result.returnDisplay).toContain('Global:');
       expect(result.returnDisplay).toContain('Project:');
     });

packages/core/src/tools/memoryTool.ts
@@ -309,7 +309,7 @@ Preview of changes to be made to GLOBAL memory:
     if (!fact || typeof fact !== 'string' || fact.trim() === '') {
       const errorMessage = 'Parameter "fact" must be a non-empty string.';
       return {
-        llmContent: JSON.stringify({ success: false, error: errorMessage }),
+        llmContent: `Error: ${errorMessage}`,
         returnDisplay: `Error: ${errorMessage}`,
       };
     }
@@ -324,10 +324,7 @@ Global: ${globalPath} (shared across all projects)
 Project: ${projectPath} (current project only)`;

       return {
-        llmContent: JSON.stringify({
-          success: false,
-          error: 'Please specify where to save this memory',
-        }),
+        llmContent: errorMessage,
         returnDisplay: errorMessage,
       };
     }
@@ -344,10 +341,7 @@ Project: ${projectPath} (current project only)`;
         await fs.writeFile(memoryFilePath, modified_content, 'utf-8');
         const successMessage = `Okay, I've updated the ${scope} memory file with your modifications.`;
         return {
-          llmContent: JSON.stringify({
-            success: true,
-            message: successMessage,
-          }),
+          llmContent: successMessage,
           returnDisplay: successMessage,
         };
       } else {
@@ -359,10 +353,7 @@ Project: ${projectPath} (current project only)`;
         });
         const successMessage = `Okay, I've remembered that in ${scope} memory: "${fact}"`;
         return {
-          llmContent: JSON.stringify({
-            success: true,
-            message: successMessage,
-          }),
+          llmContent: successMessage,
           returnDisplay: successMessage,
         };
       }
@@ -372,11 +363,9 @@ Project: ${projectPath} (current project only)`;
       console.error(
         `[MemoryTool] Error executing save_memory for fact "${fact}" in ${scope}: ${errorMessage}`,
       );

       return {
-        llmContent: JSON.stringify({
-          success: false,
-          error: `Failed to save memory. Detail: ${errorMessage}`,
-        }),
+        llmContent: `Error saving memory: ${errorMessage}`,
         returnDisplay: `Error saving memory: ${errorMessage}`,
         error: {
           message: errorMessage,

packages/core/src/tools/ripGrep.test.ts
@@ -14,7 +14,7 @@ import {
   type Mock,
 } from 'vitest';
 import type { RipGrepToolParams } from './ripGrep.js';
-import { canUseRipgrep, RipGrepTool, ensureRgPath } from './ripGrep.js';
+import { RipGrepTool } from './ripGrep.js';
 import path from 'node:path';
 import fs from 'node:fs/promises';
 import os, { EOL } from 'node:os';
@@ -22,24 +22,11 @@ import type { Config } from '../config/config.js';
 import { createMockWorkspaceContext } from '../test-utils/mockWorkspaceContext.js';
 import type { ChildProcess } from 'node:child_process';
 import { spawn } from 'node:child_process';
-import { downloadRipGrep } from '@joshua.litt/get-ripgrep';
-import { fileExists } from '../utils/fileUtils.js';
+import { ensureRipgrepPath } from '../utils/ripgrepUtils.js';

-// Mock dependencies for canUseRipgrep
-vi.mock('@joshua.litt/get-ripgrep', () => ({
-  downloadRipGrep: vi.fn(),
-}));
-vi.mock('../utils/fileUtils.js', async (importOriginal) => {
-  const actual = await importOriginal<typeof import('../utils/fileUtils.js')>();
-  return {
-    ...actual,
-    fileExists: vi.fn(),
-  };
-});
-vi.mock('../config/storage.js', () => ({
-  Storage: {
-    getGlobalBinDir: vi.fn().mockReturnValue('/mock/bin/dir'),
-  },
+// Mock ripgrepUtils
+vi.mock('../utils/ripgrepUtils.js', () => ({
+  ensureRipgrepPath: vi.fn(),
 }));

 // Mock child_process for ripgrep calls
@@ -49,97 +36,6 @@ vi.mock('child_process', () => ({

 const mockSpawn = vi.mocked(spawn);

-describe('canUseRipgrep', () => {
-  beforeEach(() => {
-    vi.clearAllMocks();
-  });
-
-  it('should return true if ripgrep already exists', async () => {
-    (fileExists as Mock).mockResolvedValue(true);
-    const result = await canUseRipgrep();
-    expect(result).toBe(true);
-    expect(fileExists).toHaveBeenCalledWith(path.join('/mock/bin/dir', 'rg'));
-    expect(downloadRipGrep).not.toHaveBeenCalled();
-  });
-
-  it('should download ripgrep and return true if it does not exist initially', async () => {
-    (fileExists as Mock)
-      .mockResolvedValueOnce(false)
-      .mockResolvedValueOnce(true);
-    (downloadRipGrep as Mock).mockResolvedValue(undefined);
-
-    const result = await canUseRipgrep();
-
-    expect(result).toBe(true);
-    expect(fileExists).toHaveBeenCalledTimes(2);
-    expect(downloadRipGrep).toHaveBeenCalledWith('/mock/bin/dir');
-  });
-
-  it('should return false if download fails and file does not exist', async () => {
-    (fileExists as Mock).mockResolvedValue(false);
-    (downloadRipGrep as Mock).mockResolvedValue(undefined);
-
-    const result = await canUseRipgrep();
-
-    expect(result).toBe(false);
-    expect(fileExists).toHaveBeenCalledTimes(2);
-    expect(downloadRipGrep).toHaveBeenCalledWith('/mock/bin/dir');
-  });
-
-  it('should propagate errors from downloadRipGrep', async () => {
-    const error = new Error('Download failed');
-    (fileExists as Mock).mockResolvedValue(false);
-    (downloadRipGrep as Mock).mockRejectedValue(error);
-
-    await expect(canUseRipgrep()).rejects.toThrow(error);
-    expect(fileExists).toHaveBeenCalledTimes(1);
-    expect(downloadRipGrep).toHaveBeenCalledWith('/mock/bin/dir');
-  });
-});
-
-describe('ensureRgPath', () => {
-  beforeEach(() => {
-    vi.clearAllMocks();
-  });
-
-  it('should return rg path if ripgrep already exists', async () => {
-    (fileExists as Mock).mockResolvedValue(true);
-    const rgPath = await ensureRgPath();
-    expect(rgPath).toBe(path.join('/mock/bin/dir', 'rg'));
-    expect(fileExists).toHaveBeenCalledOnce();
-    expect(downloadRipGrep).not.toHaveBeenCalled();
-  });
-
-  it('should return rg path if ripgrep is downloaded successfully', async () => {
-    (fileExists as Mock)
-      .mockResolvedValueOnce(false)
-      .mockResolvedValueOnce(true);
-    (downloadRipGrep as Mock).mockResolvedValue(undefined);
-    const rgPath = await ensureRgPath();
-    expect(rgPath).toBe(path.join('/mock/bin/dir', 'rg'));
-    expect(downloadRipGrep).toHaveBeenCalledOnce();
-    expect(fileExists).toHaveBeenCalledTimes(2);
-  });
-
-  it('should throw an error if ripgrep cannot be used after download attempt', async () => {
-    (fileExists as Mock).mockResolvedValue(false);
-    (downloadRipGrep as Mock).mockResolvedValue(undefined);
-    await expect(ensureRgPath()).rejects.toThrow('Cannot use ripgrep.');
-    expect(downloadRipGrep).toHaveBeenCalledOnce();
-    expect(fileExists).toHaveBeenCalledTimes(2);
-  });
-
-  it('should propagate errors from downloadRipGrep', async () => {
-    const error = new Error('Download failed');
-    (fileExists as Mock).mockResolvedValue(false);
-    (downloadRipGrep as Mock).mockRejectedValue(error);
-
-    await expect(ensureRgPath()).rejects.toThrow(error);
-    expect(fileExists).toHaveBeenCalledTimes(1);
-    expect(downloadRipGrep).toHaveBeenCalledWith('/mock/bin/dir');
-  });
-});
-
 // Helper function to create mock spawn implementations
 function createMockSpawn(
   options: {
@@ -201,8 +97,7 @@ describe('RipGrepTool', () => {

   beforeEach(async () => {
     vi.clearAllMocks();
-    (downloadRipGrep as Mock).mockResolvedValue(undefined);
-    (fileExists as Mock).mockResolvedValue(true);
+    (ensureRipgrepPath as Mock).mockResolvedValue('/mock/path/to/rg');
     mockSpawn.mockClear();
     tempRootDir = await fs.mkdtemp(path.join(os.tmpdir(), 'grep-tool-root-'));
     grepTool = new RipGrepTool(mockConfig);
@@ -551,16 +446,18 @@ describe('RipGrepTool', () => {
   });

   it('should throw an error if ripgrep is not available', async () => {
-    // Make ensureRgPath throw
-    (fileExists as Mock).mockResolvedValue(false);
-    (downloadRipGrep as Mock).mockResolvedValue(undefined);
+    // Make ensureRipgrepBinary throw
+    (ensureRipgrepPath as Mock).mockRejectedValue(
+      new Error('Ripgrep binary not found'),
+    );

     const params: RipGrepToolParams = { pattern: 'world' };
     const invocation = grepTool.build(params);

     expect(await invocation.execute(abortSignal)).toStrictEqual({
-      llmContent: 'Error during grep search operation: Cannot use ripgrep.',
-      returnDisplay: 'Error: Cannot use ripgrep.',
+      llmContent:
+        'Error during grep search operation: Ripgrep binary not found',
+      returnDisplay: 'Error: Ripgrep binary not found',
     });
   });
 });

packages/core/src/tools/ripGrep.ts
@@ -8,44 +8,16 @@ import fs from 'node:fs';
 import path from 'node:path';
 import { EOL } from 'node:os';
 import { spawn } from 'node:child_process';
-import { downloadRipGrep } from '@joshua.litt/get-ripgrep';
 import type { ToolInvocation, ToolResult } from './tools.js';
 import { BaseDeclarativeTool, BaseToolInvocation, Kind } from './tools.js';
 import { SchemaValidator } from '../utils/schemaValidator.js';
 import { makeRelative, shortenPath } from '../utils/paths.js';
 import { getErrorMessage, isNodeError } from '../utils/errors.js';
 import type { Config } from '../config/config.js';
-import { fileExists } from '../utils/fileUtils.js';
-import { Storage } from '../config/storage.js';
+import { ensureRipgrepPath } from '../utils/ripgrepUtils.js';

 const DEFAULT_TOTAL_MAX_MATCHES = 20000;

-function getRgPath(): string {
-  return path.join(Storage.getGlobalBinDir(), 'rg');
-}
-
-/**
- * Checks if `rg` exists, if not then attempt to download it.
- */
-export async function canUseRipgrep(): Promise<boolean> {
-  if (await fileExists(getRgPath())) {
-    return true;
-  }
-
-  await downloadRipGrep(Storage.getGlobalBinDir());
-  return await fileExists(getRgPath());
-}
-
-/**
- * Ensures `rg` is downloaded, or throws.
- */
-export async function ensureRgPath(): Promise<string> {
-  if (await canUseRipgrep()) {
-    return getRgPath();
-  }
-  throw new Error('Cannot use ripgrep.');
-}
-
 /**
  * Parameters for the GrepTool
  */
@@ -320,7 +292,7 @@ class GrepToolInvocation extends BaseToolInvocation<
     rgArgs.push(absolutePath);

     try {
-      const rgPath = await ensureRgPath();
+      const rgPath = await ensureRipgrepPath();
       const output = await new Promise<string>((resolve, reject) => {
         const child = spawn(rgPath, rgArgs, {
           windowsHide: true,
@@ -342,11 +314,7 @@ class GrepToolInvocation extends BaseToolInvocation<

         child.on('error', (err) => {
           options.signal.removeEventListener('abort', cleanup);
-          reject(
-            new Error(
-              `Failed to start ripgrep: ${err.message}. Please ensure @lvce-editor/ripgrep is properly installed.`,
-            ),
-          );
+          reject(new Error(`Failed to start ripgrep: ${err.message}.`));
         });

         child.on('close', (code) => {

packages/core/src/tools/todoWrite.test.ts
@@ -141,7 +141,12 @@ describe('TodoWriteTool', () => {
      const invocation = tool.build(params);
      const result = await invocation.execute(mockAbortSignal);

-      expect(result.llmContent).toContain('success');
+      expect(result.llmContent).toContain(
+        'Todos have been modified successfully',
+      );
+      expect(result.llmContent).toContain('<system-reminder>');
+      expect(result.llmContent).toContain('Your todo list has changed');
+      expect(result.llmContent).toContain(JSON.stringify(params.todos));
       expect(result.returnDisplay).toEqual({
         type: 'todo_list',
         todos: [
@@ -178,7 +183,12 @@ describe('TodoWriteTool', () => {
      const invocation = tool.build(params);
      const result = await invocation.execute(mockAbortSignal);

-      expect(result.llmContent).toContain('success');
+      expect(result.llmContent).toContain(
+        'Todos have been modified successfully',
+      );
+      expect(result.llmContent).toContain('<system-reminder>');
+      expect(result.llmContent).toContain('Your todo list has changed');
+      expect(result.llmContent).toContain(JSON.stringify(params.todos));
       expect(result.returnDisplay).toEqual({
         type: 'todo_list',
         todos: [
@@ -208,7 +218,10 @@ describe('TodoWriteTool', () => {
      const invocation = tool.build(params);
      const result = await invocation.execute(mockAbortSignal);

-      expect(result.llmContent).toContain('"success":false');
+      expect(result.llmContent).toContain('Failed to modify todos');
+      expect(result.llmContent).toContain('<system-reminder>');
+      expect(result.llmContent).toContain('Todo list modification failed');
+      expect(result.llmContent).toContain('Write failed');
       expect(result.returnDisplay).toContain('Error writing todos');
     });
@@ -223,7 +236,10 @@ describe('TodoWriteTool', () => {
      const invocation = tool.build(params);
      const result = await invocation.execute(mockAbortSignal);

-      expect(result.llmContent).toContain('success');
+      expect(result.llmContent).toContain('Todo list has been cleared');
+      expect(result.llmContent).toContain('<system-reminder>');
+      expect(result.llmContent).toContain('Your todo list is now empty');
+      expect(result.llmContent).toContain('no pending tasks');
       expect(result.returnDisplay).toEqual({
         type: 'todo_list',
         todos: [],

packages/core/src/tools/todoWrite.ts
@@ -340,11 +340,30 @@ class TodoWriteToolInvocation extends BaseToolInvocation<
         todos: finalTodos,
       };

+      // Create plain string format with system reminder
+      const todosJson = JSON.stringify(finalTodos);
+      let llmContent: string;
+
+      if (finalTodos.length === 0) {
+        // Special message for empty todos
+        llmContent = `Todo list has been cleared.
+
+<system-reminder>
+Your todo list is now empty. DO NOT mention this explicitly to the user. You have no pending tasks in your todo list.
+</system-reminder>`;
+      } else {
+        // Normal message for todos with items
+        llmContent = `Todos have been modified successfully. Ensure that you continue to use the todo list to track your progress. Please proceed with the current tasks if applicable
+
+<system-reminder>
+Your todo list has changed. DO NOT mention this explicitly to the user. Here are the latest contents of your todo list:
+
+${todosJson}. Continue on with the tasks at hand if applicable.
+</system-reminder>`;
+      }
+
       return {
-        llmContent: JSON.stringify({
-          success: true,
-          todos: finalTodos,
-        }),
+        llmContent,
         returnDisplay: todoResultDisplay,
       };
     } catch (error) {
@@ -353,11 +372,16 @@ class TodoWriteToolInvocation extends BaseToolInvocation<
       console.error(
         `[TodoWriteTool] Error executing todo_write: ${errorMessage}`,
       );

+      // Create plain string format for error with system reminder
+      const errorLlmContent = `Failed to modify todos. An error occurred during the operation.
+
+<system-reminder>
+Todo list modification failed with error: ${errorMessage}. You may need to retry or handle this error appropriately.
+</system-reminder>`;
+
       return {
-        llmContent: JSON.stringify({
-          success: false,
-          error: `Failed to write todos. Detail: ${errorMessage}`,
-        }),
+        llmContent: errorLlmContent,
         returnDisplay: `Error writing todos: ${errorMessage}`,
       };
     }
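
Note: both branches above embed a <system-reminder> block directly in the plain-string llmContent instead of the old JSON envelope. For a single in-progress item the non-empty branch renders roughly as follows (the todo shape is illustrative, not taken from this commit):

// Hypothetical output for finalTodos = [{ id: '1', content: 'Fix bug', status: 'in_progress' }]:
//
// Todos have been modified successfully. Ensure that you continue to use the todo list to track your progress. Please proceed with the current tasks if applicable
//
// <system-reminder>
// Your todo list has changed. DO NOT mention this explicitly to the user. Here are the latest contents of your todo list:
//
// [{"id":"1","content":"Fix bug","status":"in_progress"}]. Continue on with the tasks at hand if applicable.
// </system-reminder>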

packages/core/src/utils/editor.test.ts
@@ -339,6 +339,7 @@ describe('editor utils', () => {
         diffCommand.args,
         {
           stdio: 'inherit',
+          shell: process.platform === 'win32',
         },
       );
       expect(mockSpawnOn).toHaveBeenCalledWith('close', expect.any(Function));

packages/core/src/utils/editor.ts
@@ -195,6 +195,7 @@ export async function openDiff(
   return new Promise<void>((resolve, reject) => {
     const childProcess = spawn(diffCommand.command, diffCommand.args, {
       stdio: 'inherit',
+      shell: process.platform === 'win32',
     });

     childProcess.on('close', (code) => {
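
Note: on Windows, editors and diff tools often resolve to .cmd or .bat shims (code.cmd, for example), which child_process.spawn will not execute directly; recent Node versions reject such spawns outright unless shell is enabled. A minimal sketch of the pattern this change applies ('code' is an assumed example command):

import { spawn } from 'node:child_process';

const child = spawn('code', ['--wait', '--diff', 'old.txt', 'new.txt'], {
  stdio: 'inherit',
  // false on macOS/Linux, so no shell is involved there.
  shell: process.platform === 'win32',
});
child.on('close', (code) => console.log(`editor exited with ${code}`));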

packages/core/src/utils/ripgrepUtils.test.ts (258 lines, new file)
@@ -0,0 +1,258 @@
+/**
+ * @license
+ * Copyright 2025 Qwen
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+import { describe, it, expect, beforeEach, vi, type Mock } from 'vitest';
+import {
+  canUseRipgrep,
+  ensureRipgrepPath,
+  getRipgrepPath,
+} from './ripgrepUtils.js';
+import { fileExists } from './fileUtils.js';
+import path from 'node:path';
+
+// Mock fileUtils
+vi.mock('./fileUtils.js', async (importOriginal) => {
+  const actual = await importOriginal<typeof import('./fileUtils.js')>();
+  return {
+    ...actual,
+    fileExists: vi.fn(),
+  };
+});
+
+describe('ripgrepUtils', () => {
+  beforeEach(() => {
+    vi.clearAllMocks();
+  });
+
+  describe('getRipgrepPath', () => {
+    it('should return path with .exe extension on Windows', () => {
+      const originalPlatform = process.platform;
+      const originalArch = process.arch;
+
+      // Mock Windows x64
+      Object.defineProperty(process, 'platform', { value: 'win32' });
+      Object.defineProperty(process, 'arch', { value: 'x64' });
+
+      const rgPath = getRipgrepPath();
+
+      expect(rgPath).toContain('x64-win32');
+      expect(rgPath).toContain('rg.exe');
+      expect(rgPath).toContain(path.join('vendor', 'ripgrep'));
+
+      // Restore original values
+      Object.defineProperty(process, 'platform', { value: originalPlatform });
+      Object.defineProperty(process, 'arch', { value: originalArch });
+    });
+
+    it('should return path without .exe extension on macOS', () => {
+      const originalPlatform = process.platform;
+      const originalArch = process.arch;
+
+      // Mock macOS arm64
+      Object.defineProperty(process, 'platform', { value: 'darwin' });
+      Object.defineProperty(process, 'arch', { value: 'arm64' });
+
+      const rgPath = getRipgrepPath();
+
+      expect(rgPath).toContain('arm64-darwin');
+      expect(rgPath).toContain('rg');
+      expect(rgPath).not.toContain('.exe');
+      expect(rgPath).toContain(path.join('vendor', 'ripgrep'));
+
+      // Restore original values
+      Object.defineProperty(process, 'platform', { value: originalPlatform });
+      Object.defineProperty(process, 'arch', { value: originalArch });
+    });
+
+    it('should return path without .exe extension on Linux', () => {
+      const originalPlatform = process.platform;
+      const originalArch = process.arch;
+
+      // Mock Linux x64
+      Object.defineProperty(process, 'platform', { value: 'linux' });
+      Object.defineProperty(process, 'arch', { value: 'x64' });
+
+      const rgPath = getRipgrepPath();
+
+      expect(rgPath).toContain('x64-linux');
+      expect(rgPath).toContain('rg');
+      expect(rgPath).not.toContain('.exe');
+      expect(rgPath).toContain(path.join('vendor', 'ripgrep'));
+
+      // Restore original values
+      Object.defineProperty(process, 'platform', { value: originalPlatform });
+      Object.defineProperty(process, 'arch', { value: originalArch });
+    });
+
+    it('should throw error for unsupported platform', () => {
+      const originalPlatform = process.platform;
+      const originalArch = process.arch;
+
+      // Mock unsupported platform
+      Object.defineProperty(process, 'platform', { value: 'freebsd' });
+      Object.defineProperty(process, 'arch', { value: 'x64' });
+
+      expect(() => getRipgrepPath()).toThrow('Unsupported platform: freebsd');
+
+      // Restore original values
+      Object.defineProperty(process, 'platform', { value: originalPlatform });
+      Object.defineProperty(process, 'arch', { value: originalArch });
+    });
+
+    it('should throw error for unsupported architecture', () => {
+      const originalPlatform = process.platform;
+      const originalArch = process.arch;
+
+      // Mock unsupported architecture
+      Object.defineProperty(process, 'platform', { value: 'darwin' });
+      Object.defineProperty(process, 'arch', { value: 'ia32' });
+
+      expect(() => getRipgrepPath()).toThrow('Unsupported architecture: ia32');
+
+      // Restore original values
+      Object.defineProperty(process, 'platform', { value: originalPlatform });
+      Object.defineProperty(process, 'arch', { value: originalArch });
+    });
+
+    it('should handle all supported platform/arch combinations', () => {
+      const originalPlatform = process.platform;
+      const originalArch = process.arch;
+
+      const combinations: Array<{
+        platform: string;
+        arch: string;
+      }> = [
+        { platform: 'darwin', arch: 'x64' },
+        { platform: 'darwin', arch: 'arm64' },
+        { platform: 'linux', arch: 'x64' },
+        { platform: 'linux', arch: 'arm64' },
+        { platform: 'win32', arch: 'x64' },
+      ];
+
+      combinations.forEach(({ platform, arch }) => {
+        Object.defineProperty(process, 'platform', { value: platform });
+        Object.defineProperty(process, 'arch', { value: arch });
+
+        const rgPath = getRipgrepPath();
+        const binaryName = platform === 'win32' ? 'rg.exe' : 'rg';
+        const expectedPathSegment = path.join(
+          `${arch}-${platform}`,
+          binaryName,
+        );
+        expect(rgPath).toContain(expectedPathSegment);
+      });
+
+      // Restore original values
+      Object.defineProperty(process, 'platform', { value: originalPlatform });
+      Object.defineProperty(process, 'arch', { value: originalArch });
+    });
+  });
+
+  describe('canUseRipgrep', () => {
+    it('should return true if ripgrep binary exists', async () => {
+      (fileExists as Mock).mockResolvedValue(true);
+
+      const result = await canUseRipgrep();
+
+      expect(result).toBe(true);
+      expect(fileExists).toHaveBeenCalledOnce();
+    });
+
+    it('should return false if ripgrep binary does not exist', async () => {
+      (fileExists as Mock).mockResolvedValue(false);
+
+      const result = await canUseRipgrep();
+
+      expect(result).toBe(false);
+      expect(fileExists).toHaveBeenCalledOnce();
+    });
+
+    it('should return false if platform is unsupported', async () => {
+      const originalPlatform = process.platform;
+
+      // Mock unsupported platform
+      Object.defineProperty(process, 'platform', { value: 'aix' });
+
+      const result = await canUseRipgrep();
+
+      expect(result).toBe(false);
+      expect(fileExists).not.toHaveBeenCalled();
+
+      // Restore original value
+      Object.defineProperty(process, 'platform', { value: originalPlatform });
+    });
+
+    it('should return false if architecture is unsupported', async () => {
+      const originalArch = process.arch;
+
+      // Mock unsupported architecture
+      Object.defineProperty(process, 'arch', { value: 's390x' });
+
+      const result = await canUseRipgrep();
+
+      expect(result).toBe(false);
+      expect(fileExists).not.toHaveBeenCalled();
+
+      // Restore original value
+      Object.defineProperty(process, 'arch', { value: originalArch });
+    });
+  });
+
+  describe('ensureRipgrepBinary', () => {
+    it('should return ripgrep path if binary exists', async () => {
+      (fileExists as Mock).mockResolvedValue(true);
+
+      const rgPath = await ensureRipgrepPath();
+
+      expect(rgPath).toBeDefined();
+      expect(rgPath).toContain('rg');
+      expect(fileExists).toHaveBeenCalledOnce();
+      expect(fileExists).toHaveBeenCalledWith(rgPath);
+    });
+
+    it('should throw error if binary does not exist', async () => {
+      (fileExists as Mock).mockResolvedValue(false);
+
+      await expect(ensureRipgrepPath()).rejects.toThrow(
+        /Ripgrep binary not found/,
+      );
+      await expect(ensureRipgrepPath()).rejects.toThrow(/Platform:/);
+      await expect(ensureRipgrepPath()).rejects.toThrow(/Architecture:/);
+
+      expect(fileExists).toHaveBeenCalled();
+    });
+
+    it('should throw error with correct path information', async () => {
+      (fileExists as Mock).mockResolvedValue(false);
+
+      try {
+        await ensureRipgrepPath();
+        // Should not reach here
+        expect(true).toBe(false);
+      } catch (error) {
+        expect(error).toBeInstanceOf(Error);
+        const errorMessage = (error as Error).message;
+        expect(errorMessage).toContain('Ripgrep binary not found at');
+        expect(errorMessage).toContain(process.platform);
+        expect(errorMessage).toContain(process.arch);
+      }
+    });
+
+    it('should throw error if platform is unsupported', async () => {
+      const originalPlatform = process.platform;
+
+      // Mock unsupported platform
+      Object.defineProperty(process, 'platform', { value: 'openbsd' });
+
+      await expect(ensureRipgrepPath()).rejects.toThrow(
+        'Unsupported platform: openbsd',
+      );
+
+      // Restore original value
+      Object.defineProperty(process, 'platform', { value: originalPlatform });
+    });
+  });
+});

packages/core/src/utils/ripgrepUtils.ts (114 lines, new file)
@@ -0,0 +1,114 @@
+/**
+ * @license
+ * Copyright 2025 Qwen
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+import path from 'node:path';
+import { fileURLToPath } from 'node:url';
+import { fileExists } from './fileUtils.js';
+
+// Get the directory of the current module
+const __filename = fileURLToPath(import.meta.url);
+const __dirname = path.dirname(__filename);
+
+type Platform = 'darwin' | 'linux' | 'win32';
+type Architecture = 'x64' | 'arm64';
+
+/**
+ * Maps process.platform values to vendor directory names
+ */
+function getPlatformString(platform: string): Platform {
+  switch (platform) {
+    case 'darwin':
+    case 'linux':
+    case 'win32':
+      return platform;
+    default:
+      throw new Error(`Unsupported platform: ${platform}`);
+  }
+}
+
+/**
+ * Maps process.arch values to vendor directory names
+ */
+function getArchitectureString(arch: string): Architecture {
+  switch (arch) {
+    case 'x64':
+    case 'arm64':
+      return arch;
+    default:
+      throw new Error(`Unsupported architecture: ${arch}`);
+  }
+}
+
+/**
+ * Returns the path to the bundled ripgrep binary for the current platform
+ */
+export function getRipgrepPath(): string {
+  const platform = getPlatformString(process.platform);
+  const arch = getArchitectureString(process.arch);
+
+  // Binary name includes .exe on Windows
+  const binaryName = platform === 'win32' ? 'rg.exe' : 'rg';
+
+  // Path resolution:
+  // When running from transpiled code: dist/src/utils/ripgrepUtils.js -> ../../../vendor/ripgrep/
+  // When running from bundle: dist/index.js -> vendor/ripgrep/
+
+  // Detect if we're running from a bundle (single file)
+  // In bundle, __filename will be something like /path/to/dist/index.js
+  // In transpiled code, __filename will be /path/to/dist/src/utils/ripgrepUtils.js
+  const isBundled = !__filename.includes(path.join('src', 'utils'));
+
+  const vendorPath = isBundled
+    ? path.join(
+        __dirname,
+        'vendor',
+        'ripgrep',
+        `${arch}-${platform}`,
+        binaryName,
+      )
+    : path.join(
+        __dirname,
+        '..',
+        '..',
+        '..',
+        'vendor',
+        'ripgrep',
+        `${arch}-${platform}`,
+        binaryName,
+      );
+
+  return vendorPath;
+}
+
+/**
+ * Checks if ripgrep binary is available
+ */
+export async function canUseRipgrep(): Promise<boolean> {
+  try {
+    const rgPath = getRipgrepPath();
+    return await fileExists(rgPath);
+  } catch (_error) {
+    // Unsupported platform/arch
+    return false;
+  }
+}
+
+/**
+ * Ensures ripgrep binary exists and returns its path
+ * @throws Error if ripgrep binary is not available
+ */
+export async function ensureRipgrepPath(): Promise<string> {
+  const rgPath = getRipgrepPath();
+
+  if (!(await fileExists(rgPath))) {
+    throw new Error(
+      `Ripgrep binary not found at ${rgPath}. ` +
+        `Platform: ${process.platform}, Architecture: ${process.arch}`,
+    );
+  }
+
+  return rgPath;
+}
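
Note: the new utility resolves the vendored binary instead of downloading one at runtime. A minimal consumer sketch using the exports above (the invocation pattern mirrors how RipGrepTool runs rg; the search arguments are illustrative):

import { spawn } from 'node:child_process';
import { ensureRipgrepPath } from './ripgrepUtils.js';

// Throws with platform/arch details when the bundled binary is missing.
const rgPath = await ensureRipgrepPath();
const child = spawn(rgPath, ['--json', 'TODO', '.'], { windowsHide: true });
child.stdout.on('data', (chunk) => process.stdout.write(chunk));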

packages/core/vendor/ripgrep/COPYING (3 lines, vendored, new file)
@@ -0,0 +1,3 @@
+This project is dual-licensed under the Unlicense and MIT licenses.
+
+You may use this code under the terms of either license.

BIN  packages/core/vendor/ripgrep/arm64-darwin/rg  (vendored, executable file; binary file not shown)
BIN  packages/core/vendor/ripgrep/arm64-linux/rg  (vendored, executable file; binary file not shown)
BIN  packages/core/vendor/ripgrep/x64-darwin/rg  (vendored, executable file; binary file not shown)
BIN  packages/core/vendor/ripgrep/x64-linux/rg  (vendored, executable file; binary file not shown)
BIN  packages/core/vendor/ripgrep/x64-win32/rg.exe  (vendored, normal file; binary file not shown)