mirror of https://github.com/QwenLM/qwen-code.git (synced 2026-01-05 08:29:15 +00:00)

Compare commits: mingholy/f...v0.1.1-pre (1 commit)

| Author | SHA1 | Date |
| --- | --- | --- |
|  | 9cbd8bb4e5 |  |
@@ -309,8 +309,7 @@ If you are experiencing performance issues with file searching (e.g., with `@` c
 ```
 
 - **`tavilyApiKey`** (string):
-  - **Description:** API key for Tavily web search service. Used to enable the `web_search` tool functionality.
-  - **Note:** This is a legacy configuration format. For Qwen OAuth users, DashScope provider is automatically available without any configuration. For other authentication types, configure Tavily or Google providers using the new `webSearch` configuration format.
+  - **Description:** API key for Tavily web search service. Required to enable the `web_search` tool functionality. If not configured, the web search tool will be disabled and skipped.
   - **Default:** `undefined` (web search disabled)
   - **Example:** `"tavilyApiKey": "tvly-your-api-key-here"`
 - **`chatCompression`** (object):

@@ -466,8 +465,8 @@ The CLI automatically loads environment variables from an `.env` file. The loadi
   - This is useful for development and testing.
 - **`TAVILY_API_KEY`**:
   - Your API key for the Tavily web search service.
-  - Used to enable the `web_search` tool functionality.
-  - **Note:** For Qwen OAuth users, DashScope provider is automatically available without any configuration. For other authentication types, configure Tavily or Google providers to enable web search.
+  - Required to enable the `web_search` tool functionality.
+  - If not configured, the web search tool will be disabled and skipped.
   - Example: `export TAVILY_API_KEY="tvly-your-api-key-here"`
 
 ## Command-Line Arguments

@@ -541,9 +540,6 @@ Arguments passed directly when running the CLI can override other configurations
   - Displays the version of the CLI.
 - **`--openai-logging`**:
   - Enables logging of OpenAI API calls for debugging and analysis. This flag overrides the `enableOpenAILogging` setting in `settings.json`.
-- **`--openai-logging-dir <directory>`**:
-  - Sets a custom directory path for OpenAI API logs. This flag overrides the `openAILoggingDir` setting in `settings.json`. Supports absolute paths, relative paths, and `~` expansion.
-  - **Example:** `qwen --openai-logging-dir "~/qwen-logs" --openai-logging`
- **`--tavily-api-key <api_key>`**:
   - Sets the Tavily API key for web search functionality for this session.
   - Example: `qwen --tavily-api-key tvly-your-api-key-here`
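Taken together, the hunks above leave three ways to supply the Tavily key. Per the `loadCliConfig` hunk later in this compare, the CLI flag wins over the `settings.json` value, which wins over the environment variable. A minimal sketch (the key value is a placeholder):

```bash
# .env in the project directory, loaded automatically at startup
echo 'TAVILY_API_KEY="tvly-your-api-key-here"' >> .env

# or for a single session, via the flag documented above (takes precedence)
qwen --tavily-api-key tvly-your-api-key-here
```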
@@ -171,18 +171,6 @@ Settings are organized into categories. All settings should be placed within the
   - **Description:** Disables loop detection checks. Loop detection prevents infinite loops in AI responses but can generate false positives that interrupt legitimate workflows. Enable this option if you experience frequent false positive loop detection interruptions.
   - **Default:** `false`
 
-- **`model.enableOpenAILogging`** (boolean):
-  - **Description:** Enables logging of OpenAI API calls for debugging and analysis. When enabled, API requests and responses are logged to JSON files.
-  - **Default:** `false`
-
-- **`model.openAILoggingDir`** (string):
-  - **Description:** Custom directory path for OpenAI API logs. If not specified, defaults to `logs/openai` in the current working directory. Supports absolute paths, relative paths (resolved from current working directory), and `~` expansion (home directory).
-  - **Default:** `undefined`
-  - **Examples:**
-    - `"~/qwen-logs"` - Logs to `~/qwen-logs` directory
-    - `"./custom-logs"` - Logs to `./custom-logs` relative to current directory
-    - `"/tmp/openai-logs"` - Logs to absolute path `/tmp/openai-logs`
-
 #### `context`
 
 - **`context.fileName`** (string or array of strings):

@@ -258,14 +246,6 @@ Settings are organized into categories. All settings should be placed within the
   - It must return function output as JSON on `stdout`, analogous to [`functionResponse.response.content`](https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/inference#functionresponse).
   - **Default:** `undefined`
 
-- **`tools.useRipgrep`** (boolean):
-  - **Description:** Use ripgrep for file content search instead of the fallback implementation. Provides faster search performance.
-  - **Default:** `true`
-
-- **`tools.useBuiltinRipgrep`** (boolean):
-  - **Description:** Use the bundled ripgrep binary. When set to `false`, the system-level `rg` command will be used instead. This setting is only effective when `tools.useRipgrep` is `true`.
-  - **Default:** `true`
-
 #### `mcp`
 
 - **`mcp.serverCommand`** (string):

@@ -317,8 +297,7 @@ Settings are organized into categories. All settings should be placed within the
   - **Default:** `undefined`
 
 - **`advanced.tavilyApiKey`** (string):
-  - **Description:** API key for Tavily web search service. Used to enable the `web_search` tool functionality.
-  - **Note:** This is a legacy configuration format. For Qwen OAuth users, DashScope provider is automatically available without any configuration. For other authentication types, configure Tavily or Google providers using the new `webSearch` configuration format.
+  - **Description:** API key for Tavily web search service. Required to enable the `web_search` tool functionality. If not configured, the web search tool will be disabled and skipped.
   - **Default:** `undefined`
 
 #### `mcpServers`

@@ -399,8 +378,6 @@ Here is an example of a `settings.json` file with the nested structure, new as o
   "model": {
     "name": "qwen3-coder-plus",
     "maxSessionTurns": 10,
-    "enableOpenAILogging": false,
-    "openAILoggingDir": "~/qwen-logs",
     "summarizeToolOutput": {
       "run_shell_command": {
         "tokenBudget": 100

@@ -489,8 +466,8 @@ The CLI automatically loads environment variables from an `.env` file. The loadi
   - Set to a string to customize the title of the CLI.
 - **`TAVILY_API_KEY`**:
   - Your API key for the Tavily web search service.
-  - Used to enable the `web_search` tool functionality.
-  - **Note:** For Qwen OAuth users, DashScope provider is automatically available without any configuration. For other authentication types, configure Tavily or Google providers to enable web search.
+  - Required to enable the `web_search` tool functionality.
+  - If not configured, the web search tool will be disabled and skipped.
   - Example: `export TAVILY_API_KEY="tvly-your-api-key-here"`
 
 ## Command-Line Arguments

@@ -571,9 +548,6 @@ Arguments passed directly when running the CLI can override other configurations
   - Displays the version of the CLI.
 - **`--openai-logging`**:
   - Enables logging of OpenAI API calls for debugging and analysis. This flag overrides the `enableOpenAILogging` setting in `settings.json`.
-- **`--openai-logging-dir <directory>`**:
-  - Sets a custom directory path for OpenAI API logs. This flag overrides the `openAILoggingDir` setting in `settings.json`. Supports absolute paths, relative paths, and `~` expansion.
-  - **Example:** `qwen --openai-logging-dir "~/qwen-logs" --openai-logging`
- **`--tavily-api-key <api_key>`**:
   - Sets the Tavily API key for web search functionality for this session.
   - Example: `qwen --tavily-api-key tvly-your-api-key-here`
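For reference, a minimal `settings.json` in the nested format that the surviving doc lines describe might look like this (values are placeholders):

```json
{
  "model": {
    "name": "qwen3-coder-plus",
    "maxSessionTurns": 10
  },
  "advanced": {
    "tavilyApiKey": "tvly-your-api-key-here"
  }
}
```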
@@ -107,7 +107,7 @@ The `qwen-extension.json` file contains the configuration for the extension. The
 - `mcpServers`: A map of MCP servers to configure. The key is the name of the server, and the value is the server configuration. These servers will be loaded on startup just like MCP servers configured in a [`settings.json` file](./cli/configuration.md). If both an extension and a `settings.json` file configure an MCP server with the same name, the server defined in the `settings.json` file takes precedence.
   - Note that all MCP server configuration options are supported except for `trust`.
 - `contextFileName`: The name of the file that contains the context for the extension. This will be used to load the context from the extension directory. If this property is not used but a `QWEN.md` file is present in your extension directory, then that file will be loaded.
-- `excludeTools`: An array of tool names to exclude from the model. You can also specify command-specific restrictions for tools that support it, like the `run_shell_command` tool. For example, `"excludeTools": ["run_shell_command(rm -rf)"]` will block the `rm -rf` command. Note that this differs from the MCP server `excludeTools` functionality, which can be listed in the MCP server config. **Important:** Tools specified in `excludeTools` will be disabled for the entire conversation context and will affect all subsequent queries in the current session.
+- `excludeTools`: An array of tool names to exclude from the model. You can also specify command-specific restrictions for tools that support it, like the `run_shell_command` tool. For example, `"excludeTools": ["run_shell_command(rm -rf)"]` will block the `rm -rf` command. Note that this differs from the MCP server `excludeTools` functionality, which can be listed in the MCP server config.
 
 When Qwen Code starts, it loads all the extensions and merges their configurations. If there are any conflicts, the workspace configuration takes precedence.
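A sketch of a `qwen-extension.json` using the fields the hunk above discusses; the `name` and `version` fields and the server command are illustrative assumptions, not taken from this diff:

```json
{
  "name": "my-extension",
  "version": "1.0.0",
  "mcpServers": {
    "my-server": {
      "command": "node",
      "args": ["dist/server.js"]
    }
  },
  "contextFileName": "QWEN.md",
  "excludeTools": ["run_shell_command(rm -rf)"]
}
```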
@@ -1,186 +1,43 @@
 # Web Search Tool (`web_search`)
 
-This document describes the `web_search` tool for performing web searches using multiple providers.
+This document describes the `web_search` tool.
 
 ## Description
 
-Use `web_search` to perform a web search and get information from the internet. The tool supports multiple search providers and returns a concise answer with source citations when available.
-
-### Supported Providers
-
-1. **DashScope** (Official, Free) - Automatically available for Qwen OAuth users (200 requests/minute, 2000 requests/day)
-2. **Tavily** - High-quality search API with built-in answer generation
-3. **Google Custom Search** - Google's Custom Search JSON API
+Use `web_search` to perform a web search using the Tavily API. The tool returns a concise answer with sources when possible.
 
 ### Arguments
 
-`web_search` takes two arguments:
+`web_search` takes one argument:
 
-- `query` (string, required): The search query
-- `provider` (string, optional): Specific provider to use ("dashscope", "tavily", "google")
-  - If not specified, uses the default provider from configuration
+- `query` (string, required): The search query.
 
-## Configuration
+## How to use `web_search`
 
-### Method 1: Settings File (Recommended)
+`web_search` calls the Tavily API directly. You must configure the `TAVILY_API_KEY` through one of the following methods:
 
-Add to your `settings.json`:
+1. **Settings file**: Add `"tavilyApiKey": "your-key-here"` to your `settings.json`
+2. **Environment variable**: Set `TAVILY_API_KEY` in your environment or `.env` file
+3. **Command line**: Use `--tavily-api-key your-key-here` when running the CLI
 
-```json
-{
-  "webSearch": {
-    "provider": [
-      { "type": "dashscope" },
-      { "type": "tavily", "apiKey": "tvly-xxxxx" },
-      {
-        "type": "google",
-        "apiKey": "your-google-api-key",
-        "searchEngineId": "your-search-engine-id"
-      }
-    ],
-    "default": "dashscope"
-  }
-}
-```
+If the key is not configured, the tool will be disabled and skipped.
 
-**Notes:**
-
-- DashScope doesn't require an API key (official, free service)
-- **Qwen OAuth users:** DashScope is automatically added to your provider list, even if not explicitly configured
-- Configure additional providers (Tavily, Google) if you want to use them alongside DashScope
-- Set `default` to specify which provider to use by default (if not set, priority order: Tavily > Google > DashScope)
-
-### Method 2: Environment Variables
-
-Set environment variables in your shell or `.env` file:
-
-```bash
-# Tavily
-export TAVILY_API_KEY="tvly-xxxxx"
-
-# Google
-export GOOGLE_API_KEY="your-api-key"
-export GOOGLE_SEARCH_ENGINE_ID="your-engine-id"
-```
-
-### Method 3: Command Line Arguments
-
-Pass API keys when running Qwen Code:
-
-```bash
-# Tavily
-qwen --tavily-api-key tvly-xxxxx
-
-# Google
-qwen --google-api-key your-key --google-search-engine-id your-id
-
-# Specify default provider
-qwen --web-search-default tavily
-```
-
-### Backward Compatibility (Deprecated)
-
-⚠️ **DEPRECATED:** The legacy `tavilyApiKey` configuration is still supported for backward compatibility but is deprecated:
-
-```json
-{
-  "advanced": {
-    "tavilyApiKey": "tvly-xxxxx" // ⚠️ Deprecated
-  }
-}
-```
-
-**Important:** This configuration is deprecated and will be removed in a future version. Please migrate to the new `webSearch` configuration format shown above. The old configuration will automatically configure Tavily as a provider, but we strongly recommend updating your configuration.
-
-## Disabling Web Search
-
-If you want to disable the web search functionality, you can exclude the `web_search` tool in your `settings.json`:
-
-```json
-{
-  "tools": {
-    "exclude": ["web_search"]
-  }
-}
-```
-
-**Note:** This setting requires a restart of Qwen Code to take effect. Once disabled, the `web_search` tool will not be available to the model, even if web search providers are configured.
-
-## Usage Examples
-
-### Basic search (using default provider)
+Usage:
 
 ```
-web_search(query="latest advancements in AI")
+web_search(query="Your query goes here.")
 ```
 
-### Search with specific provider
+## `web_search` examples
 
+Get information on a topic:
+
 ```
-web_search(query="latest advancements in AI", provider="tavily")
+web_search(query="latest advancements in AI-powered code generation")
 ```
 
-### Real-world examples
+## Important notes
 
-```
-web_search(query="weather in San Francisco today")
-web_search(query="latest Node.js LTS version", provider="google")
-web_search(query="best practices for React 19", provider="dashscope")
-```
-
-## Provider Details
-
-### DashScope (Official)
-
-- **Cost:** Free
-- **Authentication:** Automatically available when using Qwen OAuth authentication
-- **Configuration:** No API key required, automatically added to provider list for Qwen OAuth users
-- **Quota:** 200 requests/minute, 2000 requests/day
-- **Best for:** General queries, always available as fallback for Qwen OAuth users
-- **Auto-registration:** If you're using Qwen OAuth, DashScope is automatically added to your provider list even if you don't configure it explicitly
-
-### Tavily
-
-- **Cost:** Requires API key (paid service with free tier)
-- **Sign up:** https://tavily.com
-- **Features:** High-quality results with AI-generated answers
-- **Best for:** Research, comprehensive answers with citations
-
-### Google Custom Search
-
-- **Cost:** Free tier available (100 queries/day)
-- **Setup:**
-  1. Enable Custom Search API in Google Cloud Console
-  2. Create a Custom Search Engine at https://programmablesearchengine.google.com
-- **Features:** Google's search quality
-- **Best for:** Specific, factual queries
-
-## Important Notes
-
-- **Response format:** Returns a concise answer with numbered source citations
-- **Citations:** Source links are appended as a numbered list: [1], [2], etc.
-- **Multiple providers:** If one provider fails, manually specify another using the `provider` parameter
-- **DashScope availability:** Automatically available for Qwen OAuth users, no configuration needed
-- **Default provider selection:** The system automatically selects a default provider based on availability:
-  1. Your explicit `default` configuration (highest priority)
-  2. CLI argument `--web-search-default`
-  3. First available provider by priority: Tavily > Google > DashScope
-
-## Troubleshooting
-
-**Tool not available?**
-
-- **For Qwen OAuth users:** The tool is automatically registered with DashScope provider, no configuration needed
-- **For other authentication types:** Ensure at least one provider (Tavily or Google) is configured
-- For Tavily/Google: Verify your API keys are correct
-
-**Provider-specific errors?**
-
-- Use the `provider` parameter to try a different search provider
-- Check your API quotas and rate limits
-- Verify API keys are properly set in configuration
-
-**Need help?**
-
-- Check your configuration: Run `qwen` and use the settings dialog
-- View your current settings in `~/.qwen-code/settings.json` (macOS/Linux) or `%USERPROFILE%\.qwen-code\settings.json` (Windows)
+- **Response returned:** The `web_search` tool returns a concise answer when available, with a list of source links.
+- **Citations:** Source links are appended as a numbered list.
+- **API key:** Configure `TAVILY_API_KEY` via settings.json, environment variables, .env files, or command line arguments. If not configured, the tool is not registered.
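The rewritten doc says the tool "calls the Tavily API directly" and is skipped without a key. A minimal TypeScript sketch of that flow, assuming Tavily's public `POST /search` endpoint and field names; this is not Qwen Code's actual implementation:

```ts
// Sketch only: endpoint and request/response fields follow Tavily's
// public REST API docs and are assumptions here.
async function tavilySearch(query: string): Promise<string> {
  const apiKey = process.env['TAVILY_API_KEY'];
  if (!apiKey) {
    // Mirrors the documented behavior: without a key the tool is not registered.
    throw new Error('web_search is disabled: TAVILY_API_KEY is not configured');
  }
  const res = await fetch('https://api.tavily.com/search', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ api_key: apiKey, query, include_answer: true }),
  });
  if (!res.ok) throw new Error(`Tavily request failed: ${res.status}`);
  const data = (await res.json()) as {
    answer?: string;
    results?: Array<{ title: string; url: string }>;
  };
  // Format as the doc describes: concise answer plus numbered source links.
  const sources = (data.results ?? [])
    .map((r, i) => `[${i + 1}] ${r.title} (${r.url})`)
    .join('\n');
  return `${data.answer ?? ''}\n\nSources:\n${sources}`;
}
```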
@@ -7,7 +7,7 @@
 import path from 'node:path';
 import { fileURLToPath } from 'node:url';
 import { createRequire } from 'node:module';
-import { writeFileSync, rmSync } from 'node:fs';
+import { writeFileSync } from 'node:fs';
 
 let esbuild;
 try {

@@ -22,9 +22,6 @@ const __dirname = path.dirname(__filename);
 const require = createRequire(import.meta.url);
 const pkg = require(path.resolve(__dirname, 'package.json'));
 
-// Clean dist directory (cross-platform)
-rmSync(path.resolve(__dirname, 'dist'), { recursive: true, force: true });
-
 const external = [
   '@lydell/node-pty',
   'node-pty',
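These removed lines deleted the build output from inside the esbuild config; the root `package.json` hunk further down moves that cleanup into the `bundle` script as a shell `rm -rf dist`, which assumes a POSIX shell. A cwd-relative variant of the removed cross-platform call, for reference:

```ts
// Cross-platform delete of the build output; force makes it a no-op
// when dist does not exist, matching `rm -rf` semantics.
import { rmSync } from 'node:fs';

rmSync('dist', { recursive: true, force: true });
```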
@@ -36,10 +36,10 @@ describe('JSON output', () => {
   });
 
   it('should return a JSON error for enforced auth mismatch before running', async () => {
-    process.env['OPENAI_API_KEY'] = 'test-key';
+    process.env['GOOGLE_GENAI_USE_GCA'] = 'true';
     await rig.setup('json-output-auth-mismatch', {
       settings: {
-        security: { auth: { enforcedType: 'qwen-oauth' } },
+        security: { auth: { enforcedType: 'gemini-api-key' } },
       },
     });

@@ -50,7 +50,7 @@ describe('JSON output', () => {
     } catch (e) {
       thrown = e as Error;
     } finally {
-      delete process.env['OPENAI_API_KEY'];
+      delete process.env['GOOGLE_GENAI_USE_GCA'];
     }
 
     expect(thrown).toBeDefined();

@@ -80,8 +80,10 @@ describe('JSON output', () => {
     expect(payload.error.type).toBe('Error');
     expect(payload.error.code).toBe(1);
     expect(payload.error.message).toContain(
-      'configured auth type is qwen-oauth',
+      'configured auth type is gemini-api-key',
     );
-    expect(payload.error.message).toContain('current auth type is openai');
+    expect(payload.error.message).toContain(
+      'current auth type is oauth-personal',
+    );
   });
 });
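Pieced together from the assertions above, the JSON error payload the CLI is expected to emit has roughly this shape; the full message wording is an assumption, since the test only checks the two quoted fragments:

```ts
// Sketch of the payload asserted above; only the checked fields are shown.
const payload = {
  error: {
    type: 'Error',
    code: 1,
    // Assumed wording; the test asserts only the two toContain() fragments.
    message:
      'The configured auth type is gemini-api-key, but the current auth type is oauth-personal.',
  },
};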
@@ -9,6 +9,7 @@ import { mkdirSync, writeFileSync, readFileSync } from 'node:fs';
 import { join, dirname } from 'node:path';
 import { fileURLToPath } from 'node:url';
 import { env } from 'node:process';
+import { DEFAULT_QWEN_MODEL } from '../packages/core/src/config/models.js';
 import fs from 'node:fs';
 import { EOL } from 'node:os';
 import * as pty from '@lydell/node-pty';

@@ -181,6 +182,7 @@ export class TestRig {
         otlpEndpoint: '',
         outfile: telemetryPath,
       },
+      model: DEFAULT_QWEN_MODEL,
       sandbox: env.GEMINI_SANDBOX !== 'false' ? env.GEMINI_SANDBOX : false,
       ...options.settings, // Allow tests to override/add settings
     };
@@ -9,53 +9,14 @@ import { TestRig, printDebugInfo, validateModelOutput } from './test-helper.js';
 
 describe('web_search', () => {
   it('should be able to search the web', async () => {
-    // Check if any web search provider is available
-    const hasTavilyKey = !!process.env['TAVILY_API_KEY'];
-    const hasGoogleKey =
-      !!process.env['GOOGLE_API_KEY'] &&
-      !!process.env['GOOGLE_SEARCH_ENGINE_ID'];
-
-    // Skip if no provider is configured
-    // Note: DashScope provider is automatically available for Qwen OAuth users,
-    // but we can't easily detect that in tests without actual OAuth credentials
-    if (!hasTavilyKey && !hasGoogleKey) {
-      console.warn(
-        'Skipping web search test: No web search provider configured. ' +
-          'Set TAVILY_API_KEY or GOOGLE_API_KEY+GOOGLE_SEARCH_ENGINE_ID environment variables.',
-      );
+    // Skip if Tavily key is not configured
+    if (!process.env['TAVILY_API_KEY']) {
+      console.warn('Skipping web search test: TAVILY_API_KEY not set');
       return;
     }
 
     const rig = new TestRig();
-    // Configure web search in settings if provider keys are available
-    const webSearchSettings: Record<string, unknown> = {};
-    const providers: Array<{
-      type: string;
-      apiKey?: string;
-      searchEngineId?: string;
-    }> = [];
-
-    if (hasTavilyKey) {
-      providers.push({ type: 'tavily', apiKey: process.env['TAVILY_API_KEY'] });
-    }
-    if (hasGoogleKey) {
-      providers.push({
-        type: 'google',
-        apiKey: process.env['GOOGLE_API_KEY'],
-        searchEngineId: process.env['GOOGLE_SEARCH_ENGINE_ID'],
-      });
-    }
-
-    if (providers.length > 0) {
-      webSearchSettings.webSearch = {
-        provider: providers,
-        default: providers[0]?.type,
-      };
-    }
-
-    await rig.setup('should be able to search the web', {
-      settings: webSearchSettings,
-    });
+    await rig.setup('should be able to search the web');
 
     let result;
     try {
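The rewritten test guards with an early `return` plus a `console.warn`, so the case still counts as passed. Vitest also offers `it.skipIf`, which reports it as skipped instead; a sketch of that alternative (not what the repo does):

```ts
import { it } from 'vitest';

// Reported as skipped (not silently passed) when no Tavily key is configured.
it.skipIf(!process.env['TAVILY_API_KEY'])(
  'should be able to search the web',
  async () => {
    // ...test body as above, runs only when TAVILY_API_KEY is set
  },
);
```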
package-lock.json (generated, 55 changed lines)
@@ -1,12 +1,12 @@
 {
   "name": "@qwen-code/qwen-code",
-  "version": "0.1.5",
+  "version": "0.1.1-preview.0",
   "lockfileVersion": 3,
   "requires": true,
   "packages": {
     "": {
       "name": "@qwen-code/qwen-code",
-      "version": "0.1.5",
+      "version": "0.1.1-preview.0",
       "workspaces": [
         "packages/*"
       ],

@@ -555,7 +555,6 @@
         }
       ],
       "license": "MIT",
-      "peer": true,
       "engines": {
         "node": ">=18"
       },

@@ -579,7 +578,6 @@
         }
       ],
       "license": "MIT",
-      "peer": true,
       "engines": {
         "node": ">=18"
       }

@@ -2120,7 +2118,6 @@
       "resolved": "https://registry.npmjs.org/@opentelemetry/api/-/api-1.9.0.tgz",
       "integrity": "sha512-3giAOQvZiH5F9bMlMiv8+GSPMeqg0dbaeo58/0SlA9sxSqZhnUtxzX9/2FzyhS9sWQf5S0GJE0AKBrFqjpeYcg==",
       "license": "Apache-2.0",
-      "peer": true,
       "engines": {
         "node": ">=8.0.0"
       }

@@ -3282,7 +3279,6 @@
       "resolved": "https://registry.npmjs.org/@testing-library/dom/-/dom-10.4.1.tgz",
       "integrity": "sha512-o4PXJQidqJl82ckFaXUeoAW+XysPLauYI43Abki5hABd853iMhitooc6znOnczgbTYmEP6U6/y1ZyKAIsvMKGg==",
       "license": "MIT",
-      "peer": true,
       "dependencies": {
         "@babel/code-frame": "^7.10.4",
         "@babel/runtime": "^7.12.5",

@@ -3721,7 +3717,6 @@
       "integrity": "sha512-AwAfQ2Wa5bCx9WP8nZL2uMZWod7J7/JSplxbTmBQ5ms6QpqNYm672H0Vu9ZVKVngQ+ii4R/byguVEUZQyeg44g==",
       "devOptional": true,
       "license": "MIT",
-      "peer": true,
       "dependencies": {
         "csstype": "^3.0.2"
       }

@@ -3732,7 +3727,6 @@
       "integrity": "sha512-4hOiT/dwO8Ko0gV1m/TJZYk3y0KBnY9vzDh7W+DH17b2HFSOGgdj33dhihPeuy3l0q23+4e+hoXHV6hCC4dCXw==",
       "dev": true,
       "license": "MIT",
-      "peer": true,
       "peerDependencies": {
         "@types/react": "^19.0.0"
       }

@@ -3938,7 +3932,6 @@
       "integrity": "sha512-6sMvZePQrnZH2/cJkwRpkT7DxoAWh+g6+GFRK6bV3YQo7ogi3SX5rgF6099r5Q53Ma5qeT7LGmOmuIutF4t3lA==",
       "dev": true,
       "license": "MIT",
-      "peer": true,
       "dependencies": {
         "@typescript-eslint/scope-manager": "8.35.0",
         "@typescript-eslint/types": "8.35.0",

@@ -4707,7 +4700,6 @@
       "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.15.0.tgz",
       "integrity": "sha512-NZyJarBfL7nWwIq+FDL6Zp/yHEhePMNnnJ0y3qfieCrmNvYct8uvtiV41UvlSe6apAfk0fY1FbWx+NwfmpvtTg==",
       "license": "MIT",
-      "peer": true,
       "bin": {
         "acorn": "bin/acorn"
       },

@@ -5062,7 +5054,8 @@
       "version": "1.1.1",
       "resolved": "https://registry.npmjs.org/array-flatten/-/array-flatten-1.1.1.tgz",
       "integrity": "sha512-PCVAQswWemu6UdxsDFFX/+gVeYqKAod3D3UVm91jHwynguOwAvYPhx8nNlM++NqRcK6CxxpUafjmhIdKiHibqg==",
-      "license": "MIT"
+      "license": "MIT",
+      "peer": true
     },
     "node_modules/array-includes": {
       "version": "3.1.9",

@@ -6227,6 +6220,7 @@
       "resolved": "https://registry.npmjs.org/content-disposition/-/content-disposition-0.5.4.tgz",
       "integrity": "sha512-FveZTNuGw04cxlAiWbzi6zTAL/lhehaWbTtgluJh4/E95DqMwTmha3KZN1aAWA8cFIhHzMZUvLevkw5Rqk+tSQ==",
       "license": "MIT",
+      "peer": true,
       "dependencies": {
         "safe-buffer": "5.2.1"
       },

@@ -7260,7 +7254,6 @@
       "integrity": "sha512-GsGizj2Y1rCWDu6XoEekL3RLilp0voSePurjZIkxL3wlm5o5EC9VpgaP7lrCvjnkuLvzFBQWB3vWB3K5KQTveQ==",
       "dev": true,
       "license": "MIT",
-      "peer": true,
       "dependencies": {
         "@eslint-community/eslint-utils": "^4.2.0",
         "@eslint-community/regexpp": "^4.12.1",

@@ -7730,6 +7723,7 @@
       "resolved": "https://registry.npmjs.org/express/-/express-4.21.2.tgz",
       "integrity": "sha512-28HqgMZAmih1Czt9ny7qr6ek2qddF4FclbMzwhCREB6OFfH+rXAnuNCwo1/wFvrtbgsQDb4kSbX9de9lFbrXnA==",
       "license": "MIT",
+      "peer": true,
       "dependencies": {
         "accepts": "~1.3.8",
         "array-flatten": "1.1.1",

@@ -7791,6 +7785,7 @@
       "resolved": "https://registry.npmjs.org/cookie/-/cookie-0.7.1.tgz",
       "integrity": "sha512-6DnInpx7SJ2AK3+CTUE/ZM0vWTUboZCegxhC2xiIydHR9jNuTAASBrfEpHhiGOZw/nX51bHt6YQl8jsGo4y/0w==",
       "license": "MIT",
+      "peer": true,
       "engines": {
         "node": ">= 0.6"
       }

@@ -7800,6 +7795,7 @@
       "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz",
       "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==",
       "license": "MIT",
+      "peer": true,
       "dependencies": {
         "ms": "2.0.0"
       }

@@ -7809,6 +7805,7 @@
       "resolved": "https://registry.npmjs.org/statuses/-/statuses-2.0.1.tgz",
       "integrity": "sha512-RwNA9Z/7PrK06rYLIzFMlaF+l73iwpzsqRIFgbMLbTcLD6cOao82TaWefPXQvB2fOC4AjuYSEndS7N/mTCbkdQ==",
       "license": "MIT",
+      "peer": true,
       "engines": {
         "node": ">= 0.8"
       }

@@ -7975,6 +7972,7 @@
       "resolved": "https://registry.npmjs.org/finalhandler/-/finalhandler-1.3.1.tgz",
       "integrity": "sha512-6BN9trH7bp3qvnrRyzsBz+g3lZxTNZTbVO2EV1CS0WIcDbawYVdYvGflME/9QP0h0pYlCDBCTjYa9nZzMDpyxQ==",
       "license": "MIT",
+      "peer": true,
       "dependencies": {
         "debug": "2.6.9",
         "encodeurl": "~2.0.0",

@@ -7993,6 +7991,7 @@
       "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz",
       "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==",
       "license": "MIT",
+      "peer": true,
       "dependencies": {
         "ms": "2.0.0"
       }

@@ -8001,13 +8000,15 @@
       "version": "2.0.0",
       "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz",
       "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==",
-      "license": "MIT"
+      "license": "MIT",
+      "peer": true
     },
     "node_modules/finalhandler/node_modules/statuses": {
       "version": "2.0.1",
       "resolved": "https://registry.npmjs.org/statuses/-/statuses-2.0.1.tgz",
       "integrity": "sha512-RwNA9Z/7PrK06rYLIzFMlaF+l73iwpzsqRIFgbMLbTcLD6cOao82TaWefPXQvB2fOC4AjuYSEndS7N/mTCbkdQ==",
       "license": "MIT",
+      "peer": true,
       "engines": {
         "node": ">= 0.8"
       }

@@ -9046,7 +9047,6 @@
       "resolved": "https://registry.npmjs.org/ink/-/ink-6.2.3.tgz",
       "integrity": "sha512-fQkfEJjKbLXIcVWEE3MvpYSnwtbbmRsmeNDNz1pIuOFlwE+UF2gsy228J36OXKZGWJWZJKUigphBSqCNMcARtg==",
       "license": "MIT",
-      "peer": true,
       "dependencies": {
         "@alcalzone/ansi-tokenize": "^0.2.0",
         "ansi-escapes": "^7.0.0",

@@ -10950,6 +10950,7 @@
       "resolved": "https://registry.npmjs.org/methods/-/methods-1.1.2.tgz",
       "integrity": "sha512-iclAHeNqNm68zFtnZ0e+1L2yUIdvzNoauKU4WBA3VvH/vPFieF7qfRlwUZU+DA9P9bPXIS90ulxoUoCH23sV2w==",
       "license": "MIT",
+      "peer": true,
       "engines": {
         "node": ">= 0.6"
       }

@@ -12157,7 +12158,8 @@
       "version": "0.1.12",
       "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.12.tgz",
       "integrity": "sha512-RA1GjUVMnvYFxuqovrEqZoxxW5NUZqbwKtYz/Tt7nXerk0LbLblQmrsgdeOxV5SFHf0UDggjS/bSeOZwt1pmEQ==",
-      "license": "MIT"
+      "license": "MIT",
+      "peer": true
     },
     "node_modules/path-type": {
       "version": "3.0.0",

@@ -12661,7 +12663,6 @@
       "resolved": "https://registry.npmjs.org/react/-/react-19.1.0.tgz",
       "integrity": "sha512-FS+XFBNvn3GTAWq26joslQgWNoFu08F4kl0J4CgdNKADkdSGXQyTCnKteIAJy96Br6YbpEU1LSzV5dYtjMkMDg==",
       "license": "MIT",
-      "peer": true,
       "engines": {
         "node": ">=0.10.0"
       }

@@ -12672,7 +12673,6 @@
       "integrity": "sha512-cq/o30z9W2Wb4rzBefjv5fBalHU0rJGZCHAkf/RHSBWSSYwh8PlQTqqOJmgIIbBtpj27T6FIPXeomIjZtCNVqA==",
       "devOptional": true,
       "license": "MIT",
-      "peer": true,
       "dependencies": {
         "shell-quote": "^1.6.1",
         "ws": "^7"

@@ -12706,7 +12706,6 @@
       "integrity": "sha512-Xs1hdnE+DyKgeHJeJznQmYMIBG3TKIHJJT95Q58nHLSrElKlGQqDTR2HQ9fx5CN/Gk6Vh/kupBTDLU11/nDk/g==",
       "dev": true,
       "license": "MIT",
-      "peer": true,
       "dependencies": {
         "scheduler": "^0.26.0"
       },

@@ -14516,7 +14515,6 @@
       "integrity": "sha512-M7BAV6Rlcy5u+m6oPhAPFgJTzAioX/6B0DxyvDlo9l8+T3nLKbrczg2WLUyzd45L8RqfUMyGPzekbMvX2Ldkwg==",
       "dev": true,
       "license": "MIT",
-      "peer": true,
       "engines": {
         "node": ">=12"
       },

@@ -14690,8 +14688,7 @@
       "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.8.1.tgz",
       "integrity": "sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w==",
       "dev": true,
-      "license": "0BSD",
-      "peer": true
+      "license": "0BSD"
     },
     "node_modules/tsx": {
       "version": "4.20.3",

@@ -14699,7 +14696,6 @@
       "integrity": "sha512-qjbnuR9Tr+FJOMBqJCW5ehvIo/buZq7vH7qD7JziU98h6l3qGy0a/yPFjwO+y0/T7GFpNgNAvEcPPVfyT8rrPQ==",
       "dev": true,
       "license": "MIT",
-      "peer": true,
       "dependencies": {
         "esbuild": "~0.25.0",
         "get-tsconfig": "^4.7.5"

@@ -14884,7 +14880,6 @@
       "integrity": "sha512-p1diW6TqL9L07nNxvRMM7hMMw4c5XOo/1ibL4aAIGmSAt9slTE1Xgw5KWuof2uTOvCg9BY7ZRi+GaF+7sfgPeQ==",
       "dev": true,
       "license": "Apache-2.0",
-      "peer": true,
       "bin": {
         "tsc": "bin/tsc",
         "tsserver": "bin/tsserver"

@@ -15154,6 +15149,7 @@
       "resolved": "https://registry.npmjs.org/utils-merge/-/utils-merge-1.0.1.tgz",
       "integrity": "sha512-pMZTvIkT1d+TFGvDOqodOclx0QWkkgi6Tdoa8gC8ffGAAqz9pzPTZWAybbsHHoED/ztMtkv/VoYTYyShUn81hA==",
       "license": "MIT",
+      "peer": true,
       "engines": {
         "node": ">= 0.4.0"
       }

@@ -15209,7 +15205,6 @@
       "integrity": "sha512-ixXJB1YRgDIw2OszKQS9WxGHKwLdCsbQNkpJN171udl6szi/rIySHL6/Os3s2+oE4P/FLD4dxg4mD7Wust+u5g==",
       "dev": true,
       "license": "MIT",
-      "peer": true,
       "dependencies": {
         "esbuild": "^0.25.0",
         "fdir": "^6.4.6",

@@ -15323,7 +15318,6 @@
       "integrity": "sha512-M7BAV6Rlcy5u+m6oPhAPFgJTzAioX/6B0DxyvDlo9l8+T3nLKbrczg2WLUyzd45L8RqfUMyGPzekbMvX2Ldkwg==",
       "dev": true,
       "license": "MIT",
-      "peer": true,
       "engines": {
         "node": ">=12"
       },

@@ -15337,7 +15331,6 @@
       "integrity": "sha512-LUCP5ev3GURDysTWiP47wRRUpLKMOfPh+yKTx3kVIEiu5KOMeqzpnYNsKyOoVrULivR8tLcks4+lga33Whn90A==",
       "dev": true,
       "license": "MIT",
-      "peer": true,
       "dependencies": {
         "@types/chai": "^5.2.2",
         "@vitest/expect": "3.2.4",

@@ -16016,7 +16009,6 @@
       "resolved": "https://registry.npmjs.org/zod/-/zod-3.25.76.tgz",
       "integrity": "sha512-gzUt/qt81nXsFGKIFcC3YnfEAx5NkunCfnDlvuBSSFS02bcXu4Lmea0AFIUwbLWxWPx3d9p8S5QoaujKcNQxcQ==",
       "license": "MIT",
-      "peer": true,
       "funding": {
         "url": "https://github.com/sponsors/colinhacks"
       }

@@ -16032,7 +16024,7 @@
     },
     "packages/cli": {
       "name": "@qwen-code/qwen-code",
-      "version": "0.1.5",
+      "version": "0.1.1-preview.0",
       "dependencies": {
         "@google/genai": "1.16.0",
         "@iarna/toml": "^2.2.5",

@@ -16147,7 +16139,7 @@
     },
     "packages/core": {
       "name": "@qwen-code/qwen-code-core",
-      "version": "0.1.5",
+      "version": "0.1.1-preview.0",
       "hasInstallScript": true,
       "dependencies": {
         "@google/genai": "1.16.0",

@@ -16277,7 +16269,6 @@
       "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.3.tgz",
       "integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==",
       "license": "MIT",
-      "peer": true,
       "engines": {
         "node": ">=12"
       },

@@ -16287,7 +16278,7 @@
     },
     "packages/test-utils": {
       "name": "@qwen-code/qwen-code-test-utils",
-      "version": "0.1.5",
+      "version": "0.1.1-preview.0",
       "dev": true,
       "license": "Apache-2.0",
       "devDependencies": {

@@ -16299,7 +16290,7 @@
     },
     "packages/vscode-ide-companion": {
       "name": "qwen-code-vscode-ide-companion",
-      "version": "0.1.5",
+      "version": "0.1.1-preview.0",
       "license": "LICENSE",
       "dependencies": {
         "@modelcontextprotocol/sdk": "^1.15.1",
@@ -1,6 +1,6 @@
 {
   "name": "@qwen-code/qwen-code",
-  "version": "0.1.5",
+  "version": "0.1.1-preview.0",
   "engines": {
     "node": ">=20.0.0"
   },

@@ -13,7 +13,7 @@
     "url": "git+https://github.com/QwenLM/qwen-code.git"
   },
   "config": {
-    "sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.1.5"
+    "sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.1.1-preview.0"
   },
   "scripts": {
     "start": "cross-env node scripts/start.js",

@@ -28,7 +28,7 @@
     "build:all": "npm run build && npm run build:sandbox && npm run build:vscode",
     "build:packages": "npm run build --workspaces",
     "build:sandbox": "node scripts/build_sandbox.js",
-    "bundle": "npm run generate && node esbuild.config.js && node scripts/copy_bundle_assets.js",
+    "bundle": "rm -rf dist && npm run generate && node esbuild.config.js && node scripts/copy_bundle_assets.js",
     "test": "npm run test --workspaces --if-present --parallel",
     "test:ci": "npm run test:ci --workspaces --if-present --parallel && npm run test:scripts",
     "test:scripts": "vitest run --config ./scripts/tests/vitest.config.ts",
|
||||
{
|
||||
"name": "@qwen-code/qwen-code",
|
||||
"version": "0.1.5",
|
||||
"version": "0.1.1-preview.0",
|
||||
"description": "Qwen Code",
|
||||
"repository": {
|
||||
"type": "git",
|
||||
@@ -25,7 +25,7 @@
|
||||
"dist"
|
||||
],
|
||||
"config": {
|
||||
"sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.1.5"
|
||||
"sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.1.1-preview.0"
|
||||
},
|
||||
"dependencies": {
|
||||
"@google/genai": "1.16.0",
|
||||
|
||||
@@ -18,26 +18,60 @@ vi.mock('./settings.js', () => ({
 describe('validateAuthMethod', () => {
   beforeEach(() => {
     vi.resetModules();
+    vi.stubEnv('GEMINI_API_KEY', undefined);
+    vi.stubEnv('GOOGLE_CLOUD_PROJECT', undefined);
+    vi.stubEnv('GOOGLE_CLOUD_LOCATION', undefined);
+    vi.stubEnv('GOOGLE_API_KEY', undefined);
   });
 
   afterEach(() => {
     vi.unstubAllEnvs();
   });
 
-  it('should return null for USE_OPENAI', () => {
-    process.env['OPENAI_API_KEY'] = 'fake-key';
-    expect(validateAuthMethod(AuthType.USE_OPENAI)).toBeNull();
+  it('should return null for LOGIN_WITH_GOOGLE', () => {
+    expect(validateAuthMethod(AuthType.LOGIN_WITH_GOOGLE)).toBeNull();
   });
 
-  it('should return an error message for USE_OPENAI if OPENAI_API_KEY is not set', () => {
-    delete process.env['OPENAI_API_KEY'];
-    expect(validateAuthMethod(AuthType.USE_OPENAI)).toBe(
-      'OPENAI_API_KEY environment variable not found. You can enter it interactively or add it to your .env file.',
-    );
+  it('should return null for CLOUD_SHELL', () => {
+    expect(validateAuthMethod(AuthType.CLOUD_SHELL)).toBeNull();
   });
 
-  it('should return null for QWEN_OAUTH', () => {
-    expect(validateAuthMethod(AuthType.QWEN_OAUTH)).toBeNull();
+  describe('USE_GEMINI', () => {
+    it('should return null if GEMINI_API_KEY is set', () => {
+      vi.stubEnv('GEMINI_API_KEY', 'test-key');
+      expect(validateAuthMethod(AuthType.USE_GEMINI)).toBeNull();
+    });
+
+    it('should return an error message if GEMINI_API_KEY is not set', () => {
+      vi.stubEnv('GEMINI_API_KEY', undefined);
+      expect(validateAuthMethod(AuthType.USE_GEMINI)).toBe(
+        'GEMINI_API_KEY environment variable not found. Add that to your environment and try again (no reload needed if using .env)!',
+      );
+    });
   });
 
+  describe('USE_VERTEX_AI', () => {
+    it('should return null if GOOGLE_CLOUD_PROJECT and GOOGLE_CLOUD_LOCATION are set', () => {
+      vi.stubEnv('GOOGLE_CLOUD_PROJECT', 'test-project');
+      vi.stubEnv('GOOGLE_CLOUD_LOCATION', 'test-location');
+      expect(validateAuthMethod(AuthType.USE_VERTEX_AI)).toBeNull();
+    });
+
+    it('should return null if GOOGLE_API_KEY is set', () => {
+      vi.stubEnv('GOOGLE_API_KEY', 'test-api-key');
+      expect(validateAuthMethod(AuthType.USE_VERTEX_AI)).toBeNull();
+    });
+
+    it('should return an error message if no required environment variables are set', () => {
+      vi.stubEnv('GOOGLE_CLOUD_PROJECT', undefined);
+      vi.stubEnv('GOOGLE_CLOUD_LOCATION', undefined);
+      expect(validateAuthMethod(AuthType.USE_VERTEX_AI)).toBe(
+        'When using Vertex AI, you must specify either:\n' +
+          '• GOOGLE_CLOUD_PROJECT and GOOGLE_CLOUD_LOCATION environment variables.\n' +
+          '• GOOGLE_API_KEY environment variable (if using express mode).\n' +
+          'Update your environment and try again (no reload needed if using .env)!',
+      );
+    });
+  });
 
   it('should return an error message for an invalid auth method', () => {
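The rewritten suite swaps direct `process.env` mutation for Vitest's env stubbing, which restores prior values automatically. The core pattern, isolated:

```ts
import { afterEach, describe, expect, it, vi } from 'vitest';

describe('env-dependent behavior', () => {
  afterEach(() => {
    vi.unstubAllEnvs(); // restores every variable touched by vi.stubEnv
  });

  it('sees the stubbed value', () => {
    vi.stubEnv('GEMINI_API_KEY', 'test-key');
    expect(process.env['GEMINI_API_KEY']).toBe('test-key');
  });
});
```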
@@ -8,13 +8,39 @@ import { AuthType } from '@qwen-code/qwen-code-core';
 import { loadEnvironment, loadSettings } from './settings.js';
 
 export function validateAuthMethod(authMethod: string): string | null {
-  const settings = loadSettings();
-  loadEnvironment(settings.merged);
+  loadEnvironment(loadSettings().merged);
   if (
     authMethod === AuthType.LOGIN_WITH_GOOGLE ||
     authMethod === AuthType.CLOUD_SHELL
   ) {
     return null;
   }
 
+  if (authMethod === AuthType.USE_GEMINI) {
+    if (!process.env['GEMINI_API_KEY']) {
+      return 'GEMINI_API_KEY environment variable not found. Add that to your environment and try again (no reload needed if using .env)!';
+    }
+    return null;
+  }
+
+  if (authMethod === AuthType.USE_VERTEX_AI) {
+    const hasVertexProjectLocationConfig =
+      !!process.env['GOOGLE_CLOUD_PROJECT'] &&
+      !!process.env['GOOGLE_CLOUD_LOCATION'];
+    const hasGoogleApiKey = !!process.env['GOOGLE_API_KEY'];
+    if (!hasVertexProjectLocationConfig && !hasGoogleApiKey) {
+      return (
+        'When using Vertex AI, you must specify either:\n' +
+        '• GOOGLE_CLOUD_PROJECT and GOOGLE_CLOUD_LOCATION environment variables.\n' +
+        '• GOOGLE_API_KEY environment variable (if using express mode).\n' +
+        'Update your environment and try again (no reload needed if using .env)!'
+      );
+    }
+    return null;
+  }
+
   if (authMethod === AuthType.USE_OPENAI) {
-    const hasApiKey =
-      process.env['OPENAI_API_KEY'] || settings.merged.security?.auth?.apiKey;
-    if (!hasApiKey) {
+    if (!process.env['OPENAI_API_KEY']) {
       return 'OPENAI_API_KEY environment variable not found. You can enter it interactively or add it to your .env file.';
     }
     return null;

@@ -28,3 +54,15 @@ export function validateAuthMethod(authMethod: string): string | null {
 
   return 'Invalid auth method selected.';
 }
+
+export const setOpenAIApiKey = (apiKey: string): void => {
+  process.env['OPENAI_API_KEY'] = apiKey;
+};
+
+export const setOpenAIBaseUrl = (baseUrl: string): void => {
+  process.env['OPENAI_BASE_URL'] = baseUrl;
+};
+
+export const setOpenAIModel = (model: string): void => {
+  process.env['OPENAI_MODEL'] = model;
+};
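A hypothetical call site for the rewritten helper; the import path is illustrative and the prompt step stands in for the CLI's real interactive flow:

```ts
import { AuthType } from '@qwen-code/qwen-code-core';
import { setOpenAIApiKey, validateAuthMethod } from './config/auth.js'; // path assumed

// Returns an error string to show the user, or null when the method is usable.
const error = validateAuthMethod(AuthType.USE_OPENAI);
if (error) {
  console.error(error);
  // e.g. after prompting the user, stash the key for this session:
  setOpenAIApiKey('sk-placeholder');
}
```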
@@ -2399,73 +2399,6 @@ describe('loadCliConfig useRipgrep', () => {
   });
 });
 
-describe('loadCliConfig useBuiltinRipgrep', () => {
-  const originalArgv = process.argv;
-
-  beforeEach(() => {
-    vi.resetAllMocks();
-    vi.mocked(os.homedir).mockReturnValue('/mock/home/user');
-    vi.stubEnv('GEMINI_API_KEY', 'test-api-key');
-  });
-
-  afterEach(() => {
-    process.argv = originalArgv;
-    vi.unstubAllEnvs();
-    vi.restoreAllMocks();
-  });
-
-  it('should be true by default when useBuiltinRipgrep is not set in settings', async () => {
-    process.argv = ['node', 'script.js'];
-    const argv = await parseArguments({} as Settings);
-    const settings: Settings = {};
-    const config = await loadCliConfig(
-      settings,
-      [],
-      new ExtensionEnablementManager(
-        ExtensionStorage.getUserExtensionsDir(),
-        argv.extensions,
-      ),
-      'test-session',
-      argv,
-    );
-    expect(config.getUseBuiltinRipgrep()).toBe(true);
-  });
-
-  it('should be false when useBuiltinRipgrep is set to false in settings', async () => {
-    process.argv = ['node', 'script.js'];
-    const argv = await parseArguments({} as Settings);
-    const settings: Settings = { tools: { useBuiltinRipgrep: false } };
-    const config = await loadCliConfig(
-      settings,
-      [],
-      new ExtensionEnablementManager(
-        ExtensionStorage.getUserExtensionsDir(),
-        argv.extensions,
-      ),
-      'test-session',
-      argv,
-    );
-    expect(config.getUseBuiltinRipgrep()).toBe(false);
-  });
-
-  it('should be true when useBuiltinRipgrep is explicitly set to true in settings', async () => {
-    process.argv = ['node', 'script.js'];
-    const argv = await parseArguments({} as Settings);
-    const settings: Settings = { tools: { useBuiltinRipgrep: true } };
-    const config = await loadCliConfig(
-      settings,
-      [],
-      new ExtensionEnablementManager(
-        ExtensionStorage.getUserExtensionsDir(),
-        argv.extensions,
-      ),
-      'test-session',
-      argv,
-    );
-    expect(config.getUseBuiltinRipgrep()).toBe(true);
-  });
-});
-
 describe('screenReader configuration', () => {
   const originalArgv = process.argv;
 
@@ -13,6 +13,7 @@ import { extensionsCommand } from '../commands/extensions.js';
 import {
   ApprovalMode,
   Config,
+  DEFAULT_QWEN_MODEL,
   DEFAULT_QWEN_EMBEDDING_MODEL,
   DEFAULT_MEMORY_FILE_FILTERING_OPTIONS,
   EditTool,

@@ -42,7 +43,6 @@ import { mcpCommand } from '../commands/mcp.js';
 
 import { isWorkspaceTrusted } from './trustedFolders.js';
 import type { ExtensionEnablementManager } from './extensions/extensionEnablement.js';
-import { buildWebSearchConfig } from './webSearch.js';
 
 // Simple console logger for now - replace with actual logger if available
 const logger = {

@@ -114,13 +114,9 @@ export interface CliArgs {
   openaiLogging: boolean | undefined;
   openaiApiKey: string | undefined;
   openaiBaseUrl: string | undefined;
-  openaiLoggingDir: string | undefined;
   proxy: string | undefined;
   includeDirectories: string[] | undefined;
   tavilyApiKey: string | undefined;
-  googleApiKey: string | undefined;
-  googleSearchEngineId: string | undefined;
-  webSearchDefault: string | undefined;
   screenReader: boolean | undefined;
   vlmSwitchMode: string | undefined;
   useSmartEdit: boolean | undefined;

@@ -198,13 +194,14 @@ export async function parseArguments(settings: Settings): Promise<CliArgs> {
     })
     .option('proxy', {
       type: 'string',
-      description: 'Proxy for Qwen Code, like schema://user:password@host:port',
+      description:
+        'Proxy for gemini client, like schema://user:password@host:port',
     })
     .deprecateOption(
       'proxy',
       'Use the "proxy" setting in settings.json instead. This flag will be removed in a future version.',
     )
-    .command('$0 [query..]', 'Launch Qwen Code CLI', (yargsInstance: Argv) =>
+    .command('$0 [query..]', 'Launch Gemini CLI', (yargsInstance: Argv) =>
       yargsInstance
         .positional('query', {
           description:

@@ -318,11 +315,6 @@ export async function parseArguments(settings: Settings): Promise<CliArgs> {
       description:
         'Enable logging of OpenAI API calls for debugging and analysis',
     })
-    .option('openai-logging-dir', {
-      type: 'string',
-      description:
-        'Custom directory path for OpenAI API logs. Overrides settings files.',
-    })
     .option('openai-api-key', {
       type: 'string',
       description: 'OpenAI API key to use for authentication',

@@ -333,20 +325,7 @@
     })
     .option('tavily-api-key', {
       type: 'string',
-      description: 'Tavily API key for web search',
-    })
-    .option('google-api-key', {
-      type: 'string',
-      description: 'Google Custom Search API key',
-    })
-    .option('google-search-engine-id', {
-      type: 'string',
-      description: 'Google Custom Search Engine ID',
-    })
-    .option('web-search-default', {
-      type: 'string',
-      description:
-        'Default web search provider (dashscope, tavily, google)',
+      description: 'Tavily API key for web search functionality',
     })
     .option('screen-reader', {
       type: 'boolean',

@@ -690,11 +669,13 @@ export async function loadCliConfig(
     );
   }
 
-  const resolvedModel =
+  const defaultModel = DEFAULT_QWEN_MODEL;
+  const resolvedModel: string =
     argv.model ||
     process.env['OPENAI_MODEL'] ||
     process.env['QWEN_MODEL'] ||
-    settings.model?.name;
+    settings.model?.name ||
+    defaultModel;
 
   const sandboxConfig = await loadSandboxConfig(settings, argv);
   const screenReader =

@@ -758,27 +739,18 @@
     generationConfig: {
       ...(settings.model?.generationConfig || {}),
       model: resolvedModel,
-      apiKey:
-        argv.openaiApiKey ||
-        process.env['OPENAI_API_KEY'] ||
-        settings.security?.auth?.apiKey,
-      baseUrl:
-        argv.openaiBaseUrl ||
-        process.env['OPENAI_BASE_URL'] ||
-        settings.security?.auth?.baseUrl,
+      apiKey: argv.openaiApiKey || process.env['OPENAI_API_KEY'],
+      baseUrl: argv.openaiBaseUrl || process.env['OPENAI_BASE_URL'],
       enableOpenAILogging:
         (typeof argv.openaiLogging === 'undefined'
           ? settings.model?.enableOpenAILogging
          : argv.openaiLogging) ?? false,
-      openAILoggingDir:
-        argv.openaiLoggingDir || settings.model?.openAILoggingDir,
     },
     cliVersion: await getCliVersion(),
-    webSearch: buildWebSearchConfig(
-      argv,
-      settings,
-      settings.security?.auth?.selectedType,
-    ),
+    tavilyApiKey:
+      argv.tavilyApiKey ||
+      settings.advanced?.tavilyApiKey ||
+      process.env['TAVILY_API_KEY'],
     summarizeToolOutput: settings.model?.summarizeToolOutput,
     ideMode,
     chatCompression: settings.model?.chatCompression,

@@ -786,7 +758,6 @@
     interactive,
     trustedFolder,
     useRipgrep: settings.tools?.useRipgrep,
-    useBuiltinRipgrep: settings.tools?.useBuiltinRipgrep,
    shouldUseNodePtyShell: settings.tools?.shell?.enableInteractiveShell,
    skipNextSpeakerCheck: settings.model?.skipNextSpeakerCheck,
    enablePromptCompletion: settings.general?.enablePromptCompletion ?? false,
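The `resolvedModel` hunk above encodes a strict precedence chain that now ends in a hard default. In isolation (the helper name is hypothetical, and the literal default value is taken from the settings example earlier in this compare, so treat it as an assumption):

```ts
function resolveModel(
  argvModel: string | undefined,
  settingsModelName: string | undefined,
): string {
  return (
    argvModel ||                    // 1. --model flag
    process.env['OPENAI_MODEL'] ||  // 2. OPENAI_MODEL env var
    process.env['QWEN_MODEL'] ||    // 3. QWEN_MODEL env var
    settingsModelName ||            // 4. settings.model.name
    'qwen3-coder-plus'              // 5. DEFAULT_QWEN_MODEL (value assumed)
  );
}
```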
@@ -66,8 +66,6 @@ import {
   loadEnvironment,
   migrateDeprecatedSettings,
   SettingScope,
-  SETTINGS_VERSION,
-  SETTINGS_VERSION_KEY,
 } from './settings.js';
 import { FatalConfigError, QWEN_DIR } from '@qwen-code/qwen-code-core';
 

@@ -96,7 +94,6 @@ vi.mock('fs', async (importOriginal) => {
     existsSync: vi.fn(),
     readFileSync: vi.fn(),
     writeFileSync: vi.fn(),
-    renameSync: vi.fn(),
     mkdirSync: vi.fn(),
     realpathSync: (p: string) => p,
   };

@@ -174,15 +171,11 @@ describe('Settings Loading and Merging', () => {
       getSystemSettingsPath(),
       'utf-8',
     );
-    expect(settings.system.settings).toEqual({
-      ...systemSettingsContent,
-      [SETTINGS_VERSION_KEY]: SETTINGS_VERSION,
-    });
+    expect(settings.system.settings).toEqual(systemSettingsContent);
     expect(settings.user.settings).toEqual({});
     expect(settings.workspace.settings).toEqual({});
     expect(settings.merged).toEqual({
       ...systemSettingsContent,
-      [SETTINGS_VERSION_KEY]: SETTINGS_VERSION,
     });
   });

@@ -214,14 +207,10 @@ describe('Settings Loading and Merging', () => {
       expectedUserSettingsPath,
       'utf-8',
     );
-    expect(settings.user.settings).toEqual({
-      ...userSettingsContent,
-      [SETTINGS_VERSION_KEY]: SETTINGS_VERSION,
-    });
+    expect(settings.user.settings).toEqual(userSettingsContent);
     expect(settings.workspace.settings).toEqual({});
     expect(settings.merged).toEqual({
       ...userSettingsContent,
-      [SETTINGS_VERSION_KEY]: SETTINGS_VERSION,
     });
   });

@@ -252,13 +241,9 @@ describe('Settings Loading and Merging', () => {
       'utf-8',
     );
     expect(settings.user.settings).toEqual({});
-    expect(settings.workspace.settings).toEqual({
-      ...workspaceSettingsContent,
-      [SETTINGS_VERSION_KEY]: SETTINGS_VERSION,
-    });
+    expect(settings.workspace.settings).toEqual(workspaceSettingsContent);
     expect(settings.merged).toEqual({
       ...workspaceSettingsContent,
-      [SETTINGS_VERSION_KEY]: SETTINGS_VERSION,
     });
   });

@@ -319,20 +304,10 @@ describe('Settings Loading and Merging', () => {
 
     const settings = loadSettings(MOCK_WORKSPACE_DIR);
 
-    expect(settings.system.settings).toEqual({
-      ...systemSettingsContent,
-      [SETTINGS_VERSION_KEY]: SETTINGS_VERSION,
-    });
-    expect(settings.user.settings).toEqual({
-      ...userSettingsContent,
-      [SETTINGS_VERSION_KEY]: SETTINGS_VERSION,
-    });
-    expect(settings.workspace.settings).toEqual({
-      ...workspaceSettingsContent,
-      [SETTINGS_VERSION_KEY]: SETTINGS_VERSION,
-    });
+    expect(settings.system.settings).toEqual(systemSettingsContent);
+    expect(settings.user.settings).toEqual(userSettingsContent);
+    expect(settings.workspace.settings).toEqual(workspaceSettingsContent);
     expect(settings.merged).toEqual({
-      [SETTINGS_VERSION_KEY]: SETTINGS_VERSION,
       ui: {
         theme: 'system-theme',
       },

@@ -386,7 +361,6 @@ describe('Settings Loading and Merging', () => {
     const settings = loadSettings(MOCK_WORKSPACE_DIR);
 
     expect(settings.merged).toEqual({
-      [SETTINGS_VERSION_KEY]: SETTINGS_VERSION,
       ui: {
         theme: 'legacy-dark',
       },

@@ -439,132 +413,6 @@ describe('Settings Loading and Merging', () => {
     expect((settings.merged as TestSettings)['allowedTools']).toBeUndefined();
   });
 
-  it('should add version field to migrated settings file', () => {
-    (mockFsExistsSync as Mock).mockImplementation(
-      (p: fs.PathLike) => p === USER_SETTINGS_PATH,
-    );
-    const legacySettingsContent = {
-      theme: 'dark',
-      model: 'qwen-coder',
-    };
-    (fs.readFileSync as Mock).mockImplementation(
-      (p: fs.PathOrFileDescriptor) => {
-        if (p === USER_SETTINGS_PATH)
-          return JSON.stringify(legacySettingsContent);
-        return '{}';
-      },
-    );
-
-    loadSettings(MOCK_WORKSPACE_DIR);
-
-    // Verify that fs.writeFileSync was called with migrated settings including version
-    expect(fs.writeFileSync).toHaveBeenCalled();
-    const writeCall = (fs.writeFileSync as Mock).mock.calls[0];
-    const writtenContent = JSON.parse(writeCall[1] as string);
-    expect(writtenContent[SETTINGS_VERSION_KEY]).toBe(SETTINGS_VERSION);
-  });
-
-  it('should not re-migrate settings that have version field', () => {
-    (mockFsExistsSync as Mock).mockImplementation(
-      (p: fs.PathLike) => p === USER_SETTINGS_PATH,
-    );
-    const migratedSettingsContent = {
-      [SETTINGS_VERSION_KEY]: SETTINGS_VERSION,
-      ui: {
-        theme: 'dark',
-      },
-      model: {
-        name: 'qwen-coder',
-      },
-    };
-    (fs.readFileSync as Mock).mockImplementation(
-      (p: fs.PathOrFileDescriptor) => {
-        if (p === USER_SETTINGS_PATH)
-          return JSON.stringify(migratedSettingsContent);
-        return '{}';
-      },
-    );
-
-    loadSettings(MOCK_WORKSPACE_DIR);
-
-    // Verify that fs.renameSync and fs.writeFileSync were NOT called
-    // (because no migration was needed)
-    expect(fs.renameSync).not.toHaveBeenCalled();
-    expect(fs.writeFileSync).not.toHaveBeenCalled();
-  });
-
-  it('should add version field to V2 settings without version and write to disk', () => {
-    (mockFsExistsSync as Mock).mockImplementation(
-      (p: fs.PathLike) => p === USER_SETTINGS_PATH,
-    );
-    // V2 format but no version field
-    const v2SettingsWithoutVersion = {
-      ui: {
-        theme: 'dark',
-      },
-      model: {
-        name: 'qwen-coder',
-      },
-    };
-    (fs.readFileSync as Mock).mockImplementation(
-      (p: fs.PathOrFileDescriptor) => {
-        if (p === USER_SETTINGS_PATH)
-          return JSON.stringify(v2SettingsWithoutVersion);
-        return '{}';
-      },
-    );
-
-    loadSettings(MOCK_WORKSPACE_DIR);
-
-    // Verify that fs.writeFileSync was called (to add version)
-    // but NOT fs.renameSync (no backup needed, just adding version)
-    expect(fs.renameSync).not.toHaveBeenCalled();
-    expect(fs.writeFileSync).toHaveBeenCalledTimes(1);
-
-    const writeCall = (fs.writeFileSync as Mock).mock.calls[0];
-    const writtenPath = writeCall[0];
-    const writtenContent = JSON.parse(writeCall[1] as string);
-
-    expect(writtenPath).toBe(USER_SETTINGS_PATH);
-    expect(writtenContent[SETTINGS_VERSION_KEY]).toBe(SETTINGS_VERSION);
-    expect(writtenContent.ui?.theme).toBe('dark');
-    expect(writtenContent.model?.name).toBe('qwen-coder');
-  });
-
-  it('should correctly handle partially migrated settings without version field', () => {
-    (mockFsExistsSync as Mock).mockImplementation(
-      (p: fs.PathLike) => p === USER_SETTINGS_PATH,
-    );
-    // Edge case: model already in V2 format (object), but autoAccept in V1 format
-    const partiallyMigratedContent = {
-      model: {
-        name: 'qwen-coder',
-      },
-      autoAccept: false, // V1 key
-    };
-    (fs.readFileSync as Mock).mockImplementation(
-      (p: fs.PathOrFileDescriptor) => {
-        if (p === USER_SETTINGS_PATH)
-          return JSON.stringify(partiallyMigratedContent);
-        return '{}';
-      },
-    );
-
-    loadSettings(MOCK_WORKSPACE_DIR);
-
-    // Verify that the migrated settings preserve the model object correctly
-    expect(fs.writeFileSync).toHaveBeenCalled();
-    const writeCall = (fs.writeFileSync as Mock).mock.calls[0];
-    const writtenContent = JSON.parse(writeCall[1] as string);
-
-    // Model should remain as an object, not double-nested
-    expect(writtenContent.model).toEqual({ name: 'qwen-coder' });
-    // autoAccept should be migrated to tools.autoAccept
-    expect(writtenContent.tools?.autoAccept).toBe(false);
-    // Version field should be added
-    expect(writtenContent[SETTINGS_VERSION_KEY]).toBe(SETTINGS_VERSION);
-  });
-
   it('should correctly merge and migrate legacy array properties from multiple scopes', () => {
     (mockFsExistsSync as Mock).mockReturnValue(true);
     const legacyUserSettings = {

@@ -667,24 +515,11 @@ describe('Settings Loading and Merging', () => {
 
     const settings = loadSettings(MOCK_WORKSPACE_DIR);
 
-    expect(settings.systemDefaults.settings).toEqual({
-      ...systemDefaultsContent,
-      [SETTINGS_VERSION_KEY]: SETTINGS_VERSION,
-    });
-    expect(settings.system.settings).toEqual({
-      ...systemSettingsContent,
-      [SETTINGS_VERSION_KEY]: SETTINGS_VERSION,
-    });
-    expect(settings.user.settings).toEqual({
-      ...userSettingsContent,
-      [SETTINGS_VERSION_KEY]: SETTINGS_VERSION,
-    });
-    expect(settings.workspace.settings).toEqual({
-      ...workspaceSettingsContent,
-      [SETTINGS_VERSION_KEY]: SETTINGS_VERSION,
|
||||
});
|
||||
expect(settings.systemDefaults.settings).toEqual(systemDefaultsContent);
|
||||
expect(settings.system.settings).toEqual(systemSettingsContent);
|
||||
expect(settings.user.settings).toEqual(userSettingsContent);
|
||||
expect(settings.workspace.settings).toEqual(workspaceSettingsContent);
|
||||
expect(settings.merged).toEqual({
|
||||
[SETTINGS_VERSION_KEY]: SETTINGS_VERSION,
|
||||
context: {
|
||||
fileName: 'WORKSPACE_CONTEXT.md',
|
||||
includeDirectories: [
|
||||
@@ -1031,14 +866,8 @@ describe('Settings Loading and Merging', () => {
|
||||
|
||||
const settings = loadSettings(MOCK_WORKSPACE_DIR);
|
||||
|
||||
expect(settings.user.settings).toEqual({
|
||||
...userSettingsContent,
|
||||
[SETTINGS_VERSION_KEY]: SETTINGS_VERSION,
|
||||
});
|
||||
expect(settings.workspace.settings).toEqual({
|
||||
...workspaceSettingsContent,
|
||||
[SETTINGS_VERSION_KEY]: SETTINGS_VERSION,
|
||||
});
|
||||
expect(settings.user.settings).toEqual(userSettingsContent);
|
||||
expect(settings.workspace.settings).toEqual(workspaceSettingsContent);
|
||||
expect(settings.merged.mcpServers).toEqual({
|
||||
'user-server': {
|
||||
command: 'user-command',
|
||||
@@ -1867,13 +1696,9 @@ describe('Settings Loading and Merging', () => {
|
||||
'utf-8',
|
||||
);
|
||||
expect(settings.system.path).toBe(MOCK_ENV_SYSTEM_SETTINGS_PATH);
|
||||
expect(settings.system.settings).toEqual({
|
||||
...systemSettingsContent,
|
||||
[SETTINGS_VERSION_KEY]: SETTINGS_VERSION,
|
||||
});
|
||||
expect(settings.system.settings).toEqual(systemSettingsContent);
|
||||
expect(settings.merged).toEqual({
|
||||
...systemSettingsContent,
|
||||
[SETTINGS_VERSION_KEY]: SETTINGS_VERSION,
|
||||
});
|
||||
});
|
||||
});
|
||||
@@ -2423,44 +2248,6 @@ describe('Settings Loading and Merging', () => {
|
||||
customWittyPhrases: ['test phrase'],
|
||||
});
|
||||
});
|
||||
|
||||
it('should remove version field when migrating to V1', () => {
|
||||
const v2Settings = {
|
||||
[SETTINGS_VERSION_KEY]: SETTINGS_VERSION,
|
||||
ui: {
|
||||
theme: 'dark',
|
||||
},
|
||||
model: {
|
||||
name: 'qwen-coder',
|
||||
},
|
||||
};
|
||||
const v1Settings = migrateSettingsToV1(v2Settings);
|
||||
|
||||
// Version field should not be present in V1 settings
|
||||
expect(v1Settings[SETTINGS_VERSION_KEY]).toBeUndefined();
|
||||
// Other fields should be properly migrated
|
||||
expect(v1Settings).toEqual({
|
||||
theme: 'dark',
|
||||
model: 'qwen-coder',
|
||||
});
|
||||
});
|
||||
|
||||
it('should handle version field in unrecognized properties', () => {
|
||||
const v2Settings = {
|
||||
[SETTINGS_VERSION_KEY]: SETTINGS_VERSION,
|
||||
general: {
|
||||
vimMode: true,
|
||||
},
|
||||
someUnrecognizedKey: 'value',
|
||||
};
|
||||
const v1Settings = migrateSettingsToV1(v2Settings);
|
||||
|
||||
// Version field should be filtered out
|
||||
expect(v1Settings[SETTINGS_VERSION_KEY]).toBeUndefined();
|
||||
// Unrecognized keys should be preserved
|
||||
expect(v1Settings['someUnrecognizedKey']).toBe('value');
|
||||
expect(v1Settings['vimMode']).toBe(true);
|
||||
});
|
||||
});
|
||||
|
||||
describe('loadEnvironment', () => {
|
||||
@@ -2581,73 +2368,6 @@ describe('Settings Loading and Merging', () => {
|
||||
};
|
||||
expect(needsMigration(settings)).toBe(false);
|
||||
});
|
||||
|
||||
describe('with version field', () => {
|
||||
it('should return false when version field indicates current or newer version', () => {
|
||||
const settingsWithVersion = {
|
||||
[SETTINGS_VERSION_KEY]: SETTINGS_VERSION,
|
||||
theme: 'dark', // Even though this is a V1 key, version field takes precedence
|
||||
};
|
||||
expect(needsMigration(settingsWithVersion)).toBe(false);
|
||||
});
|
||||
|
||||
it('should return false when version field indicates a newer version', () => {
|
||||
const settingsWithNewerVersion = {
|
||||
[SETTINGS_VERSION_KEY]: SETTINGS_VERSION + 1,
|
||||
theme: 'dark',
|
||||
};
|
||||
expect(needsMigration(settingsWithNewerVersion)).toBe(false);
|
||||
});
|
||||
|
||||
it('should return true when version field indicates an older version', () => {
|
||||
const settingsWithOldVersion = {
|
||||
[SETTINGS_VERSION_KEY]: SETTINGS_VERSION - 1,
|
||||
theme: 'dark',
|
||||
};
|
||||
expect(needsMigration(settingsWithOldVersion)).toBe(true);
|
||||
});
|
||||
|
||||
it('should use fallback logic when version field is not a number', () => {
|
||||
const settingsWithInvalidVersion = {
|
||||
[SETTINGS_VERSION_KEY]: 'not-a-number',
|
||||
theme: 'dark',
|
||||
};
|
||||
expect(needsMigration(settingsWithInvalidVersion)).toBe(true);
|
||||
});
|
||||
|
||||
it('should use fallback logic when version field is missing', () => {
|
||||
const settingsWithoutVersion = {
|
||||
theme: 'dark',
|
||||
};
|
||||
expect(needsMigration(settingsWithoutVersion)).toBe(true);
|
||||
});
|
||||
});
|
||||
|
||||
describe('edge case: partially migrated settings', () => {
|
||||
it('should return true for partially migrated settings without version field', () => {
|
||||
// This simulates the dangerous edge case: model already in V2 format,
|
||||
// but other fields in V1 format
|
||||
const partiallyMigrated = {
|
||||
model: {
|
||||
name: 'qwen-coder',
|
||||
},
|
||||
autoAccept: false, // V1 key
|
||||
};
|
||||
expect(needsMigration(partiallyMigrated)).toBe(true);
|
||||
});
|
||||
|
||||
it('should return false for partially migrated settings WITH version field', () => {
|
||||
// With version field, we trust that it's been properly migrated
|
||||
const partiallyMigratedWithVersion = {
|
||||
[SETTINGS_VERSION_KEY]: SETTINGS_VERSION,
|
||||
model: {
|
||||
name: 'qwen-coder',
|
||||
},
|
||||
autoAccept: false, // This would look like V1 but version says it's V2
|
||||
};
|
||||
expect(needsMigration(partiallyMigratedWithVersion)).toBe(false);
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
describe('migrateDeprecatedSettings', () => {
|
||||
|
||||
@@ -56,10 +56,6 @@ export const DEFAULT_EXCLUDED_ENV_VARS = ['DEBUG', 'DEBUG_MODE'];
|
||||
|
||||
const MIGRATE_V2_OVERWRITE = true;
|
||||
|
||||
// Settings version to track migration state
|
||||
export const SETTINGS_VERSION = 2;
|
||||
export const SETTINGS_VERSION_KEY = '$version';
|
||||
|
||||
const MIGRATION_MAP: Record<string, string> = {
|
||||
accessibility: 'ui.accessibility',
|
||||
allowedTools: 'tools.allowed',
|
||||
@@ -220,16 +216,8 @@ function setNestedProperty(
|
||||
}
|
||||
|
||||
export function needsMigration(settings: Record<string, unknown>): boolean {
|
||||
// Check version field first - if present and matches current version, no migration needed
|
||||
if (SETTINGS_VERSION_KEY in settings) {
|
||||
const version = settings[SETTINGS_VERSION_KEY];
|
||||
if (typeof version === 'number' && version >= SETTINGS_VERSION) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
// Fallback to legacy detection: A file needs migration if it contains any
|
||||
// top-level key that is moved to a nested location in V2.
|
||||
// A file needs migration if it contains any top-level key that is moved to a
|
||||
// nested location in V2.
|
||||
const hasV1Keys = Object.entries(MIGRATION_MAP).some(([v1Key, v2Path]) => {
|
||||
if (v1Key === v2Path || !(v1Key in settings)) {
|
||||
return false;
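
The version gate above means migration status can usually be read straight off the `$version` field; the key-by-key scan only runs for unversioned (or invalidly versioned) files. A minimal behavioral sketch using the constants defined above (the input objects are hypothetical; outcomes match the tests earlier in this compare):

// Versioned at or above SETTINGS_VERSION: short-circuits to false,
// even though 'theme' looks like a V1 key.
needsMigration({ [SETTINGS_VERSION_KEY]: SETTINGS_VERSION, theme: 'dark' }); // false

// No version field: falls back to V1-key detection via MIGRATION_MAP.
needsMigration({ theme: 'dark' }); // true

// Non-numeric version: treated as unversioned, so the fallback logic applies.
needsMigration({ [SETTINGS_VERSION_KEY]: 'not-a-number', theme: 'dark' }); // true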

@@ -262,21 +250,6 @@ function migrateSettingsToV2(

  for (const [oldKey, newPath] of Object.entries(MIGRATION_MAP)) {
    if (flatKeys.has(oldKey)) {
      // Safety check: If this key is a V2 container (like 'model') and it's
      // already an object, it's likely already in V2 format. Skip migration
      // to prevent double-nesting (e.g., model.name.name).
      if (
        KNOWN_V2_CONTAINERS.has(oldKey) &&
        typeof flatSettings[oldKey] === 'object' &&
        flatSettings[oldKey] !== null &&
        !Array.isArray(flatSettings[oldKey])
      ) {
        // This is already a V2 container, carry it over as-is
        v2Settings[oldKey] = flatSettings[oldKey];
        flatKeys.delete(oldKey);
        continue;
      }

      setNestedProperty(v2Settings, newPath, flatSettings[oldKey]);
      flatKeys.delete(oldKey);
    }

@@ -314,9 +287,6 @@ function migrateSettingsToV2(
    }
  }

  // Set version field to indicate this is a V2 settings file
  v2Settings[SETTINGS_VERSION_KEY] = SETTINGS_VERSION;

  return v2Settings;
}

@@ -366,11 +336,6 @@ export function migrateSettingsToV1(

  // Carry over any unrecognized keys
  for (const remainingKey of v2Keys) {
    // Skip the version field - it's only for V2 format
    if (remainingKey === SETTINGS_VERSION_KEY) {
      continue;
    }

    const value = v2Settings[remainingKey];
    if (value === undefined) {
      continue;

@@ -656,22 +621,6 @@ export function loadSettings(
        }
        settingsObject = migratedSettings;
      }
    } else if (!(SETTINGS_VERSION_KEY in settingsObject)) {
      // No migration needed, but version field is missing - add it for future optimizations
      settingsObject[SETTINGS_VERSION_KEY] = SETTINGS_VERSION;
      if (MIGRATE_V2_OVERWRITE) {
        try {
          fs.writeFileSync(
            filePath,
            JSON.stringify(settingsObject, null, 2),
            'utf-8',
          );
        } catch (e) {
          console.error(
            `Error adding version to settings file: ${getErrorMessage(e)}`,
          );
        }
      }
    }
    return { settings: settingsObject as Settings, rawJson: content };
  }

@@ -558,16 +558,6 @@ const SETTINGS_SCHEMA = {
      description: 'Enable OpenAI logging.',
      showInDialog: true,
    },
    openAILoggingDir: {
      type: 'string',
      label: 'OpenAI Logging Directory',
      category: 'Model',
      requiresRestart: false,
      default: undefined as string | undefined,
      description:
        'Custom directory path for OpenAI API logs. If not specified, defaults to logs/openai in the current working directory.',
      showInDialog: true,
    },
    generationConfig: {
      type: 'object',
      label: 'Generation Configuration',

@@ -857,16 +847,6 @@ const SETTINGS_SCHEMA = {
        'Use ripgrep for file content search instead of the fallback implementation. Provides faster search performance.',
      showInDialog: true,
    },
    useBuiltinRipgrep: {
      type: 'boolean',
      label: 'Use Builtin Ripgrep',
      category: 'Tools',
      requiresRestart: false,
      default: true,
      description:
        'Use the bundled ripgrep binary. When set to false, the system-level "rg" command will be used instead. This setting is only effective when useRipgrep is true.',
      showInDialog: true,
    },
    enableToolOutputTruncation: {
      type: 'boolean',
      label: 'Enable Tool Output Truncation',

@@ -1011,24 +991,6 @@ const SETTINGS_SCHEMA = {
          description: 'Whether to use an external authentication flow.',
          showInDialog: false,
        },
        apiKey: {
          type: 'string',
          label: 'API Key',
          category: 'Security',
          requiresRestart: true,
          default: undefined as string | undefined,
          description: 'API key for OpenAI compatible authentication.',
          showInDialog: false,
        },
        baseUrl: {
          type: 'string',
          label: 'Base URL',
          category: 'Security',
          requiresRestart: true,
          default: undefined as string | undefined,
          description: 'Base URL for OpenAI compatible API.',
          showInDialog: false,
        },
      },
    },
  },

@@ -1082,36 +1044,17 @@ const SETTINGS_SCHEMA = {
    },
    tavilyApiKey: {
      type: 'string',
      label: 'Tavily API Key (Deprecated)',
      label: 'Tavily API Key',
      category: 'Advanced',
      requiresRestart: false,
      default: undefined as string | undefined,
      description:
        '⚠️ DEPRECATED: Please use webSearch.provider configuration instead. Legacy API key for the Tavily API.',
        'The API key for the Tavily API. Required to enable the web_search tool functionality.',
      showInDialog: false,
    },
  },
  },

  webSearch: {
    type: 'object',
    label: 'Web Search',
    category: 'Advanced',
    requiresRestart: true,
    default: undefined as
      | {
          provider: Array<{
            type: 'tavily' | 'google' | 'dashscope';
            apiKey?: string;
            searchEngineId?: string;
          }>;
          default: string;
        }
      | undefined,
    description: 'Configuration for web search providers.',
    showInDialog: false,
  },

  experimental: {
    type: 'object',
    label: 'Experimental',

@@ -1,121 +0,0 @@
/**
 * @license
 * Copyright 2025 Qwen
 * SPDX-License-Identifier: Apache-2.0
 */

import { AuthType } from '@qwen-code/qwen-code-core';
import type { WebSearchProviderConfig } from '@qwen-code/qwen-code-core';
import type { Settings } from './settings.js';

/**
 * CLI arguments related to web search configuration
 */
export interface WebSearchCliArgs {
  tavilyApiKey?: string;
  googleApiKey?: string;
  googleSearchEngineId?: string;
  webSearchDefault?: string;
}

/**
 * Web search configuration structure
 */
export interface WebSearchConfig {
  provider: WebSearchProviderConfig[];
  default: string;
}

/**
 * Build webSearch configuration from multiple sources with priority:
 * 1. settings.json (new format) - highest priority
 * 2. Command line args + environment variables
 * 3. Legacy tavilyApiKey (backward compatibility)
 *
 * @param argv - Command line arguments
 * @param settings - User settings from settings.json
 * @param authType - Authentication type (e.g., 'qwen-oauth')
 * @returns WebSearch configuration or undefined if no providers available
 */
export function buildWebSearchConfig(
  argv: WebSearchCliArgs,
  settings: Settings,
  authType?: string,
): WebSearchConfig | undefined {
  const isQwenOAuth = authType === AuthType.QWEN_OAUTH;

  // Step 1: Collect providers from settings or command line/env
  let providers: WebSearchProviderConfig[] = [];
  let userDefault: string | undefined;

  if (settings.webSearch) {
    // Use providers from settings.json
    providers = [...settings.webSearch.provider];
    userDefault = settings.webSearch.default;
  } else {
    // Build providers from command line args and environment variables
    const tavilyKey =
      argv.tavilyApiKey ||
      settings.advanced?.tavilyApiKey ||
      process.env['TAVILY_API_KEY'];
    if (tavilyKey) {
      providers.push({
        type: 'tavily',
        apiKey: tavilyKey,
      } as WebSearchProviderConfig);
    }

    const googleKey = argv.googleApiKey || process.env['GOOGLE_API_KEY'];
    const googleEngineId =
      argv.googleSearchEngineId || process.env['GOOGLE_SEARCH_ENGINE_ID'];
    if (googleKey && googleEngineId) {
      providers.push({
        type: 'google',
        apiKey: googleKey,
        searchEngineId: googleEngineId,
      } as WebSearchProviderConfig);
    }
  }

  // Step 2: Ensure dashscope is available for qwen-oauth users
  if (isQwenOAuth) {
    const hasDashscope = providers.some((p) => p.type === 'dashscope');
    if (!hasDashscope) {
      providers.push({ type: 'dashscope' } as WebSearchProviderConfig);
    }
  }

  // Step 3: If no providers available, return undefined
  if (providers.length === 0) {
    return undefined;
  }

  // Step 4: Determine default provider
  // Priority: user explicit config > CLI arg > first available provider (tavily > google > dashscope)
  const providerPriority: Array<'tavily' | 'google' | 'dashscope'> = [
    'tavily',
    'google',
    'dashscope',
  ];

  // Determine default provider based on availability
  let defaultProvider = userDefault || argv.webSearchDefault;
  if (!defaultProvider) {
    // Find first available provider by priority order
    for (const providerType of providerPriority) {
      if (providers.some((p) => p.type === providerType)) {
        defaultProvider = providerType;
        break;
      }
    }
    // Fallback to first available provider if none found in priority list
    if (!defaultProvider) {
      defaultProvider = providers[0]?.type || 'dashscope';
    }
  }

  return {
    provider: providers,
    default: defaultProvider,
  };
}
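
A usage sketch of the resolver above, for a Qwen OAuth session with no `webSearch` block in settings.json (the key value is hypothetical): step 1 collects a Tavily provider from the CLI flag, step 2 appends DashScope for the OAuth user, and step 4 picks 'tavily' as the default by priority order.

const webSearchConfig = buildWebSearchConfig(
  { tavilyApiKey: 'tvly-example-key' }, // hypothetical CLI argument
  {} as Settings, // no webSearch or advanced.tavilyApiKey configured
  AuthType.QWEN_OAUTH,
);
// => {
//      provider: [
//        { type: 'tavily', apiKey: 'tvly-example-key' },
//        { type: 'dashscope' },
//      ],
//      default: 'tavily',
//    }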

@@ -327,13 +327,9 @@ describe('gemini.tsx main function kitty protocol', () => {
        openaiLogging: undefined,
        openaiApiKey: undefined,
        openaiBaseUrl: undefined,
        openaiLoggingDir: undefined,
        proxy: undefined,
        includeDirectories: undefined,
        tavilyApiKey: undefined,
        googleApiKey: undefined,
        googleSearchEngineId: undefined,
        webSearchDefault: undefined,
        screenReader: undefined,
        vlmSwitchMode: undefined,
        useSmartEdit: undefined,

@@ -17,7 +17,11 @@ import dns from 'node:dns';
import { randomUUID } from 'node:crypto';
import { start_sandbox } from './utils/sandbox.js';
import type { DnsResolutionOrder, LoadedSettings } from './config/settings.js';
import { loadSettings, migrateDeprecatedSettings } from './config/settings.js';
import {
  loadSettings,
  migrateDeprecatedSettings,
  SettingScope,
} from './config/settings.js';
import { themeManager } from './ui/themes/theme-manager.js';
import { getStartupWarnings } from './utils/startupWarnings.js';
import { getUserStartupWarnings } from './utils/userStartupWarnings.js';

@@ -229,6 +233,17 @@ export async function main() {
    validateDnsResolutionOrder(settings.merged.advanced?.dnsResolutionOrder),
  );

  // Set a default auth type if one isn't set.
  if (!settings.merged.security?.auth?.selectedType) {
    if (process.env['CLOUD_SHELL'] === 'true') {
      settings.setValue(
        SettingScope.User,
        'selectedAuthType',
        AuthType.CLOUD_SHELL,
      );
    }
  }

  // Load custom themes from settings
  themeManager.loadCustomThemes(settings.merged.ui?.customThemes);

@@ -387,11 +402,7 @@ export async function main() {
  let input = config.getQuestion();
  const startupWarnings = [
    ...(await getStartupWarnings()),
    ...(await getUserStartupWarnings({
      workspaceRoot: process.cwd(),
      useRipgrep: settings.merged.tools?.useRipgrep ?? true,
      useBuiltinRipgrep: settings.merged.tools?.useBuiltinRipgrep ?? true,
    })),
    ...(await getUserStartupWarnings()),
  ];

  // Render UI, passing necessary config values. Check that there is no command line question.

@@ -1227,28 +1227,4 @@ describe('FileCommandLoader', () => {
      expect(commands).toHaveLength(0);
    });
  });

  describe('AbortError handling', () => {
    it('should silently ignore AbortError when operation is cancelled', async () => {
      const userCommandsDir = Storage.getUserCommandsDir();
      mock({
        [userCommandsDir]: {
          'test1.toml': 'prompt = "Prompt 1"',
          'test2.toml': 'prompt = "Prompt 2"',
        },
      });

      const loader = new FileCommandLoader(null);
      const controller = new AbortController();
      const signal = controller.signal;

      // Start loading and immediately abort
      const loadPromise = loader.loadCommands(signal);
      controller.abort();

      // Should not throw or print errors
      const commands = await loadPromise;
      expect(commands).toHaveLength(0);
    });
  });
});

@@ -120,11 +120,7 @@ export class FileCommandLoader implements ICommandLoader {
        // Add all commands without deduplication
        allCommands.push(...commands);
      } catch (error) {
        // Ignore ENOENT (directory doesn't exist) and AbortError (operation was cancelled)
        const isEnoent = (error as NodeJS.ErrnoException).code === 'ENOENT';
        const isAbortError =
          error instanceof Error && error.name === 'AbortError';
        if (!isEnoent && !isAbortError) {
        if ((error as NodeJS.ErrnoException).code !== 'ENOENT') {
          console.error(
            `[FileCommandLoader] Error loading commands from ${dirInfo.path}:`,
            error,

@@ -916,9 +916,17 @@ export const AppContainer = (props: AppContainerProps) => {
    (result: IdeIntegrationNudgeResult) => {
      if (result.userSelection === 'yes') {
        handleSlashCommand('/ide install');
        settings.setValue(SettingScope.User, 'ide.hasSeenNudge', true);
        settings.setValue(
          SettingScope.User,
          'hasSeenIdeIntegrationNudge',
          true,
        );
      } else if (result.userSelection === 'dismiss') {
        settings.setValue(SettingScope.User, 'ide.hasSeenNudge', true);
        settings.setValue(
          SettingScope.User,
          'hasSeenIdeIntegrationNudge',
          true,
        );
      }
      setIdePromptAnswered(true);
    },

@@ -8,7 +8,12 @@ import type React from 'react';
import { useState } from 'react';
import { AuthType } from '@qwen-code/qwen-code-core';
import { Box, Text } from 'ink';
import { validateAuthMethod } from '../../config/auth.js';
import {
  setOpenAIApiKey,
  setOpenAIBaseUrl,
  setOpenAIModel,
  validateAuthMethod,
} from '../../config/auth.js';
import { type LoadedSettings, SettingScope } from '../../config/settings.js';
import { Colors } from '../colors.js';
import { useKeypress } from '../hooks/useKeypress.js';

@@ -16,15 +21,7 @@ import { OpenAIKeyPrompt } from '../components/OpenAIKeyPrompt.js';
import { RadioButtonSelect } from '../components/shared/RadioButtonSelect.js';

interface AuthDialogProps {
  onSelect: (
    authMethod: AuthType | undefined,
    scope: SettingScope,
    credentials?: {
      apiKey?: string;
      baseUrl?: string;
      model?: string;
    },
  ) => void;
  onSelect: (authMethod: AuthType | undefined, scope: SettingScope) => void;
  settings: LoadedSettings;
  initialErrorMessage?: string | null;
}

@@ -73,7 +70,11 @@ export function AuthDialog({
        return item.value === defaultAuthType;
      }

      return item.value === AuthType.QWEN_OAUTH;
      if (process.env['GEMINI_API_KEY']) {
        return item.value === AuthType.USE_GEMINI;
      }

      return item.value === AuthType.LOGIN_WITH_GOOGLE;
    }),
  );

@@ -100,12 +101,11 @@ export function AuthDialog({
    baseUrl: string,
    model: string,
  ) => {
    setOpenAIApiKey(apiKey);
    setOpenAIBaseUrl(baseUrl);
    setOpenAIModel(model);
    setShowOpenAIKeyPrompt(false);
    onSelect(AuthType.USE_OPENAI, SettingScope.User, {
      apiKey,
      baseUrl,
      model,
    });
    onSelect(AuthType.USE_OPENAI, SettingScope.User);
  };

  const handleOpenAIKeyCancel = () => {

@@ -6,11 +6,12 @@

import { useState, useCallback, useEffect } from 'react';
import type { LoadedSettings, SettingScope } from '../../config/settings.js';
import type { AuthType, Config } from '@qwen-code/qwen-code-core';
import { AuthType, type Config } from '@qwen-code/qwen-code-core';
import {
  clearCachedCredentialFile,
  getErrorMessage,
} from '@qwen-code/qwen-code-core';
import { runExitCleanup } from '../../utils/cleanup.js';
import { AuthState } from '../types.js';
import { validateAuthMethod } from '../../config/auth.js';

@@ -46,7 +47,6 @@ export const useAuthCommand = (settings: LoadedSettings, config: Config) => {
      setAuthError(error);
      if (error) {
        setAuthState(AuthState.Updating);
        setIsAuthDialogOpen(true);
      }
    },
    [setAuthError, setAuthState],

@@ -87,49 +87,24 @@ export const useAuthCommand = (settings: LoadedSettings, config: Config) => {

  // Handle auth selection from dialog
  const handleAuthSelect = useCallback(
    async (
      authType: AuthType | undefined,
      scope: SettingScope,
      credentials?: {
        apiKey?: string;
        baseUrl?: string;
        model?: string;
      },
    ) => {
    async (authType: AuthType | undefined, scope: SettingScope) => {
      if (authType) {
        await clearCachedCredentialFile();

        // Save OpenAI credentials if provided
        if (credentials) {
          // Update Config's internal generationConfig before calling refreshAuth
          // This ensures refreshAuth has access to the new credentials
          config.updateCredentials({
            apiKey: credentials.apiKey,
            baseUrl: credentials.baseUrl,
            model: credentials.model,
          });

          // Also set environment variables for compatibility with other parts of the code
          if (credentials.apiKey) {
            settings.setValue(
              scope,
              'security.auth.apiKey',
              credentials.apiKey,
            );
          }
          if (credentials.baseUrl) {
            settings.setValue(
              scope,
              'security.auth.baseUrl',
              credentials.baseUrl,
            );
          }
          if (credentials.model) {
            settings.setValue(scope, 'model.name', credentials.model);
          }
        }

        settings.setValue(scope, 'security.auth.selectedType', authType);

        if (
          authType === AuthType.LOGIN_WITH_GOOGLE &&
          config.isBrowserLaunchSuppressed()
        ) {
          await runExitCleanup();
          console.log(`
----------------------------------------------------------------
Logging in with Google... Please restart Gemini CLI to continue.
----------------------------------------------------------------
`);
          process.exit(0);
        }
      }

      setIsAuthDialogOpen(false);

@@ -11,7 +11,6 @@ import { createMockCommandContext } from '../../test-utils/mockCommandContext.js
import { getCliVersion } from '../../utils/version.js';
import { GIT_COMMIT_INFO } from '../../generated/git-commit.js';
import { formatMemoryUsage } from '../utils/formatters.js';
import { AuthType } from '@qwen-code/qwen-code-core';

// Mock dependencies
vi.mock('open');

@@ -27,6 +26,7 @@ vi.mock('@qwen-code/qwen-code-core', async (importOriginal) => {
      getDetectedIdeDisplayName: vi.fn().mockReturnValue('VSCode'),
    }),
  },
  sessionId: 'test-session-id',
  };
});
vi.mock('node:process', () => ({

@@ -58,16 +58,6 @@ describe('bugCommand', () => {
        getModel: () => 'qwen3-coder-plus',
        getBugCommand: () => undefined,
        getIdeMode: () => true,
        getSessionId: () => 'test-session-id',
      },
      settings: {
        merged: {
          security: {
            auth: {
              selectedType: undefined,
            },
          },
        },
      },
    },
  });

@@ -81,7 +71,6 @@ describe('bugCommand', () => {
* **Session ID:** test-session-id
* **Operating System:** test-platform v20.0.0
* **Sandbox Environment:** test
* **Auth Type:**
* **Model Version:** qwen3-coder-plus
* **Memory Usage:** 100 MB
* **IDE Client:** VSCode

@@ -102,16 +91,6 @@ describe('bugCommand', () => {
        getModel: () => 'qwen3-coder-plus',
        getBugCommand: () => ({ urlTemplate: customTemplate }),
        getIdeMode: () => true,
        getSessionId: () => 'test-session-id',
      },
      settings: {
        merged: {
          security: {
            auth: {
              selectedType: undefined,
            },
          },
        },
      },
    },
  });

@@ -125,7 +104,6 @@ describe('bugCommand', () => {
* **Session ID:** test-session-id
* **Operating System:** test-platform v20.0.0
* **Sandbox Environment:** test
* **Auth Type:**
* **Model Version:** qwen3-coder-plus
* **Memory Usage:** 100 MB
* **IDE Client:** VSCode

@@ -136,50 +114,4 @@ describe('bugCommand', () => {

    expect(open).toHaveBeenCalledWith(expectedUrl);
  });

  it('should include Base URL when auth type is OpenAI', async () => {
    const mockContext = createMockCommandContext({
      services: {
        config: {
          getModel: () => 'qwen3-coder-plus',
          getBugCommand: () => undefined,
          getIdeMode: () => true,
          getSessionId: () => 'test-session-id',
          getContentGeneratorConfig: () => ({
            baseUrl: 'https://api.openai.com/v1',
          }),
        },
        settings: {
          merged: {
            security: {
              auth: {
                selectedType: AuthType.USE_OPENAI,
              },
            },
          },
        },
      },
    });

    if (!bugCommand.action) throw new Error('Action is not defined');
    await bugCommand.action(mockContext, 'OpenAI bug');

    const expectedInfo = `
* **CLI Version:** 0.1.0
* **Git Commit:** ${GIT_COMMIT_INFO}
* **Session ID:** test-session-id
* **Operating System:** test-platform v20.0.0
* **Sandbox Environment:** test
* **Auth Type:** ${AuthType.USE_OPENAI}
* **Base URL:** https://api.openai.com/v1
* **Model Version:** qwen3-coder-plus
* **Memory Usage:** 100 MB
* **IDE Client:** VSCode
`;
    const expectedUrl =
      'https://github.com/QwenLM/qwen-code/issues/new?template=bug_report.yml&title=OpenAI%20bug&info=' +
      encodeURIComponent(expectedInfo);

    expect(open).toHaveBeenCalledWith(expectedUrl);
  });
});

@@ -15,7 +15,7 @@ import { MessageType } from '../types.js';
import { GIT_COMMIT_INFO } from '../../generated/git-commit.js';
import { formatMemoryUsage } from '../utils/formatters.js';
import { getCliVersion } from '../../utils/version.js';
import { IdeClient, AuthType } from '@qwen-code/qwen-code-core';
import { IdeClient, sessionId } from '@qwen-code/qwen-code-core';

export const bugCommand: SlashCommand = {
  name: 'bug',

@@ -38,24 +38,13 @@ export const bugCommand: SlashCommand = {
    const cliVersion = await getCliVersion();
    const memoryUsage = formatMemoryUsage(process.memoryUsage().rss);
    const ideClient = await getIdeClientName(context);
    const selectedAuthType =
      context.services.settings.merged.security?.auth?.selectedType || '';
    const baseUrl =
      selectedAuthType === AuthType.USE_OPENAI
        ? config?.getContentGeneratorConfig()?.baseUrl
        : undefined;

    let info = `
* **CLI Version:** ${cliVersion}
* **Git Commit:** ${GIT_COMMIT_INFO}
* **Session ID:** ${config?.getSessionId() || 'unknown'}
* **Session ID:** ${sessionId}
* **Operating System:** ${osVersion}
* **Sandbox Environment:** ${sandboxEnv}
* **Auth Type:** ${selectedAuthType}`;
    if (baseUrl) {
      info += `\n* **Base URL:** ${baseUrl}`;
    }
    info += `
* **Model Version:** ${modelVersion}
* **Memory Usage:** ${memoryUsage}
`;

@@ -130,7 +130,7 @@ export function OpenAIKeyPrompt({
      }

      // Handle regular character input
      if (key.sequence && !key.ctrl && !key.meta) {
      if (key.sequence && !key.ctrl && !key.meta && !key.name) {
        // Filter control characters
        const cleanInput = key.sequence
          .split('')

@@ -12,7 +12,6 @@ import type {
  Config,
} from '@qwen-code/qwen-code-core';
import { renderWithProviders } from '../../../test-utils/render.js';
import type { LoadedSettings } from '../../../config/settings.js';

describe('ToolConfirmationMessage', () => {
  const mockConfig = {

@@ -188,63 +187,4 @@ describe('ToolConfirmationMessage', () => {
      });
    });
  });

  describe('external editor option', () => {
    const editConfirmationDetails: ToolCallConfirmationDetails = {
      type: 'edit',
      title: 'Confirm Edit',
      fileName: 'test.txt',
      filePath: '/test.txt',
      fileDiff: '...diff...',
      originalContent: 'a',
      newContent: 'b',
      onConfirm: vi.fn(),
    };

    it('should show "Modify with external editor" when preferredEditor is set', () => {
      const mockConfig = {
        isTrustedFolder: () => true,
        getIdeMode: () => false,
      } as unknown as Config;

      const { lastFrame } = renderWithProviders(
        <ToolConfirmationMessage
          confirmationDetails={editConfirmationDetails}
          config={mockConfig}
          availableTerminalHeight={30}
          terminalWidth={80}
        />,
        {
          settings: {
            merged: { general: { preferredEditor: 'vscode' } },
          } as unknown as LoadedSettings,
        },
      );

      expect(lastFrame()).toContain('Modify with external editor');
    });

    it('should NOT show "Modify with external editor" when preferredEditor is not set', () => {
      const mockConfig = {
        isTrustedFolder: () => true,
        getIdeMode: () => false,
      } as unknown as Config;

      const { lastFrame } = renderWithProviders(
        <ToolConfirmationMessage
          confirmationDetails={editConfirmationDetails}
          config={mockConfig}
          availableTerminalHeight={30}
          terminalWidth={80}
        />,
        {
          settings: {
            merged: { general: {} },
          } as unknown as LoadedSettings,
        },
      );

      expect(lastFrame()).not.toContain('Modify with external editor');
    });
  });
});

@@ -15,14 +15,12 @@ import type {
  ToolExecuteConfirmationDetails,
  ToolMcpConfirmationDetails,
  Config,
  EditorType,
} from '@qwen-code/qwen-code-core';
import { IdeClient, ToolConfirmationOutcome } from '@qwen-code/qwen-code-core';
import type { RadioSelectItem } from '../shared/RadioButtonSelect.js';
import { RadioButtonSelect } from '../shared/RadioButtonSelect.js';
import { MaxSizedBox } from '../shared/MaxSizedBox.js';
import { useKeypress } from '../../hooks/useKeypress.js';
import { useSettings } from '../../contexts/SettingsContext.js';
import { theme } from '../../semantic-colors.js';

export interface ToolConfirmationMessageProps {

@@ -47,11 +45,6 @@ export const ToolConfirmationMessage: React.FC<
  const { onConfirm } = confirmationDetails;
  const childWidth = terminalWidth - 2; // 2 for padding

  const settings = useSettings();
  const preferredEditor = settings.merged.general?.preferredEditor as
    | EditorType
    | undefined;

  const [ideClient, setIdeClient] = useState<IdeClient | null>(null);
  const [isDiffingEnabled, setIsDiffingEnabled] = useState(false);

@@ -206,7 +199,7 @@ export const ToolConfirmationMessage: React.FC<
        key: 'Yes, allow always',
      });
    }
    if ((!config.getIdeMode() || !isDiffingEnabled) && preferredEditor) {
    if (!config.getIdeMode() || !isDiffingEnabled) {
      options.push({
        label: 'Modify with external editor',
        value: ToolConfirmationOutcome.ModifyWithEditor,

@@ -23,7 +23,7 @@ export const ToolsList: React.FC<ToolsListProps> = ({
}) => (
  <Box flexDirection="column" marginBottom={1}>
    <Text bold color={theme.text.primary}>
      Available Qwen Code CLI tools:
      Available Gemini CLI tools:
    </Text>
    <Box height={1} />
    {tools.length > 0 ? (

@@ -1,7 +1,7 @@
// Vitest Snapshot v1, https://vitest.dev/guide/snapshot.html

exports[`<ToolsList /> > renders correctly with descriptions 1`] = `
"Available Qwen Code CLI tools:
"Available Gemini CLI tools:

  - Test Tool One (test-tool-one)
      This is the first test tool.

@@ -16,14 +16,14 @@ exports[`<ToolsList /> > renders correctly with descriptions 1`] = `
`;

exports[`<ToolsList /> > renders correctly with no tools 1`] = `
"Available Qwen Code CLI tools:
"Available Gemini CLI tools:

  No tools available
"
`;

exports[`<ToolsList /> > renders correctly without descriptions 1`] = `
"Available Qwen Code CLI tools:
"Available Gemini CLI tools:

  - Test Tool One
  - Test Tool Two

@@ -109,7 +109,7 @@ describe('useEditorSettings', () => {

    expect(mockLoadedSettings.setValue).toHaveBeenCalledWith(
      scope,
      'general.preferredEditor',
      'preferredEditor',
      editorType,
    );

@@ -139,7 +139,7 @@ describe('useEditorSettings', () => {

    expect(mockLoadedSettings.setValue).toHaveBeenCalledWith(
      scope,
      'general.preferredEditor',
      'preferredEditor',
      undefined,
    );

@@ -170,7 +170,7 @@ describe('useEditorSettings', () => {

    expect(mockLoadedSettings.setValue).toHaveBeenCalledWith(
      scope,
      'general.preferredEditor',
      'preferredEditor',
      editorType,
    );

@@ -199,7 +199,7 @@ describe('useEditorSettings', () => {

    expect(mockLoadedSettings.setValue).toHaveBeenCalledWith(
      scope,
      'general.preferredEditor',
      'preferredEditor',
      editorType,
    );

@@ -45,7 +45,7 @@ export const useEditorSettings = (
    }

    try {
      loadedSettings.setValue(scope, 'general.preferredEditor', editorType);
      loadedSettings.setValue(scope, 'preferredEditor', editorType);
      addItem(
        {
          type: MessageType.INFO,

@@ -22,22 +22,12 @@ vi.mock('os', async (importOriginal) => {
describe('getUserStartupWarnings', () => {
  let testRootDir: string;
  let homeDir: string;
  let startupOptions: {
    workspaceRoot: string;
    useRipgrep: boolean;
    useBuiltinRipgrep: boolean;
  };

  beforeEach(async () => {
    testRootDir = await fs.mkdtemp(path.join(os.tmpdir(), 'warnings-test-'));
    homeDir = path.join(testRootDir, 'home');
    await fs.mkdir(homeDir, { recursive: true });
    vi.mocked(os.homedir).mockReturnValue(homeDir);
    startupOptions = {
      workspaceRoot: testRootDir,
      useRipgrep: true,
      useBuiltinRipgrep: true,
    };
  });

  afterEach(async () => {

@@ -47,10 +37,7 @@ describe('getUserStartupWarnings', () => {

  describe('home directory check', () => {
    it('should return a warning when running in home directory', async () => {
      const warnings = await getUserStartupWarnings({
        ...startupOptions,
        workspaceRoot: homeDir,
      });
      const warnings = await getUserStartupWarnings(homeDir);
      expect(warnings).toContainEqual(
        expect.stringContaining('home directory'),
      );

@@ -59,10 +46,7 @@
    it('should not return a warning when running in a project directory', async () => {
      const projectDir = path.join(testRootDir, 'project');
      await fs.mkdir(projectDir);
      const warnings = await getUserStartupWarnings({
        ...startupOptions,
        workspaceRoot: projectDir,
      });
      const warnings = await getUserStartupWarnings(projectDir);
      expect(warnings).not.toContainEqual(
        expect.stringContaining('home directory'),
      );

@@ -72,10 +56,7 @@
  describe('root directory check', () => {
    it('should return a warning when running in a root directory', async () => {
      const rootDir = path.parse(testRootDir).root;
      const warnings = await getUserStartupWarnings({
        ...startupOptions,
        workspaceRoot: rootDir,
      });
      const warnings = await getUserStartupWarnings(rootDir);
      expect(warnings).toContainEqual(
        expect.stringContaining('root directory'),
      );

@@ -87,10 +68,7 @@
    it('should not return a warning when running in a non-root directory', async () => {
      const projectDir = path.join(testRootDir, 'project');
      await fs.mkdir(projectDir);
      const warnings = await getUserStartupWarnings({
        ...startupOptions,
        workspaceRoot: projectDir,
      });
      const warnings = await getUserStartupWarnings(projectDir);
      expect(warnings).not.toContainEqual(
        expect.stringContaining('root directory'),
      );

@@ -100,10 +78,7 @@
  describe('error handling', () => {
    it('should handle errors when checking directory', async () => {
      const nonExistentPath = path.join(testRootDir, 'non-existent');
      const warnings = await getUserStartupWarnings({
        ...startupOptions,
        workspaceRoot: nonExistentPath,
      });
      const warnings = await getUserStartupWarnings(nonExistentPath);
      const expectedWarning =
        'Could not verify the current directory due to a file system error.';
      expect(warnings).toEqual([expectedWarning, expectedWarning]);

@@ -7,26 +7,19 @@
import fs from 'node:fs/promises';
import * as os from 'node:os';
import path from 'node:path';
import { canUseRipgrep } from '@qwen-code/qwen-code-core';

type WarningCheckOptions = {
  workspaceRoot: string;
  useRipgrep: boolean;
  useBuiltinRipgrep: boolean;
};

type WarningCheck = {
  id: string;
  check: (options: WarningCheckOptions) => Promise<string | null>;
  check: (workspaceRoot: string) => Promise<string | null>;
};

// Individual warning checks
const homeDirectoryCheck: WarningCheck = {
  id: 'home-directory',
  check: async (options: WarningCheckOptions) => {
  check: async (workspaceRoot: string) => {
    try {
      const [workspaceRealPath, homeRealPath] = await Promise.all([
        fs.realpath(options.workspaceRoot),
        fs.realpath(workspaceRoot),
        fs.realpath(os.homedir()),
      ]);

@@ -42,9 +35,9 @@ const homeDirectoryCheck: WarningCheck = {

const rootDirectoryCheck: WarningCheck = {
  id: 'root-directory',
  check: async (options: WarningCheckOptions) => {
  check: async (workspaceRoot: string) => {
    try {
      const workspaceRealPath = await fs.realpath(options.workspaceRoot);
      const workspaceRealPath = await fs.realpath(workspaceRoot);
      const errorMessage =
        'Warning: You are running Qwen Code in the root directory. Your entire folder structure will be used for context. It is strongly recommended to run in a project-specific directory.';

@@ -60,33 +53,17 @@
  },
};

const ripgrepAvailabilityCheck: WarningCheck = {
  id: 'ripgrep-availability',
  check: async (options: WarningCheckOptions) => {
    if (!options.useRipgrep) {
      return null;
    }

    const isAvailable = await canUseRipgrep(options.useBuiltinRipgrep);
    if (!isAvailable) {
      return 'Ripgrep not available: Please install ripgrep globally to enable faster file content search. Falling back to built-in grep.';
    }
    return null;
  },
};

// All warning checks
const WARNING_CHECKS: readonly WarningCheck[] = [
  homeDirectoryCheck,
  rootDirectoryCheck,
  ripgrepAvailabilityCheck,
];

export async function getUserStartupWarnings(
  options: WarningCheckOptions,
  workspaceRoot: string = process.cwd(),
): Promise<string[]> {
  const results = await Promise.all(
    WARNING_CHECKS.map((check) => check.check(options)),
    WARNING_CHECKS.map((check) => check.check(workspaceRoot)),
  );
  return results.filter((msg) => msg !== null);
}
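
The two variants interleaved above reflect the signature change this compare crosses: one side threads a WarningCheckOptions object through every check (and adds the ripgrep availability check), while the other passes a bare workspaceRoot string defaulting to process.cwd(). A sketch of the two call shapes, assuming (from the hunk counts above) that the options-object form is the side being diffed against v0.1.1-preview:

// Options-object form: ripgrep settings travel with the workspace root.
await getUserStartupWarnings({
  workspaceRoot: process.cwd(),
  useRipgrep: true,
  useBuiltinRipgrep: true,
});

// String form (v0.1.1-preview side): a single path argument with a default.
await getUserStartupWarnings(); // defaults to process.cwd()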
|
||||
|
||||
@@ -105,6 +105,34 @@ describe('validateNonInterActiveAuth', () => {
|
||||
expect(processExitSpy).toHaveBeenCalledWith(1);
|
||||
});
|
||||
|
||||
it('uses LOGIN_WITH_GOOGLE if GOOGLE_GENAI_USE_GCA is set', async () => {
|
||||
process.env['GOOGLE_GENAI_USE_GCA'] = 'true';
|
||||
const nonInteractiveConfig = {
|
||||
refreshAuth: refreshAuthMock,
|
||||
} as unknown as Config;
|
||||
await validateNonInteractiveAuth(
|
||||
undefined,
|
||||
undefined,
|
||||
nonInteractiveConfig,
|
||||
mockSettings,
|
||||
);
|
||||
expect(refreshAuthMock).toHaveBeenCalledWith(AuthType.LOGIN_WITH_GOOGLE);
|
||||
});
|
||||
|
||||
it('uses USE_GEMINI if GEMINI_API_KEY is set', async () => {
|
||||
process.env['GEMINI_API_KEY'] = 'fake-key';
|
||||
const nonInteractiveConfig = {
|
||||
refreshAuth: refreshAuthMock,
|
||||
} as unknown as Config;
|
||||
await validateNonInteractiveAuth(
|
||||
undefined,
|
||||
undefined,
|
||||
nonInteractiveConfig,
|
||||
mockSettings,
|
||||
);
|
||||
expect(refreshAuthMock).toHaveBeenCalledWith(AuthType.USE_GEMINI);
|
||||
});
|
||||
|
||||
it('uses USE_OPENAI if OPENAI_API_KEY is set', async () => {
|
||||
process.env['OPENAI_API_KEY'] = 'fake-openai-key';
|
||||
const nonInteractiveConfig = {
|
||||
@@ -140,6 +168,104 @@ describe('validateNonInterActiveAuth', () => {
|
||||
expect(refreshAuthMock).toHaveBeenCalledWith(AuthType.QWEN_OAUTH);
|
||||
});
|
||||
|
||||
it('uses USE_VERTEX_AI if GOOGLE_GENAI_USE_VERTEXAI is true (with GOOGLE_CLOUD_PROJECT and GOOGLE_CLOUD_LOCATION)', async () => {
|
||||
process.env['GOOGLE_GENAI_USE_VERTEXAI'] = 'true';
|
||||
process.env['GOOGLE_CLOUD_PROJECT'] = 'test-project';
|
||||
process.env['GOOGLE_CLOUD_LOCATION'] = 'us-central1';
|
||||
const nonInteractiveConfig = {
|
||||
refreshAuth: refreshAuthMock,
|
||||
} as unknown as Config;
|
||||
await validateNonInteractiveAuth(
|
||||
undefined,
|
||||
undefined,
|
||||
nonInteractiveConfig,
|
||||
mockSettings,
|
||||
);
|
||||
expect(refreshAuthMock).toHaveBeenCalledWith(AuthType.USE_VERTEX_AI);
|
||||
});
|
||||
|
||||
it('uses USE_VERTEX_AI if GOOGLE_GENAI_USE_VERTEXAI is true and GOOGLE_API_KEY is set', async () => {
|
||||
process.env['GOOGLE_GENAI_USE_VERTEXAI'] = 'true';
|
||||
process.env['GOOGLE_API_KEY'] = 'vertex-api-key';
|
||||
const nonInteractiveConfig = {
|
||||
refreshAuth: refreshAuthMock,
|
||||
} as unknown as Config;
|
||||
await validateNonInteractiveAuth(
|
||||
undefined,
|
||||
undefined,
|
||||
nonInteractiveConfig,
|
||||
mockSettings,
|
||||
);
|
||||
expect(refreshAuthMock).toHaveBeenCalledWith(AuthType.USE_VERTEX_AI);
|
||||
});
|
||||
|
||||
it('uses LOGIN_WITH_GOOGLE if GOOGLE_GENAI_USE_GCA is set, even with other env vars', async () => {
|
||||
process.env['GOOGLE_GENAI_USE_GCA'] = 'true';
|
||||
process.env['GEMINI_API_KEY'] = 'fake-key';
|
||||
process.env['GOOGLE_GENAI_USE_VERTEXAI'] = 'true';
|
||||
process.env['GOOGLE_CLOUD_PROJECT'] = 'test-project';
|
||||
process.env['GOOGLE_CLOUD_LOCATION'] = 'us-central1';
|
||||
const nonInteractiveConfig = {
|
||||
refreshAuth: refreshAuthMock,
|
||||
} as unknown as Config;
|
||||
await validateNonInteractiveAuth(
|
||||
undefined,
|
||||
undefined,
|
||||
nonInteractiveConfig,
|
||||
mockSettings,
|
||||
);
|
||||
expect(refreshAuthMock).toHaveBeenCalledWith(AuthType.LOGIN_WITH_GOOGLE);
|
||||
});
|
||||
|
||||
it('uses USE_VERTEX_AI if both GEMINI_API_KEY and GOOGLE_GENAI_USE_VERTEXAI are set', async () => {
|
||||
process.env['GEMINI_API_KEY'] = 'fake-key';
|
||||
process.env['GOOGLE_GENAI_USE_VERTEXAI'] = 'true';
|
||||
process.env['GOOGLE_CLOUD_PROJECT'] = 'test-project';
|
||||
process.env['GOOGLE_CLOUD_LOCATION'] = 'us-central1';
|
||||
const nonInteractiveConfig = {
|
||||
refreshAuth: refreshAuthMock,
|
||||
} as unknown as Config;
|
||||
await validateNonInteractiveAuth(
|
||||
undefined,
|
||||
undefined,
|
||||
nonInteractiveConfig,
|
||||
mockSettings,
|
||||
);
|
||||
expect(refreshAuthMock).toHaveBeenCalledWith(AuthType.USE_VERTEX_AI);
|
||||
});
|
||||
|
||||
it('uses USE_GEMINI if GOOGLE_GENAI_USE_VERTEXAI is false, GEMINI_API_KEY is set, and project/location are available', async () => {
|
||||
process.env['GOOGLE_GENAI_USE_VERTEXAI'] = 'false';
|
||||
process.env['GEMINI_API_KEY'] = 'fake-key';
|
||||
process.env['GOOGLE_CLOUD_PROJECT'] = 'test-project';
|
||||
process.env['GOOGLE_CLOUD_LOCATION'] = 'us-central1';
|
||||
const nonInteractiveConfig = {
|
||||
refreshAuth: refreshAuthMock,
|
||||
} as unknown as Config;
|
||||
await validateNonInteractiveAuth(
|
||||
undefined,
|
||||
undefined,
|
||||
nonInteractiveConfig,
|
||||
mockSettings,
|
||||
);
|
||||
expect(refreshAuthMock).toHaveBeenCalledWith(AuthType.USE_GEMINI);
|
||||
});
|
||||
|
||||
  it('uses configuredAuthType if provided', async () => {
    // Set required env var for USE_GEMINI
    process.env['GEMINI_API_KEY'] = 'fake-key';
    const nonInteractiveConfig = {
      refreshAuth: refreshAuthMock,
    } as unknown as Config;
    await validateNonInteractiveAuth(
      AuthType.USE_GEMINI,
      undefined,
      nonInteractiveConfig,
      mockSettings,
    );
    expect(refreshAuthMock).toHaveBeenCalledWith(AuthType.USE_GEMINI);
  });

  it('exits if validateAuthMethod returns error', async () => {
    // Mock validateAuthMethod to return error
    vi.spyOn(auth, 'validateAuthMethod').mockReturnValue('Auth error!');
@@ -191,25 +317,26 @@ describe('validateNonInterActiveAuth', () => {
  });

  it('uses enforcedAuthType if provided', async () => {
    mockSettings.merged.security!.auth!.enforcedType = AuthType.USE_OPENAI;
    mockSettings.merged.security!.auth!.selectedType = AuthType.USE_OPENAI;
    // Set required env var for USE_OPENAI to ensure enforcedAuthType takes precedence
    process.env['OPENAI_API_KEY'] = 'fake-key';
    mockSettings.merged.security!.auth!.enforcedType = AuthType.USE_GEMINI;
    mockSettings.merged.security!.auth!.selectedType = AuthType.USE_GEMINI;
    // Set required env var for USE_GEMINI to ensure enforcedAuthType takes precedence
    process.env['GEMINI_API_KEY'] = 'fake-key';
    const nonInteractiveConfig = {
      refreshAuth: refreshAuthMock,
    } as unknown as Config;
    await validateNonInteractiveAuth(
      AuthType.USE_OPENAI,
      AuthType.USE_GEMINI,
      undefined,
      nonInteractiveConfig,
      mockSettings,
    );
    expect(refreshAuthMock).toHaveBeenCalledWith(AuthType.USE_OPENAI);
    expect(refreshAuthMock).toHaveBeenCalledWith(AuthType.USE_GEMINI);
  });

  it('exits if currentAuthType does not match enforcedAuthType', async () => {
    mockSettings.merged.security!.auth!.enforcedType = AuthType.QWEN_OAUTH;
    process.env['OPENAI_API_KEY'] = 'fake-key';
    mockSettings.merged.security!.auth!.enforcedType =
      AuthType.LOGIN_WITH_GOOGLE;
    process.env['GOOGLE_GENAI_USE_VERTEXAI'] = 'true';
    const nonInteractiveConfig = {
      refreshAuth: refreshAuthMock,
      getOutputFormat: vi.fn().mockReturnValue(OutputFormat.TEXT),
@@ -219,7 +346,7 @@ describe('validateNonInterActiveAuth', () => {
    } as unknown as Config;
    try {
      await validateNonInteractiveAuth(
        AuthType.USE_OPENAI,
        AuthType.USE_GEMINI,
        undefined,
        nonInteractiveConfig,
        mockSettings,
@@ -229,7 +356,7 @@ describe('validateNonInterActiveAuth', () => {
      expect((e as Error).message).toContain('process.exit(1) called');
    }
    expect(consoleErrorSpy).toHaveBeenCalledWith(
      'The configured auth type is qwen-oauth, but the current auth type is openai. Please re-authenticate with the correct type.',
      'The configured auth type is oauth-personal, but the current auth type is vertex-ai. Please re-authenticate with the correct type.',
    );
    expect(processExitSpy).toHaveBeenCalledWith(1);
  });
@@ -267,8 +394,8 @@ describe('validateNonInterActiveAuth', () => {
  });

  it('prints JSON error when enforced auth mismatches current auth and exits with code 1', async () => {
    mockSettings.merged.security!.auth!.enforcedType = AuthType.QWEN_OAUTH;
    process.env['OPENAI_API_KEY'] = 'fake-key';
    mockSettings.merged.security!.auth!.enforcedType = AuthType.USE_GEMINI;
    process.env['GOOGLE_GENAI_USE_GCA'] = 'true';

    const nonInteractiveConfig = {
      refreshAuth: refreshAuthMock,
@@ -297,14 +424,14 @@ describe('validateNonInterActiveAuth', () => {
      expect(payload.error.type).toBe('Error');
      expect(payload.error.code).toBe(1);
      expect(payload.error.message).toContain(
        'The configured auth type is qwen-oauth, but the current auth type is openai.',
        'The configured auth type is gemini-api-key, but the current auth type is oauth-personal.',
      );
    }
  });

  it('prints JSON error when validateAuthMethod fails and exits with code 1', async () => {
    vi.spyOn(auth, 'validateAuthMethod').mockReturnValue('Auth error!');
    process.env['OPENAI_API_KEY'] = 'fake-key';
    process.env['GEMINI_API_KEY'] = 'fake-key';

    const nonInteractiveConfig = {
      refreshAuth: refreshAuthMock,
@@ -317,7 +444,7 @@ describe('validateNonInterActiveAuth', () => {
    let thrown: Error | undefined;
    try {
      await validateNonInteractiveAuth(
        AuthType.USE_OPENAI,
        AuthType.USE_GEMINI,
        undefined,
        nonInteractiveConfig,
        mockSettings,

@@ -12,13 +12,21 @@ import { type LoadedSettings } from './config/settings.js';
import { handleError } from './utils/errors.js';

function getAuthTypeFromEnv(): AuthType | undefined {
  if (process.env['GOOGLE_GENAI_USE_GCA'] === 'true') {
    return AuthType.LOGIN_WITH_GOOGLE;
  }
  if (process.env['GOOGLE_GENAI_USE_VERTEXAI'] === 'true') {
    return AuthType.USE_VERTEX_AI;
  }
  if (process.env['GEMINI_API_KEY']) {
    return AuthType.USE_GEMINI;
  }
  if (process.env['OPENAI_API_KEY']) {
    return AuthType.USE_OPENAI;
  }
  if (process.env['QWEN_OAUTH']) {
    return AuthType.QWEN_OAUTH;
  }

  return undefined;
}

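The new `getAuthTypeFromEnv` helper resolves the session's auth type from environment variables in a fixed order, so the first matching variable wins. A minimal usage sketch, assuming the same `AuthType` enum is in scope; the key values are placeholders:

```ts
// Placeholders only; GOOGLE_GENAI_USE_GCA and GOOGLE_GENAI_USE_VERTEXAI are
// left unset, so the GEMINI_API_KEY branch is reached first.
process.env['GEMINI_API_KEY'] = 'fake-gemini-key';
process.env['OPENAI_API_KEY'] = 'fake-openai-key';

const resolved = getAuthTypeFromEnv();
console.log(resolved); // AuthType.USE_GEMINI, despite OPENAI_API_KEY being set
```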
@@ -1,6 +1,6 @@
{
  "name": "@qwen-code/qwen-code-core",
  "version": "0.1.5",
  "version": "0.1.1-preview.0",
  "description": "Qwen Code Core",
  "repository": {
    "type": "git",

@@ -16,7 +16,6 @@ import {
  QwenLogger,
} from '../telemetry/index.js';
import type { ContentGeneratorConfig } from '../core/contentGenerator.js';
import { DEFAULT_DASHSCOPE_BASE_URL } from '../core/openaiContentGenerator/constants.js';
import {
  AuthType,
  createContentGeneratorConfig,
@@ -154,11 +153,6 @@ vi.mock('../core/tokenLimits.js', () => ({

describe('Server Config (config.ts)', () => {
  const MODEL = 'qwen3-coder-plus';

  // Default mock for canUseRipgrep to return true (tests that care about ripgrep will override this)
  beforeEach(() => {
    vi.mocked(canUseRipgrep).mockResolvedValue(true);
  });
  const SANDBOX: SandboxConfig = {
    command: 'docker',
    image: 'qwen-code-sandbox',
@@ -256,7 +250,6 @@ describe('Server Config (config.ts)', () => {
      authType,
      {
        model: MODEL,
        baseUrl: DEFAULT_DASHSCOPE_BASE_URL,
      },
    );
    // Verify that contentGeneratorConfig is updated
@@ -583,40 +576,6 @@ describe('Server Config (config.ts)', () => {
    });
  });

  describe('UseBuiltinRipgrep Configuration', () => {
    it('should default useBuiltinRipgrep to true when not provided', () => {
      const config = new Config(baseParams);
      expect(config.getUseBuiltinRipgrep()).toBe(true);
    });

    it('should set useBuiltinRipgrep to false when provided as false', () => {
      const paramsWithBuiltinRipgrep: ConfigParameters = {
        ...baseParams,
        useBuiltinRipgrep: false,
      };
      const config = new Config(paramsWithBuiltinRipgrep);
      expect(config.getUseBuiltinRipgrep()).toBe(false);
    });

    it('should set useBuiltinRipgrep to true when explicitly provided as true', () => {
      const paramsWithBuiltinRipgrep: ConfigParameters = {
        ...baseParams,
        useBuiltinRipgrep: true,
      };
      const config = new Config(paramsWithBuiltinRipgrep);
      expect(config.getUseBuiltinRipgrep()).toBe(true);
    });

    it('should default useBuiltinRipgrep to true when undefined', () => {
      const paramsWithUndefinedBuiltinRipgrep: ConfigParameters = {
        ...baseParams,
        useBuiltinRipgrep: undefined,
      };
      const config = new Config(paramsWithUndefinedBuiltinRipgrep);
      expect(config.getUseBuiltinRipgrep()).toBe(true);
    });
  });

  describe('createToolRegistry', () => {
    it('should register a tool if coreTools contains an argument-specific pattern', async () => {
      const params: ConfigParameters = {
@@ -864,60 +823,10 @@ describe('setApprovalMode with folder trust', () => {

    expect(wasRipGrepRegistered).toBe(true);
    expect(wasGrepRegistered).toBe(false);
    expect(canUseRipgrep).toHaveBeenCalledWith(true);
    expect(logRipgrepFallback).not.toHaveBeenCalled();
  });

  it('should register RipGrepTool with system ripgrep when useBuiltinRipgrep is false', async () => {
    (canUseRipgrep as Mock).mockResolvedValue(true);
    const config = new Config({
      ...baseParams,
      useRipgrep: true,
      useBuiltinRipgrep: false,
    });
    await config.initialize();

    const calls = (ToolRegistry.prototype.registerTool as Mock).mock.calls;
    const wasRipGrepRegistered = calls.some(
      (call) => call[0] instanceof vi.mocked(RipGrepTool),
    );
    const wasGrepRegistered = calls.some(
      (call) => call[0] instanceof vi.mocked(GrepTool),
    );

    expect(wasRipGrepRegistered).toBe(true);
    expect(wasGrepRegistered).toBe(false);
    expect(canUseRipgrep).toHaveBeenCalledWith(false);
  });

  it('should fall back to GrepTool and log error when useBuiltinRipgrep is false but system ripgrep is not available', async () => {
    (canUseRipgrep as Mock).mockResolvedValue(false);
    const config = new Config({
      ...baseParams,
      useRipgrep: true,
      useBuiltinRipgrep: false,
    });
    await config.initialize();

    const calls = (ToolRegistry.prototype.registerTool as Mock).mock.calls;
    const wasRipGrepRegistered = calls.some(
      (call) => call[0] instanceof vi.mocked(RipGrepTool),
    );
    const wasGrepRegistered = calls.some(
      (call) => call[0] instanceof vi.mocked(GrepTool),
    );

    expect(wasRipGrepRegistered).toBe(false);
    expect(wasGrepRegistered).toBe(true);
    expect(canUseRipgrep).toHaveBeenCalledWith(false);
    expect(logRipgrepFallback).toHaveBeenCalledWith(
      config,
      expect.any(RipgrepFallbackEvent),
    );
    const event = (logRipgrepFallback as Mock).mock.calls[0][1];
    expect(event.error).toContain('Ripgrep is not available');
  });

  it('should fall back to GrepTool and log error when useRipgrep is true and builtin ripgrep is not available', async () => {
  it('should register GrepTool as a fallback when useRipgrep is true but it is not available', async () => {
    (canUseRipgrep as Mock).mockResolvedValue(false);
    const config = new Config({ ...baseParams, useRipgrep: true });
    await config.initialize();
@@ -932,16 +841,15 @@ describe('setApprovalMode with folder trust', () => {

    expect(wasRipGrepRegistered).toBe(false);
    expect(wasGrepRegistered).toBe(true);
    expect(canUseRipgrep).toHaveBeenCalledWith(true);
    expect(logRipgrepFallback).toHaveBeenCalledWith(
      config,
      expect.any(RipgrepFallbackEvent),
    );
    const event = (logRipgrepFallback as Mock).mock.calls[0][1];
    expect(event.error).toContain('Ripgrep is not available');
    expect(event.error).toBeUndefined();
  });

  it('should fall back to GrepTool and log error when canUseRipgrep throws an error', async () => {
  it('should register GrepTool as a fallback when canUseRipgrep throws an error', async () => {
    const error = new Error('ripGrep check failed');
    (canUseRipgrep as Mock).mockRejectedValue(error);
    const config = new Config({ ...baseParams, useRipgrep: true });
@@ -980,6 +888,7 @@ describe('setApprovalMode with folder trust', () => {
    expect(wasRipGrepRegistered).toBe(false);
    expect(wasGrepRegistered).toBe(true);
    expect(canUseRipgrep).not.toHaveBeenCalled();
    expect(logRipgrepFallback).not.toHaveBeenCalled();
  });
});
});

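The removed tests above pin down how the two flags used to combine: `useRipgrep` decided whether ripgrep was attempted at all, and `useBuiltinRipgrep` chose the bundled binary over a system install (`canUseRipgrep(true)` versus `canUseRipgrep(false)`), with any probe failure falling back to plain grep. A condensed sketch of that old decision, assuming the boolean-taking `canUseRipgrep` from the removed code:

```ts
// Sketch only: mirrors what the removed tests assert, not code from the repo.
async function pickSearchTool(
  useRipgrep: boolean,
  useBuiltinRipgrep: boolean,
): Promise<'ripgrep' | 'grep'> {
  if (!useRipgrep) return 'grep';
  try {
    // true probes the bundled ripgrep, false probes a system install
    return (await canUseRipgrep(useBuiltinRipgrep)) ? 'ripgrep' : 'grep';
  } catch {
    // a thrown probe error also falls back to the plain grep tool
    return 'grep';
  }
}
```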
@@ -57,7 +57,7 @@ import { TaskTool } from '../tools/task.js';
import { TodoWriteTool } from '../tools/todoWrite.js';
import { ToolRegistry } from '../tools/tool-registry.js';
import { WebFetchTool } from '../tools/web-fetch.js';
import { WebSearchTool } from '../tools/web-search/index.js';
import { WebSearchTool } from '../tools/web-search.js';
import { WriteFileTool } from '../tools/write-file.js';

// Other modules
@@ -88,9 +88,8 @@ import {
  DEFAULT_FILE_FILTERING_OPTIONS,
  DEFAULT_MEMORY_FILE_FILTERING_OPTIONS,
} from './constants.js';
import { DEFAULT_QWEN_EMBEDDING_MODEL, DEFAULT_QWEN_MODEL } from './models.js';
import { DEFAULT_QWEN_EMBEDDING_MODEL } from './models.js';
import { Storage } from './storage.js';
import { DEFAULT_DASHSCOPE_BASE_URL } from '../core/openaiContentGenerator/constants.js';

// Re-export types
export type { AnyToolInvocation, FileFilteringOptions, MCPOAuthConfig };
@@ -244,7 +243,7 @@ export interface ConfigParameters {
  fileDiscoveryService?: FileDiscoveryService;
  includeDirectories?: string[];
  bugCommand?: BugCommandSettings;
  model?: string;
  model: string;
  extensionContextFilePaths?: string[];
  maxSessionTurns?: number;
  sessionTokenLimit?: number;
@@ -262,19 +261,11 @@ export interface ConfigParameters {
  cliVersion?: string;
  loadMemoryFromIncludeDirectories?: boolean;
  // Web search providers
  webSearch?: {
    provider: Array<{
      type: 'tavily' | 'google' | 'dashscope';
      apiKey?: string;
      searchEngineId?: string;
    }>;
    default: string;
  };
  tavilyApiKey?: string;
  chatCompression?: ChatCompressionSettings;
  interactive?: boolean;
  trustedFolder?: boolean;
  useRipgrep?: boolean;
  useBuiltinRipgrep?: boolean;
  shouldUseNodePtyShell?: boolean;
  skipNextSpeakerCheck?: boolean;
  shellExecutionConfig?: ShellExecutionConfig;
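On the new side of this hunk, the structured `webSearch` provider list gives way to a single optional `tavilyApiKey`, and `model` becomes required. A minimal construction sketch, assuming a `baseParams` fixture like the one used in the tests earlier in this diff:

```ts
// Sketch under assumptions: baseParams supplies the other required fields.
const params: ConfigParameters = {
  ...baseParams,
  model: 'qwen3-coder-plus', // now required rather than optional
  tavilyApiKey: process.env['TAVILY_API_KEY'], // undefined leaves web_search off
};
const config = new Config(params);
```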
@@ -298,7 +289,7 @@ export class Config {
  private fileSystemService: FileSystemService;
  private contentGeneratorConfig!: ContentGeneratorConfig;
  private contentGenerator!: ContentGenerator;
  private _generationConfig: Partial<ContentGeneratorConfig>;
  private readonly _generationConfig: ContentGeneratorConfig;
  private readonly embeddingModel: string;
  private readonly sandbox: SandboxConfig | undefined;
  private readonly targetDir: string;
@@ -358,19 +349,11 @@ export class Config {
  private readonly cliVersion?: string;
  private readonly experimentalZedIntegration: boolean = false;
  private readonly loadMemoryFromIncludeDirectories: boolean = false;
  private readonly webSearch?: {
    provider: Array<{
      type: 'tavily' | 'google' | 'dashscope';
      apiKey?: string;
      searchEngineId?: string;
    }>;
    default: string;
  };
  private readonly tavilyApiKey?: string;
  private readonly chatCompression: ChatCompressionSettings | undefined;
  private readonly interactive: boolean;
  private readonly trustedFolder: boolean | undefined;
  private readonly useRipgrep: boolean;
  private readonly useBuiltinRipgrep: boolean;
  private readonly shouldUseNodePtyShell: boolean;
  private readonly skipNextSpeakerCheck: boolean;
  private shellExecutionConfig: ShellExecutionConfig;
@@ -457,10 +440,8 @@ export class Config {
    this._generationConfig = {
      model: params.model,
      ...(params.generationConfig || {}),
      baseUrl: params.generationConfig?.baseUrl || DEFAULT_DASHSCOPE_BASE_URL,
    };
    this.contentGeneratorConfig = this
      ._generationConfig as ContentGeneratorConfig;
    this.contentGeneratorConfig = this._generationConfig;
    this.cliVersion = params.cliVersion;

    this.loadMemoryFromIncludeDirectories =
@@ -468,12 +449,13 @@ export class Config {
    this.chatCompression = params.chatCompression;
    this.interactive = params.interactive ?? false;
    this.trustedFolder = params.trustedFolder;
    this.shouldUseNodePtyShell = params.shouldUseNodePtyShell ?? false;
    this.skipNextSpeakerCheck = params.skipNextSpeakerCheck ?? false;
    this.skipLoopDetection = params.skipLoopDetection ?? false;

    // Web search
    this.webSearch = params.webSearch;
    this.tavilyApiKey = params.tavilyApiKey;
    this.useRipgrep = params.useRipgrep ?? true;
    this.useBuiltinRipgrep = params.useBuiltinRipgrep ?? true;
    this.shouldUseNodePtyShell = params.shouldUseNodePtyShell ?? false;
    this.skipNextSpeakerCheck = params.skipNextSpeakerCheck ?? true;
    this.shellExecutionConfig = {
@@ -538,26 +520,6 @@ export class Config {
    return this.contentGenerator;
  }

  /**
   * Updates the credentials in the generation config.
   * This is needed when credentials are set after Config construction.
   */
  updateCredentials(credentials: {
    apiKey?: string;
    baseUrl?: string;
    model?: string;
  }): void {
    if (credentials.apiKey) {
      this._generationConfig.apiKey = credentials.apiKey;
    }
    if (credentials.baseUrl) {
      this._generationConfig.baseUrl = credentials.baseUrl;
    }
    if (credentials.model) {
      this._generationConfig.model = credentials.model;
    }
  }

  async refreshAuth(authMethod: AuthType) {
    // Vertex and Genai have incompatible encryption, and sending history with
    // thoughtSignature from Genai to Vertex will fail, so we need to strip them
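The `updateCredentials` helper removed here existed so callers could patch credentials into the generation config after the `Config` object was built. A minimal sketch of that old API; the key and URL are placeholders:

```ts
// Sketch of the pre-change API; values are placeholders, not real credentials.
config.updateCredentials({
  apiKey: 'sk-placeholder',
  baseUrl: 'https://example.invalid/v1',
});
// Every field is optional; omitted fields leave existing values untouched.
```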
@@ -625,7 +587,7 @@ export class Config {
  }

  getModel(): string {
    return this.contentGeneratorConfig?.model || DEFAULT_QWEN_MODEL;
    return this.contentGeneratorConfig.model;
  }

  async setModel(
@@ -926,8 +888,8 @@ export class Config {
  }

  // Web search provider configuration
  getWebSearchConfig() {
    return this.webSearch;
  getTavilyApiKey(): string | undefined {
    return this.tavilyApiKey;
  }

  getIdeMode(): boolean {
@@ -1003,10 +965,6 @@ export class Config {
    return this.useRipgrep;
  }

  getUseBuiltinRipgrep(): boolean {
    return this.useBuiltinRipgrep;
  }

  getShouldUseNodePtyShell(): boolean {
    return this.shouldUseNodePtyShell;
  }
@@ -1134,18 +1092,13 @@ export class Config {
    let useRipgrep = false;
    let errorString: undefined | string = undefined;
    try {
      useRipgrep = await canUseRipgrep(this.getUseBuiltinRipgrep());
      useRipgrep = await canUseRipgrep();
    } catch (error: unknown) {
      errorString = String(error);
    }
    if (useRipgrep) {
      registerCoreTool(RipGrepTool, this);
    } else {
      errorString =
        errorString ||
        'Ripgrep is not available. Please install ripgrep globally.';

      // Log for telemetry
      logRipgrepFallback(this, new RipgrepFallbackEvent(errorString));
      registerCoreTool(GrepTool, this);
    }
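In the fallback branch above, the error string is defaulted before logging, so the telemetry event always carries a reason even when the probe merely returned false. A minimal sketch of that contract, assuming the same `RipgrepFallbackEvent`, `logRipgrepFallback`, and `registerCoreTool` names from this hunk:

```ts
// Placeholder for whatever the catch block captured; undefined means the
// probe returned false without throwing.
const probeError: string | undefined = undefined;
const reason =
  probeError ?? 'Ripgrep is not available. Please install ripgrep globally.';
// Log exactly one fallback event, then register the plain grep tool instead.
logRipgrepFallback(config, new RipgrepFallbackEvent(reason));
registerCoreTool(GrepTool, config);
```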
@@ -1166,10 +1119,8 @@ export class Config {
    registerCoreTool(TodoWriteTool, this);
    registerCoreTool(ExitPlanModeTool, this);
    registerCoreTool(WebFetchTool, this);
    // Conditionally register web search tool if web search provider is configured
    // buildWebSearchConfig ensures qwen-oauth users get dashscope provider, so
    // if tool is registered, config must exist
    if (this.getWebSearchConfig()) {
    // Conditionally register web search tool only if Tavily API key is set
    if (this.getTavilyApiKey()) {
      registerCoreTool(WebSearchTool, this);
    }


@@ -69,7 +69,7 @@ I've found some existing telemetry code. Let me mark the first todo as in_progre
## Software Engineering Tasks
When requested to perform tasks like fixing bugs, adding features, refactoring, or explaining code, follow this iterative approach:
- **Plan:** After understanding the user's request, create an initial plan based on your existing knowledge and any immediately obvious context. Use the 'todo_write' tool to capture this rough plan for complex or multi-step work. Don't wait for complete understanding - start with what you know.
- **Implement:** Begin implementing the plan while gathering additional context as needed. Use 'grep_search', 'glob', 'read_file', and 'read_many_files' tools strategically when you encounter specific unknowns during implementation. Use the available tools (e.g., 'edit', 'write_file', 'run_shell_command', ...) to act on the plan, strictly adhering to the project's established conventions (detailed under 'Core Mandates').
- **Implement:** Begin implementing the plan while gathering additional context as needed. Use 'search_file_content', 'glob', 'read_file', and 'read_many_files' tools strategically when you encounter specific unknowns during implementation. Use the available tools (e.g., 'edit', 'write_file', 'run_shell_command', ...) to act on the plan, strictly adhering to the project's established conventions (detailed under 'Core Mandates').
- **Adapt:** As you discover new information or encounter obstacles, update your plan and todos accordingly. Mark todos as in_progress when starting and completed when finishing each task. Add new todos if the scope expands. Refine your approach based on what you learn.
- **Verify (Tests):** If applicable and feasible, verify the changes using the project's testing procedures. Identify the correct test commands and frameworks by examining 'README' files, build/package configuration (e.g., 'package.json'), or existing test execution patterns. NEVER assume standard test commands.
- **Verify (Standards):** VERY IMPORTANT: After making code changes, execute the project-specific build, linting and type-checking commands (e.g., 'tsc', 'npm run lint', 'ruff check .') that you have identified for this project (or obtained from the user). This ensures code quality and adherence to standards. If unsure about these commands, you can ask the user if they'd like you to run them and if so how to.
@@ -288,7 +288,7 @@ I've found some existing telemetry code. Let me mark the first todo as in_progre
## Software Engineering Tasks
When requested to perform tasks like fixing bugs, adding features, refactoring, or explaining code, follow this iterative approach:
- **Plan:** After understanding the user's request, create an initial plan based on your existing knowledge and any immediately obvious context. Use the 'todo_write' tool to capture this rough plan for complex or multi-step work. Don't wait for complete understanding - start with what you know.
- **Implement:** Begin implementing the plan while gathering additional context as needed. Use 'grep_search', 'glob', 'read_file', and 'read_many_files' tools strategically when you encounter specific unknowns during implementation. Use the available tools (e.g., 'edit', 'write_file', 'run_shell_command', ...) to act on the plan, strictly adhering to the project's established conventions (detailed under 'Core Mandates').
- **Implement:** Begin implementing the plan while gathering additional context as needed. Use 'search_file_content', 'glob', 'read_file', and 'read_many_files' tools strategically when you encounter specific unknowns during implementation. Use the available tools (e.g., 'edit', 'write_file', 'run_shell_command', ...) to act on the plan, strictly adhering to the project's established conventions (detailed under 'Core Mandates').
- **Adapt:** As you discover new information or encounter obstacles, update your plan and todos accordingly. Mark todos as in_progress when starting and completed when finishing each task. Add new todos if the scope expands. Refine your approach based on what you learn.
- **Verify (Tests):** If applicable and feasible, verify the changes using the project's testing procedures. Identify the correct test commands and frameworks by examining 'README' files, build/package configuration (e.g., 'package.json'), or existing test execution patterns. NEVER assume standard test commands.
- **Verify (Standards):** VERY IMPORTANT: After making code changes, execute the project-specific build, linting and type-checking commands (e.g., 'tsc', 'npm run lint', 'ruff check .') that you have identified for this project (or obtained from the user). This ensures code quality and adherence to standards. If unsure about these commands, you can ask the user if they'd like you to run them and if so how to.
@@ -517,7 +517,7 @@ I've found some existing telemetry code. Let me mark the first todo as in_progre
## Software Engineering Tasks
When requested to perform tasks like fixing bugs, adding features, refactoring, or explaining code, follow this iterative approach:
- **Plan:** After understanding the user's request, create an initial plan based on your existing knowledge and any immediately obvious context. Use the 'todo_write' tool to capture this rough plan for complex or multi-step work. Don't wait for complete understanding - start with what you know.
- **Implement:** Begin implementing the plan while gathering additional context as needed. Use 'grep_search', 'glob', 'read_file', and 'read_many_files' tools strategically when you encounter specific unknowns during implementation. Use the available tools (e.g., 'edit', 'write_file', 'run_shell_command', ...) to act on the plan, strictly adhering to the project's established conventions (detailed under 'Core Mandates').
- **Implement:** Begin implementing the plan while gathering additional context as needed. Use 'search_file_content', 'glob', 'read_file', and 'read_many_files' tools strategically when you encounter specific unknowns during implementation. Use the available tools (e.g., 'edit', 'write_file', 'run_shell_command', ...) to act on the plan, strictly adhering to the project's established conventions (detailed under 'Core Mandates').
- **Adapt:** As you discover new information or encounter obstacles, update your plan and todos accordingly. Mark todos as in_progress when starting and completed when finishing each task. Add new todos if the scope expands. Refine your approach based on what you learn.
- **Verify (Tests):** If applicable and feasible, verify the changes using the project's testing procedures. Identify the correct test commands and frameworks by examining 'README' files, build/package configuration (e.g., 'package.json'), or existing test execution patterns. NEVER assume standard test commands.
- **Verify (Standards):** VERY IMPORTANT: After making code changes, execute the project-specific build, linting and type-checking commands (e.g., 'tsc', 'npm run lint', 'ruff check .') that you have identified for this project (or obtained from the user). This ensures code quality and adherence to standards. If unsure about these commands, you can ask the user if they'd like you to run them and if so how to.
@@ -731,7 +731,7 @@ I've found some existing telemetry code. Let me mark the first todo as in_progre
## Software Engineering Tasks
When requested to perform tasks like fixing bugs, adding features, refactoring, or explaining code, follow this iterative approach:
- **Plan:** After understanding the user's request, create an initial plan based on your existing knowledge and any immediately obvious context. Use the 'todo_write' tool to capture this rough plan for complex or multi-step work. Don't wait for complete understanding - start with what you know.
- **Implement:** Begin implementing the plan while gathering additional context as needed. Use 'grep_search', 'glob', 'read_file', and 'read_many_files' tools strategically when you encounter specific unknowns during implementation. Use the available tools (e.g., 'edit', 'write_file', 'run_shell_command', ...) to act on the plan, strictly adhering to the project's established conventions (detailed under 'Core Mandates').
- **Implement:** Begin implementing the plan while gathering additional context as needed. Use 'search_file_content', 'glob', 'read_file', and 'read_many_files' tools strategically when you encounter specific unknowns during implementation. Use the available tools (e.g., 'edit', 'write_file', 'run_shell_command', ...) to act on the plan, strictly adhering to the project's established conventions (detailed under 'Core Mandates').
- **Adapt:** As you discover new information or encounter obstacles, update your plan and todos accordingly. Mark todos as in_progress when starting and completed when finishing each task. Add new todos if the scope expands. Refine your approach based on what you learn.
- **Verify (Tests):** If applicable and feasible, verify the changes using the project's testing procedures. Identify the correct test commands and frameworks by examining 'README' files, build/package configuration (e.g., 'package.json'), or existing test execution patterns. NEVER assume standard test commands.
- **Verify (Standards):** VERY IMPORTANT: After making code changes, execute the project-specific build, linting and type-checking commands (e.g., 'tsc', 'npm run lint', 'ruff check .') that you have identified for this project (or obtained from the user). This ensures code quality and adherence to standards. If unsure about these commands, you can ask the user if they'd like you to run them and if so how to.
@@ -945,7 +945,7 @@ I've found some existing telemetry code. Let me mark the first todo as in_progre
## Software Engineering Tasks
When requested to perform tasks like fixing bugs, adding features, refactoring, or explaining code, follow this iterative approach:
- **Plan:** After understanding the user's request, create an initial plan based on your existing knowledge and any immediately obvious context. Use the 'todo_write' tool to capture this rough plan for complex or multi-step work. Don't wait for complete understanding - start with what you know.
- **Implement:** Begin implementing the plan while gathering additional context as needed. Use 'grep_search', 'glob', 'read_file', and 'read_many_files' tools strategically when you encounter specific unknowns during implementation. Use the available tools (e.g., 'edit', 'write_file', 'run_shell_command', ...) to act on the plan, strictly adhering to the project's established conventions (detailed under 'Core Mandates').
- **Implement:** Begin implementing the plan while gathering additional context as needed. Use 'search_file_content', 'glob', 'read_file', and 'read_many_files' tools strategically when you encounter specific unknowns during implementation. Use the available tools (e.g., 'edit', 'write_file', 'run_shell_command', ...) to act on the plan, strictly adhering to the project's established conventions (detailed under 'Core Mandates').
- **Adapt:** As you discover new information or encounter obstacles, update your plan and todos accordingly. Mark todos as in_progress when starting and completed when finishing each task. Add new todos if the scope expands. Refine your approach based on what you learn.
- **Verify (Tests):** If applicable and feasible, verify the changes using the project's testing procedures. Identify the correct test commands and frameworks by examining 'README' files, build/package configuration (e.g., 'package.json'), or existing test execution patterns. NEVER assume standard test commands.
- **Verify (Standards):** VERY IMPORTANT: After making code changes, execute the project-specific build, linting and type-checking commands (e.g., 'tsc', 'npm run lint', 'ruff check .') that you have identified for this project (or obtained from the user). This ensures code quality and adherence to standards. If unsure about these commands, you can ask the user if they'd like you to run them and if so how to.
@@ -1159,7 +1159,7 @@ I've found some existing telemetry code. Let me mark the first todo as in_progre
## Software Engineering Tasks
When requested to perform tasks like fixing bugs, adding features, refactoring, or explaining code, follow this iterative approach:
- **Plan:** After understanding the user's request, create an initial plan based on your existing knowledge and any immediately obvious context. Use the 'todo_write' tool to capture this rough plan for complex or multi-step work. Don't wait for complete understanding - start with what you know.
- **Implement:** Begin implementing the plan while gathering additional context as needed. Use 'grep_search', 'glob', 'read_file', and 'read_many_files' tools strategically when you encounter specific unknowns during implementation. Use the available tools (e.g., 'edit', 'write_file', 'run_shell_command', ...) to act on the plan, strictly adhering to the project's established conventions (detailed under 'Core Mandates').
- **Implement:** Begin implementing the plan while gathering additional context as needed. Use 'search_file_content', 'glob', 'read_file', and 'read_many_files' tools strategically when you encounter specific unknowns during implementation. Use the available tools (e.g., 'edit', 'write_file', 'run_shell_command', ...) to act on the plan, strictly adhering to the project's established conventions (detailed under 'Core Mandates').
- **Adapt:** As you discover new information or encounter obstacles, update your plan and todos accordingly. Mark todos as in_progress when starting and completed when finishing each task. Add new todos if the scope expands. Refine your approach based on what you learn.
- **Verify (Tests):** If applicable and feasible, verify the changes using the project's testing procedures. Identify the correct test commands and frameworks by examining 'README' files, build/package configuration (e.g., 'package.json'), or existing test execution patterns. NEVER assume standard test commands.
- **Verify (Standards):** VERY IMPORTANT: After making code changes, execute the project-specific build, linting and type-checking commands (e.g., 'tsc', 'npm run lint', 'ruff check .') that you have identified for this project (or obtained from the user). This ensures code quality and adherence to standards. If unsure about these commands, you can ask the user if they'd like you to run them and if so how to.
@@ -1373,7 +1373,7 @@ I've found some existing telemetry code. Let me mark the first todo as in_progre
## Software Engineering Tasks
When requested to perform tasks like fixing bugs, adding features, refactoring, or explaining code, follow this iterative approach:
- **Plan:** After understanding the user's request, create an initial plan based on your existing knowledge and any immediately obvious context. Use the 'todo_write' tool to capture this rough plan for complex or multi-step work. Don't wait for complete understanding - start with what you know.
- **Implement:** Begin implementing the plan while gathering additional context as needed. Use 'grep_search', 'glob', 'read_file', and 'read_many_files' tools strategically when you encounter specific unknowns during implementation. Use the available tools (e.g., 'edit', 'write_file', 'run_shell_command', ...) to act on the plan, strictly adhering to the project's established conventions (detailed under 'Core Mandates').
- **Implement:** Begin implementing the plan while gathering additional context as needed. Use 'search_file_content', 'glob', 'read_file', and 'read_many_files' tools strategically when you encounter specific unknowns during implementation. Use the available tools (e.g., 'edit', 'write_file', 'run_shell_command', ...) to act on the plan, strictly adhering to the project's established conventions (detailed under 'Core Mandates').
- **Adapt:** As you discover new information or encounter obstacles, update your plan and todos accordingly. Mark todos as in_progress when starting and completed when finishing each task. Add new todos if the scope expands. Refine your approach based on what you learn.
- **Verify (Tests):** If applicable and feasible, verify the changes using the project's testing procedures. Identify the correct test commands and frameworks by examining 'README' files, build/package configuration (e.g., 'package.json'), or existing test execution patterns. NEVER assume standard test commands.
- **Verify (Standards):** VERY IMPORTANT: After making code changes, execute the project-specific build, linting and type-checking commands (e.g., 'tsc', 'npm run lint', 'ruff check .') that you have identified for this project (or obtained from the user). This ensures code quality and adherence to standards. If unsure about these commands, you can ask the user if they'd like you to run them and if so how to.
@@ -1587,7 +1587,7 @@ I've found some existing telemetry code. Let me mark the first todo as in_progre
## Software Engineering Tasks
When requested to perform tasks like fixing bugs, adding features, refactoring, or explaining code, follow this iterative approach:
- **Plan:** After understanding the user's request, create an initial plan based on your existing knowledge and any immediately obvious context. Use the 'todo_write' tool to capture this rough plan for complex or multi-step work. Don't wait for complete understanding - start with what you know.
- **Implement:** Begin implementing the plan while gathering additional context as needed. Use 'grep_search', 'glob', 'read_file', and 'read_many_files' tools strategically when you encounter specific unknowns during implementation. Use the available tools (e.g., 'edit', 'write_file', 'run_shell_command', ...) to act on the plan, strictly adhering to the project's established conventions (detailed under 'Core Mandates').
- **Implement:** Begin implementing the plan while gathering additional context as needed. Use 'search_file_content', 'glob', 'read_file', and 'read_many_files' tools strategically when you encounter specific unknowns during implementation. Use the available tools (e.g., 'edit', 'write_file', 'run_shell_command', ...) to act on the plan, strictly adhering to the project's established conventions (detailed under 'Core Mandates').
- **Adapt:** As you discover new information or encounter obstacles, update your plan and todos accordingly. Mark todos as in_progress when starting and completed when finishing each task. Add new todos if the scope expands. Refine your approach based on what you learn.
- **Verify (Tests):** If applicable and feasible, verify the changes using the project's testing procedures. Identify the correct test commands and frameworks by examining 'README' files, build/package configuration (e.g., 'package.json'), or existing test execution patterns. NEVER assume standard test commands.
- **Verify (Standards):** VERY IMPORTANT: After making code changes, execute the project-specific build, linting and type-checking commands (e.g., 'tsc', 'npm run lint', 'ruff check .') that you have identified for this project (or obtained from the user). This ensures code quality and adherence to standards. If unsure about these commands, you can ask the user if they'd like you to run them and if so how to.
@@ -1801,7 +1801,7 @@ I've found some existing telemetry code. Let me mark the first todo as in_progre
## Software Engineering Tasks
When requested to perform tasks like fixing bugs, adding features, refactoring, or explaining code, follow this iterative approach:
- **Plan:** After understanding the user's request, create an initial plan based on your existing knowledge and any immediately obvious context. Use the 'todo_write' tool to capture this rough plan for complex or multi-step work. Don't wait for complete understanding - start with what you know.
- **Implement:** Begin implementing the plan while gathering additional context as needed. Use 'grep_search', 'glob', 'read_file', and 'read_many_files' tools strategically when you encounter specific unknowns during implementation. Use the available tools (e.g., 'edit', 'write_file', 'run_shell_command', ...) to act on the plan, strictly adhering to the project's established conventions (detailed under 'Core Mandates').
- **Implement:** Begin implementing the plan while gathering additional context as needed. Use 'search_file_content', 'glob', 'read_file', and 'read_many_files' tools strategically when you encounter specific unknowns during implementation. Use the available tools (e.g., 'edit', 'write_file', 'run_shell_command', ...) to act on the plan, strictly adhering to the project's established conventions (detailed under 'Core Mandates').
- **Adapt:** As you discover new information or encounter obstacles, update your plan and todos accordingly. Mark todos as in_progress when starting and completed when finishing each task. Add new todos if the scope expands. Refine your approach based on what you learn.
- **Verify (Tests):** If applicable and feasible, verify the changes using the project's testing procedures. Identify the correct test commands and frameworks by examining 'README' files, build/package configuration (e.g., 'package.json'), or existing test execution patterns. NEVER assume standard test commands.
- **Verify (Standards):** VERY IMPORTANT: After making code changes, execute the project-specific build, linting and type-checking commands (e.g., 'tsc', 'npm run lint', 'ruff check .') that you have identified for this project (or obtained from the user). This ensures code quality and adherence to standards. If unsure about these commands, you can ask the user if they'd like you to run them and if so how to.
@@ -2015,7 +2015,7 @@ I've found some existing telemetry code. Let me mark the first todo as in_progre
## Software Engineering Tasks
When requested to perform tasks like fixing bugs, adding features, refactoring, or explaining code, follow this iterative approach:
- **Plan:** After understanding the user's request, create an initial plan based on your existing knowledge and any immediately obvious context. Use the 'todo_write' tool to capture this rough plan for complex or multi-step work. Don't wait for complete understanding - start with what you know.
- **Implement:** Begin implementing the plan while gathering additional context as needed. Use 'grep_search', 'glob', 'read_file', and 'read_many_files' tools strategically when you encounter specific unknowns during implementation. Use the available tools (e.g., 'edit', 'write_file', 'run_shell_command', ...) to act on the plan, strictly adhering to the project's established conventions (detailed under 'Core Mandates').
- **Implement:** Begin implementing the plan while gathering additional context as needed. Use 'search_file_content', 'glob', 'read_file', and 'read_many_files' tools strategically when you encounter specific unknowns during implementation. Use the available tools (e.g., 'edit', 'write_file', 'run_shell_command', ...) to act on the plan, strictly adhering to the project's established conventions (detailed under 'Core Mandates').
- **Adapt:** As you discover new information or encounter obstacles, update your plan and todos accordingly. Mark todos as in_progress when starting and completed when finishing each task. Add new todos if the scope expands. Refine your approach based on what you learn.
- **Verify (Tests):** If applicable and feasible, verify the changes using the project's testing procedures. Identify the correct test commands and frameworks by examining 'README' files, build/package configuration (e.g., 'package.json'), or existing test execution patterns. NEVER assume standard test commands.
- **Verify (Standards):** VERY IMPORTANT: After making code changes, execute the project-specific build, linting and type-checking commands (e.g., 'tsc', 'npm run lint', 'ruff check .') that you have identified for this project (or obtained from the user). This ensures code quality and adherence to standards. If unsure about these commands, you can ask the user if they'd like you to run them and if so how to.
@@ -2252,7 +2252,7 @@ I've found some existing telemetry code. Let me mark the first todo as in_progre
## Software Engineering Tasks
When requested to perform tasks like fixing bugs, adding features, refactoring, or explaining code, follow this iterative approach:
- **Plan:** After understanding the user's request, create an initial plan based on your existing knowledge and any immediately obvious context. Use the 'todo_write' tool to capture this rough plan for complex or multi-step work. Don't wait for complete understanding - start with what you know.
- **Implement:** Begin implementing the plan while gathering additional context as needed. Use 'grep_search', 'glob', 'read_file', and 'read_many_files' tools strategically when you encounter specific unknowns during implementation. Use the available tools (e.g., 'edit', 'write_file', 'run_shell_command', ...) to act on the plan, strictly adhering to the project's established conventions (detailed under 'Core Mandates').
- **Implement:** Begin implementing the plan while gathering additional context as needed. Use 'search_file_content', 'glob', 'read_file', and 'read_many_files' tools strategically when you encounter specific unknowns during implementation. Use the available tools (e.g., 'edit', 'write_file', 'run_shell_command', ...) to act on the plan, strictly adhering to the project's established conventions (detailed under 'Core Mandates').
- **Adapt:** As you discover new information or encounter obstacles, update your plan and todos accordingly. Mark todos as in_progress when starting and completed when finishing each task. Add new todos if the scope expands. Refine your approach based on what you learn.
- **Verify (Tests):** If applicable and feasible, verify the changes using the project's testing procedures. Identify the correct test commands and frameworks by examining 'README' files, build/package configuration (e.g., 'package.json'), or existing test execution patterns. NEVER assume standard test commands.
- **Verify (Standards):** VERY IMPORTANT: After making code changes, execute the project-specific build, linting and type-checking commands (e.g., 'tsc', 'npm run lint', 'ruff check .') that you have identified for this project (or obtained from the user). This ensures code quality and adherence to standards. If unsure about these commands, you can ask the user if they'd like you to run them and if so how to.
@@ -2549,7 +2549,7 @@ I've found some existing telemetry code. Let me mark the first todo as in_progre
## Software Engineering Tasks
When requested to perform tasks like fixing bugs, adding features, refactoring, or explaining code, follow this iterative approach:
- **Plan:** After understanding the user's request, create an initial plan based on your existing knowledge and any immediately obvious context. Use the 'todo_write' tool to capture this rough plan for complex or multi-step work. Don't wait for complete understanding - start with what you know.
- **Implement:** Begin implementing the plan while gathering additional context as needed. Use 'grep_search', 'glob', 'read_file', and 'read_many_files' tools strategically when you encounter specific unknowns during implementation. Use the available tools (e.g., 'edit', 'write_file', 'run_shell_command', ...) to act on the plan, strictly adhering to the project's established conventions (detailed under 'Core Mandates').
- **Implement:** Begin implementing the plan while gathering additional context as needed. Use 'search_file_content', 'glob', 'read_file', and 'read_many_files' tools strategically when you encounter specific unknowns during implementation. Use the available tools (e.g., 'edit', 'write_file', 'run_shell_command', ...) to act on the plan, strictly adhering to the project's established conventions (detailed under 'Core Mandates').
- **Adapt:** As you discover new information or encounter obstacles, update your plan and todos accordingly. Mark todos as in_progress when starting and completed when finishing each task. Add new todos if the scope expands. Refine your approach based on what you learn.
- **Verify (Tests):** If applicable and feasible, verify the changes using the project's testing procedures. Identify the correct test commands and frameworks by examining 'README' files, build/package configuration (e.g., 'package.json'), or existing test execution patterns. NEVER assume standard test commands.
- **Verify (Standards):** VERY IMPORTANT: After making code changes, execute the project-specific build, linting and type-checking commands (e.g., 'tsc', 'npm run lint', 'ruff check .') that you have identified for this project (or obtained from the user). This ensures code quality and adherence to standards. If unsure about these commands, you can ask the user if they'd like you to run them and if so how to.
@@ -2786,7 +2786,7 @@ I've found some existing telemetry code. Let me mark the first todo as in_progre
## Software Engineering Tasks
When requested to perform tasks like fixing bugs, adding features, refactoring, or explaining code, follow this iterative approach:
- **Plan:** After understanding the user's request, create an initial plan based on your existing knowledge and any immediately obvious context. Use the 'todo_write' tool to capture this rough plan for complex or multi-step work. Don't wait for complete understanding - start with what you know.
- **Implement:** Begin implementing the plan while gathering additional context as needed. Use 'grep_search', 'glob', 'read_file', and 'read_many_files' tools strategically when you encounter specific unknowns during implementation. Use the available tools (e.g., 'edit', 'write_file', 'run_shell_command', ...) to act on the plan, strictly adhering to the project's established conventions (detailed under 'Core Mandates').
- **Implement:** Begin implementing the plan while gathering additional context as needed. Use 'search_file_content', 'glob', 'read_file', and 'read_many_files' tools strategically when you encounter specific unknowns during implementation. Use the available tools (e.g., 'edit', 'write_file', 'run_shell_command', ...) to act on the plan, strictly adhering to the project's established conventions (detailed under 'Core Mandates').
- **Adapt:** As you discover new information or encounter obstacles, update your plan and todos accordingly. Mark todos as in_progress when starting and completed when finishing each task. Add new todos if the scope expands. Refine your approach based on what you learn.
- **Verify (Tests):** If applicable and feasible, verify the changes using the project's testing procedures. Identify the correct test commands and frameworks by examining 'README' files, build/package configuration (e.g., 'package.json'), or existing test execution patterns. NEVER assume standard test commands.
- **Verify (Standards):** VERY IMPORTANT: After making code changes, execute the project-specific build, linting and type-checking commands (e.g., 'tsc', 'npm run lint', 'ruff check .') that you have identified for this project (or obtained from the user). This ensures code quality and adherence to standards. If unsure about these commands, you can ask the user if they'd like you to run them and if so how to.
@@ -3079,7 +3079,7 @@ I've found some existing telemetry code. Let me mark the first todo as in_progre
## Software Engineering Tasks
When requested to perform tasks like fixing bugs, adding features, refactoring, or explaining code, follow this iterative approach:
- **Plan:** After understanding the user's request, create an initial plan based on your existing knowledge and any immediately obvious context. Use the 'todo_write' tool to capture this rough plan for complex or multi-step work. Don't wait for complete understanding - start with what you know.
- **Implement:** Begin implementing the plan while gathering additional context as needed. Use 'grep_search', 'glob', 'read_file', and 'read_many_files' tools strategically when you encounter specific unknowns during implementation. Use the available tools (e.g., 'edit', 'write_file', 'run_shell_command', ...) to act on the plan, strictly adhering to the project's established conventions (detailed under 'Core Mandates').
- **Implement:** Begin implementing the plan while gathering additional context as needed. Use 'search_file_content', 'glob', 'read_file', and 'read_many_files' tools strategically when you encounter specific unknowns during implementation. Use the available tools (e.g., 'edit', 'write_file', 'run_shell_command', ...) to act on the plan, strictly adhering to the project's established conventions (detailed under 'Core Mandates').
- **Adapt:** As you discover new information or encounter obstacles, update your plan and todos accordingly. Mark todos as in_progress when starting and completed when finishing each task. Add new todos if the scope expands. Refine your approach based on what you learn.
- **Verify (Tests):** If applicable and feasible, verify the changes using the project's testing procedures. Identify the correct test commands and frameworks by examining 'README' files, build/package configuration (e.g., 'package.json'), or existing test execution patterns. NEVER assume standard test commands.
- **Verify (Standards):** VERY IMPORTANT: After making code changes, execute the project-specific build, linting and type-checking commands (e.g., 'tsc', 'npm run lint', 'ruff check .') that you have identified for this project (or obtained from the user). This ensures code quality and adherence to standards. If unsure about these commands, you can ask the user if they'd like you to run them and if so how to.
@@ -3293,7 +3293,7 @@ I've found some existing telemetry code. Let me mark the first todo as in_progre
## Software Engineering Tasks
When requested to perform tasks like fixing bugs, adding features, refactoring, or explaining code, follow this iterative approach:
- **Plan:** After understanding the user's request, create an initial plan based on your existing knowledge and any immediately obvious context. Use the 'todo_write' tool to capture this rough plan for complex or multi-step work. Don't wait for complete understanding - start with what you know.
- **Implement:** Begin implementing the plan while gathering additional context as needed. Use 'grep_search', 'glob', 'read_file', and 'read_many_files' tools strategically when you encounter specific unknowns during implementation. Use the available tools (e.g., 'edit', 'write_file', 'run_shell_command', ...) to act on the plan, strictly adhering to the project's established conventions (detailed under 'Core Mandates').
- **Implement:** Begin implementing the plan while gathering additional context as needed. Use 'search_file_content', 'glob', 'read_file', and 'read_many_files' tools strategically when you encounter specific unknowns during implementation. Use the available tools (e.g., 'edit', 'write_file', 'run_shell_command', ...) to act on the plan, strictly adhering to the project's established conventions (detailed under 'Core Mandates').
- **Adapt:** As you discover new information or encounter obstacles, update your plan and todos accordingly. Mark todos as in_progress when starting and completed when finishing each task. Add new todos if the scope expands. Refine your approach based on what you learn.
- **Verify (Tests):** If applicable and feasible, verify the changes using the project's testing procedures. Identify the correct test commands and frameworks by examining 'README' files, build/package configuration (e.g., 'package.json'), or existing test execution patterns. NEVER assume standard test commands.
- **Verify (Standards):** VERY IMPORTANT: After making code changes, execute the project-specific build, linting and type-checking commands (e.g., 'tsc', 'npm run lint', 'ruff check .') that you have identified for this project (or obtained from the user). This ensures code quality and adherence to standards. If unsure about these commands, you can ask the user if they'd like you to run them and if so how to.

@@ -21,9 +21,6 @@ vi.mock('../../telemetry/loggers.js', () => ({
}));

vi.mock('../../utils/openaiLogger.js', () => ({
  OpenAILogger: vi.fn().mockImplementation(() => ({
    logInteraction: vi.fn(),
  })),
  openaiLogger: {
    logInteraction: vi.fn(),
  },

@@ -16,11 +16,11 @@ import {

import type { Content, GenerateContentResponse, Part } from '@google/genai';
import {
  findCompressSplitPoint,
  isThinkingDefault,
  isThinkingSupported,
  GeminiClient,
} from './client.js';
import { findCompressSplitPoint } from '../services/chatCompressionService.js';
import {
  AuthType,
  type ContentGenerator,
@@ -42,6 +42,7 @@ import { setSimulate429 } from '../utils/testUtils.js';
import { tokenLimit } from './tokenLimits.js';
import { ideContextStore } from '../ide/ideContext.js';
import { uiTelemetryService } from '../telemetry/uiTelemetry.js';
import { QwenLogger } from '../telemetry/index.js';

// Mock fs module to prevent actual file system operations during tests
const mockFileSystem = new Map<string, string>();
@@ -100,22 +101,6 @@ vi.mock('../utils/errorReporting', () => ({ reportError: vi.fn() }));
vi.mock('../utils/nextSpeakerChecker', () => ({
  checkNextSpeaker: vi.fn().mockResolvedValue(null),
}));
vi.mock('../utils/environmentContext', () => ({
  getEnvironmentContext: vi
    .fn()
    .mockResolvedValue([{ text: 'Mocked env context' }]),
  getInitialChatHistory: vi.fn(async (_config, extraHistory) => [
    {
      role: 'user',
      parts: [{ text: 'Mocked env context' }],
    },
    {
      role: 'model',
      parts: [{ text: 'Got it. Thanks for the context!' }],
    },
    ...(extraHistory ?? []),
  ]),
}));
vi.mock('../utils/generateContentResponseUtilities', () => ({
  getResponseText: (result: GenerateContentResponse) =>
    result.candidates?.[0]?.content?.parts?.map((part) => part.text).join('') ||
@@ -151,10 +136,6 @@ vi.mock('../ide/ideContext.js');
vi.mock('../telemetry/uiTelemetry.js', () => ({
  uiTelemetryService: mockUiTelemetryService,
}));
vi.mock('../telemetry/loggers.js', () => ({
  logChatCompression: vi.fn(),
  logNextSpeakerCheck: vi.fn(),
}));

/**
 * Array.fromAsync ponyfill, which will be available in es 2024.
@@ -638,8 +619,7 @@ describe('Gemini Client (client.ts)', () => {
  });

  it('logs a telemetry event when compressing', async () => {
    const { logChatCompression } = await import('../telemetry/loggers.js');
    vi.mocked(logChatCompression).mockClear();
    vi.spyOn(QwenLogger.prototype, 'logChatCompressionEvent');

    const MOCKED_TOKEN_LIMIT = 1000;
    const MOCKED_CONTEXT_PERCENTAGE_THRESHOLD = 0.5;
@@ -647,37 +627,19 @@ describe('Gemini Client (client.ts)', () => {
    vi.spyOn(client['config'], 'getChatCompression').mockReturnValue({
      contextPercentageThreshold: MOCKED_CONTEXT_PERCENTAGE_THRESHOLD,
    });
    // Need multiple history items so there's something to compress
    const history = [
      { role: 'user', parts: [{ text: '...history 1...' }] },
      { role: 'model', parts: [{ text: '...history 2...' }] },
      { role: 'user', parts: [{ text: '...history 3...' }] },
      { role: 'model', parts: [{ text: '...history 4...' }] },
    ];
    const history = [{ role: 'user', parts: [{ text: '...history...' }] }];
    mockGetHistory.mockReturnValue(history);

    // Token count needs to be ABOVE the threshold to trigger compression
    const originalTokenCount =
      MOCKED_TOKEN_LIMIT * MOCKED_CONTEXT_PERCENTAGE_THRESHOLD + 1;
      MOCKED_TOKEN_LIMIT * MOCKED_CONTEXT_PERCENTAGE_THRESHOLD;

    vi.mocked(uiTelemetryService.getLastPromptTokenCount).mockReturnValue(
      originalTokenCount,
    );

    // Mock the summary response from the chat
    // We need to control the estimated new token count.
    // We mock startChat to return a chat with a known history.
    const summaryText = 'This is a summary.';
    mockGenerateContentFn.mockResolvedValue({
      candidates: [
        {
          content: {
            role: 'model',
            parts: [{ text: summaryText }],
          },
        },
      ],
    } as unknown as GenerateContentResponse);

    // Mock startChat to complete the compression flow
    const splitPoint = findCompressSplitPoint(history, 0.7);
    const historyToKeep = history.slice(splitPoint);
    const newCompressedHistory: Content[] = [
@@ -697,36 +659,52 @@ describe('Gemini Client (client.ts)', () => {
      .fn()
      .mockResolvedValue(mockNewChat as GeminiChat);

    const totalChars = newCompressedHistory.reduce(
      (total, content) => total + JSON.stringify(content).length,
      0,
    );
    const newTokenCount = Math.floor(totalChars / 4);

    // Mock the summary response from the chat
    mockGenerateContentFn.mockResolvedValue({
      candidates: [
        {
          content: {
            role: 'model',
            parts: [{ text: summaryText }],
          },
        },
      ],
    } as unknown as GenerateContentResponse);

    await client.tryCompressChat('prompt-id-3', false);

    expect(logChatCompression).toHaveBeenCalledWith(
      expect.anything(),
    expect(QwenLogger.prototype.logChatCompressionEvent).toHaveBeenCalledWith(
      expect.objectContaining({
        tokens_before: originalTokenCount,
        tokens_after: newTokenCount,
      }),
    );
    expect(uiTelemetryService.setLastPromptTokenCount).toHaveBeenCalled();
    expect(uiTelemetryService.setLastPromptTokenCount).toHaveBeenCalledWith(
      newTokenCount,
    );
    expect(uiTelemetryService.setLastPromptTokenCount).toHaveBeenCalledTimes(
      1,
    );
  });
|
||||
it('should trigger summarization if token count is above threshold with contextPercentageThreshold setting', async () => {
|
||||
it('should trigger summarization if token count is at threshold with contextPercentageThreshold setting', async () => {
|
||||
const MOCKED_TOKEN_LIMIT = 1000;
|
||||
const MOCKED_CONTEXT_PERCENTAGE_THRESHOLD = 0.5;
|
||||
vi.mocked(tokenLimit).mockReturnValue(MOCKED_TOKEN_LIMIT);
|
||||
vi.spyOn(client['config'], 'getChatCompression').mockReturnValue({
|
||||
contextPercentageThreshold: MOCKED_CONTEXT_PERCENTAGE_THRESHOLD,
|
||||
});
|
||||
// Need multiple history items so there's something to compress
|
||||
const history = [
|
||||
{ role: 'user', parts: [{ text: '...history 1...' }] },
|
||||
{ role: 'model', parts: [{ text: '...history 2...' }] },
|
||||
{ role: 'user', parts: [{ text: '...history 3...' }] },
|
||||
{ role: 'model', parts: [{ text: '...history 4...' }] },
|
||||
];
|
||||
const history = [{ role: 'user', parts: [{ text: '...history...' }] }];
|
||||
mockGetHistory.mockReturnValue(history);
|
||||
|
||||
// Token count needs to be ABOVE the threshold to trigger compression
|
||||
const originalTokenCount =
|
||||
MOCKED_TOKEN_LIMIT * MOCKED_CONTEXT_PERCENTAGE_THRESHOLD + 1;
|
||||
MOCKED_TOKEN_LIMIT * MOCKED_CONTEXT_PERCENTAGE_THRESHOLD;
|
||||
|
||||
vi.mocked(uiTelemetryService.getLastPromptTokenCount).mockReturnValue(
|
||||
originalTokenCount,
|
||||
@@ -886,13 +864,7 @@ describe('Gemini Client (client.ts)', () => {
|
||||
});
|
||||
|
||||
it('should always trigger summarization when force is true, regardless of token count', async () => {
|
||||
// Need multiple history items so there's something to compress
|
||||
const history = [
|
||||
{ role: 'user', parts: [{ text: '...history 1...' }] },
|
||||
{ role: 'model', parts: [{ text: '...history 2...' }] },
|
||||
{ role: 'user', parts: [{ text: '...history 3...' }] },
|
||||
{ role: 'model', parts: [{ text: '...history 4...' }] },
|
||||
];
|
||||
const history = [{ role: 'user', parts: [{ text: '...history...' }] }];
|
||||
mockGetHistory.mockReturnValue(history);
|
||||
|
||||
const originalTokenCount = 100; // Well below threshold, but > estimated new count
|
||||
|
||||
@@ -25,11 +25,13 @@ import {
import type { ContentGenerator } from './contentGenerator.js';
import { GeminiChat } from './geminiChat.js';
import {
  getCompressionPrompt,
  getCoreSystemPrompt,
  getCustomSystemPrompt,
  getPlanModeSystemReminder,
  getSubagentSystemReminder,
} from './prompts.js';
import { tokenLimit } from './tokenLimits.js';
import {
  CompressionStatus,
  GeminiEventType,

@@ -40,11 +42,6 @@ import {
// Services
import { type ChatRecordingService } from '../services/chatRecordingService.js';
import {
  ChatCompressionService,
  COMPRESSION_PRESERVE_THRESHOLD,
  COMPRESSION_TOKEN_THRESHOLD,
} from '../services/chatCompressionService.js';
import { LoopDetectionService } from '../services/loopDetectionService.js';

// Tools

@@ -53,18 +50,21 @@ import { TaskTool } from '../tools/task.js';
// Telemetry
import {
  NextSpeakerCheckEvent,
  logChatCompression,
  logNextSpeakerCheck,
  makeChatCompressionEvent,
  uiTelemetryService,
} from '../telemetry/index.js';

// Utilities
import {
  getDirectoryContextString,
  getInitialChatHistory,
  getEnvironmentContext,
} from '../utils/environmentContext.js';
import { reportError } from '../utils/errorReporting.js';
import { getErrorMessage } from '../utils/errors.js';
import { checkNextSpeaker } from '../utils/nextSpeakerChecker.js';
import { flatMapTextParts } from '../utils/partUtils.js';
import { flatMapTextParts, getResponseText } from '../utils/partUtils.js';
import { retryWithBackoff } from '../utils/retry.js';

// IDE integration

@@ -85,8 +85,68 @@ export function isThinkingDefault(model: string) {
  return model.startsWith('gemini-2.5') || model === DEFAULT_GEMINI_MODEL_AUTO;
}

/**
 * Returns the index of the oldest item to keep when compressing. May return
 * contents.length, which indicates that everything should be compressed.
 *
 * Exported for testing purposes.
 */
export function findCompressSplitPoint(
  contents: Content[],
  fraction: number,
): number {
  if (fraction <= 0 || fraction >= 1) {
    throw new Error('Fraction must be between 0 and 1');
  }

  const charCounts = contents.map((content) => JSON.stringify(content).length);
  const totalCharCount = charCounts.reduce((a, b) => a + b, 0);
  const targetCharCount = totalCharCount * fraction;

  let lastSplitPoint = 0; // 0 is always valid (compress nothing)
  let cumulativeCharCount = 0;
  for (let i = 0; i < contents.length; i++) {
    const content = contents[i];
    if (
      content.role === 'user' &&
      !content.parts?.some((part) => !!part.functionResponse)
    ) {
      if (cumulativeCharCount >= targetCharCount) {
        return i;
      }
      lastSplitPoint = i;
    }
    cumulativeCharCount += charCounts[i];
  }

  // We found no split points after targetCharCount.
  // Check if it's safe to compress everything.
  const lastContent = contents[contents.length - 1];
  if (
    lastContent?.role === 'model' &&
    !lastContent?.parts?.some((part) => part.functionCall)
  ) {
    return contents.length;
  }

  // Can't compress everything, so just compress at the last split point.
  return lastSplitPoint;
}

const MAX_TURNS = 100;

/**
 * Threshold for compression token count as a fraction of the model's token limit.
 * If the chat history exceeds this threshold, it will be compressed.
 */
const COMPRESSION_TOKEN_THRESHOLD = 0.7;

/**
 * The fraction of the latest chat history to keep. A value of 0.3
 * means that only the last 30% of the chat history will be kept after compression.
 */
const COMPRESSION_PRESERVE_THRESHOLD = 0.3;

export class GeminiClient {
  private chat?: GeminiChat;
  private readonly generateContentConfig: GenerateContentConfig = {

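As a quick reference for the split-point semantics, the example below is lifted from the repository's own tests (the deleted chatCompressionService tests further down this diff): with five roughly equal-sized messages and a fraction of 0.5, the first user turn whose cumulative serialized length has already passed the halfway mark is index 4, so indices 0–3 get summarized and index 4 is kept verbatim.

```typescript
const history: Content[] = [
  { role: 'user', parts: [{ text: 'This is the first message.' }] },
  { role: 'model', parts: [{ text: 'This is the second message.' }] },
  { role: 'user', parts: [{ text: 'This is the third message.' }] },
  { role: 'model', parts: [{ text: 'This is the fourth message.' }] },
  { role: 'user', parts: [{ text: 'This is the fifth message.' }] },
];

findCompressSplitPoint(history, 0.5); // 4 -> compress [0..3], keep [4]
```
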
@@ -183,13 +243,23 @@ export class GeminiClient {
  async startChat(extraHistory?: Content[]): Promise<GeminiChat> {
    this.forceFullIdeContext = true;
    this.hasFailedCompressionAttempt = false;
    const envParts = await getEnvironmentContext(this.config);

    const toolRegistry = this.config.getToolRegistry();
    const toolDeclarations = toolRegistry.getFunctionDeclarations();
    const tools: Tool[] = [{ functionDeclarations: toolDeclarations }];

    const history = await getInitialChatHistory(this.config, extraHistory);

    const history: Content[] = [
      {
        role: 'user',
        parts: envParts,
      },
      {
        role: 'model',
        parts: [{ text: 'Got it. Thanks for the context!' }],
      },
      ...(extraHistory ?? []),
    ];
    try {
      const userMemory = this.config.getUserMemory();
      const model = this.config.getModel();

@@ -433,15 +503,14 @@ export class GeminiClient {
      userMemory,
      this.config.getModel(),
    );
    const initialHistory = await getInitialChatHistory(this.config);
    const environment = await getEnvironmentContext(this.config);

    // Create a mock request content to count total tokens
    const mockRequestContent = [
      {
        role: 'system' as const,
        parts: [{ text: systemPrompt }],
        parts: [{ text: systemPrompt }, ...environment],
      },
      ...initialHistory,
      ...currentHistory,
    ];

@@ -663,37 +732,127 @@ export class GeminiClient {
    prompt_id: string,
    force: boolean = false,
  ): Promise<ChatCompressionInfo> {
    const compressionService = new ChatCompressionService();
    const model = this.config.getModel();

    const { newHistory, info } = await compressionService.compress(
      this.getChat(),
      prompt_id,
      force,
      this.config.getModel(),
      this.config,
      this.hasFailedCompressionAttempt,
    );
    const curatedHistory = this.getChat().getHistory(true);

    // Handle compression result
    if (info.compressionStatus === CompressionStatus.COMPRESSED) {
      // Success: update chat with new compressed history
      if (newHistory) {
        this.chat = await this.startChat(newHistory);
        this.forceFullIdeContext = true;
      }
    } else if (
      info.compressionStatus ===
        CompressionStatus.COMPRESSION_FAILED_INFLATED_TOKEN_COUNT ||
      info.compressionStatus ===
        CompressionStatus.COMPRESSION_FAILED_EMPTY_SUMMARY
    // Regardless of `force`, don't do anything if the history is empty.
    if (
      curatedHistory.length === 0 ||
      (this.hasFailedCompressionAttempt && !force)
    ) {
      // Track failed attempts (only mark as failed if not forced)
      if (!force) {
        this.hasFailedCompressionAttempt = true;
      return {
        originalTokenCount: 0,
        newTokenCount: 0,
        compressionStatus: CompressionStatus.NOOP,
      };
    }

    const originalTokenCount = uiTelemetryService.getLastPromptTokenCount();

    const contextPercentageThreshold =
      this.config.getChatCompression()?.contextPercentageThreshold;

    // Don't compress if not forced and we are under the limit.
    if (!force) {
      const threshold =
        contextPercentageThreshold ?? COMPRESSION_TOKEN_THRESHOLD;
      if (originalTokenCount < threshold * tokenLimit(model)) {
        return {
          originalTokenCount,
          newTokenCount: originalTokenCount,
          compressionStatus: CompressionStatus.NOOP,
        };
      }
    }

    return info;
    const splitPoint = findCompressSplitPoint(
      curatedHistory,
      1 - COMPRESSION_PRESERVE_THRESHOLD,
    );

    const historyToCompress = curatedHistory.slice(0, splitPoint);
    const historyToKeep = curatedHistory.slice(splitPoint);

    const summaryResponse = await this.config
      .getContentGenerator()
      .generateContent(
        {
          model,
          contents: [
            ...historyToCompress,
            {
              role: 'user',
              parts: [
                {
                  text: 'First, reason in your scratchpad. Then, generate the <state_snapshot>.',
                },
              ],
            },
          ],
          config: {
            systemInstruction: { text: getCompressionPrompt() },
          },
        },
        prompt_id,
      );
    const summary = getResponseText(summaryResponse) ?? '';

    const chat = await this.startChat([
      {
        role: 'user',
        parts: [{ text: summary }],
      },
      {
        role: 'model',
        parts: [{ text: 'Got it. Thanks for the additional context!' }],
      },
      ...historyToKeep,
    ]);
    this.forceFullIdeContext = true;

    // Estimate token count: 1 token ≈ 4 characters
    const newTokenCount = Math.floor(
      chat
        .getHistory()
        .reduce((total, content) => total + JSON.stringify(content).length, 0) /
        4,
    );

    logChatCompression(
      this.config,
      makeChatCompressionEvent({
        tokens_before: originalTokenCount,
        tokens_after: newTokenCount,
      }),
    );

    if (newTokenCount > originalTokenCount) {
      this.hasFailedCompressionAttempt = !force && true;
      return {
        originalTokenCount,
        newTokenCount,
        compressionStatus:
          CompressionStatus.COMPRESSION_FAILED_INFLATED_TOKEN_COUNT,
      };
    } else {
      this.chat = chat; // Chat compression successful, set new state.
      uiTelemetryService.setLastPromptTokenCount(newTokenCount);
    }

    logChatCompression(
      this.config,
      makeChatCompressionEvent({
        tokens_before: originalTokenCount,
        tokens_after: newTokenCount,
      }),
    );

    return {
      originalTokenCount,
      newTokenCount,
      compressionStatus: CompressionStatus.COMPRESSED,
    };
  }
}

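To make the thresholds in the inlined tryCompressChat concrete, here is a back-of-envelope sketch; the 128K limit is illustrative, while the 0.7 threshold, the 0.3 preserve fraction, and the chars/4 estimate come from the code above.

```typescript
const limit = 131072;          // tokenLimit(model) for a hypothetical 128K model
const trigger = 0.7 * limit;   // ≈ 91750: compress once the last prompt token
                               // count reaches this (COMPRESSION_TOKEN_THRESHOLD)
const splitFraction = 1 - 0.3; // 1 - COMPRESSION_PRESERVE_THRESHOLD: summarize
                               // the oldest ~70% of serialized characters

// After restarting the chat with the summary, the new size is estimated,
// not recounted, at roughly one token per four characters:
const serializedChars = 120_000; // JSON length of the new history (example)
const estimatedTokens = Math.floor(serializedChars / 4); // 30000
```
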
@@ -4,9 +4,13 @@
 * SPDX-License-Identifier: Apache-2.0
 */

import { describe, it, expect, vi } from 'vitest';
import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
import type { ContentGenerator } from './contentGenerator.js';
import { createContentGenerator, AuthType } from './contentGenerator.js';
import {
  createContentGenerator,
  AuthType,
  createContentGeneratorConfig,
} from './contentGenerator.js';
import { createCodeAssistContentGenerator } from '../code_assist/codeAssist.js';
import { GoogleGenAI } from '@google/genai';
import type { Config } from '../config/config.js';

@@ -106,3 +110,83 @@ describe('createContentGenerator', () => {
    );
  });
});

describe('createContentGeneratorConfig', () => {
  const mockConfig = {
    getModel: vi.fn().mockReturnValue('gemini-pro'),
    setModel: vi.fn(),
    flashFallbackHandler: vi.fn(),
    getProxy: vi.fn(),
    getEnableOpenAILogging: vi.fn().mockReturnValue(false),
    getSamplingParams: vi.fn().mockReturnValue(undefined),
    getContentGeneratorTimeout: vi.fn().mockReturnValue(undefined),
    getContentGeneratorMaxRetries: vi.fn().mockReturnValue(undefined),
    getContentGeneratorDisableCacheControl: vi.fn().mockReturnValue(undefined),
    getContentGeneratorSamplingParams: vi.fn().mockReturnValue(undefined),
    getCliVersion: vi.fn().mockReturnValue('1.0.0'),
  } as unknown as Config;

  beforeEach(() => {
    // Reset modules to re-evaluate imports and environment variables
    vi.resetModules();
    vi.clearAllMocks();
  });

  afterEach(() => {
    vi.unstubAllEnvs();
  });

  it('should configure for Gemini using GEMINI_API_KEY when set', async () => {
    vi.stubEnv('GEMINI_API_KEY', 'env-gemini-key');
    const config = await createContentGeneratorConfig(
      mockConfig,
      AuthType.USE_GEMINI,
    );
    expect(config.apiKey).toBe('env-gemini-key');
    expect(config.vertexai).toBe(false);
  });

  it('should not configure for Gemini if GEMINI_API_KEY is empty', async () => {
    vi.stubEnv('GEMINI_API_KEY', '');
    const config = await createContentGeneratorConfig(
      mockConfig,
      AuthType.USE_GEMINI,
    );
    expect(config.apiKey).toBeUndefined();
    expect(config.vertexai).toBeUndefined();
  });

  it('should configure for Vertex AI using GOOGLE_API_KEY when set', async () => {
    vi.stubEnv('GOOGLE_API_KEY', 'env-google-key');
    const config = await createContentGeneratorConfig(
      mockConfig,
      AuthType.USE_VERTEX_AI,
    );
    expect(config.apiKey).toBe('env-google-key');
    expect(config.vertexai).toBe(true);
  });

  it('should configure for Vertex AI using GCP project and location when set', async () => {
    vi.stubEnv('GOOGLE_API_KEY', undefined);
    vi.stubEnv('GOOGLE_CLOUD_PROJECT', 'env-gcp-project');
    vi.stubEnv('GOOGLE_CLOUD_LOCATION', 'env-gcp-location');
    const config = await createContentGeneratorConfig(
      mockConfig,
      AuthType.USE_VERTEX_AI,
    );
    expect(config.vertexai).toBe(true);
    expect(config.apiKey).toBeUndefined();
  });

  it('should not configure for Vertex AI if required env vars are empty', async () => {
    vi.stubEnv('GOOGLE_API_KEY', '');
    vi.stubEnv('GOOGLE_CLOUD_PROJECT', '');
    vi.stubEnv('GOOGLE_CLOUD_LOCATION', '');
    const config = await createContentGeneratorConfig(
      mockConfig,
      AuthType.USE_VERTEX_AI,
    );
    expect(config.apiKey).toBeUndefined();
    expect(config.vertexai).toBeUndefined();
  });
});

@@ -14,8 +14,8 @@ import type {
} from '@google/genai';
import { GoogleGenAI } from '@google/genai';
import { createCodeAssistContentGenerator } from '../code_assist/codeAssist.js';
import { DEFAULT_QWEN_MODEL } from '../config/models.js';
import type { Config } from '../config/config.js';
import { DEFAULT_QWEN_MODEL } from '../config/models.js';

import type { UserTierId } from '../code_assist/types.js';
import { InstallationManager } from '../utils/installationManager.js';

@@ -58,7 +58,6 @@ export type ContentGeneratorConfig = {
  vertexai?: boolean;
  authType?: AuthType | undefined;
  enableOpenAILogging?: boolean;
  openAILoggingDir?: string;
  // Timeout configuration in milliseconds
  timeout?: number;
  // Maximum retries for failed requests

@@ -83,37 +82,53 @@ export function createContentGeneratorConfig(
  authType: AuthType | undefined,
  generationConfig?: Partial<ContentGeneratorConfig>,
): ContentGeneratorConfig {
  const newContentGeneratorConfig: Partial<ContentGeneratorConfig> = {
  const geminiApiKey = process.env['GEMINI_API_KEY'] || undefined;
  const googleApiKey = process.env['GOOGLE_API_KEY'] || undefined;
  const googleCloudProject = process.env['GOOGLE_CLOUD_PROJECT'] || undefined;
  const googleCloudLocation = process.env['GOOGLE_CLOUD_LOCATION'] || undefined;

  const newContentGeneratorConfig: ContentGeneratorConfig = {
    ...(generationConfig || {}),
    model: generationConfig?.model || DEFAULT_QWEN_MODEL,
    authType,
    proxy: config?.getProxy(),
  };

  // If we are using Google auth or we are in Cloud Shell, there is nothing else to validate for now
  if (
    authType === AuthType.LOGIN_WITH_GOOGLE ||
    authType === AuthType.CLOUD_SHELL
  ) {
    return newContentGeneratorConfig;
  }

  if (authType === AuthType.USE_GEMINI && geminiApiKey) {
    newContentGeneratorConfig.apiKey = geminiApiKey;
    newContentGeneratorConfig.vertexai = false;

    return newContentGeneratorConfig;
  }

  if (
    authType === AuthType.USE_VERTEX_AI &&
    (googleApiKey || (googleCloudProject && googleCloudLocation))
  ) {
    newContentGeneratorConfig.apiKey = googleApiKey;
    newContentGeneratorConfig.vertexai = true;

    return newContentGeneratorConfig;
  }

  if (authType === AuthType.QWEN_OAUTH) {
    // For Qwen OAuth, we'll handle the API key dynamically in createContentGenerator
    // Set a special marker to indicate this is Qwen OAuth
    return {
      ...newContentGeneratorConfig,
      model: DEFAULT_QWEN_MODEL,
      apiKey: 'QWEN_OAUTH_DYNAMIC_TOKEN',
    } as ContentGeneratorConfig;
    newContentGeneratorConfig.apiKey = 'QWEN_OAUTH_DYNAMIC_TOKEN';
    newContentGeneratorConfig.model = DEFAULT_QWEN_MODEL;

    return newContentGeneratorConfig;
  }

  if (authType === AuthType.USE_OPENAI) {
    if (!newContentGeneratorConfig.apiKey) {
      throw new Error('OpenAI API key is required');
    }

    return {
      ...newContentGeneratorConfig,
      model: newContentGeneratorConfig?.model || 'qwen3-coder-plus',
    } as ContentGeneratorConfig;
  }

  return {
    ...newContentGeneratorConfig,
    model: newContentGeneratorConfig?.model || DEFAULT_QWEN_MODEL,
  } as ContentGeneratorConfig;
  return newContentGeneratorConfig;
}

export async function createContentGenerator(

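A usage sketch of the rewritten factory, mirroring the new tests earlier in this diff (`config` stands in for a real `Config` instance):

```typescript
process.env['GEMINI_API_KEY'] = 'env-gemini-key';

const generatorConfig = createContentGeneratorConfig(
  config,              // a Config instance
  AuthType.USE_GEMINI,
);
// generatorConfig.apiKey   === 'env-gemini-key'
// generatorConfig.vertexai === false
// generatorConfig.model falls back to DEFAULT_QWEN_MODEL unless a
// generationConfig with an explicit model was passed as the third argument.
```
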
@@ -1,8 +1,2 @@
export const DEFAULT_TIMEOUT = 120000;
export const DEFAULT_MAX_RETRIES = 3;

export const DEFAULT_OPENAI_BASE_URL = 'https://api.openai.com/v1';
export const DEFAULT_DASHSCOPE_BASE_URL =
  'https://dashscope.aliyuncs.com/compatible-mode/v1';
export const DEFAULT_DEEPSEEK_BASE_URL = 'https://api.deepseek.com/v1';
export const DEFAULT_OPEN_ROUTER_BASE_URL = 'https://openrouter.ai/api/v1';

@@ -32,7 +32,6 @@ export class OpenAIContentGenerator implements ContentGenerator {
      telemetryService: new DefaultTelemetryService(
        cliConfig,
        contentGeneratorConfig.enableOpenAILogging,
        contentGeneratorConfig.openAILoggingDir,
      ),
      errorHandler: new EnhancedErrorHandler(
        (error: unknown, request: GenerateContentParameters) =>

@@ -2,11 +2,7 @@ import OpenAI from 'openai';
import type { Config } from '../../../config/config.js';
import type { ContentGeneratorConfig } from '../../contentGenerator.js';
import { AuthType } from '../../contentGenerator.js';
import {
  DEFAULT_TIMEOUT,
  DEFAULT_MAX_RETRIES,
  DEFAULT_DASHSCOPE_BASE_URL,
} from '../constants.js';
import { DEFAULT_TIMEOUT, DEFAULT_MAX_RETRIES } from '../constants.js';
import { tokenLimit } from '../../tokenLimits.js';
import type {
  OpenAICompatibleProvider,

@@ -57,7 +53,7 @@ export class DashScopeOpenAICompatibleProvider
  buildClient(): OpenAI {
    const {
      apiKey,
      baseUrl = DEFAULT_DASHSCOPE_BASE_URL,
      baseUrl,
      timeout = DEFAULT_TIMEOUT,
      maxRetries = DEFAULT_MAX_RETRIES,
    } = this.contentGeneratorConfig;

@@ -7,7 +7,7 @@
import type { Config } from '../../config/config.js';
import { logApiError, logApiResponse } from '../../telemetry/loggers.js';
import { ApiErrorEvent, ApiResponseEvent } from '../../telemetry/types.js';
import { OpenAILogger } from '../../utils/openaiLogger.js';
import { openaiLogger } from '../../utils/openaiLogger.js';
import type { GenerateContentResponse } from '@google/genai';
import type OpenAI from 'openai';

@@ -43,17 +43,10 @@ export interface TelemetryService {
}

export class DefaultTelemetryService implements TelemetryService {
  private logger: OpenAILogger;

  constructor(
    private config: Config,
    private enableOpenAILogging: boolean = false,
    openAILoggingDir?: string,
  ) {
    // Always create a new logger instance to ensure correct working directory
    // If no custom directory is provided, undefined will use the default path
    this.logger = new OpenAILogger(openAILoggingDir);
  }
  ) {}

  async logSuccess(
    context: RequestContext,

@@ -75,7 +68,7 @@ export class DefaultTelemetryService implements TelemetryService {
    // Log interaction if enabled
    if (this.enableOpenAILogging && openaiRequest && openaiResponse) {
      await this.logger.logInteraction(openaiRequest, openaiResponse);
      await openaiLogger.logInteraction(openaiRequest, openaiResponse);
    }
  }

@@ -104,7 +97,7 @@ export class DefaultTelemetryService implements TelemetryService {
    // Log error interaction if enabled
    if (this.enableOpenAILogging && openaiRequest) {
      await this.logger.logInteraction(
      await openaiLogger.logInteraction(
        openaiRequest,
        undefined,
        error as Error,

@@ -144,7 +137,7 @@ export class DefaultTelemetryService implements TelemetryService {
      openaiChunks.length > 0
    ) {
      const combinedResponse = this.combineOpenAIChunksForLogging(openaiChunks);
      await this.logger.logInteraction(openaiRequest, combinedResponse);
      await openaiLogger.logInteraction(openaiRequest, combinedResponse);
    }
  }

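The telemetry change above replaces the per-service `new OpenAILogger(dir)` with a shared `openaiLogger` import, which is why the custom logging-directory plumbing disappears. The exported singleton presumably looks something like this sketch (an assumption about utils/openaiLogger.ts, whose source is not shown in this diff):

```typescript
// utils/openaiLogger.ts (assumed shape, not shown in this diff)
export class OpenAILogger {
  // Logs one request/response (or request/error) pair as JSON to the
  // default log directory.
  async logInteraction(
    request: unknown,
    response?: unknown,
    error?: Error,
  ): Promise<void> {
    // ...serialize and write...
  }
}

// Single shared instance; callers import this instead of constructing their own.
export const openaiLogger = new OpenAILogger();
```
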
@@ -64,12 +64,6 @@ describe('normalize', () => {
    expect(normalize('qwen-vl-max-latest')).toBe('qwen-vl-max-latest');
  });

  it('should preserve date suffixes for Kimi K2 models', () => {
    expect(normalize('kimi-k2-0905-preview')).toBe('kimi-k2-0905');
    expect(normalize('kimi-k2-0711-preview')).toBe('kimi-k2-0711');
    expect(normalize('kimi-k2-turbo-preview')).toBe('kimi-k2-turbo');
  });

  it('should remove date-like suffixes', () => {
    expect(normalize('deepseek-r1-0528')).toBe('deepseek-r1');
  });

@@ -219,7 +213,7 @@ describe('tokenLimit', () => {
    });
  });

  describe('DeepSeek', () => {
  describe('Other models', () => {
    it('should return the correct limit for deepseek-r1', () => {
      expect(tokenLimit('deepseek-r1')).toBe(131072);
    });

@@ -232,27 +226,9 @@ describe('tokenLimit', () => {
    it('should return the correct limit for deepseek-v3.2', () => {
      expect(tokenLimit('deepseek-v3.2-exp')).toBe(131072);
    });
  });

  describe('Moonshot Kimi', () => {
    it('should return the correct limit for kimi-k2-0905-preview', () => {
      expect(tokenLimit('kimi-k2-0905-preview')).toBe(262144); // 256K
      expect(tokenLimit('kimi-k2-0905')).toBe(262144);
    });
    it('should return the correct limit for kimi-k2-turbo-preview', () => {
      expect(tokenLimit('kimi-k2-turbo-preview')).toBe(262144); // 256K
      expect(tokenLimit('kimi-k2-turbo')).toBe(262144);
    });
    it('should return the correct limit for kimi-k2-0711-preview', () => {
      expect(tokenLimit('kimi-k2-0711-preview')).toBe(131072); // 128K
      expect(tokenLimit('kimi-k2-0711')).toBe(131072);
    });
    it('should return the correct limit for kimi-k2-instruct', () => {
      expect(tokenLimit('kimi-k2-instruct')).toBe(131072); // 128K
      expect(tokenLimit('kimi-k2-instruct')).toBe(131072);
    });
  });

  describe('Other models', () => {
    it('should return the correct limit for gpt-oss', () => {
      expect(tokenLimit('gpt-oss')).toBe(131072);
    });

@@ -47,13 +47,8 @@ export function normalize(model: string): string {
  // remove trailing build / date / revision suffixes:
  // - dates (e.g., -20250219), -v1, version numbers, 'latest', 'preview', etc.
  s = s.replace(/-preview/g, '');
  // Special handling for model names that include date/version as part of the model identifier
  // - Qwen models: qwen-plus-latest, qwen-flash-latest, qwen-vl-max-latest
  // - Kimi models: kimi-k2-0905, kimi-k2-0711, etc. (keep date for version distinction)
  if (
    !s.match(/^qwen-(?:plus|flash|vl-max)-latest$/) &&
    !s.match(/^kimi-k2-\d{4}$/)
  ) {
  // Special handling for Qwen model names that include "-latest" as part of the model name
  if (!s.match(/^qwen-(?:plus|flash|vl-max)-latest$/)) {
    // Regex breakdown:
    // -(?:...)$ - Non-capturing group for suffixes at the end of the string
    // The following patterns are matched within the group:

@@ -170,16 +165,9 @@ const PATTERNS: Array<[RegExp, TokenCount]> = [
  [/^deepseek-v3(?:\.\d+)?(?:-.*)?$/, LIMITS['128k']],

  // -------------------
  // Moonshot / Kimi
  // -------------------
  [/^kimi-k2-0905$/, LIMITS['256k']], // Kimi-k2-0905-preview: 256K context
  [/^kimi-k2-turbo.*$/, LIMITS['256k']], // Kimi-k2-turbo-preview: 256K context
  [/^kimi-k2-0711$/, LIMITS['128k']], // Kimi-k2-0711-preview: 128K context
  [/^kimi-k2-instruct.*$/, LIMITS['128k']], // Kimi-k2-instruct: 128K context

  // -------------------
  // GPT-OSS / Llama & Mistral examples
  // GPT-OSS / Kimi / Llama & Mistral examples
  // -------------------
  [/^kimi-k2-instruct.*$/, LIMITS['128k']],
  [/^gpt-oss.*$/, LIMITS['128k']],
  [/^llama-4-scout.*$/, LIMITS['10m']],
  [/^mistral-large-2.*$/, LIMITS['128k']],

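A few concrete values pinned down by the tests that survive this change, plus one inference that is marked as such:

```typescript
// Confirmed by the remaining tests in this diff:
normalize('qwen-vl-max-latest'); // 'qwen-vl-max-latest' ("-latest" is part of the name)
normalize('deepseek-r1-0528');   // 'deepseek-r1' (date-like suffix stripped)
tokenLimit('kimi-k2-instruct');  // 131072 (128K)
tokenLimit('gpt-oss');           // 131072 (128K)

// Inference, not covered by a remaining test: with the kimi-k2-\d{4}
// carve-out removed, 'kimi-k2-0905-preview' now loses both '-preview'
// and its date suffix during normalization.
```
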
@@ -153,9 +153,6 @@ export enum CompressionStatus {
  /** The compression failed due to an error counting tokens */
  COMPRESSION_FAILED_TOKEN_COUNT_ERROR,

  /** The compression failed due to receiving an empty or null summary */
  COMPRESSION_FAILED_EMPTY_SUMMARY,

  /** The compression was not necessary and no action was taken */
  NOOP,
}

@@ -113,7 +113,7 @@ describe('IdeClient', () => {
      'utf8',
    );
    expect(StreamableHTTPClientTransport).toHaveBeenCalledWith(
      new URL('http://127.0.0.1:8080/mcp'),
      new URL('http://localhost:8080/mcp'),
      expect.any(Object),
    );
    expect(mockClient.connect).toHaveBeenCalledWith(mockHttpTransport);

@@ -181,7 +181,7 @@ describe('IdeClient', () => {
    await ideClient.connect();

    expect(StreamableHTTPClientTransport).toHaveBeenCalledWith(
      new URL('http://127.0.0.1:9090/mcp'),
      new URL('http://localhost:9090/mcp'),
      expect.any(Object),
    );
    expect(mockClient.connect).toHaveBeenCalledWith(mockHttpTransport);

@@ -230,7 +230,7 @@ describe('IdeClient', () => {
    await ideClient.connect();

    expect(StreamableHTTPClientTransport).toHaveBeenCalledWith(
      new URL('http://127.0.0.1:8080/mcp'),
      new URL('http://localhost:8080/mcp'),
      expect.any(Object),
    );
    expect(ideClient.getConnectionStatus().status).toBe(

@@ -665,7 +665,7 @@ describe('IdeClient', () => {
    await ideClient.connect();

    expect(StreamableHTTPClientTransport).toHaveBeenCalledWith(
      new URL('http://127.0.0.1:8080/mcp'),
      new URL('http://localhost:8080/mcp'),
      expect.objectContaining({
        requestInit: {
          headers: {

@@ -667,10 +667,10 @@ export class IdeClient {
  }

  private createProxyAwareFetch() {
    // ignore proxy for '127.0.0.1' by default to allow connecting to the IDE MCP server
    // ignore proxy for 'localhost' by default to allow connecting to the IDE MCP server
    const existingNoProxy = process.env['NO_PROXY'] || '';
    const agent = new EnvHttpProxyAgent({
      noProxy: [existingNoProxy, '127.0.0.1'].filter(Boolean).join(','),
      noProxy: [existingNoProxy, 'localhost'].filter(Boolean).join(','),
    });
    const undiciPromise = import('undici');
    return async (url: string | URL, init?: RequestInit): Promise<Response> => {

@@ -851,5 +851,5 @@ export class IdeClient {
function getIdeServerHost() {
  const isInContainer =
    fs.existsSync('/.dockerenv') || fs.existsSync('/run/.containerenv');
  return isInContainer ? 'host.docker.internal' : '127.0.0.1';
  return isInContainer ? 'host.docker.internal' : 'localhost';
}

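The net effect of the host change, gathered in one place (port and NO_PROXY values are illustrative):

```typescript
// The IDE MCP server is now addressed as 'localhost' instead of '127.0.0.1',
// and 'localhost' is correspondingly what gets appended to NO_PROXY so the
// connection bypasses any configured HTTP proxy.
const existingNoProxy = process.env['NO_PROXY'] || ''; // e.g. 'corp-proxy.example'
const noProxy = [existingNoProxy, 'localhost'].filter(Boolean).join(',');
// -> 'corp-proxy.example,localhost'

const isInContainer =
  fs.existsSync('/.dockerenv') || fs.existsSync('/run/.containerenv');
const host = isInContainer ? 'host.docker.internal' : 'localhost';
const mcpUrl = new URL(`http://${host}:8080/mcp`); // port comes from the IDE
```
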
@@ -112,19 +112,14 @@ describe('ide-installer', () => {
      platform: 'linux',
    });
    await installer.install();

    // Note: The implementation uses process.platform, not the mocked platform
    const isActuallyWindows = process.platform === 'win32';
    const expectedCommand = isActuallyWindows ? '"code"' : 'code';

    expect(child_process.spawnSync).toHaveBeenCalledWith(
      expectedCommand,
      'code',
      [
        '--install-extension',
        'qwenlm.qwen-code-vscode-ide-companion',
        '--force',
      ],
      { stdio: 'pipe', shell: isActuallyWindows },
      { stdio: 'pipe' },
    );
  });

@@ -117,16 +117,15 @@ class VsCodeInstaller implements IdeInstaller {
    };
  }

  const isWindows = process.platform === 'win32';
  try {
    const result = child_process.spawnSync(
      isWindows ? `"${commandPath}"` : commandPath,
      commandPath,
      [
        '--install-extension',
        'qwenlm.qwen-code-vscode-ide-companion',
        '--force',
      ],
      { stdio: 'pipe', shell: isWindows },
      { stdio: 'pipe' },
    );

    if (result.status !== 0) {

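This drops the Windows-only quoting-plus-shell path in favor of a plain `spawnSync` with an argument array. Roughly, the resulting call looks like the sketch below; with no shell involved, arguments go straight to the executable and need no manual quoting (whether `.cmd` shims like the VS Code CLI still resolve on Windows without a shell is outside what this diff shows):

```typescript
import * as child_process from 'node:child_process';

// commandPath is the resolved VS Code CLI (shown here as 'code').
const result = child_process.spawnSync(
  'code',
  ['--install-extension', 'qwenlm.qwen-code-vscode-ide-companion', '--force'],
  { stdio: 'pipe' },
);
if (result.status !== 0) {
  // surface result.stderr to the user
}
```
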
@@ -48,7 +48,6 @@ export * from './utils/systemEncoding.js';
export * from './utils/textUtils.js';
export * from './utils/formatters.js';
export * from './utils/generateContentResponseUtilities.js';
export * from './utils/ripgrepUtils.js';
export * from './utils/filesearch/fileSearch.js';
export * from './utils/errorParsing.js';
export * from './utils/workspaceContext.js';

@@ -98,7 +97,7 @@ export * from './tools/write-file.js';
export * from './tools/web-fetch.js';
export * from './tools/memoryTool.js';
export * from './tools/shell.js';
export * from './tools/web-search/index.js';
export * from './tools/web-search.js';
export * from './tools/read-many-files.js';
export * from './tools/mcp-client.js';
export * from './tools/mcp-tool.js';

@@ -8,7 +8,7 @@ import { OpenAIContentGenerator } from '../core/openaiContentGenerator/index.js'
import { DashScopeOpenAICompatibleProvider } from '../core/openaiContentGenerator/provider/dashscope.js';
import type { IQwenOAuth2Client } from './qwenOAuth2.js';
import { SharedTokenManager } from './sharedTokenManager.js';
import { type Config } from '../config/config.js';
import type { Config } from '../config/config.js';
import type {
  GenerateContentParameters,
  GenerateContentResponse,

@@ -18,7 +18,10 @@ import type {
  EmbedContentResponse,
} from '@google/genai';
import type { ContentGeneratorConfig } from '../core/contentGenerator.js';
import { DEFAULT_DASHSCOPE_BASE_URL } from '../core/openaiContentGenerator/constants.js';

// Default fallback base URL if no endpoint is provided
const DEFAULT_QWEN_BASE_URL =
  'https://dashscope.aliyuncs.com/compatible-mode/v1';

/**
 * Qwen Content Generator that uses Qwen OAuth tokens with automatic refresh

@@ -55,7 +58,7 @@ export class QwenContentGenerator extends OpenAIContentGenerator {
 * Get the current endpoint URL with proper protocol and /v1 suffix
 */
private getCurrentEndpoint(resourceUrl?: string): string {
  const baseEndpoint = resourceUrl || DEFAULT_DASHSCOPE_BASE_URL;
  const baseEndpoint = resourceUrl || DEFAULT_QWEN_BASE_URL;
  const suffix = '/v1';

  // Normalize the URL: add protocol if missing, ensure /v1 suffix

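A sketch of the normalization that comment describes (assumed behavior based on the comment and the `suffix = '/v1'` constant; the real `getCurrentEndpoint` may differ in detail, and the example host is hypothetical):

```typescript
function normalizeEndpoint(resourceUrl?: string): string {
  // Fall back to the hard-coded DashScope-compatible endpoint.
  const base =
    resourceUrl || 'https://dashscope.aliyuncs.com/compatible-mode/v1';
  // Add a protocol if the caller passed a bare host.
  const withProtocol = /^https?:\/\//.test(base) ? base : `https://${base}`;
  // Ensure the '/v1' suffix is present exactly once.
  return withProtocol.endsWith('/v1') ? withProtocol : `${withProtocol}/v1`;
}

normalizeEndpoint('my-endpoint.example'); // 'https://my-endpoint.example/v1'
```
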
@@ -1,372 +0,0 @@
/**
 * @license
 * Copyright 2025 Google LLC
 * SPDX-License-Identifier: Apache-2.0
 */

import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
import {
  ChatCompressionService,
  findCompressSplitPoint,
} from './chatCompressionService.js';
import type { Content, GenerateContentResponse } from '@google/genai';
import { CompressionStatus } from '../core/turn.js';
import { uiTelemetryService } from '../telemetry/uiTelemetry.js';
import { tokenLimit } from '../core/tokenLimits.js';
import type { GeminiChat } from '../core/geminiChat.js';
import type { Config } from '../config/config.js';
import { getInitialChatHistory } from '../utils/environmentContext.js';
import type { ContentGenerator } from '../core/contentGenerator.js';

vi.mock('../telemetry/uiTelemetry.js');
vi.mock('../core/tokenLimits.js');
vi.mock('../telemetry/loggers.js');
vi.mock('../utils/environmentContext.js');

describe('findCompressSplitPoint', () => {
  it('should throw an error for non-positive numbers', () => {
    expect(() => findCompressSplitPoint([], 0)).toThrow(
      'Fraction must be between 0 and 1',
    );
  });

  it('should throw an error for a fraction greater than or equal to 1', () => {
    expect(() => findCompressSplitPoint([], 1)).toThrow(
      'Fraction must be between 0 and 1',
    );
  });

  it('should handle an empty history', () => {
    expect(findCompressSplitPoint([], 0.5)).toBe(0);
  });

  it('should handle a fraction in the middle', () => {
    const history: Content[] = [
      { role: 'user', parts: [{ text: 'This is the first message.' }] }, // JSON length: 66 (19%)
      { role: 'model', parts: [{ text: 'This is the second message.' }] }, // JSON length: 68 (40%)
      { role: 'user', parts: [{ text: 'This is the third message.' }] }, // JSON length: 66 (60%)
      { role: 'model', parts: [{ text: 'This is the fourth message.' }] }, // JSON length: 68 (80%)
      { role: 'user', parts: [{ text: 'This is the fifth message.' }] }, // JSON length: 65 (100%)
    ];
    expect(findCompressSplitPoint(history, 0.5)).toBe(4);
  });

  it('should handle a fraction of last index', () => {
    const history: Content[] = [
      { role: 'user', parts: [{ text: 'This is the first message.' }] }, // JSON length: 66 (19%)
      { role: 'model', parts: [{ text: 'This is the second message.' }] }, // JSON length: 68 (40%)
      { role: 'user', parts: [{ text: 'This is the third message.' }] }, // JSON length: 66 (60%)
      { role: 'model', parts: [{ text: 'This is the fourth message.' }] }, // JSON length: 68 (80%)
      { role: 'user', parts: [{ text: 'This is the fifth message.' }] }, // JSON length: 65 (100%)
    ];
    expect(findCompressSplitPoint(history, 0.9)).toBe(4);
  });

  it('should handle a fraction of after last index', () => {
    const history: Content[] = [
      { role: 'user', parts: [{ text: 'This is the first message.' }] }, // JSON length: 66 (24%)
      { role: 'model', parts: [{ text: 'This is the second message.' }] }, // JSON length: 68 (50%)
      { role: 'user', parts: [{ text: 'This is the third message.' }] }, // JSON length: 66 (74%)
      { role: 'model', parts: [{ text: 'This is the fourth message.' }] }, // JSON length: 68 (100%)
    ];
    expect(findCompressSplitPoint(history, 0.8)).toBe(4);
  });

  it('should return an earlier split point if no valid ones are after the threshold', () => {
    const history: Content[] = [
      { role: 'user', parts: [{ text: 'This is the first message.' }] },
      { role: 'model', parts: [{ text: 'This is the second message.' }] },
      { role: 'user', parts: [{ text: 'This is the third message.' }] },
      { role: 'model', parts: [{ functionCall: { name: 'foo', args: {} } }] },
    ];
    // Can't return 4 because the previous item has a function call.
    expect(findCompressSplitPoint(history, 0.99)).toBe(2);
  });

  it('should handle a history with only one item', () => {
    const historyWithEmptyParts: Content[] = [
      { role: 'user', parts: [{ text: 'Message 1' }] },
    ];
    expect(findCompressSplitPoint(historyWithEmptyParts, 0.5)).toBe(0);
  });

  it('should handle history with weird parts', () => {
    const historyWithEmptyParts: Content[] = [
      { role: 'user', parts: [{ text: 'Message 1' }] },
      {
        role: 'model',
        parts: [{ fileData: { fileUri: 'derp', mimeType: 'text/plain' } }],
      },
      { role: 'user', parts: [{ text: 'Message 2' }] },
    ];
    expect(findCompressSplitPoint(historyWithEmptyParts, 0.5)).toBe(2);
  });
});

describe('ChatCompressionService', () => {
  let service: ChatCompressionService;
  let mockChat: GeminiChat;
  let mockConfig: Config;
  const mockModel = 'gemini-pro';
  const mockPromptId = 'test-prompt-id';

  beforeEach(() => {
    service = new ChatCompressionService();
    mockChat = {
      getHistory: vi.fn(),
    } as unknown as GeminiChat;
    mockConfig = {
      getChatCompression: vi.fn(),
      getContentGenerator: vi.fn(),
    } as unknown as Config;

    vi.mocked(tokenLimit).mockReturnValue(1000);
    vi.mocked(uiTelemetryService.getLastPromptTokenCount).mockReturnValue(500);
    vi.mocked(getInitialChatHistory).mockImplementation(
      async (_config, extraHistory) => extraHistory || [],
    );
  });

  afterEach(() => {
    vi.restoreAllMocks();
  });

  it('should return NOOP if history is empty', async () => {
    vi.mocked(mockChat.getHistory).mockReturnValue([]);
    const result = await service.compress(
      mockChat,
      mockPromptId,
      false,
      mockModel,
      mockConfig,
      false,
    );
    expect(result.info.compressionStatus).toBe(CompressionStatus.NOOP);
    expect(result.newHistory).toBeNull();
  });

  it('should return NOOP if previously failed and not forced', async () => {
    vi.mocked(mockChat.getHistory).mockReturnValue([
      { role: 'user', parts: [{ text: 'hi' }] },
    ]);
    const result = await service.compress(
      mockChat,
      mockPromptId,
      false,
      mockModel,
      mockConfig,
      true,
    );
    expect(result.info.compressionStatus).toBe(CompressionStatus.NOOP);
    expect(result.newHistory).toBeNull();
  });

  it('should return NOOP if under token threshold and not forced', async () => {
    vi.mocked(mockChat.getHistory).mockReturnValue([
      { role: 'user', parts: [{ text: 'hi' }] },
    ]);
    vi.mocked(uiTelemetryService.getLastPromptTokenCount).mockReturnValue(600);
    vi.mocked(tokenLimit).mockReturnValue(1000);
    // Threshold is 0.7 * 1000 = 700. 600 < 700, so NOOP.

    const result = await service.compress(
      mockChat,
      mockPromptId,
      false,
      mockModel,
      mockConfig,
      false,
    );
    expect(result.info.compressionStatus).toBe(CompressionStatus.NOOP);
    expect(result.newHistory).toBeNull();
  });

  it('should compress if over token threshold', async () => {
    const history: Content[] = [
      { role: 'user', parts: [{ text: 'msg1' }] },
      { role: 'model', parts: [{ text: 'msg2' }] },
      { role: 'user', parts: [{ text: 'msg3' }] },
      { role: 'model', parts: [{ text: 'msg4' }] },
    ];
    vi.mocked(mockChat.getHistory).mockReturnValue(history);
    vi.mocked(uiTelemetryService.getLastPromptTokenCount).mockReturnValue(800);
    vi.mocked(tokenLimit).mockReturnValue(1000);
    const mockGenerateContent = vi.fn().mockResolvedValue({
      candidates: [
        {
          content: {
            parts: [{ text: 'Summary' }],
          },
        },
      ],
    } as unknown as GenerateContentResponse);
    vi.mocked(mockConfig.getContentGenerator).mockReturnValue({
      generateContent: mockGenerateContent,
    } as unknown as ContentGenerator);

    const result = await service.compress(
      mockChat,
      mockPromptId,
      false,
      mockModel,
      mockConfig,
      false,
    );

    expect(result.info.compressionStatus).toBe(CompressionStatus.COMPRESSED);
    expect(result.newHistory).not.toBeNull();
    expect(result.newHistory![0].parts![0].text).toBe('Summary');
    expect(mockGenerateContent).toHaveBeenCalled();
  });

  it('should force compress even if under threshold', async () => {
    const history: Content[] = [
      { role: 'user', parts: [{ text: 'msg1' }] },
      { role: 'model', parts: [{ text: 'msg2' }] },
      { role: 'user', parts: [{ text: 'msg3' }] },
      { role: 'model', parts: [{ text: 'msg4' }] },
    ];
    vi.mocked(mockChat.getHistory).mockReturnValue(history);
    vi.mocked(uiTelemetryService.getLastPromptTokenCount).mockReturnValue(100);
    vi.mocked(tokenLimit).mockReturnValue(1000);

    const mockGenerateContent = vi.fn().mockResolvedValue({
      candidates: [
        {
          content: {
            parts: [{ text: 'Summary' }],
          },
        },
      ],
    } as unknown as GenerateContentResponse);
    vi.mocked(mockConfig.getContentGenerator).mockReturnValue({
      generateContent: mockGenerateContent,
    } as unknown as ContentGenerator);

    const result = await service.compress(
      mockChat,
      mockPromptId,
      true, // forced
      mockModel,
      mockConfig,
      false,
    );

    expect(result.info.compressionStatus).toBe(CompressionStatus.COMPRESSED);
    expect(result.newHistory).not.toBeNull();
  });

  it('should return FAILED if new token count is inflated', async () => {
    const history: Content[] = [
      { role: 'user', parts: [{ text: 'msg1' }] },
      { role: 'model', parts: [{ text: 'msg2' }] },
    ];
    vi.mocked(mockChat.getHistory).mockReturnValue(history);
    vi.mocked(uiTelemetryService.getLastPromptTokenCount).mockReturnValue(10);
    vi.mocked(tokenLimit).mockReturnValue(1000);

    const longSummary = 'a'.repeat(1000); // Long summary to inflate token count
    const mockGenerateContent = vi.fn().mockResolvedValue({
      candidates: [
        {
          content: {
            parts: [{ text: longSummary }],
          },
        },
      ],
    } as unknown as GenerateContentResponse);
    vi.mocked(mockConfig.getContentGenerator).mockReturnValue({
      generateContent: mockGenerateContent,
    } as unknown as ContentGenerator);

    const result = await service.compress(
      mockChat,
      mockPromptId,
      true,
      mockModel,
      mockConfig,
      false,
    );

    expect(result.info.compressionStatus).toBe(
      CompressionStatus.COMPRESSION_FAILED_INFLATED_TOKEN_COUNT,
    );
    expect(result.newHistory).toBeNull();
  });

  it('should return FAILED if summary is empty string', async () => {
    const history: Content[] = [
      { role: 'user', parts: [{ text: 'msg1' }] },
      { role: 'model', parts: [{ text: 'msg2' }] },
    ];
    vi.mocked(mockChat.getHistory).mockReturnValue(history);
    vi.mocked(uiTelemetryService.getLastPromptTokenCount).mockReturnValue(100);
    vi.mocked(tokenLimit).mockReturnValue(1000);

    const mockGenerateContent = vi.fn().mockResolvedValue({
      candidates: [
        {
          content: {
            parts: [{ text: '' }], // Empty summary
          },
        },
      ],
    } as unknown as GenerateContentResponse);
    vi.mocked(mockConfig.getContentGenerator).mockReturnValue({
      generateContent: mockGenerateContent,
    } as unknown as ContentGenerator);

    const result = await service.compress(
      mockChat,
      mockPromptId,
      true,
      mockModel,
      mockConfig,
      false,
    );

    expect(result.info.compressionStatus).toBe(
      CompressionStatus.COMPRESSION_FAILED_EMPTY_SUMMARY,
    );
    expect(result.newHistory).toBeNull();
    expect(result.info.originalTokenCount).toBe(100);
    expect(result.info.newTokenCount).toBe(100);
  });

  it('should return FAILED if summary is only whitespace', async () => {
    const history: Content[] = [
      { role: 'user', parts: [{ text: 'msg1' }] },
      { role: 'model', parts: [{ text: 'msg2' }] },
    ];
    vi.mocked(mockChat.getHistory).mockReturnValue(history);
    vi.mocked(uiTelemetryService.getLastPromptTokenCount).mockReturnValue(100);
    vi.mocked(tokenLimit).mockReturnValue(1000);

    const mockGenerateContent = vi.fn().mockResolvedValue({
      candidates: [
        {
          content: {
            parts: [{ text: ' \n\t ' }], // Only whitespace
          },
        },
      ],
    } as unknown as GenerateContentResponse);
    vi.mocked(mockConfig.getContentGenerator).mockReturnValue({
      generateContent: mockGenerateContent,
    } as unknown as ContentGenerator);

    const result = await service.compress(
      mockChat,
      mockPromptId,
      true,
      mockModel,
      mockConfig,
      false,
    );

    expect(result.info.compressionStatus).toBe(
      CompressionStatus.COMPRESSION_FAILED_EMPTY_SUMMARY,
    );
    expect(result.newHistory).toBeNull();
  });
});

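One of the deleted tests above is worth tracing, because it exercises both fallback branches of findCompressSplitPoint at once:

```typescript
const history: Content[] = [
  { role: 'user', parts: [{ text: 'This is the first message.' }] },
  { role: 'model', parts: [{ text: 'This is the second message.' }] },
  { role: 'user', parts: [{ text: 'This is the third message.' }] },
  { role: 'model', parts: [{ functionCall: { name: 'foo', args: {} } }] },
];

// With fraction 0.99 the target sits at ~99% of the total characters, past
// every user turn, so the loop never returns. Compressing everything is also
// unsafe, because the final model turn carries a functionCall. The function
// therefore falls back to the last user-turn index it saw before the target:
findCompressSplitPoint(history, 0.99); // 2
```
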
@@ -1,235 +0,0 @@
|
||||
/**
|
||||
* @license
|
||||
* Copyright 2025 Google LLC
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
import type { Content } from '@google/genai';
|
||||
import type { Config } from '../config/config.js';
|
||||
import type { GeminiChat } from '../core/geminiChat.js';
|
||||
import { type ChatCompressionInfo, CompressionStatus } from '../core/turn.js';
|
||||
import { uiTelemetryService } from '../telemetry/uiTelemetry.js';
|
||||
import { tokenLimit } from '../core/tokenLimits.js';
|
||||
import { getCompressionPrompt } from '../core/prompts.js';
|
||||
import { getResponseText } from '../utils/partUtils.js';
|
||||
import { logChatCompression } from '../telemetry/loggers.js';
|
||||
import { makeChatCompressionEvent } from '../telemetry/types.js';
|
||||
import { getInitialChatHistory } from '../utils/environmentContext.js';
|
||||
|
||||
/**
|
||||
* Threshold for compression token count as a fraction of the model's token limit.
|
||||
* If the chat history exceeds this threshold, it will be compressed.
|
||||
*/
|
||||
export const COMPRESSION_TOKEN_THRESHOLD = 0.7;
|
||||
|
||||
/**
|
||||
* The fraction of the latest chat history to keep. A value of 0.3
|
||||
* means that only the last 30% of the chat history will be kept after compression.
|
||||
*/
|
||||
export const COMPRESSION_PRESERVE_THRESHOLD = 0.3;
|
||||
|
||||
/**
|
||||
* Returns the index of the oldest item to keep when compressing. May return
|
||||
* contents.length which indicates that everything should be compressed.
|
||||
*
|
||||
* Exported for testing purposes.
|
||||
*/
|
||||
export function findCompressSplitPoint(
|
||||
contents: Content[],
|
||||
fraction: number,
|
||||
): number {
|
||||
if (fraction <= 0 || fraction >= 1) {
|
||||
throw new Error('Fraction must be between 0 and 1');
|
||||
}
|
||||
|
||||
const charCounts = contents.map((content) => JSON.stringify(content).length);
|
||||
const totalCharCount = charCounts.reduce((a, b) => a + b, 0);
|
||||
const targetCharCount = totalCharCount * fraction;
|
||||
|
||||
let lastSplitPoint = 0; // 0 is always valid (compress nothing)
|
||||
let cumulativeCharCount = 0;
|
||||
for (let i = 0; i < contents.length; i++) {
|
||||
const content = contents[i];
|
||||
if (
|
||||
content.role === 'user' &&
|
||||
!content.parts?.some((part) => !!part.functionResponse)
|
||||
) {
|
||||
if (cumulativeCharCount >= targetCharCount) {
|
||||
return i;
|
||||
}
|
||||
lastSplitPoint = i;
|
||||
}
|
||||
cumulativeCharCount += charCounts[i];
|
||||
}
|
||||
|
||||
// We found no split points after targetCharCount.
|
||||
// Check if it's safe to compress everything.
|
||||
const lastContent = contents[contents.length - 1];
|
||||
if (
|
||||
lastContent?.role === 'model' &&
|
||||
!lastContent?.parts?.some((part) => part.functionCall)
|
||||
) {
|
||||
return contents.length;
|
||||
}
|
||||
|
||||
// Can't compress everything so just compress at last splitpoint.
|
||||
return lastSplitPoint;
|
||||
}
|
||||
|
||||
export class ChatCompressionService {
  async compress(
    chat: GeminiChat,
    promptId: string,
    force: boolean,
    model: string,
    config: Config,
    hasFailedCompressionAttempt: boolean,
  ): Promise<{ newHistory: Content[] | null; info: ChatCompressionInfo }> {
    const curatedHistory = chat.getHistory(true);

    // Regardless of `force`, don't do anything if the history is empty.
    if (
      curatedHistory.length === 0 ||
      (hasFailedCompressionAttempt && !force)
    ) {
      return {
        newHistory: null,
        info: {
          originalTokenCount: 0,
          newTokenCount: 0,
          compressionStatus: CompressionStatus.NOOP,
        },
      };
    }

    const originalTokenCount = uiTelemetryService.getLastPromptTokenCount();

    const contextPercentageThreshold =
      config.getChatCompression()?.contextPercentageThreshold;

    // Don't compress if not forced and we are under the limit.
    if (!force) {
      const threshold =
        contextPercentageThreshold ?? COMPRESSION_TOKEN_THRESHOLD;
      if (originalTokenCount < threshold * tokenLimit(model)) {
        return {
          newHistory: null,
          info: {
            originalTokenCount,
            newTokenCount: originalTokenCount,
            compressionStatus: CompressionStatus.NOOP,
          },
        };
      }
    }

    const splitPoint = findCompressSplitPoint(
      curatedHistory,
      1 - COMPRESSION_PRESERVE_THRESHOLD,
    );

    const historyToCompress = curatedHistory.slice(0, splitPoint);
    const historyToKeep = curatedHistory.slice(splitPoint);

    if (historyToCompress.length === 0) {
      return {
        newHistory: null,
        info: {
          originalTokenCount,
          newTokenCount: originalTokenCount,
          compressionStatus: CompressionStatus.NOOP,
        },
      };
    }

    const summaryResponse = await config.getContentGenerator().generateContent(
      {
        model,
        contents: [
          ...historyToCompress,
          {
            role: 'user',
            parts: [
              {
                text: 'First, reason in your scratchpad. Then, generate the <state_snapshot>.',
              },
            ],
          },
        ],
        config: {
          systemInstruction: getCompressionPrompt(),
        },
      },
      promptId,
    );
    const summary = getResponseText(summaryResponse) ?? '';
    const isSummaryEmpty = !summary || summary.trim().length === 0;

    let newTokenCount = originalTokenCount;
    let extraHistory: Content[] = [];

    if (!isSummaryEmpty) {
      extraHistory = [
        {
          role: 'user',
          parts: [{ text: summary }],
        },
        {
          role: 'model',
          parts: [{ text: 'Got it. Thanks for the additional context!' }],
        },
        ...historyToKeep,
      ];

      // Use a shared utility to construct the initial history for an accurate token count.
      const fullNewHistory = await getInitialChatHistory(config, extraHistory);

      // Estimate token count: 1 token ≈ 4 characters
      newTokenCount = Math.floor(
        fullNewHistory.reduce(
          (total, content) => total + JSON.stringify(content).length,
          0,
        ) / 4,
      );
    }

    logChatCompression(
      config,
      makeChatCompressionEvent({
        tokens_before: originalTokenCount,
        tokens_after: newTokenCount,
      }),
    );

    if (isSummaryEmpty) {
      return {
        newHistory: null,
        info: {
          originalTokenCount,
          newTokenCount: originalTokenCount,
          compressionStatus: CompressionStatus.COMPRESSION_FAILED_EMPTY_SUMMARY,
        },
      };
    } else if (newTokenCount > originalTokenCount) {
      return {
        newHistory: null,
        info: {
          originalTokenCount,
          newTokenCount,
          compressionStatus:
            CompressionStatus.COMPRESSION_FAILED_INFLATED_TOKEN_COUNT,
        },
      };
    } else {
      uiTelemetryService.setLastPromptTokenCount(newTokenCount);
      return {
        newHistory: extraHistory,
        info: {
          originalTokenCount,
          newTokenCount,
          compressionStatus: CompressionStatus.COMPRESSED,
        },
      };
    }
  }
}
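As a rough worked example of the skip condition and the size estimate above (the limit and threshold values here are assumptions for illustration, not the repository's actual constants):

```ts
// If tokenLimit(model) were 32_000 and the effective threshold 0.7 (either
// chatCompression.contextPercentageThreshold from settings or the default
// COMPRESSION_TOKEN_THRESHOLD), an unforced compress() returns NOOP while
// the last prompt stays under 0.7 * 32_000 = 22_400 tokens.
const triggerAt = 0.7 * 32_000; // 22_400

// After summarization, the new size is estimated with the same heuristic the
// service uses: serialized JSON length divided by ~4 characters per token.
const estimateTokens = (history: Content[]): number =>
  Math.floor(
    history.reduce((total, c) => total + JSON.stringify(c).length, 0) / 4,
  );
```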
@@ -32,6 +32,7 @@ import { GeminiChat } from '../core/geminiChat.js';
import { executeToolCall } from '../core/nonInteractiveToolExecutor.js';
import type { ToolRegistry } from '../tools/tool-registry.js';
import { type AnyDeclarativeTool } from '../tools/tools.js';
import { getEnvironmentContext } from '../utils/environmentContext.js';
import { ContextState, SubAgentScope } from './subagent.js';
import type {
  ModelConfig,
@@ -43,20 +44,7 @@ import { SubagentTerminateMode } from './types.js';

vi.mock('../core/geminiChat.js');
vi.mock('../core/contentGenerator.js');
vi.mock('../utils/environmentContext.js', () => ({
  getEnvironmentContext: vi.fn().mockResolvedValue([{ text: 'Env Context' }]),
  getInitialChatHistory: vi.fn(async (_config, extraHistory) => [
    {
      role: 'user',
      parts: [{ text: 'Env Context' }],
    },
    {
      role: 'model',
      parts: [{ text: 'Got it. Thanks for the context!' }],
    },
    ...(extraHistory ?? []),
  ]),
}));
vi.mock('../utils/environmentContext.js');
vi.mock('../core/nonInteractiveToolExecutor.js');
vi.mock('../ide/ide-client.js');
vi.mock('../core/client.js');
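The hunk above replaces a module-factory mock with a plain auto-mock whose behavior is configured per test. As a generic sketch of that Vitest pattern (the module path is the one from this diff; the test wiring is illustrative):

```ts
import { beforeEach, vi } from 'vitest';
import { getEnvironmentContext } from '../utils/environmentContext.js';

// Auto-mock: every export of the module becomes a bare vi.fn().
vi.mock('../utils/environmentContext.js');

beforeEach(() => {
  vi.clearAllMocks();
  // Behavior is attached per test run, with full type inference via
  // vi.mocked(), instead of being frozen inside a hoisted factory.
  vi.mocked(getEnvironmentContext).mockResolvedValue([{ text: 'Env Context' }]);
});
```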
@@ -186,6 +174,9 @@ describe('subagent.ts', () => {
  beforeEach(async () => {
    vi.clearAllMocks();

    vi.mocked(getEnvironmentContext).mockResolvedValue([
      { text: 'Env Context' },
    ]);
    vi.mocked(createContentGenerator).mockResolvedValue({
      getGenerativeModel: vi.fn(),
      // eslint-disable-next-line @typescript-eslint/no-explicit-any

@@ -16,7 +16,7 @@ import type {
  ToolConfirmationOutcome,
  ToolCallConfirmationDetails,
} from '../tools/tools.js';
import { getInitialChatHistory } from '../utils/environmentContext.js';
import { getEnvironmentContext } from '../utils/environmentContext.js';
import type {
  Content,
  Part,
@@ -807,7 +807,11 @@ export class SubAgentScope {
      );
    }

    const envHistory = await getInitialChatHistory(this.runtimeContext);
    const envParts = await getEnvironmentContext(this.runtimeContext);
    const envHistory: Content[] = [
      { role: 'user', parts: envParts },
      { role: 'model', parts: [{ text: 'Got it. Thanks for the context!' }] },
    ];

    const start_history = [
      ...envHistory,

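With this change the subagent assembles its seed history inline rather than through `getInitialChatHistory`. Roughly, the history it starts from looks like this (the environment text stands in for whatever `getEnvironmentContext` actually returns; this sketch is illustrative):

```ts
const envParts = [{ text: '<workspace and environment details>' }];
const startHistory: Content[] = [
  { role: 'user', parts: envParts },
  { role: 'model', parts: [{ text: 'Got it. Thanks for the context!' }] },
  // ...task-specific turns are appended after the env handshake
];
```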
@@ -23,7 +23,6 @@ import { createMockWorkspaceContext } from '../test-utils/mockWorkspaceContext.j
import type { ChildProcess } from 'node:child_process';
import { spawn } from 'node:child_process';
import { ensureRipgrepPath } from '../utils/ripgrepUtils.js';
import { DEFAULT_FILE_FILTERING_OPTIONS } from '../config/constants.js';

// Mock ripgrepUtils
vi.mock('../utils/ripgrepUtils.js', () => ({
@@ -43,17 +42,11 @@ function createMockSpawn(
    outputData?: string;
    exitCode?: number;
    signal?: string;
    onCall?: (
      command: string,
      args: readonly string[],
      spawnOptions?: unknown,
    ) => void;
  } = {},
) {
  const { outputData, exitCode = 0, signal, onCall } = options;
  const { outputData, exitCode = 0, signal } = options;

  return (command: string, args: readonly string[], spawnOptions?: unknown) => {
    onCall?.(command, args, spawnOptions);
  return () => {
    const mockProcess = {
      stdout: {
        on: vi.fn(),
@@ -94,29 +87,19 @@ function createMockSpawn(
describe('RipGrepTool', () => {
  let tempRootDir: string;
  let grepTool: RipGrepTool;
  let fileExclusionsMock: { getGlobExcludes: () => string[] };
  const abortSignal = new AbortController().signal;

  const mockConfig = {
    getTargetDir: () => tempRootDir,
    getWorkspaceContext: () => createMockWorkspaceContext(tempRootDir),
    getWorkingDir: () => tempRootDir,
    getDebugMode: () => false,
    getUseBuiltinRipgrep: () => true,
  } as unknown as Config;

  beforeEach(async () => {
    vi.clearAllMocks();
    (ensureRipgrepPath as Mock).mockResolvedValue('/mock/path/to/rg');
    mockSpawn.mockReset();
    mockSpawn.mockClear();
    tempRootDir = await fs.mkdtemp(path.join(os.tmpdir(), 'grep-tool-root-'));
    fileExclusionsMock = {
      getGlobExcludes: vi.fn().mockReturnValue([]),
    };
    Object.assign(mockConfig, {
      getFileExclusions: () => fileExclusionsMock,
      getFileFilteringOptions: () => DEFAULT_FILE_FILTERING_OPTIONS,
    });
    grepTool = new RipGrepTool(mockConfig);

    // Create some test files and directories
@@ -154,11 +137,11 @@ describe('RipGrepTool', () => {
    expect(grepTool.validateToolParams(params)).toBeNull();
  });

  it('should return null for valid params (pattern, path, and glob)', () => {
  it('should return null for valid params (pattern, path, and include)', () => {
    const params: RipGrepToolParams = {
      pattern: 'hello',
      path: '.',
      glob: '*.txt',
      include: '*.txt',
    };
    expect(grepTool.validateToolParams(params)).toBeNull();
  });
@@ -170,11 +153,9 @@ describe('RipGrepTool', () => {
    );
  });

  it('should surface an error for invalid regex pattern', () => {
  it('should return null for what would be an invalid regex pattern', () => {
    const params: RipGrepToolParams = { pattern: '[[' };
    expect(grepTool.validateToolParams(params)).toContain(
      'Invalid regular expression pattern: [[',
    );
    expect(grepTool.validateToolParams(params)).toBeNull();
  });

  it('should return error if path does not exist', () => {
@@ -213,11 +194,13 @@ describe('RipGrepTool', () => {
    expect(result.llmContent).toContain(
      'Found 3 matches for pattern "world" in the workspace directory',
    );
    expect(result.llmContent).toContain('fileA.txt:1:hello world');
    expect(result.llmContent).toContain('fileA.txt:2:second line with world');
    expect(result.llmContent).toContain('File: fileA.txt');
    expect(result.llmContent).toContain('L1: hello world');
    expect(result.llmContent).toContain('L2: second line with world');
    expect(result.llmContent).toContain(
      'sub/fileC.txt:1:another world in sub dir',
      `File: ${path.join('sub', 'fileC.txt')}`,
    );
    expect(result.llmContent).toContain('L1: another world in sub dir');
    expect(result.returnDisplay).toBe('Found 3 matches');
  });

@@ -236,33 +219,12 @@ describe('RipGrepTool', () => {
    expect(result.llmContent).toContain(
      'Found 1 match for pattern "world" in path "sub"',
    );
    expect(result.llmContent).toContain(
      'fileC.txt:1:another world in sub dir',
    );
    expect(result.llmContent).toContain('File: fileC.txt'); // Path relative to 'sub'
    expect(result.llmContent).toContain('L1: another world in sub dir');
    expect(result.returnDisplay).toBe('Found 1 match');
  });

  it('should use target directory when path is not provided', async () => {
    mockSpawn.mockImplementationOnce(
      createMockSpawn({
        outputData: `fileA.txt:1:hello world${EOL}`,
        exitCode: 0,
        onCall: (_, args) => {
          // Should search in the target directory (tempRootDir)
          expect(args[args.length - 1]).toBe(tempRootDir);
        },
      }),
    );

    const params: RipGrepToolParams = { pattern: 'world' };
    const invocation = grepTool.build(params);
    const result = await invocation.execute(abortSignal);
    expect(result.llmContent).toContain(
      'Found 1 match for pattern "world" in the workspace directory',
    );
  });

  it('should find matches with a glob filter', async () => {
  it('should find matches with an include glob', async () => {
    // Setup specific mock for this test
    mockSpawn.mockImplementationOnce(
      createMockSpawn({
@@ -271,19 +233,20 @@ describe('RipGrepTool', () => {
      }),
    );

    const params: RipGrepToolParams = { pattern: 'hello', glob: '*.js' };
    const params: RipGrepToolParams = { pattern: 'hello', include: '*.js' };
    const invocation = grepTool.build(params);
    const result = await invocation.execute(abortSignal);
    expect(result.llmContent).toContain(
      'Found 1 match for pattern "hello" in the workspace directory (filter: "*.js"):',
    );
    expect(result.llmContent).toContain('File: fileB.js');
    expect(result.llmContent).toContain(
      'fileB.js:2:function baz() { return "hello"; }',
      'L2: function baz() { return "hello"; }',
    );
    expect(result.returnDisplay).toBe('Found 1 match');
  });

  it('should find matches with a glob filter and path', async () => {
  it('should find matches with an include glob and path', async () => {
    await fs.writeFile(
      path.join(tempRootDir, 'sub', 'another.js'),
      'const greeting = "hello";',
@@ -328,115 +291,18 @@ describe('RipGrepTool', () => {
    const params: RipGrepToolParams = {
      pattern: 'hello',
      path: 'sub',
      glob: '*.js',
      include: '*.js',
    };
    const invocation = grepTool.build(params);
    const result = await invocation.execute(abortSignal);
    expect(result.llmContent).toContain(
      'Found 1 match for pattern "hello" in path "sub" (filter: "*.js")',
    );
    expect(result.llmContent).toContain(
      'another.js:1:const greeting = "hello";',
    );
    expect(result.llmContent).toContain('File: another.js');
    expect(result.llmContent).toContain('L1: const greeting = "hello";');
    expect(result.returnDisplay).toBe('Found 1 match');
  });

  it('should pass .qwenignore to ripgrep when respected', async () => {
    await fs.writeFile(
      path.join(tempRootDir, '.qwenignore'),
      'ignored.txt\n',
    );
    mockSpawn.mockImplementationOnce(
      createMockSpawn({
        exitCode: 1,
        onCall: (_, args) => {
          expect(args).toContain('--ignore-file');
          expect(args).toContain(path.join(tempRootDir, '.qwenignore'));
        },
      }),
    );

    const params: RipGrepToolParams = { pattern: 'secret' };
    const invocation = grepTool.build(params);
    const result = await invocation.execute(abortSignal);
    expect(result.llmContent).toContain(
      'No matches found for pattern "secret" in the workspace directory.',
    );
    expect(result.returnDisplay).toBe('No matches found');
  });

  it('should include .qwenignore matches when disabled in config', async () => {
    await fs.writeFile(path.join(tempRootDir, '.qwenignore'), 'kept.txt\n');
    await fs.writeFile(path.join(tempRootDir, 'kept.txt'), 'keep me');
    Object.assign(mockConfig, {
      getFileFilteringOptions: () => ({
        respectGitIgnore: true,
        respectQwenIgnore: false,
      }),
    });

    mockSpawn.mockImplementationOnce(
      createMockSpawn({
        outputData: `kept.txt:1:keep me${EOL}`,
        exitCode: 0,
        onCall: (_, args) => {
          expect(args).not.toContain('--ignore-file');
          expect(args).not.toContain(path.join(tempRootDir, '.qwenignore'));
        },
      }),
    );

    const params: RipGrepToolParams = { pattern: 'keep' };
    const invocation = grepTool.build(params);
    const result = await invocation.execute(abortSignal);
    expect(result.llmContent).toContain(
      'Found 1 match for pattern "keep" in the workspace directory:',
    );
    expect(result.llmContent).toContain('kept.txt:1:keep me');
    expect(result.returnDisplay).toBe('Found 1 match');
  });

  it('should disable gitignore when configured', async () => {
    Object.assign(mockConfig, {
      getFileFilteringOptions: () => ({
        respectGitIgnore: false,
        respectQwenIgnore: true,
      }),
    });

    mockSpawn.mockImplementationOnce(
      createMockSpawn({
        exitCode: 1,
        onCall: (_, args) => {
          expect(args).toContain('--no-ignore-vcs');
        },
      }),
    );

    const params: RipGrepToolParams = { pattern: 'ignored' };
    const invocation = grepTool.build(params);
    await invocation.execute(abortSignal);
  });

  it('should truncate llm content when exceeding maximum length', async () => {
    const longMatch = 'fileA.txt:1:' + 'a'.repeat(25_000);

    mockSpawn.mockImplementationOnce(
      createMockSpawn({
        outputData: `${longMatch}${EOL}`,
        exitCode: 0,
      }),
    );

    const params: RipGrepToolParams = { pattern: 'a+' };
    const invocation = grepTool.build(params);
    const result = await invocation.execute(abortSignal);

    expect(String(result.llmContent).length).toBeLessThanOrEqual(20_000);
    expect(result.llmContent).toMatch(/\[\d+ lines? truncated\] \.\.\./);
    expect(result.returnDisplay).toContain('truncated');
  });

  it('should return "No matches found" when pattern does not exist', async () => {
    // Setup specific mock for no matches
    mockSpawn.mockImplementationOnce(
@@ -454,10 +320,19 @@ describe('RipGrepTool', () => {
    expect(result.returnDisplay).toBe('No matches found');
  });

  it('should throw validation error for invalid regex pattern', async () => {
  it('should return an error from ripgrep for invalid regex pattern', async () => {
    mockSpawn.mockImplementationOnce(
      createMockSpawn({
        exitCode: 2,
      }),
    );

    const params: RipGrepToolParams = { pattern: '[[' };
    expect(() => grepTool.build(params)).toThrow(
      'Invalid regular expression pattern: [[',
    const invocation = grepTool.build(params);
    const result = await invocation.execute(abortSignal);
    expect(result.llmContent).toContain('ripgrep exited with code 2');
    expect(result.returnDisplay).toContain(
      'Error: ripgrep exited with code 2',
    );
  });

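The rewritten test relies on ripgrep's exit-code convention: 0 means at least one match, 1 means no matches, and 2 (or higher) signals an error such as an invalid pattern. A minimal sketch of honoring that convention when spawning `rg`, assuming an already-resolved binary path:

```ts
import { spawn } from 'node:child_process';

function runRipgrep(rgPath: string, args: string[]): Promise<string> {
  return new Promise((resolve, reject) => {
    const child = spawn(rgPath, args, { windowsHide: true });
    let out = '';
    let err = '';
    child.stdout.on('data', (chunk) => (out += chunk));
    child.stderr.on('data', (chunk) => (err += chunk));
    child.on('error', reject);
    child.on('close', (code) => {
      if (code === 0) resolve(out);
      // Exit code 1 is "no matches", which is a valid empty result.
      else if (code === 1) resolve('');
      // Anything else (typically 2) is a real failure, e.g. a bad regex.
      else reject(new Error(`ripgrep exited with code ${code}: ${err}`));
    });
  });
}
```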
@@ -504,7 +379,8 @@ describe('RipGrepTool', () => {
    expect(result.llmContent).toContain(
      'Found 1 match for pattern "foo.*bar" in the workspace directory:',
    );
    expect(result.llmContent).toContain('fileB.js:1:const foo = "bar";');
    expect(result.llmContent).toContain('File: fileB.js');
    expect(result.llmContent).toContain('L1: const foo = "bar";');
  });

  it('should be case-insensitive by default (JS fallback)', async () => {
@@ -554,9 +430,11 @@ describe('RipGrepTool', () => {
    expect(result.llmContent).toContain(
      'Found 2 matches for pattern "HELLO" in the workspace directory:',
    );
    expect(result.llmContent).toContain('fileA.txt:1:hello world');
    expect(result.llmContent).toContain('File: fileA.txt');
    expect(result.llmContent).toContain('L1: hello world');
    expect(result.llmContent).toContain('File: fileB.js');
    expect(result.llmContent).toContain(
      'fileB.js:2:function baz() { return "hello"; }',
      'L2: function baz() { return "hello"; }',
    );
  });

@@ -584,6 +462,191 @@ describe('RipGrepTool', () => {
    });
  });

  describe('multi-directory workspace', () => {
    it('should search across all workspace directories when no path is specified', async () => {
      // Create additional directory with test files
      const secondDir = await fs.mkdtemp(
        path.join(os.tmpdir(), 'grep-tool-second-'),
      );
      await fs.writeFile(
        path.join(secondDir, 'other.txt'),
        'hello from second directory\nworld in second',
      );
      await fs.writeFile(
        path.join(secondDir, 'another.js'),
        'function world() { return "test"; }',
      );

      // Create a mock config with multiple directories
      const multiDirConfig = {
        getTargetDir: () => tempRootDir,
        getWorkspaceContext: () =>
          createMockWorkspaceContext(tempRootDir, [secondDir]),
        getDebugMode: () => false,
      } as unknown as Config;

      // Setup specific mock for this test - multi-directory search for 'world'
      // Mock will be called twice - once for each directory
      let callCount = 0;
      mockSpawn.mockImplementation(() => {
        callCount++;
        const mockProcess = {
          stdout: {
            on: vi.fn(),
            removeListener: vi.fn(),
          },
          stderr: {
            on: vi.fn(),
            removeListener: vi.fn(),
          },
          on: vi.fn(),
          removeListener: vi.fn(),
          kill: vi.fn(),
        };

        setTimeout(() => {
          const stdoutDataHandler = mockProcess.stdout.on.mock.calls.find(
            (call) => call[0] === 'data',
          )?.[1];

          const closeHandler = mockProcess.on.mock.calls.find(
            (call) => call[0] === 'close',
          )?.[1];

          let outputData = '';
          if (callCount === 1) {
            // First directory (tempRootDir)
            outputData =
              [
                'fileA.txt:1:hello world',
                'fileA.txt:2:second line with world',
                'sub/fileC.txt:1:another world in sub dir',
              ].join(EOL) + EOL;
          } else if (callCount === 2) {
            // Second directory (secondDir)
            outputData =
              [
                'other.txt:2:world in second',
                'another.js:1:function world() { return "test"; }',
              ].join(EOL) + EOL;
          }

          if (stdoutDataHandler && outputData) {
            stdoutDataHandler(Buffer.from(outputData));
          }

          if (closeHandler) {
            closeHandler(0);
          }
        }, 0);

        return mockProcess as unknown as ChildProcess;
      });

      const multiDirGrepTool = new RipGrepTool(multiDirConfig);
      const params: RipGrepToolParams = { pattern: 'world' };
      const invocation = multiDirGrepTool.build(params);
      const result = await invocation.execute(abortSignal);

      // Should find matches in both directories
      expect(result.llmContent).toContain(
        'Found 5 matches for pattern "world"',
      );

      // Matches from first directory
      expect(result.llmContent).toContain('fileA.txt');
      expect(result.llmContent).toContain('L1: hello world');
      expect(result.llmContent).toContain('L2: second line with world');
      expect(result.llmContent).toContain('fileC.txt');
      expect(result.llmContent).toContain('L1: another world in sub dir');

      // Matches from both directories
      expect(result.llmContent).toContain('other.txt');
      expect(result.llmContent).toContain('L2: world in second');
      expect(result.llmContent).toContain('another.js');
      expect(result.llmContent).toContain('L1: function world()');

      // Clean up
      await fs.rm(secondDir, { recursive: true, force: true });
      mockSpawn.mockClear();
    });

    it('should search only specified path within workspace directories', async () => {
      // Create additional directory
      const secondDir = await fs.mkdtemp(
        path.join(os.tmpdir(), 'grep-tool-second-'),
      );
      await fs.mkdir(path.join(secondDir, 'sub'));
      await fs.writeFile(
        path.join(secondDir, 'sub', 'test.txt'),
        'hello from second sub directory',
      );

      // Create a mock config with multiple directories
      const multiDirConfig = {
        getTargetDir: () => tempRootDir,
        getWorkspaceContext: () =>
          createMockWorkspaceContext(tempRootDir, [secondDir]),
        getDebugMode: () => false,
      } as unknown as Config;

      // Setup specific mock for this test - searching in 'sub' should only return matches from that directory
      mockSpawn.mockImplementationOnce(() => {
        const mockProcess = {
          stdout: {
            on: vi.fn(),
            removeListener: vi.fn(),
          },
          stderr: {
            on: vi.fn(),
            removeListener: vi.fn(),
          },
          on: vi.fn(),
          removeListener: vi.fn(),
          kill: vi.fn(),
        };

        setTimeout(() => {
          const onData = mockProcess.stdout.on.mock.calls.find(
            (call) => call[0] === 'data',
          )?.[1];
          const onClose = mockProcess.on.mock.calls.find(
            (call) => call[0] === 'close',
          )?.[1];

          if (onData) {
            onData(Buffer.from(`fileC.txt:1:another world in sub dir${EOL}`));
          }
          if (onClose) {
            onClose(0);
          }
        }, 0);

        return mockProcess as unknown as ChildProcess;
      });

      const multiDirGrepTool = new RipGrepTool(multiDirConfig);

      // Search only in the 'sub' directory of the first workspace
      const params: RipGrepToolParams = { pattern: 'world', path: 'sub' };
      const invocation = multiDirGrepTool.build(params);
      const result = await invocation.execute(abortSignal);

      // Should only find matches in the specified sub directory
      expect(result.llmContent).toContain(
        'Found 1 match for pattern "world" in path "sub"',
      );
      expect(result.llmContent).toContain('File: fileC.txt');
      expect(result.llmContent).toContain('L1: another world in sub dir');

      // Should not contain matches from second directory
      expect(result.llmContent).not.toContain('test.txt');

      // Clean up
      await fs.rm(secondDir, { recursive: true, force: true });
    });
  });

  describe('abort signal handling', () => {
    it('should handle AbortSignal during search', async () => {
      const controller = new AbortController();
@@ -999,8 +1062,8 @@ describe('RipGrepTool', () => {
    });
  });

  describe('glob pattern filtering', () => {
    it('should handle multiple file extensions in glob pattern', async () => {
  describe('include pattern filtering', () => {
    it('should handle multiple file extensions in include pattern', async () => {
      await fs.writeFile(
        path.join(tempRootDir, 'test.ts'),
        'typescript content',
@@ -1012,7 +1075,7 @@ describe('RipGrepTool', () => {
      );
      await fs.writeFile(path.join(tempRootDir, 'test.txt'), 'text content');

      // Setup specific mock for this test - glob pattern should filter to only ts/tsx files
      // Setup specific mock for this test - include pattern should filter to only ts/tsx files
      mockSpawn.mockImplementationOnce(() => {
        const mockProcess = {
          stdout: {
@@ -1053,7 +1116,7 @@ describe('RipGrepTool', () => {

      const params: RipGrepToolParams = {
        pattern: 'content',
        glob: '*.{ts,tsx}',
        include: '*.{ts,tsx}',
      };
      const invocation = grepTool.build(params);
      const result = await invocation.execute(abortSignal);
@@ -1064,7 +1127,7 @@ describe('RipGrepTool', () => {
      expect(result.llmContent).not.toContain('test.txt');
    });

    it('should handle directory patterns in glob', async () => {
    it('should handle directory patterns in include', async () => {
      await fs.mkdir(path.join(tempRootDir, 'src'), { recursive: true });
      await fs.writeFile(
        path.join(tempRootDir, 'src', 'main.ts'),
@@ -1072,7 +1135,7 @@ describe('RipGrepTool', () => {
      );
      await fs.writeFile(path.join(tempRootDir, 'other.ts'), 'other code');

      // Setup specific mock for this test - glob pattern should filter to only src/** files
      // Setup specific mock for this test - include pattern should filter to only src/** files
      mockSpawn.mockImplementationOnce(() => {
        const mockProcess = {
          stdout: {
@@ -1109,7 +1172,7 @@ describe('RipGrepTool', () => {

      const params: RipGrepToolParams = {
        pattern: 'code',
        glob: 'src/**',
        include: 'src/**',
      };
      const invocation = grepTool.build(params);
      const result = await invocation.execute(abortSignal);
@@ -1126,10 +1189,10 @@ describe('RipGrepTool', () => {
      expect(invocation.getDescription()).toBe("'testPattern'");
    });

    it('should generate correct description with pattern and glob', () => {
    it('should generate correct description with pattern and include', () => {
      const params: RipGrepToolParams = {
        pattern: 'testPattern',
        glob: '*.ts',
        include: '*.ts',
      };
      const invocation = grepTool.build(params);
      expect(invocation.getDescription()).toBe("'testPattern' in *.ts");
@@ -1148,18 +1211,29 @@ describe('RipGrepTool', () => {
      expect(invocation.getDescription()).toContain(path.join('src', 'app'));
    });

    it('should generate correct description with default search path', () => {
    it('should indicate searching across all workspace directories when no path specified', () => {
      // Create a mock config with multiple directories
      const multiDirConfig = {
        getTargetDir: () => tempRootDir,
        getWorkspaceContext: () =>
          createMockWorkspaceContext(tempRootDir, ['/another/dir']),
        getDebugMode: () => false,
      } as unknown as Config;

      const multiDirGrepTool = new RipGrepTool(multiDirConfig);
      const params: RipGrepToolParams = { pattern: 'testPattern' };
      const invocation = grepTool.build(params);
      expect(invocation.getDescription()).toBe("'testPattern'");
      const invocation = multiDirGrepTool.build(params);
      expect(invocation.getDescription()).toBe(
        "'testPattern' across all workspace directories",
      );
    });

    it('should generate correct description with pattern, glob, and path', async () => {
    it('should generate correct description with pattern, include, and path', async () => {
      const dirPath = path.join(tempRootDir, 'src', 'app');
      await fs.mkdir(dirPath, { recursive: true });
      const params: RipGrepToolParams = {
        pattern: 'testPattern',
        glob: '*.ts',
        include: '*.ts',
        path: path.join('src', 'app'),
      };
      const invocation = grepTool.build(params);

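For reference, the assertions above expect the tool's `llmContent` to use the grouped, per-file format rather than raw `file:line:text` rows. Assembled from the test fixtures, the output looks roughly like:

```
Found 3 matches for pattern "world" in the workspace directory:
---
File: fileA.txt
L1: hello world
L2: second line with world
---
File: sub/fileC.txt
L1: another world in sub dir
---
```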
@@ -10,19 +10,16 @@ import { EOL } from 'node:os';
import { spawn } from 'node:child_process';
import type { ToolInvocation, ToolResult } from './tools.js';
import { BaseDeclarativeTool, BaseToolInvocation, Kind } from './tools.js';
import { ToolNames } from './tool-names.js';
import { SchemaValidator } from '../utils/schemaValidator.js';
import { makeRelative, shortenPath } from '../utils/paths.js';
import { getErrorMessage, isNodeError } from '../utils/errors.js';
import type { Config } from '../config/config.js';
import { ensureRipgrepPath } from '../utils/ripgrepUtils.js';
import { SchemaValidator } from '../utils/schemaValidator.js';
import type { FileFilteringOptions } from '../config/constants.js';
import { DEFAULT_FILE_FILTERING_OPTIONS } from '../config/constants.js';

const MAX_LLM_CONTENT_LENGTH = 20_000;
const DEFAULT_TOTAL_MAX_MATCHES = 20000;

/**
 * Parameters for the GrepTool (Simplified)
 * Parameters for the GrepTool
 */
export interface RipGrepToolParams {
  /**
@@ -36,14 +33,18 @@ export interface RipGrepToolParams {
  path?: string;

  /**
   * Glob pattern to filter files (e.g. "*.js", "*.{ts,tsx}")
   * File pattern to include in the search (e.g. "*.js", "*.{ts,tsx}")
   */
  glob?: string;
  include?: string;
}

/**
 * Maximum number of matching lines to return (optional, shows all if not specified)
 */
limit?: number;
/**
 * Result object for a single grep match
 */
interface GrepMatch {
  filePath: string;
  lineNumber: number;
  line: string;
}

class GrepToolInvocation extends BaseToolInvocation<
@@ -60,15 +61,18 @@ class GrepToolInvocation extends BaseToolInvocation<
  /**
   * Checks if a path is within the root directory and resolves it.
   * @param relativePath Path relative to the root directory (or undefined for root).
   * @returns The absolute path to search within.
   * @returns The absolute path if valid and exists, or null if no path specified (to search all directories).
   * @throws {Error} If path is outside root, doesn't exist, or isn't a directory.
   */
  private resolveAndValidatePath(relativePath?: string): string {
    const targetDir = this.config.getTargetDir();
    const targetPath = relativePath
      ? path.resolve(targetDir, relativePath)
      : targetDir;
  private resolveAndValidatePath(relativePath?: string): string | null {
    // If no path specified, return null to indicate searching all workspace directories
    if (!relativePath) {
      return null;
    }

    const targetPath = path.resolve(this.config.getTargetDir(), relativePath);

    // Security Check: Ensure the resolved path is within workspace boundaries
    const workspaceContext = this.config.getWorkspaceContext();
    if (!workspaceContext.isPathWithinWorkspace(targetPath)) {
      const directories = workspaceContext.getDirectories();
@@ -77,10 +81,7 @@ class GrepToolInvocation extends BaseToolInvocation<
      );
    }

    return this.ensureDirectory(targetPath);
  }

  private ensureDirectory(targetPath: string): string {
    // Check existence and type after resolving
    try {
      const stats = fs.statSync(targetPath);
      if (!stats.isDirectory()) {
@@ -100,81 +101,104 @@ class GrepToolInvocation extends BaseToolInvocation<

  async execute(signal: AbortSignal): Promise<ToolResult> {
    try {
      const workspaceContext = this.config.getWorkspaceContext();
      const searchDirAbs = this.resolveAndValidatePath(this.params.path);
      const searchDirDisplay = this.params.path || '.';

      // Get raw ripgrep output
      const rawOutput = await this.performRipgrepSearch({
        pattern: this.params.pattern,
        path: searchDirAbs,
        glob: this.params.glob,
        signal,
      });
      // Determine which directories to search
      let searchDirectories: readonly string[];
      if (searchDirAbs === null) {
        // No path specified - search all workspace directories
        searchDirectories = workspaceContext.getDirectories();
      } else {
        // Specific path provided - search only that directory
        searchDirectories = [searchDirAbs];
      }

      // Build search description
      const searchLocationDescription = this.params.path
        ? `in path "${searchDirDisplay}"`
        : `in the workspace directory`;
      let allMatches: GrepMatch[] = [];
      const totalMaxMatches = DEFAULT_TOTAL_MAX_MATCHES;

      const filterDescription = this.params.glob
        ? ` (filter: "${this.params.glob}")`
        : '';
      if (this.config.getDebugMode()) {
        console.log(`[GrepTool] Total result limit: ${totalMaxMatches}`);
      }

      // Check if we have any matches
      if (!rawOutput.trim()) {
        const noMatchMsg = `No matches found for pattern "${this.params.pattern}" ${searchLocationDescription}${filterDescription}.`;
      for (const searchDir of searchDirectories) {
        const searchResult = await this.performRipgrepSearch({
          pattern: this.params.pattern,
          path: searchDir,
          include: this.params.include,
          signal,
        });

        if (searchDirectories.length > 1) {
          const dirName = path.basename(searchDir);
          searchResult.forEach((match) => {
            match.filePath = path.join(dirName, match.filePath);
          });
        }

        allMatches = allMatches.concat(searchResult);

        if (allMatches.length >= totalMaxMatches) {
          allMatches = allMatches.slice(0, totalMaxMatches);
          break;
        }
      }

      let searchLocationDescription: string;
      if (searchDirAbs === null) {
        const numDirs = workspaceContext.getDirectories().length;
        searchLocationDescription =
          numDirs > 1
            ? `across ${numDirs} workspace directories`
            : `in the workspace directory`;
      } else {
        searchLocationDescription = `in path "${searchDirDisplay}"`;
      }

      if (allMatches.length === 0) {
        const noMatchMsg = `No matches found for pattern "${this.params.pattern}" ${searchLocationDescription}${this.params.include ? ` (filter: "${this.params.include}")` : ''}.`;
        return { llmContent: noMatchMsg, returnDisplay: `No matches found` };
      }

      // Split into lines and count total matches
      const allLines = rawOutput.split(EOL).filter((line) => line.trim());
      const totalMatches = allLines.length;
      const matchTerm = totalMatches === 1 ? 'match' : 'matches';
      const wasTruncated = allMatches.length >= totalMaxMatches;

      // Build header early to calculate available space
      const header = `Found ${totalMatches} ${matchTerm} for pattern "${this.params.pattern}" ${searchLocationDescription}${filterDescription}:\n---\n`;
      const maxTruncationNoticeLength = 100; // "[... N more matches truncated]"
      const maxGrepOutputLength =
        MAX_LLM_CONTENT_LENGTH - header.length - maxTruncationNoticeLength;
      const matchesByFile = allMatches.reduce(
        (acc, match) => {
          const fileKey = match.filePath;
          if (!acc[fileKey]) {
            acc[fileKey] = [];
          }
          acc[fileKey].push(match);
          acc[fileKey].sort((a, b) => a.lineNumber - b.lineNumber);
          return acc;
        },
        {} as Record<string, GrepMatch[]>,
      );

      // Apply line limit first (if specified)
      let truncatedByLineLimit = false;
      let linesToInclude = allLines;
      if (
        this.params.limit !== undefined &&
        allLines.length > this.params.limit
      ) {
        linesToInclude = allLines.slice(0, this.params.limit);
        truncatedByLineLimit = true;
      const matchCount = allMatches.length;
      const matchTerm = matchCount === 1 ? 'match' : 'matches';

      let llmContent = `Found ${matchCount} ${matchTerm} for pattern "${this.params.pattern}" ${searchLocationDescription}${this.params.include ? ` (filter: "${this.params.include}")` : ''}`;

      if (wasTruncated) {
        llmContent += ` (results limited to ${totalMaxMatches} matches for performance)`;
      }

      // Join lines back into grep output
      let grepOutput = linesToInclude.join(EOL);
      llmContent += `:\n---\n`;

      // Apply character limit as safety net
      let truncatedByCharLimit = false;
      if (grepOutput.length > maxGrepOutputLength) {
        grepOutput = grepOutput.slice(0, maxGrepOutputLength) + '...';
        truncatedByCharLimit = true;
      for (const filePath in matchesByFile) {
        llmContent += `File: ${filePath}\n`;
        matchesByFile[filePath].forEach((match) => {
          const trimmedLine = match.line.trim();
          llmContent += `L${match.lineNumber}: ${trimmedLine}\n`;
        });
        llmContent += '---\n';
      }

      // Count how many lines we actually included after character truncation
      const finalLines = grepOutput.split(EOL).filter((line) => line.trim());
      const includedLines = finalLines.length;

      // Build result
      let llmContent = header + grepOutput;

      // Add truncation notice if needed
      if (truncatedByLineLimit || truncatedByCharLimit) {
        const omittedMatches = totalMatches - includedLines;
        llmContent += ` [${omittedMatches} ${omittedMatches === 1 ? 'line' : 'lines'} truncated] ...`;
      }

      // Build display message (show real count, not truncated)
      let displayMessage = `Found ${totalMatches} ${matchTerm}`;
      if (truncatedByLineLimit || truncatedByCharLimit) {
        displayMessage += ` (truncated)`;
      let displayMessage = `Found ${matchCount} ${matchTerm}`;
      if (wasTruncated) {
        displayMessage += ` (limited)`;
      }

      return {
@@ -191,15 +215,53 @@ class GrepToolInvocation extends BaseToolInvocation<
    }
  }

  private parseRipgrepOutput(output: string, basePath: string): GrepMatch[] {
    const results: GrepMatch[] = [];
    if (!output) return results;

    const lines = output.split(EOL);

    for (const line of lines) {
      if (!line.trim()) continue;

      const firstColonIndex = line.indexOf(':');
      if (firstColonIndex === -1) continue;

      const secondColonIndex = line.indexOf(':', firstColonIndex + 1);
      if (secondColonIndex === -1) continue;

      const filePathRaw = line.substring(0, firstColonIndex);
      const lineNumberStr = line.substring(
        firstColonIndex + 1,
        secondColonIndex,
      );
      const lineContent = line.substring(secondColonIndex + 1);

      const lineNumber = parseInt(lineNumberStr, 10);

      if (!isNaN(lineNumber)) {
        const absoluteFilePath = path.resolve(basePath, filePathRaw);
        const relativeFilePath = path.relative(basePath, absoluteFilePath);

        results.push({
          filePath: relativeFilePath || path.basename(absoluteFilePath),
          lineNumber,
          line: lineContent,
        });
      }
    }
    return results;
  }

  private async performRipgrepSearch(options: {
    pattern: string;
    path: string;
    glob?: string;
    include?: string;
    signal: AbortSignal;
  }): Promise<string> {
    const { pattern, path: absolutePath, glob } = options;
  }): Promise<GrepMatch[]> {
    const { pattern, path: absolutePath, include } = options;

    const rgArgs: string[] = [
    const rgArgs = [
      '--line-number',
      '--no-heading',
      '--with-filename',
@@ -208,34 +270,29 @@ class GrepToolInvocation extends BaseToolInvocation<
      pattern,
    ];

    // Add file exclusions from .gitignore and .qwenignore
    const filteringOptions = this.getFileFilteringOptions();
    if (!filteringOptions.respectGitIgnore) {
      rgArgs.push('--no-ignore-vcs');
    if (include) {
      rgArgs.push('--glob', include);
    }

    if (filteringOptions.respectQwenIgnore) {
      const qwenIgnorePath = path.join(
        this.config.getTargetDir(),
        '.qwenignore',
      );
      if (fs.existsSync(qwenIgnorePath)) {
        rgArgs.push('--ignore-file', qwenIgnorePath);
      }
    }

    // Add glob pattern if provided
    if (glob) {
      rgArgs.push('--glob', glob);
    }
    const excludes = [
      '.git',
      'node_modules',
      'bower_components',
      '*.log',
      '*.tmp',
      'build',
      'dist',
      'coverage',
    ];
    excludes.forEach((exclude) => {
      rgArgs.push('--glob', `!${exclude}`);
    });

    rgArgs.push('--threads', '4');
    rgArgs.push(absolutePath);

    try {
      const rgPath = this.config.getUseBuiltinRipgrep()
        ? await ensureRipgrepPath()
        : 'rg';
      const rgPath = await ensureRipgrepPath();
      const output = await new Promise<string>((resolve, reject) => {
        const child = spawn(rgPath, rgArgs, {
          windowsHide: true,
@@ -277,33 +334,22 @@ class GrepToolInvocation extends BaseToolInvocation<
        });
      });

      return output;
      return this.parseRipgrepOutput(output, absolutePath);
    } catch (error: unknown) {
      console.error(`GrepLogic: ripgrep failed: ${getErrorMessage(error)}`);
      throw error;
    }
  }

  private getFileFilteringOptions(): FileFilteringOptions {
    const options = this.config.getFileFilteringOptions?.();
    return {
      respectGitIgnore:
        options?.respectGitIgnore ??
        DEFAULT_FILE_FILTERING_OPTIONS.respectGitIgnore,
      respectQwenIgnore:
        options?.respectQwenIgnore ??
        DEFAULT_FILE_FILTERING_OPTIONS.respectQwenIgnore,
    };
  }

  /**
   * Gets a description of the grep operation
   * @param params Parameters for the grep operation
   * @returns A string describing the grep
   */
  getDescription(): string {
    let description = `'${this.params.pattern}'`;
    if (this.params.glob) {
      description += ` in ${this.params.glob}`;
    if (this.params.include) {
      description += ` in ${this.params.include}`;
    }
    if (this.params.path) {
      const resolvedPath = path.resolve(
@@ -335,41 +381,36 @@ class GrepToolInvocation extends BaseToolInvocation<
}

/**
 * Implementation of the Grep tool logic
 * Implementation of the Grep tool logic (moved from CLI)
 */
export class RipGrepTool extends BaseDeclarativeTool<
  RipGrepToolParams,
  ToolResult
> {
  static readonly Name = ToolNames.GREP;
  static readonly Name = 'search_file_content';

  constructor(private readonly config: Config) {
    super(
      RipGrepTool.Name,
      'Grep',
      'A powerful search tool built on ripgrep\n\n Usage:\n - ALWAYS use Grep for search tasks. NEVER invoke `grep` or `rg` as a Bash command. The Grep tool has been optimized for correct permissions and access.\n - Supports full regex syntax (e.g., "log.*Error", "function\\s+\\w+")\n - Filter files with glob parameter (e.g., "*.js", "**/*.tsx")\n - Use Task tool for open-ended searches requiring multiple rounds\n - Pattern syntax: Uses ripgrep (not grep) - special regex characters need escaping (use `interface\\{\\}` to find `interface{}` in Go code)\n',
      'SearchText',
      'Searches for a regular expression pattern within the content of files in a specified directory (or current working directory). Can filter files by a glob pattern. Returns the lines containing matches, along with their file paths and line numbers. Total results limited to 20,000 matches like VSCode.',
      Kind.Search,
      {
        properties: {
          pattern: {
            type: 'string',
            description:
              'The regular expression pattern to search for in file contents',
          },
          glob: {
              "The regular expression (regex) pattern to search for within file contents (e.g., 'function\\s+myFunction', 'import\\s+\\{.*\\}\\s+from\\s+.*').",
            type: 'string',
            description:
              'Glob pattern to filter files (e.g. "*.js", "*.{ts,tsx}") - maps to rg --glob',
          },
          path: {
            description:
              'Optional: The absolute path to the directory to search within. If omitted, searches the current working directory.',
            type: 'string',
            description:
              'File or directory to search in (rg PATH). Defaults to current working directory.',
          },
          limit: {
            type: 'number',
          include: {
            description:
              'Limit output to first N lines/entries. Optional - shows all matches if not specified.',
              "Optional: A glob pattern to filter which files are searched (e.g., '*.js', '*.{ts,tsx}', 'src/**'). If omitted, searches all files (respecting potential global ignores).",
            type: 'string',
          },
        },
        required: ['pattern'],
@@ -381,13 +422,13 @@ export class RipGrepTool extends BaseDeclarativeTool<
  /**
   * Checks if a path is within the root directory and resolves it.
   * @param relativePath Path relative to the root directory (or undefined for root).
   * @returns The absolute path to search within.
   * @returns The absolute path if valid and exists, or null if no path specified (to search all directories).
   * @throws {Error} If path is outside root, doesn't exist, or isn't a directory.
   */
  private resolveAndValidatePath(relativePath?: string): string {
    // If no path specified, search within the workspace root directory
  private resolveAndValidatePath(relativePath?: string): string | null {
    // If no path specified, return null to indicate searching all workspace directories
    if (!relativePath) {
      return this.config.getTargetDir();
      return null;
    }

    const targetPath = path.resolve(this.config.getTargetDir(), relativePath);
@@ -424,9 +465,7 @@ export class RipGrepTool extends BaseDeclarativeTool<
   * @param params Parameters to validate
   * @returns An error message string if invalid, null otherwise
   */
  protected override validateToolParamValues(
    params: RipGrepToolParams,
  ): string | null {
  override validateToolParams(params: RipGrepToolParams): string | null {
    const errors = SchemaValidator.validate(
      this.schema.parametersJsonSchema,
      params,
@@ -435,13 +474,6 @@ export class RipGrepTool extends BaseDeclarativeTool<
      return errors;
    }

    // Validate pattern is a valid regex
    try {
      new RegExp(params.pattern);
    } catch (error) {
      return `Invalid regular expression pattern: ${params.pattern}. Error: ${getErrorMessage(error)}`;
    }

    // Only validate path if one is provided
    if (params.path) {
      try {

@@ -14,7 +14,7 @@ export const ToolNames = {
  WRITE_FILE: 'write_file',
  READ_FILE: 'read_file',
  READ_MANY_FILES: 'read_many_files',
  GREP: 'grep_search',
  GREP: 'search_file_content',
  GLOB: 'glob',
  SHELL: 'run_shell_command',
  TODO_WRITE: 'todo_write',

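Putting the `rgArgs` construction above together, a search for `hello` filtered with `include: '*.js'` spawns ripgrep with roughly the following arguments (the search directory is hypothetical, and any flags hidden between the diff hunks are omitted):

```ts
const rgArgs = [
  '--line-number',
  '--no-heading',
  '--with-filename',
  'hello', // the pattern
  '--glob', '*.js', // the include parameter
  // hardcoded default excludes, each pushed as --glob '!<name>'
  '--glob', '!.git',
  '--glob', '!node_modules',
  '--glob', '!bower_components',
  '--glob', '!*.log',
  '--glob', '!*.tmp',
  '--glob', '!build',
  '--glob', '!dist',
  '--glob', '!coverage',
  '--threads', '4',
  '/workspace/project', // hypothetical search directory
];
```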
packages/core/src/tools/web-search.test.ts (new file, 166 lines)
@@ -0,0 +1,166 @@
/**
 * @license
 * Copyright 2025 Google LLC
 * SPDX-License-Identifier: Apache-2.0
 */

import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
import { WebSearchTool, type WebSearchToolParams } from './web-search.js';
import type { Config } from '../config/config.js';
import { GeminiClient } from '../core/client.js';

// Mock GeminiClient and Config constructor
vi.mock('../core/client.js');
vi.mock('../config/config.js');

// Mock global fetch
const mockFetch = vi.fn();
global.fetch = mockFetch;

describe('WebSearchTool', () => {
  const abortSignal = new AbortController().signal;
  let mockGeminiClient: GeminiClient;
  let tool: WebSearchTool;

  beforeEach(() => {
    vi.clearAllMocks();
    const mockConfigInstance = {
      getGeminiClient: () => mockGeminiClient,
      getProxy: () => undefined,
      getTavilyApiKey: () => 'test-api-key', // Add the missing method
    } as unknown as Config;
    mockGeminiClient = new GeminiClient(mockConfigInstance);
    tool = new WebSearchTool(mockConfigInstance);
  });

  afterEach(() => {
    vi.restoreAllMocks();
  });

  describe('build', () => {
    it('should return an invocation for a valid query', () => {
      const params: WebSearchToolParams = { query: 'test query' };
      const invocation = tool.build(params);
      expect(invocation).toBeDefined();
      expect(invocation.params).toEqual(params);
    });

    it('should throw an error for an empty query', () => {
      const params: WebSearchToolParams = { query: '' };
      expect(() => tool.build(params)).toThrow(
        "The 'query' parameter cannot be empty.",
      );
    });

    it('should throw an error for a query with only whitespace', () => {
      const params: WebSearchToolParams = { query: ' ' };
      expect(() => tool.build(params)).toThrow(
        "The 'query' parameter cannot be empty.",
      );
    });
  });

  describe('getDescription', () => {
    it('should return a description of the search', () => {
      const params: WebSearchToolParams = { query: 'test query' };
      const invocation = tool.build(params);
      expect(invocation.getDescription()).toBe(
        'Searching the web for: "test query"',
      );
    });
  });

  describe('execute', () => {
    it('should return search results for a successful query', async () => {
      const params: WebSearchToolParams = { query: 'successful query' };

      // Mock the fetch response
      mockFetch.mockResolvedValueOnce({
        ok: true,
        json: async () => ({
          answer: 'Here are your results.',
          results: [],
        }),
      } as Response);

      const invocation = tool.build(params);
      const result = await invocation.execute(abortSignal);

      expect(result.llmContent).toBe(
        'Web search results for "successful query":\n\nHere are your results.',
      );
      expect(result.returnDisplay).toBe(
        'Search results for "successful query" returned.',
      );
      expect(result.sources).toEqual([]);
    });

    it('should handle no search results found', async () => {
      const params: WebSearchToolParams = { query: 'no results query' };

      // Mock the fetch response
      mockFetch.mockResolvedValueOnce({
        ok: true,
        json: async () => ({
          answer: '',
          results: [],
        }),
      } as Response);

      const invocation = tool.build(params);
      const result = await invocation.execute(abortSignal);

      expect(result.llmContent).toBe(
        'No search results or information found for query: "no results query"',
      );
      expect(result.returnDisplay).toBe('No information found.');
    });

    it('should handle API errors gracefully', async () => {
      const params: WebSearchToolParams = { query: 'error query' };

      // Mock the fetch to reject
      mockFetch.mockRejectedValueOnce(new Error('API Failure'));

      const invocation = tool.build(params);
      const result = await invocation.execute(abortSignal);

      expect(result.llmContent).toContain('Error:');
      expect(result.llmContent).toContain('API Failure');
      expect(result.returnDisplay).toBe('Error performing web search.');
    });

    it('should correctly format results with sources', async () => {
      const params: WebSearchToolParams = { query: 'grounding query' };

      // Mock the fetch response
      mockFetch.mockResolvedValueOnce({
        ok: true,
        json: async () => ({
          answer: 'This is a test response.',
          results: [
            { title: 'Example Site', url: 'https://example.com' },
            { title: 'Google', url: 'https://google.com' },
          ],
        }),
      } as Response);

      const invocation = tool.build(params);
      const result = await invocation.execute(abortSignal);

      const expectedLlmContent = `Web search results for "grounding query":

This is a test response.

Sources:
[1] Example Site (https://example.com)
[2] Google (https://google.com)`;

      expect(result.llmContent).toBe(expectedLlmContent);
      expect(result.returnDisplay).toBe(
        'Search results for "grounding query" returned.',
      );
      expect(result.sources).toHaveLength(2);
    });
  });
});
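Stripped of the tool plumbing exercised above, the search boils down to a single POST against Tavily's search endpoint. A self-contained sketch (the response type mirrors the `TavilySearchResponse` interface declared in web-search.ts below; the API key comes from the environment):

```ts
type TavilySearchResponse = {
  query: string;
  answer?: string;
  results: Array<{ title: string; url: string; content?: string; score?: number }>;
};

async function tavilySearch(query: string): Promise<TavilySearchResponse> {
  const response = await fetch('https://api.tavily.com/search', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({
      api_key: process.env['TAVILY_API_KEY'], // same payload fields the tool sends
      query,
      search_depth: 'advanced',
      max_results: 5,
      include_answer: true,
    }),
  });
  if (!response.ok) {
    throw new Error(`Tavily API error: ${response.status} ${response.statusText}`);
  }
  return (await response.json()) as TavilySearchResponse;
}
```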
packages/core/src/tools/web-search.ts (new file, 218 lines)
@@ -0,0 +1,218 @@
/**
 * @license
 * Copyright 2025 Google LLC
 * SPDX-License-Identifier: Apache-2.0
 */

import {
  BaseDeclarativeTool,
  BaseToolInvocation,
  Kind,
  type ToolInvocation,
  type ToolResult,
  type ToolCallConfirmationDetails,
  type ToolInfoConfirmationDetails,
  ToolConfirmationOutcome,
} from './tools.js';

import type { Config } from '../config/config.js';
import { ApprovalMode } from '../config/config.js';
import { getErrorMessage } from '../utils/errors.js';

interface TavilyResultItem {
  title: string;
  url: string;
  content?: string;
  score?: number;
  published_date?: string;
}

interface TavilySearchResponse {
  query: string;
  answer?: string;
  results: TavilyResultItem[];
}

/**
 * Parameters for the WebSearchTool.
 */
export interface WebSearchToolParams {
  /**
   * The search query.
   */
  query: string;
}

/**
 * Extends ToolResult to include sources for web search.
 */
export interface WebSearchToolResult extends ToolResult {
  sources?: Array<{ title: string; url: string }>;
}

class WebSearchToolInvocation extends BaseToolInvocation<
  WebSearchToolParams,
  WebSearchToolResult
> {
  constructor(
    private readonly config: Config,
    params: WebSearchToolParams,
  ) {
    super(params);
  }

  override getDescription(): string {
    return `Searching the web for: "${this.params.query}"`;
  }

  override async shouldConfirmExecute(
    _abortSignal: AbortSignal,
  ): Promise<ToolCallConfirmationDetails | false> {
    if (this.config.getApprovalMode() === ApprovalMode.AUTO_EDIT) {
      return false;
    }

    const confirmationDetails: ToolInfoConfirmationDetails = {
      type: 'info',
      title: 'Confirm Web Search',
      prompt: `Search the web for: "${this.params.query}"`,
      onConfirm: async (outcome: ToolConfirmationOutcome) => {
        if (outcome === ToolConfirmationOutcome.ProceedAlways) {
          this.config.setApprovalMode(ApprovalMode.AUTO_EDIT);
        }
      },
    };
    return confirmationDetails;
  }

  async execute(signal: AbortSignal): Promise<WebSearchToolResult> {
    const apiKey = this.config.getTavilyApiKey();
    if (!apiKey) {
      return {
        llmContent:
          'Web search is disabled because TAVILY_API_KEY is not configured. Please set it in your settings.json, .env file, or via --tavily-api-key command line argument to enable web search.',
        returnDisplay:
          'Web search disabled. Configure TAVILY_API_KEY to enable Tavily search.',
      };
    }

    try {
      const response = await fetch('https://api.tavily.com/search', {
        method: 'POST',
        headers: {
          'Content-Type': 'application/json',
        },
        body: JSON.stringify({
          api_key: apiKey,
          query: this.params.query,
          search_depth: 'advanced',
          max_results: 5,
          include_answer: true,
        }),
        signal,
      });

      if (!response.ok) {
        const text = await response.text().catch(() => '');
        throw new Error(
          `Tavily API error: ${response.status} ${response.statusText}${text ? ` - ${text}` : ''}`,
        );
      }

      const data = (await response.json()) as TavilySearchResponse;

      const sources = (data.results || []).map((r) => ({
        title: r.title,
        url: r.url,
      }));

      const sourceListFormatted = sources.map(
        (s, i) => `[${i + 1}] ${s.title || 'Untitled'} (${s.url})`,
      );

      let content = data.answer?.trim() || '';
      if (!content) {
        // Fallback: build a concise summary from top results
        content = sources
          .slice(0, 3)
          .map((s, i) => `${i + 1}. ${s.title} - ${s.url}`)
          .join('\n');
      }

      if (sourceListFormatted.length > 0) {
        content += `\n\nSources:\n${sourceListFormatted.join('\n')}`;
      }

      if (!content.trim()) {
        return {
          llmContent: `No search results or information found for query: "${this.params.query}"`,
          returnDisplay: 'No information found.',
        };
      }

      return {
        llmContent: `Web search results for "${this.params.query}":\n\n${content}`,
        returnDisplay: `Search results for "${this.params.query}" returned.`,
        sources,
      };
    } catch (error: unknown) {
      const errorMessage = `Error during web search for query "${this.params.query}": ${getErrorMessage(
        error,
      )}`;
      console.error(errorMessage, error);
      return {
        llmContent: `Error: ${errorMessage}`,
        returnDisplay: `Error performing web search.`,
      };
    }
  }
}

/**
 * A tool to perform web searches using Tavily Search via the Tavily API.
 */
export class WebSearchTool extends BaseDeclarativeTool<
  WebSearchToolParams,
  WebSearchToolResult
> {
  static readonly Name: string = 'web_search';

  constructor(private readonly config: Config) {
    super(
      WebSearchTool.Name,
      'WebSearch',
      'Performs a web search using the Tavily API and returns a concise answer with sources. Requires the TAVILY_API_KEY environment variable.',
      Kind.Search,
      {
        type: 'object',
        properties: {
          query: {
            type: 'string',
            description: 'The search query to find information on the web.',
          },
        },
        required: ['query'],
      },
    );
  }

  /**
   * Validates the parameters for the WebSearchTool.
   * @param params The parameters to validate
   * @returns An error message string if validation fails, null if valid
   */
  protected override validateToolParamValues(
    params: WebSearchToolParams,
  ): string | null {
    if (!params.query || params.query.trim() === '') {
      return "The 'query' parameter cannot be empty.";
    }
    return null;
  }

  protected createInvocation(
    params: WebSearchToolParams,
  ): ToolInvocation<WebSearchToolParams, WebSearchToolResult> {
    return new WebSearchToolInvocation(this.config, params);
  }
}
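For orientation, here is a minimal sketch (not part of the diff) of how the new single-file tool is driven; it mirrors the flow in the tests above, and the demo wrapper and query string are illustrative assumptions:

import { WebSearchTool } from './web-search.js';
import type { Config } from '../config/config.js';

// Assumes `config` is a real Config whose getTavilyApiKey() returns a key.
async function demo(config: Config): Promise<void> {
  const tool = new WebSearchTool(config);
  const invocation = tool.build({ query: 'qwen code releases' });
  const result = await invocation.execute(new AbortController().signal);
  // llmContent carries the answer text followed by a numbered "Sources:" list.
  console.log(result.llmContent);
}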
@@ -1,58 +0,0 @@
/**
 * @license
 * Copyright 2025 Qwen
 * SPDX-License-Identifier: Apache-2.0
 */

import type { WebSearchProvider, WebSearchResult } from './types.js';

/**
 * Base implementation for web search providers.
 * Provides common functionality for error handling.
 */
export abstract class BaseWebSearchProvider implements WebSearchProvider {
  abstract readonly name: string;

  /**
   * Check if the provider is available (has required configuration).
   */
  abstract isAvailable(): boolean;

  /**
   * Perform the actual search implementation.
   * @param query The search query
   * @param signal Abort signal for cancellation
   * @returns Promise resolving to search results
   */
  protected abstract performSearch(
    query: string,
    signal: AbortSignal,
  ): Promise<WebSearchResult>;

  /**
   * Execute a web search with error handling.
   * @param query The search query
   * @param signal Abort signal for cancellation
   * @returns Promise resolving to search results
   */
  async search(query: string, signal: AbortSignal): Promise<WebSearchResult> {
    if (!this.isAvailable()) {
      throw new Error(
        `[${this.name}] Provider is not available. Please check your configuration.`,
      );
    }

    try {
      return await this.performSearch(query, signal);
    } catch (error: unknown) {
      if (
        error instanceof Error &&
        error.message.startsWith(`[${this.name}]`)
      ) {
        throw error;
      }
      const message = error instanceof Error ? error.message : String(error);
      throw new Error(`[${this.name}] Search failed: ${message}`);
    }
  }
}
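As an illustration of this contract (hypothetical, not in the repo): a concrete provider only supplies name, isAvailable(), and performSearch(); the search() wrapper above contributes the shared availability check and error tagging.

import { BaseWebSearchProvider } from './base-provider.js';
import type { WebSearchResult } from './types.js';

class EchoProvider extends BaseWebSearchProvider {
  readonly name = 'Echo';

  isAvailable(): boolean {
    return true; // no configuration required
  }

  protected async performSearch(
    query: string,
    _signal: AbortSignal,
  ): Promise<WebSearchResult> {
    // Echoes the query back as a single placeholder result.
    return {
      query,
      results: [{ title: query, url: 'https://example.com' }],
    };
  }
}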
@@ -1,312 +0,0 @@
/**
 * @license
 * Copyright 2025 Qwen
 * SPDX-License-Identifier: Apache-2.0
 */

import { describe, it, expect, vi, beforeEach } from 'vitest';
import { WebSearchTool } from './index.js';
import type { Config } from '../../config/config.js';
import type { WebSearchConfig } from './types.js';
import { ApprovalMode } from '../../config/config.js';

describe('WebSearchTool', () => {
  let mockConfig: Config;

  beforeEach(() => {
    vi.resetAllMocks();
    mockConfig = {
      getApprovalMode: vi.fn(() => ApprovalMode.AUTO_EDIT),
      setApprovalMode: vi.fn(),
      getWebSearchConfig: vi.fn(),
    } as unknown as Config;
  });

  describe('formatSearchResults', () => {
    it('should use answer when available and append sources', async () => {
      const webSearchConfig: WebSearchConfig = {
        provider: [
          {
            type: 'tavily',
            apiKey: 'test-key',
          },
        ],
        default: 'tavily',
      };

      (
        mockConfig.getWebSearchConfig as ReturnType<typeof vi.fn>
      ).mockReturnValue(webSearchConfig);

      // Mock fetch to return search results with answer
      global.fetch = vi.fn().mockResolvedValue({
        ok: true,
        json: async () => ({
          query: 'test query',
          answer: 'This is a concise answer from the search provider.',
          results: [
            {
              title: 'Result 1',
              url: 'https://example.com/1',
              content: 'Content 1',
            },
            {
              title: 'Result 2',
              url: 'https://example.com/2',
              content: 'Content 2',
            },
          ],
        }),
      });

      const tool = new WebSearchTool(mockConfig);
      const invocation = tool.build({ query: 'test query' });
      const result = await invocation.execute(new AbortController().signal);

      expect(result.llmContent).toContain(
        'This is a concise answer from the search provider.',
      );
      expect(result.llmContent).toContain('Sources:');
      expect(result.llmContent).toContain(
        '[1] Result 1 (https://example.com/1)',
      );
      expect(result.llmContent).toContain(
        '[2] Result 2 (https://example.com/2)',
      );
    });

    it('should build informative summary when answer is not available', async () => {
      const webSearchConfig: WebSearchConfig = {
        provider: [
          {
            type: 'google',
            apiKey: 'test-key',
            searchEngineId: 'test-engine',
          },
        ],
        default: 'google',
      };

      (
        mockConfig.getWebSearchConfig as ReturnType<typeof vi.fn>
      ).mockReturnValue(webSearchConfig);

      // Mock fetch to return search results without answer
      global.fetch = vi.fn().mockResolvedValue({
        ok: true,
        json: async () => ({
          items: [
            {
              title: 'Google Result 1',
              link: 'https://example.com/1',
              snippet: 'This is a helpful snippet from the first result.',
            },
            {
              title: 'Google Result 2',
              link: 'https://example.com/2',
              snippet: 'This is a helpful snippet from the second result.',
            },
          ],
        }),
      });

      const tool = new WebSearchTool(mockConfig);
      const invocation = tool.build({ query: 'test query' });
      const result = await invocation.execute(new AbortController().signal);

      // Should contain formatted results with title, snippet, and source
      expect(result.llmContent).toContain('1. **Google Result 1**');
      expect(result.llmContent).toContain(
        'This is a helpful snippet from the first result.',
      );
      expect(result.llmContent).toContain('Source: https://example.com/1');
      expect(result.llmContent).toContain('2. **Google Result 2**');
      expect(result.llmContent).toContain(
        'This is a helpful snippet from the second result.',
      );
      expect(result.llmContent).toContain('Source: https://example.com/2');

      // Should include web_fetch hint
      expect(result.llmContent).toContain('web_fetch tool');
    });

    it('should include optional fields when available', async () => {
      const webSearchConfig: WebSearchConfig = {
        provider: [
          {
            type: 'tavily',
            apiKey: 'test-key',
          },
        ],
        default: 'tavily',
      };

      (
        mockConfig.getWebSearchConfig as ReturnType<typeof vi.fn>
      ).mockReturnValue(webSearchConfig);

      // Mock fetch to return results with score and publishedDate
      global.fetch = vi.fn().mockResolvedValue({
        ok: true,
        json: async () => ({
          query: 'test query',
          results: [
            {
              title: 'Result with metadata',
              url: 'https://example.com',
              content: 'Content with metadata',
              score: 0.95,
              published_date: '2024-01-15',
            },
          ],
        }),
      });

      const tool = new WebSearchTool(mockConfig);
      const invocation = tool.build({ query: 'test query' });
      const result = await invocation.execute(new AbortController().signal);

      // Should include relevance score
      expect(result.llmContent).toContain('Relevance: 95%');
      // Should include published date
      expect(result.llmContent).toContain('Published: 2024-01-15');
    });

    it('should handle empty results gracefully', async () => {
      const webSearchConfig: WebSearchConfig = {
        provider: [
          {
            type: 'google',
            apiKey: 'test-key',
            searchEngineId: 'test-engine',
          },
        ],
        default: 'google',
      };

      (
        mockConfig.getWebSearchConfig as ReturnType<typeof vi.fn>
      ).mockReturnValue(webSearchConfig);

      // Mock fetch to return empty results
      global.fetch = vi.fn().mockResolvedValue({
        ok: true,
        json: async () => ({
          items: [],
        }),
      });

      const tool = new WebSearchTool(mockConfig);
      const invocation = tool.build({ query: 'test query' });
      const result = await invocation.execute(new AbortController().signal);

      expect(result.llmContent).toContain('No search results found');
    });

    it('should limit to top 5 results in fallback mode', async () => {
      const webSearchConfig: WebSearchConfig = {
        provider: [
          {
            type: 'google',
            apiKey: 'test-key',
            searchEngineId: 'test-engine',
          },
        ],
        default: 'google',
      };

      (
        mockConfig.getWebSearchConfig as ReturnType<typeof vi.fn>
      ).mockReturnValue(webSearchConfig);

      // Mock fetch to return 10 results
      const items = Array.from({ length: 10 }, (_, i) => ({
        title: `Result ${i + 1}`,
        link: `https://example.com/${i + 1}`,
        snippet: `Snippet ${i + 1}`,
      }));

      global.fetch = vi.fn().mockResolvedValue({
        ok: true,
        json: async () => ({ items }),
      });

      const tool = new WebSearchTool(mockConfig);
      const invocation = tool.build({ query: 'test query' });
      const result = await invocation.execute(new AbortController().signal);

      // Should only contain first 5 results
      expect(result.llmContent).toContain('1. **Result 1**');
      expect(result.llmContent).toContain('5. **Result 5**');
      expect(result.llmContent).not.toContain('6. **Result 6**');
      expect(result.llmContent).not.toContain('10. **Result 10**');
    });
  });

  describe('validation', () => {
    it('should throw validation error when query is empty', () => {
      const tool = new WebSearchTool(mockConfig);
      expect(() => tool.build({ query: '' })).toThrow(
        "The 'query' parameter cannot be empty",
      );
    });

    it('should throw validation error when provider is empty string', () => {
      const tool = new WebSearchTool(mockConfig);
      expect(() => tool.build({ query: 'test', provider: '' })).toThrow(
        "The 'provider' parameter cannot be empty",
      );
    });
  });

  describe('configuration', () => {
    it('should return error when web search is not configured', async () => {
      (
        mockConfig.getWebSearchConfig as ReturnType<typeof vi.fn>
      ).mockReturnValue(null);

      const tool = new WebSearchTool(mockConfig);
      const invocation = tool.build({ query: 'test query' });
      const result = await invocation.execute(new AbortController().signal);

      expect(result.error?.message).toContain('Web search is disabled');
      expect(result.llmContent).toContain('Web search is disabled');
    });

    it('should return descriptive message in getDescription when web search is not configured', () => {
      (
        mockConfig.getWebSearchConfig as ReturnType<typeof vi.fn>
      ).mockReturnValue(null);

      const tool = new WebSearchTool(mockConfig);
      const invocation = tool.build({ query: 'test query' });
      const description = invocation.getDescription();

      expect(description).toBe(
        ' (Web search is disabled - configure a provider in settings.json)',
      );
    });

    it('should return provider name in getDescription when web search is configured', () => {
      const webSearchConfig: WebSearchConfig = {
        provider: [
          {
            type: 'tavily',
            apiKey: 'test-key',
          },
        ],
        default: 'tavily',
      };

      (
        mockConfig.getWebSearchConfig as ReturnType<typeof vi.fn>
      ).mockReturnValue(webSearchConfig);

      const tool = new WebSearchTool(mockConfig);
      const invocation = tool.build({ query: 'test query' });
      const description = invocation.getDescription();

      expect(description).toBe(' (Searching the web via tavily)');
    });
  });
});
@@ -1,336 +0,0 @@
/**
 * @license
 * Copyright 2025 Qwen
 * SPDX-License-Identifier: Apache-2.0
 */

import {
  BaseDeclarativeTool,
  BaseToolInvocation,
  Kind,
  type ToolInvocation,
  type ToolCallConfirmationDetails,
  type ToolInfoConfirmationDetails,
  ToolConfirmationOutcome,
} from '../tools.js';
import { ToolErrorType } from '../tool-error.js';

import type { Config } from '../../config/config.js';
import { ApprovalMode } from '../../config/config.js';
import { getErrorMessage } from '../../utils/errors.js';
import { buildContentWithSources } from './utils.js';
import { TavilyProvider } from './providers/tavily-provider.js';
import { GoogleProvider } from './providers/google-provider.js';
import { DashScopeProvider } from './providers/dashscope-provider.js';
import type {
  WebSearchToolParams,
  WebSearchToolResult,
  WebSearchProvider,
  WebSearchResultItem,
  WebSearchProviderConfig,
  DashScopeProviderConfig,
} from './types.js';

class WebSearchToolInvocation extends BaseToolInvocation<
  WebSearchToolParams,
  WebSearchToolResult
> {
  constructor(
    private readonly config: Config,
    params: WebSearchToolParams,
  ) {
    super(params);
  }

  override getDescription(): string {
    const webSearchConfig = this.config.getWebSearchConfig();
    if (!webSearchConfig) {
      return ' (Web search is disabled - configure a provider in settings.json)';
    }
    const provider = this.params.provider || webSearchConfig.default;
    return ` (Searching the web via ${provider})`;
  }

  override async shouldConfirmExecute(
    _abortSignal: AbortSignal,
  ): Promise<ToolCallConfirmationDetails | false> {
    if (this.config.getApprovalMode() === ApprovalMode.AUTO_EDIT) {
      return false;
    }

    const confirmationDetails: ToolInfoConfirmationDetails = {
      type: 'info',
      title: 'Confirm Web Search',
      prompt: `Search the web for: "${this.params.query}"`,
      onConfirm: async (outcome: ToolConfirmationOutcome) => {
        if (outcome === ToolConfirmationOutcome.ProceedAlways) {
          this.config.setApprovalMode(ApprovalMode.AUTO_EDIT);
        }
      },
    };
    return confirmationDetails;
  }

  /**
   * Create a provider instance from configuration.
   */
  private createProvider(config: WebSearchProviderConfig): WebSearchProvider {
    switch (config.type) {
      case 'tavily':
        return new TavilyProvider(config);
      case 'google':
        return new GoogleProvider(config);
      case 'dashscope': {
        // Pass auth type to DashScope provider for availability check
        const authType = this.config.getAuthType();
        const dashscopeConfig: DashScopeProviderConfig = {
          ...config,
          authType: authType as string | undefined,
        };
        return new DashScopeProvider(dashscopeConfig);
      }
      default:
        throw new Error('Unknown provider type');
    }
  }

  /**
   * Create all configured providers.
   */
  private createProviders(
    configs: WebSearchProviderConfig[],
  ): Map<string, WebSearchProvider> {
    const providers = new Map<string, WebSearchProvider>();

    for (const config of configs) {
      try {
        const provider = this.createProvider(config);
        if (provider.isAvailable()) {
          providers.set(config.type, provider);
        }
      } catch (error) {
        console.warn(`Failed to create ${config.type} provider:`, error);
      }
    }

    return providers;
  }

  /**
   * Select the appropriate provider based on configuration and parameters.
   * Throws error if provider not found.
   */
  private selectProvider(
    providers: Map<string, WebSearchProvider>,
    requestedProvider?: string,
    defaultProvider?: string,
  ): WebSearchProvider {
    // Use requested provider if specified
    if (requestedProvider) {
      const provider = providers.get(requestedProvider);
      if (!provider) {
        const available = Array.from(providers.keys()).join(', ');
        throw new Error(
          `The specified provider "${requestedProvider}" is not available. Available: ${available}`,
        );
      }
      return provider;
    }

    // Use default provider if specified and available
    if (defaultProvider && providers.has(defaultProvider)) {
      return providers.get(defaultProvider)!;
    }

    // Fallback to first available provider
    const firstProvider = providers.values().next().value;
    if (!firstProvider) {
      throw new Error('No web search providers are available.');
    }
    return firstProvider;
  }

  /**
   * Format search results into a content string.
   */
  private formatSearchResults(searchResult: {
    answer?: string;
    results: WebSearchResultItem[];
  }): {
    content: string;
    sources: Array<{ title: string; url: string }>;
  } {
    const sources = searchResult.results.map((r) => ({
      title: r.title,
      url: r.url,
    }));

    let content = searchResult.answer?.trim() || '';

    if (!content) {
      // Fallback: Build an informative summary with title + snippet + source link
      // This provides enough context for the LLM while keeping token usage efficient
      content = searchResult.results
        .slice(0, 5) // Top 5 results
        .map((r, i) => {
          const parts = [`${i + 1}. **${r.title}**`];

          // Include snippet/content if available
          if (r.content?.trim()) {
            parts.push(`   ${r.content.trim()}`);
          }

          // Always include the source URL
          parts.push(`   Source: ${r.url}`);

          // Optionally include relevance score if available
          if (r.score !== undefined) {
            parts.push(`   Relevance: ${(r.score * 100).toFixed(0)}%`);
          }

          // Optionally include publish date if available
          if (r.publishedDate) {
            parts.push(`   Published: ${r.publishedDate}`);
          }

          return parts.join('\n');
        })
        .join('\n\n');

      // Add a note about using web_fetch for detailed content
      if (content) {
        content +=
          '\n\n*Note: For detailed content from any source above, use the web_fetch tool with the URL.*';
      }
    } else {
      // When answer is available, append sources section
      content = buildContentWithSources(content, sources);
    }

    return { content, sources };
  }

  async execute(signal: AbortSignal): Promise<WebSearchToolResult> {
    // Check if web search is configured
    const webSearchConfig = this.config.getWebSearchConfig();
    if (!webSearchConfig) {
      return {
        llmContent:
          'Web search is disabled. Please configure a web search provider in your settings.',
        returnDisplay: 'Web search is disabled.',
        error: {
          message: 'Web search is disabled',
          type: ToolErrorType.EXECUTION_FAILED,
        },
      };
    }

    try {
      // Create and select provider
      const providers = this.createProviders(webSearchConfig.provider);
      const provider = this.selectProvider(
        providers,
        this.params.provider,
        webSearchConfig.default,
      );

      // Perform search
      const searchResult = await provider.search(this.params.query, signal);
      const { content, sources } = this.formatSearchResults(searchResult);

      // Guard: Check if we got results
      if (!content.trim()) {
        return {
          llmContent: `No search results found for query: "${this.params.query}" (via ${provider.name})`,
          returnDisplay: `No information found for "${this.params.query}".`,
        };
      }

      // Success result
      return {
        llmContent: `Web search results for "${this.params.query}" (via ${provider.name}):\n\n${content}`,
        returnDisplay: `Search results for "${this.params.query}".`,
        sources,
      };
    } catch (error: unknown) {
      const errorMessage = `Error during web search: ${getErrorMessage(error)}`;
      console.error(errorMessage, error);
      return {
        llmContent: errorMessage,
        returnDisplay: 'Error performing web search.',
        error: {
          message: errorMessage,
          type: ToolErrorType.EXECUTION_FAILED,
        },
      };
    }
  }
}

/**
 * A tool to perform web searches using configurable providers.
 */
export class WebSearchTool extends BaseDeclarativeTool<
  WebSearchToolParams,
  WebSearchToolResult
> {
  static readonly Name: string = 'web_search';

  constructor(private readonly config: Config) {
    super(
      WebSearchTool.Name,
      'WebSearch',
      'Allows searching the web and using results to inform responses. Provides up-to-date information for current events and recent data beyond the training data cutoff. Returns search results formatted with concise answers and source links. Use this tool when accessing information that may be outdated or beyond the knowledge cutoff.',
      Kind.Search,
      {
        type: 'object',
        properties: {
          query: {
            type: 'string',
            description: 'The search query to find information on the web.',
          },
          provider: {
            type: 'string',
            description:
              'Optional provider to use for the search (e.g., "tavily", "google", "dashscope"). IMPORTANT: Only specify this parameter if you explicitly know which provider to use. Otherwise, omit this parameter entirely and let the system automatically select the appropriate provider based on availability and configuration. The system will choose the best available provider automatically.',
          },
        },
        required: ['query'],
      },
    );
  }

  /**
   * Validates the parameters for the WebSearchTool.
   * @param params The parameters to validate
   * @returns An error message string if validation fails, null if valid
   */
  protected override validateToolParamValues(
    params: WebSearchToolParams,
  ): string | null {
    if (!params.query || params.query.trim() === '') {
      return "The 'query' parameter cannot be empty.";
    }

    // Validate provider parameter if provided
    if (params.provider !== undefined && params.provider.trim() === '') {
      return "The 'provider' parameter cannot be empty if specified.";
    }

    return null;
  }

  protected createInvocation(
    params: WebSearchToolParams,
  ): ToolInvocation<WebSearchToolParams, WebSearchToolResult> {
    return new WebSearchToolInvocation(this.config, params);
  }
}

// Re-export types for external use
export type {
  WebSearchToolParams,
  WebSearchToolResult,
  WebSearchConfig,
  WebSearchProviderConfig,
} from './types.js';
@@ -1,199 +0,0 @@
/**
 * @license
 * Copyright 2025 Qwen
 * SPDX-License-Identifier: Apache-2.0
 */

import { promises as fs } from 'node:fs';
import * as os from 'os';
import * as path from 'path';
import { BaseWebSearchProvider } from '../base-provider.js';
import type {
  WebSearchResult,
  WebSearchResultItem,
  DashScopeProviderConfig,
} from '../types.js';
import type { QwenCredentials } from '../../../qwen/qwenOAuth2.js';

interface DashScopeSearchItem {
  _id: string;
  snippet: string;
  title: string;
  url: string;
  timestamp: number;
  timestamp_format: string;
  hostname: string;
  hostlogo?: string;
  web_main_body?: string;
  _score?: number;
}

interface DashScopeSearchResponse {
  headers: Record<string, unknown>;
  rid: string;
  status: number;
  message: string | null;
  data: {
    total: number;
    totalDistinct: number;
    docs: DashScopeSearchItem[];
    keywords?: string[];
    qpInfos?: Array<{
      query: string;
      cleanQuery: string;
      sensitive: boolean;
      spellchecked: string;
      spellcheck: boolean;
      tokenized: string[];
      stopWords: string[];
      synonymWords: string[];
      recognitions: unknown[];
      rewrite: string;
      operator: string;
    }>;
    aggs?: unknown;
    extras?: Record<string, unknown>;
  };
  debug?: unknown;
  success: boolean;
}

// File System Configuration
const QWEN_DIR = '.qwen';
const QWEN_CREDENTIAL_FILENAME = 'oauth_creds.json';

/**
 * Get the path to the cached OAuth credentials file.
 */
function getQwenCachedCredentialPath(): string {
  return path.join(os.homedir(), QWEN_DIR, QWEN_CREDENTIAL_FILENAME);
}

/**
 * Load cached Qwen OAuth credentials from disk.
 */
async function loadQwenCredentials(): Promise<QwenCredentials | null> {
  try {
    const keyFile = getQwenCachedCredentialPath();
    const creds = await fs.readFile(keyFile, 'utf-8');
    return JSON.parse(creds) as QwenCredentials;
  } catch {
    return null;
  }
}

/**
 * Web search provider using Alibaba Cloud DashScope API.
 */
export class DashScopeProvider extends BaseWebSearchProvider {
  readonly name = 'DashScope';

  constructor(private readonly config: DashScopeProviderConfig) {
    super();
  }

  isAvailable(): boolean {
    // DashScope provider is only available when auth type is QWEN_OAUTH
    // This ensures it's only used when OAuth credentials are available
    return this.config.authType === 'qwen-oauth';
  }

  /**
   * Get the access token and API endpoint for authentication and web search.
   * Tries OAuth credentials first, falls back to apiKey if OAuth is not available.
   * Returns both token and endpoint to avoid loading credentials multiple times.
   */
  private async getAuthConfig(): Promise<{
    accessToken: string | null;
    apiEndpoint: string;
  }> {
    // Load credentials once
    const credentials = await loadQwenCredentials();

    // Get access token: try OAuth credentials first, fallback to apiKey
    let accessToken: string | null = null;
    if (credentials?.access_token) {
      // Check if token is not expired
      if (credentials.expiry_date && credentials.expiry_date > Date.now()) {
        accessToken = credentials.access_token;
      }
    }
    if (!accessToken) {
      accessToken = this.config.apiKey || null;
    }

    // Get API endpoint: use resource_url from credentials
    if (!credentials?.resource_url) {
      throw new Error(
        'No resource_url found in credentials. Please authenticate using OAuth',
      );
    }

    // Normalize the URL: add protocol if missing
    const baseUrl = credentials.resource_url.startsWith('http')
      ? credentials.resource_url
      : `https://${credentials.resource_url}`;
    // Remove trailing slash if present
    const normalizedBaseUrl = baseUrl.replace(/\/$/, '');
    const apiEndpoint = `${normalizedBaseUrl}/api/v1/indices/plugin/web_search`;

    return { accessToken, apiEndpoint };
  }

  protected async performSearch(
    query: string,
    signal: AbortSignal,
  ): Promise<WebSearchResult> {
    // Get access token and API endpoint (loads credentials once)
    const { accessToken, apiEndpoint } = await this.getAuthConfig();
    if (!accessToken) {
      throw new Error(
        'No access token available. Please authenticate using OAuth',
      );
    }

    const requestBody = {
      uq: query,
      page: 1,
      rows: this.config.maxResults || 10,
    };

    const response = await fetch(apiEndpoint, {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
        Authorization: `Bearer ${accessToken}`,
      },
      body: JSON.stringify(requestBody),
      signal,
    });

    if (!response.ok) {
      const text = await response.text().catch(() => '');
      throw new Error(
        `API error: ${response.status} ${response.statusText}${text ? ` - ${text}` : ''}`,
      );
    }

    const data = (await response.json()) as DashScopeSearchResponse;

    if (data.status !== 0) {
      throw new Error(`API error: ${data.message || 'Unknown error'}`);
    }

    const results: WebSearchResultItem[] = (data.data?.docs || []).map(
      (item) => ({
        title: item.title,
        url: item.url,
        content: item.snippet,
        score: item._score,
        publishedDate: item.timestamp_format,
      }),
    );

    return {
      query,
      results,
    };
  }
}
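A worked example of the endpoint normalization above; the resource_url value is a placeholder, not taken from the diff:

const resourceUrl = 'dashscope.example.com/'; // placeholder resource_url
const base = resourceUrl.startsWith('http') ? resourceUrl : `https://${resourceUrl}`;
const endpoint = `${base.replace(/\/$/, '')}/api/v1/indices/plugin/web_search`;
// endpoint === 'https://dashscope.example.com/api/v1/indices/plugin/web_search'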
@@ -1,91 +0,0 @@
/**
 * @license
 * Copyright 2025 Qwen
 * SPDX-License-Identifier: Apache-2.0
 */

import { BaseWebSearchProvider } from '../base-provider.js';
import type {
  WebSearchResult,
  WebSearchResultItem,
  GoogleProviderConfig,
} from '../types.js';

interface GoogleSearchItem {
  title: string;
  link: string;
  snippet?: string;
  displayLink?: string;
  formattedUrl?: string;
}

interface GoogleSearchResponse {
  items?: GoogleSearchItem[];
  searchInformation?: {
    totalResults?: string;
    searchTime?: number;
  };
}

/**
 * Web search provider using Google Custom Search API.
 */
export class GoogleProvider extends BaseWebSearchProvider {
  readonly name = 'Google';

  constructor(private readonly config: GoogleProviderConfig) {
    super();
  }

  isAvailable(): boolean {
    return !!(this.config.apiKey && this.config.searchEngineId);
  }

  protected async performSearch(
    query: string,
    signal: AbortSignal,
  ): Promise<WebSearchResult> {
    const params = new URLSearchParams({
      key: this.config.apiKey!,
      cx: this.config.searchEngineId!,
      q: query,
      num: String(this.config.maxResults || 10),
      safe: this.config.safeSearch || 'medium',
    });

    if (this.config.language) {
      params.append('lr', `lang_${this.config.language}`);
    }

    if (this.config.country) {
      params.append('cr', `country${this.config.country}`);
    }

    const url = `https://www.googleapis.com/customsearch/v1?${params.toString()}`;

    const response = await fetch(url, {
      method: 'GET',
      signal,
    });

    if (!response.ok) {
      const text = await response.text().catch(() => '');
      throw new Error(
        `API error: ${response.status} ${response.statusText}${text ? ` - ${text}` : ''}`,
      );
    }

    const data = (await response.json()) as GoogleSearchResponse;

    const results: WebSearchResultItem[] = (data.items || []).map((item) => ({
      title: item.title,
      url: item.link,
      content: item.snippet,
    }));

    return {
      query,
      results,
    };
  }
}
@@ -1,84 +0,0 @@
/**
 * @license
 * Copyright 2025 Qwen
 * SPDX-License-Identifier: Apache-2.0
 */

import { BaseWebSearchProvider } from '../base-provider.js';
import type {
  WebSearchResult,
  WebSearchResultItem,
  TavilyProviderConfig,
} from '../types.js';

interface TavilyResultItem {
  title: string;
  url: string;
  content?: string;
  score?: number;
  published_date?: string;
}

interface TavilySearchResponse {
  query: string;
  answer?: string;
  results: TavilyResultItem[];
}

/**
 * Web search provider using Tavily API.
 */
export class TavilyProvider extends BaseWebSearchProvider {
  readonly name = 'Tavily';

  constructor(private readonly config: TavilyProviderConfig) {
    super();
  }

  isAvailable(): boolean {
    return !!this.config.apiKey;
  }

  protected async performSearch(
    query: string,
    signal: AbortSignal,
  ): Promise<WebSearchResult> {
    const response = await fetch('https://api.tavily.com/search', {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
      },
      body: JSON.stringify({
        api_key: this.config.apiKey,
        query,
        search_depth: this.config.searchDepth || 'advanced',
        max_results: this.config.maxResults || 5,
        include_answer: this.config.includeAnswer !== false,
      }),
      signal,
    });

    if (!response.ok) {
      const text = await response.text().catch(() => '');
      throw new Error(
        `API error: ${response.status} ${response.statusText}${text ? ` - ${text}` : ''}`,
      );
    }

    const data = (await response.json()) as TavilySearchResponse;

    const results: WebSearchResultItem[] = (data.results || []).map((r) => ({
      title: r.title,
      url: r.url,
      content: r.content,
      score: r.score,
      publishedDate: r.published_date,
    }));

    return {
      query,
      answer: data.answer?.trim(),
      results,
    };
  }
}
@@ -1,156 +0,0 @@
/**
 * @license
 * Copyright 2025 Qwen
 * SPDX-License-Identifier: Apache-2.0
 */

import type { ToolResult } from '../tools.js';

/**
 * Common interface for all web search providers.
 */
export interface WebSearchProvider {
  /**
   * The name of the provider.
   */
  readonly name: string;

  /**
   * Whether the provider is available (has required configuration).
   */
  isAvailable(): boolean;

  /**
   * Perform a web search with the given query.
   * @param query The search query
   * @param signal Abort signal for cancellation
   * @returns Promise resolving to search results
   */
  search(query: string, signal: AbortSignal): Promise<WebSearchResult>;
}

/**
 * Result item from a web search.
 */
export interface WebSearchResultItem {
  title: string;
  url: string;
  content?: string;
  score?: number;
  publishedDate?: string;
}

/**
 * Result from a web search operation.
 */
export interface WebSearchResult {
  /**
   * The search query that was executed.
   */
  query: string;

  /**
   * A concise answer if available from the provider.
   */
  answer?: string;

  /**
   * List of search result items.
   */
  results: WebSearchResultItem[];

  /**
   * Provider-specific metadata.
   */
  metadata?: Record<string, unknown>;
}

/**
 * Extended tool result that includes sources for web search.
 */
export interface WebSearchToolResult extends ToolResult {
  sources?: Array<{ title: string; url: string }>;
}

/**
 * Parameters for the WebSearchTool.
 */
export interface WebSearchToolParams {
  /**
   * The search query.
   */
  query: string;

  /**
   * Optional provider to use for the search.
   * If not specified, the default provider will be used.
   */
  provider?: string;
}

/**
 * Configuration for web search providers.
 */
export interface WebSearchConfig {
  /**
   * List of available providers with their configurations.
   */
  provider: WebSearchProviderConfig[];

  /**
   * The default provider to use.
   */
  default: string;
}

/**
 * Base configuration for Tavily provider.
 */
export interface TavilyProviderConfig {
  type: 'tavily';
  apiKey?: string;
  searchDepth?: 'basic' | 'advanced';
  maxResults?: number;
  includeAnswer?: boolean;
}

/**
 * Base configuration for Google provider.
 */
export interface GoogleProviderConfig {
  type: 'google';
  apiKey?: string;
  searchEngineId?: string;
  maxResults?: number;
  safeSearch?: 'off' | 'medium' | 'high';
  language?: string;
  country?: string;
}

/**
 * Base configuration for DashScope provider.
 */
export interface DashScopeProviderConfig {
  type: 'dashscope';
  apiKey?: string;
  uid?: string;
  appId?: string;
  maxResults?: number;
  scene?: string;
  timeout?: number;
  /**
   * Optional auth type to determine provider availability.
   * If set to 'qwen-oauth', the provider will be available.
   * If set to other values or undefined, the provider will check auth type dynamically.
   */
  authType?: string;
}

/**
 * Discriminated union type for web search provider configurations.
 * This ensures type safety when working with different provider configs.
 */
export type WebSearchProviderConfig =
  | TavilyProviderConfig
  | GoogleProviderConfig
  | DashScopeProviderConfig;
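For reference, a configuration value matching these removed types might have looked like the following sketch; the keys and engine ID are placeholders:

const exampleConfig: WebSearchConfig = {
  provider: [
    { type: 'tavily', apiKey: 'tvly-your-api-key-here' },
    { type: 'google', apiKey: 'your-google-key', searchEngineId: 'your-engine-id' },
  ],
  default: 'tavily',
};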
@@ -1,42 +0,0 @@
/**
 * @license
 * Copyright 2025 Qwen
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * Utility functions for web search formatting and processing.
 */

/**
 * Build content string with appended sources section.
 * @param content Main content text
 * @param sources Array of source objects
 * @returns Combined content with sources
 */
export function buildContentWithSources(
  content: string,
  sources: Array<{ title: string; url: string }>,
): string {
  if (!sources.length) return content;
  const sourceList = sources
    .map((s, i) => `[${i + 1}] ${s.title || 'Untitled'} (${s.url})`)
    .join('\n');
  return `${content}\n\nSources:\n${sourceList}`;
}

/**
 * Build a concise summary from top search results.
 * @param sources Array of source objects
 * @param maxResults Maximum number of results to include
 * @returns Concise summary string
 */
export function buildSummary(
  sources: Array<{ title: string; url: string }>,
  maxResults: number = 3,
): string {
  return sources
    .slice(0, maxResults)
    .map((s, i) => `${i + 1}. ${s.title} - ${s.url}`)
    .join('\n');
}
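A quick sketch of what buildContentWithSources produces; the input values are examples, not from the diff:

const text = buildContentWithSources('A concise answer.', [
  { title: 'Example Site', url: 'https://example.com' },
]);
// text === 'A concise answer.\n\nSources:\n[1] Example Site (https://example.com)'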
@@ -339,7 +339,6 @@ describe('editor utils', () => {
        diffCommand.args,
        {
          stdio: 'inherit',
          shell: process.platform === 'win32',
        },
      );
      expect(mockSpawnOn).toHaveBeenCalledWith('close', expect.any(Function));

@@ -195,7 +195,6 @@ export async function openDiff(
  return new Promise<void>((resolve, reject) => {
    const childProcess = spawn(diffCommand.command, diffCommand.args, {
      stdio: 'inherit',
      shell: process.platform === 'win32',
    });

    childProcess.on('close', (code) => {

@@ -4,7 +4,7 @@
 * SPDX-License-Identifier: Apache-2.0
 */

-import type { Content, Part } from '@google/genai';
+import type { Part } from '@google/genai';
import type { Config } from '../config/config.js';
import { getFolderStructure } from './getFolderStructure.js';

@@ -107,23 +107,3 @@ ${directoryContext}

  return initialParts;
}

-export async function getInitialChatHistory(
-  config: Config,
-  extraHistory?: Content[],
-): Promise<Content[]> {
-  const envParts = await getEnvironmentContext(config);
-  const envContextString = envParts.map((part) => part.text || '').join('\n\n');
-
-  return [
-    {
-      role: 'user',
-      parts: [{ text: envContextString }],
-    },
-    {
-      role: 'model',
-      parts: [{ text: 'Got it. Thanks for the context!' }],
-    },
-    ...(extraHistory ?? []),
-  ];
-}

@@ -1,381 +0,0 @@
/**
 * @license
 * Copyright 2025 Qwen
 * SPDX-License-Identifier: Apache-2.0
 */

import { describe, it, expect, beforeEach, afterEach } from 'vitest';
import * as path from 'node:path';
import * as os from 'os';
import { promises as fs } from 'node:fs';
import { OpenAILogger } from './openaiLogger.js';

describe('OpenAILogger', () => {
  let originalCwd: string;
  let testTempDir: string;
  const createdDirs: string[] = [];

  beforeEach(() => {
    originalCwd = process.cwd();
    testTempDir = path.join(os.tmpdir(), `openai-logger-test-${Date.now()}`);
    createdDirs.length = 0; // Clear array
  });

  afterEach(async () => {
    // Clean up all created directories
    const cleanupPromises = [
      testTempDir,
      ...createdDirs,
      path.resolve(process.cwd(), 'relative-logs'),
      path.resolve(process.cwd(), 'custom-logs'),
      path.resolve(process.cwd(), 'test-relative-logs'),
      path.join(os.homedir(), 'custom-logs'),
      path.join(os.homedir(), 'test-openai-logs'),
    ].map(async (dir) => {
      try {
        await fs.rm(dir, { recursive: true, force: true });
      } catch {
        // Ignore cleanup errors
      }
    });

    await Promise.all(cleanupPromises);
    process.chdir(originalCwd);
  });

  describe('constructor', () => {
    it('should use default directory when no custom directory is provided', () => {
      const logger = new OpenAILogger();
      // We can't directly access private logDir, but we can verify behavior
      expect(logger).toBeInstanceOf(OpenAILogger);
    });

    it('should accept absolute path as custom directory', () => {
      const customDir = '/absolute/path/to/logs';
      const logger = new OpenAILogger(customDir);
      expect(logger).toBeInstanceOf(OpenAILogger);
    });

    it('should resolve relative path to absolute path', async () => {
      const relativeDir = 'custom-logs';
      const logger = new OpenAILogger(relativeDir);
      const expectedDir = path.resolve(process.cwd(), relativeDir);
      createdDirs.push(expectedDir);
      expect(logger).toBeInstanceOf(OpenAILogger);
    });

    it('should expand ~ to home directory', () => {
      const customDir = '~/custom-logs';
      const logger = new OpenAILogger(customDir);
      expect(logger).toBeInstanceOf(OpenAILogger);
    });

    it('should expand ~/ to home directory', () => {
      const customDir = '~/custom-logs';
      const logger = new OpenAILogger(customDir);
      expect(logger).toBeInstanceOf(OpenAILogger);
    });

    it('should handle just ~ as home directory', () => {
      const customDir = '~';
      const logger = new OpenAILogger(customDir);
      expect(logger).toBeInstanceOf(OpenAILogger);
    });
  });

  describe('initialize', () => {
    it('should create directory if it does not exist', async () => {
      const logger = new OpenAILogger(testTempDir);
      await logger.initialize();

      const dirExists = await fs
        .access(testTempDir)
        .then(() => true)
        .catch(() => false);
      expect(dirExists).toBe(true);
    });

    it('should create nested directories recursively', async () => {
      const nestedDir = path.join(testTempDir, 'nested', 'deep', 'path');
      const logger = new OpenAILogger(nestedDir);
      await logger.initialize();

      const dirExists = await fs
        .access(nestedDir)
        .then(() => true)
        .catch(() => false);
      expect(dirExists).toBe(true);
    });

    it('should not throw if directory already exists', async () => {
      await fs.mkdir(testTempDir, { recursive: true });
      const logger = new OpenAILogger(testTempDir);
      await expect(logger.initialize()).resolves.not.toThrow();
    });
  });

  describe('logInteraction', () => {
    it('should create log file with correct format', async () => {
      const logger = new OpenAILogger(testTempDir);
      await logger.initialize();

      const request = {
        model: 'gpt-4',
        messages: [{ role: 'user', content: 'test' }],
      };
      const response = { id: 'test-id', choices: [] };

      const logPath = await logger.logInteraction(request, response);

      expect(logPath).toContain(testTempDir);
      expect(logPath).toMatch(
        /openai-\d{4}-\d{2}-\d{2}T\d{2}-\d{2}-\d{2}\.\d{3}Z-[a-f0-9]{8}\.json/,
      );

      const fileExists = await fs
        .access(logPath)
        .then(() => true)
        .catch(() => false);
      expect(fileExists).toBe(true);
    });

    it('should write correct log data structure', async () => {
      const logger = new OpenAILogger(testTempDir);
      await logger.initialize();

      const request = {
        model: 'gpt-4',
        messages: [{ role: 'user', content: 'test' }],
      };
      const response = { id: 'test-id', choices: [] };

      const logPath = await logger.logInteraction(request, response);
      const logContent = JSON.parse(await fs.readFile(logPath, 'utf-8'));

      expect(logContent).toHaveProperty('timestamp');
      expect(logContent).toHaveProperty('request', request);
      expect(logContent).toHaveProperty('response', response);
      expect(logContent).toHaveProperty('error', null);
      expect(logContent).toHaveProperty('system');
      expect(logContent.system).toHaveProperty('hostname');
      expect(logContent.system).toHaveProperty('platform');
      expect(logContent.system).toHaveProperty('release');
      expect(logContent.system).toHaveProperty('nodeVersion');
    });

    it('should log error when provided', async () => {
      const logger = new OpenAILogger(testTempDir);
      await logger.initialize();

      const request = {
        model: 'gpt-4',
        messages: [{ role: 'user', content: 'test' }],
      };
      const error = new Error('Test error');

      const logPath = await logger.logInteraction(request, undefined, error);
      const logContent = JSON.parse(await fs.readFile(logPath, 'utf-8'));

      expect(logContent).toHaveProperty('error');
      expect(logContent.error).toHaveProperty('message', 'Test error');
      expect(logContent.error).toHaveProperty('stack');
      expect(logContent.response).toBeNull();
    });

    it('should use custom directory when provided', async () => {
      const customDir = path.join(testTempDir, 'custom-logs');
      const logger = new OpenAILogger(customDir);
      await logger.initialize();

      const request = {
        model: 'gpt-4',
        messages: [{ role: 'user', content: 'test' }],
      };
      const response = { id: 'test-id', choices: [] };

      const logPath = await logger.logInteraction(request, response);

      expect(logPath).toContain(customDir);
      expect(logPath.startsWith(customDir)).toBe(true);
    });

    it('should resolve relative path correctly', async () => {
      const relativeDir = 'relative-logs';
      const logger = new OpenAILogger(relativeDir);
      await logger.initialize();

      const request = {
        model: 'gpt-4',
        messages: [{ role: 'user', content: 'test' }],
      };
      const response = { id: 'test-id', choices: [] };

      const logPath = await logger.logInteraction(request, response);
      const expectedDir = path.resolve(process.cwd(), relativeDir);
      createdDirs.push(expectedDir);

      expect(logPath).toContain(expectedDir);
    });

    it('should expand ~ correctly', async () => {
      const customDir = '~/test-openai-logs';
      const logger = new OpenAILogger(customDir);
      await logger.initialize();

      const request = {
        model: 'gpt-4',
        messages: [{ role: 'user', content: 'test' }],
      };
      const response = { id: 'test-id', choices: [] };

      const logPath = await logger.logInteraction(request, response);
      const expectedDir = path.join(os.homedir(), 'test-openai-logs');
      createdDirs.push(expectedDir);

      expect(logPath).toContain(expectedDir);
    });
  });

  describe('getLogFiles', () => {
    it('should return empty array when directory does not exist', async () => {
      const logger = new OpenAILogger(testTempDir);
      const files = await logger.getLogFiles();
      expect(files).toEqual([]);
    });

    it('should return log files after initialization', async () => {
      const logger = new OpenAILogger(testTempDir);
      await logger.initialize();

      const request = {
        model: 'gpt-4',
        messages: [{ role: 'user', content: 'test' }],
      };
      const response = { id: 'test-id', choices: [] };

      await logger.logInteraction(request, response);
      const files = await logger.getLogFiles();

      expect(files.length).toBeGreaterThan(0);
      expect(files[0]).toMatch(/openai-.*\.json$/);
    });

    it('should return only log files matching pattern', async () => {
      const logger = new OpenAILogger(testTempDir);
      await logger.initialize();

      // Create a log file
      await logger.logInteraction({ test: 'request' }, { test: 'response' });

      // Create a non-log file
      await fs.writeFile(path.join(testTempDir, 'other-file.txt'), 'content');

      const files = await logger.getLogFiles();
      expect(files.length).toBe(1);
      expect(files[0]).toMatch(/openai-.*\.json$/);
    });

    it('should respect limit parameter', async () => {
      const logger = new OpenAILogger(testTempDir);
      await logger.initialize();

      // Create multiple log files
      for (let i = 0; i < 5; i++) {
        await logger.logInteraction(
          { test: `request-${i}` },
          { test: `response-${i}` },
        );
        // Small delay to ensure different timestamps
        await new Promise((resolve) => setTimeout(resolve, 10));
      }

      const allFiles = await logger.getLogFiles();
      expect(allFiles.length).toBe(5);

      const limitedFiles = await logger.getLogFiles(3);
      expect(limitedFiles.length).toBe(3);
    });

    it('should return files sorted by most recent first', async () => {
      const logger = new OpenAILogger(testTempDir);
      await logger.initialize();

      const files: string[] = [];
      for (let i = 0; i < 3; i++) {
        const logPath = await logger.logInteraction(
          { test: `request-${i}` },
          { test: `response-${i}` },
        );
        files.push(logPath);
        await new Promise((resolve) => setTimeout(resolve, 10));
      }

      const retrievedFiles = await logger.getLogFiles();
      expect(retrievedFiles[0]).toBe(files[2]); // Most recent first
      expect(retrievedFiles[1]).toBe(files[1]);
      expect(retrievedFiles[2]).toBe(files[0]);
    });
  });

  describe('readLogFile', () => {
    it('should read and parse log file correctly', async () => {
      const logger = new OpenAILogger(testTempDir);
      await logger.initialize();

      const request = {
        model: 'gpt-4',
        messages: [{ role: 'user', content: 'test' }],
      };
      const response = { id: 'test-id', choices: [] };

      const logPath = await logger.logInteraction(request, response);
      const logData = await logger.readLogFile(logPath);

      expect(logData).toHaveProperty('timestamp');
      expect(logData).toHaveProperty('request', request);
      expect(logData).toHaveProperty('response', response);
    });

    it('should throw error when file does not exist', async () => {
      const logger = new OpenAILogger(testTempDir);
      const nonExistentPath = path.join(testTempDir, 'non-existent.json');

      await expect(logger.readLogFile(nonExistentPath)).rejects.toThrow();
    });
  });

  describe('path resolution', () => {
    it('should normalize absolute paths', () => {
      const absolutePath = '/tmp/test/logs';
      const logger = new OpenAILogger(absolutePath);
      expect(logger).toBeInstanceOf(OpenAILogger);
    });

    it('should resolve relative paths based on current working directory', async () => {
      const relativePath = 'test-relative-logs';
      const logger = new OpenAILogger(relativePath);
      await logger.initialize();

      const request = { test: 'request' };
      const response = { test: 'response' };

      const logPath = await logger.logInteraction(request, response);
      const expectedBaseDir = path.resolve(process.cwd(), relativePath);
      createdDirs.push(expectedBaseDir);

      expect(logPath).toContain(expectedBaseDir);
    });

    it('should handle paths with special characters', async () => {
      const specialPath = path.join(testTempDir, 'logs-with-special-chars');
      const logger = new OpenAILogger(specialPath);
      await logger.initialize();

      const request = { test: 'request' };
      const response = { test: 'response' };

      const logPath = await logger.logInteraction(request, response);
      expect(logPath).toContain(specialPath);
    });
  });
});
@@ -18,23 +18,10 @@ export class OpenAILogger {
|
||||
|
||||
/**
|
||||
* Creates a new OpenAI logger
|
||||
* @param customLogDir Optional custom log directory path (supports relative paths, absolute paths, and ~ expansion)
|
||||
* @param customLogDir Optional custom log directory path
|
||||
*/
|
||||
constructor(customLogDir?: string) {
|
||||
if (customLogDir) {
|
||||
// Resolve relative paths to absolute paths
|
||||
// Handle ~ expansion
|
||||
let resolvedPath = customLogDir;
|
||||
if (customLogDir === '~' || customLogDir.startsWith('~/')) {
|
||||
resolvedPath = path.join(os.homedir(), customLogDir.slice(1));
|
||||
} else if (!path.isAbsolute(customLogDir)) {
|
||||
// If it's a relative path, resolve it relative to current working directory
|
||||
resolvedPath = path.resolve(process.cwd(), customLogDir);
|
||||
}
|
||||
this.logDir = path.normalize(resolvedPath);
|
||||
} else {
|
||||
this.logDir = path.join(process.cwd(), 'logs', 'openai');
|
||||
}
|
||||
this.logDir = customLogDir || path.join(process.cwd(), 'logs', 'openai');
|
||||
}
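For readers tracking this removal: the deleted branch implemented the `~`-expansion and relative-path rules that the tests above exercise. A self-contained sketch of those rules for reference; the function name `resolveLogDir` is illustrative and not part of the codebase:

```typescript
import * as path from 'node:path';
import * as os from 'node:os';

// Mirrors the removed constructor logic: ~ expansion first, then
// cwd-relative resolution, then normalization. Absolute paths pass
// through normalize() unchanged.
function resolveLogDir(customLogDir?: string): string {
  if (!customLogDir) {
    return path.join(process.cwd(), 'logs', 'openai');
  }
  if (customLogDir === '~' || customLogDir.startsWith('~/')) {
    return path.normalize(path.join(os.homedir(), customLogDir.slice(1)));
  }
  if (!path.isAbsolute(customLogDir)) {
    return path.normalize(path.resolve(process.cwd(), customLogDir));
  }
  return path.normalize(customLogDir);
}

console.log(resolveLogDir('~/qwen-logs')); // e.g. /home/user/qwen-logs
console.log(resolveLogDir('my-logs'));     // <cwd>/my-logs
console.log(resolveLogDir());              // <cwd>/logs/openai
```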

  /**

@@ -152,16 +152,7 @@ describe('ripgrepUtils', () => {
  });

  describe('canUseRipgrep', () => {
    it('should return true if ripgrep binary exists (builtin)', async () => {
      (fileExists as Mock).mockResolvedValue(true);

      const result = await canUseRipgrep(true);

      expect(result).toBe(true);
      expect(fileExists).toHaveBeenCalledOnce();
    });

    it('should return true if ripgrep binary exists (default)', async () => {
    it('should return true if ripgrep binary exists', async () => {
      (fileExists as Mock).mockResolvedValue(true);

      const result = await canUseRipgrep();

@@ -170,26 +161,15 @@ describe('ripgrepUtils', () => {
      expect(fileExists).toHaveBeenCalledOnce();
    });

    it('should fall back to system rg if bundled ripgrep binary does not exist', async () => {
    it('should return false if ripgrep binary does not exist', async () => {
      (fileExists as Mock).mockResolvedValue(false);
      // When useBuiltin is true but bundled binary doesn't exist,
      // it should fall back to checking system rg (which will spawn a process)
      // In this test environment, system rg is likely available, so result should be true
      // unless spawn fails

      const result = await canUseRipgrep();

      // The test may pass or fail depending on system rg availability
      // Just verify that fileExists was called to check bundled binary first
      expect(result).toBe(false);
      expect(fileExists).toHaveBeenCalledOnce();
      // Result depends on whether system rg is installed
      expect(typeof result).toBe('boolean');
    });

    // Note: Tests for system ripgrep detection (useBuiltin=false) would require mocking
    // the child_process spawn function, which is complex in ESM. These cases are tested
    // indirectly through integration tests.

    it('should return false if platform is unsupported', async () => {
      const originalPlatform = process.platform;

@@ -85,31 +85,13 @@ export function getRipgrepPath(): string {

/**
 * Checks if ripgrep binary is available
 * @param useBuiltin If true, tries bundled ripgrep first, then falls back to system ripgrep.
 *                   If false, only checks for system ripgrep.
 */
export async function canUseRipgrep(
  useBuiltin: boolean = true,
): Promise<boolean> {
export async function canUseRipgrep(): Promise<boolean> {
  try {
    if (useBuiltin) {
      // Try bundled ripgrep first
      const rgPath = getRipgrepPath();
      if (await fileExists(rgPath)) {
        return true;
      }
      // Fallback to system rg if bundled binary is not available
    }

    // Check for system ripgrep by trying to spawn 'rg --version'
    const { spawn } = await import('node:child_process');
    return await new Promise<boolean>((resolve) => {
      const proc = spawn('rg', ['--version']);
      proc.on('error', () => resolve(false));
      proc.on('exit', (code) => resolve(code === 0));
    });
    const rgPath = getRipgrepPath();
    return await fileExists(rgPath);
  } catch (_error) {
    // Unsupported platform/arch or other error
    // Unsupported platform/arch
    return false;
  }
}
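The practical effect of this hunk on callers: the system-`rg` probe is gone, so "available" now means "the bundled binary exists". A small illustrative caller, under that reading (`pickSearchTool` is a hypothetical name, not in the codebase):

```typescript
import { canUseRipgrep } from './ripgrepUtils.js';

// Before this change, canUseRipgrep() could still return true when only a
// system-wide `rg` was on PATH; after it, the bundled binary alone decides.
async function pickSearchTool(): Promise<'ripgrep' | 'grep'> {
  return (await canUseRipgrep()) ? 'ripgrep' : 'grep';
}
```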

@@ -1,69 +0,0 @@
{
  "name": "@qwen-code/sdk-typescript",
  "version": "0.1.0",
  "description": "TypeScript SDK for programmatic access to qwen-code CLI",
  "main": "dist/index.js",
  "types": "dist/index.d.ts",
  "type": "module",
  "exports": {
    ".": {
      "types": "./dist/index.d.ts",
      "import": "./dist/index.js",
      "require": "./dist/index.js"
    },
    "./package.json": "./package.json"
  },
  "files": [
    "dist",
    "README.md",
    "LICENSE"
  ],
  "scripts": {
    "build": "tsc",
    "test": "vitest run",
    "test:watch": "vitest",
    "test:coverage": "vitest run --coverage",
    "lint": "eslint src test",
    "lint:fix": "eslint src test --fix",
    "clean": "rm -rf dist",
    "prepublishOnly": "npm run clean && npm run build"
  },
  "keywords": [
    "qwen",
    "qwen-code",
    "ai",
    "code-assistant",
    "sdk",
    "typescript"
  ],
  "author": "Qwen Team",
  "license": "Apache-2.0",
  "engines": {
    "node": ">=18.0.0"
  },
  "dependencies": {
    "@modelcontextprotocol/sdk": "^1.0.4",
    "@qwen-code/qwen-code": "file:../../cli"
  },
  "devDependencies": {
    "@types/node": "^20.14.0",
    "@typescript-eslint/eslint-plugin": "^7.13.0",
    "@typescript-eslint/parser": "^7.13.0",
    "@vitest/coverage-v8": "^1.6.0",
    "eslint": "^8.57.0",
    "typescript": "^5.4.5",
    "vitest": "^1.6.0"
  },
  "peerDependencies": {
    "typescript": ">=5.0.0"
  },
  "repository": {
    "type": "git",
    "url": "https://github.com/qwen-ai/qwen-code.git",
    "directory": "packages/sdk/typescript"
  },
  "bugs": {
    "url": "https://github.com/qwen-ai/qwen-code/issues"
  },
  "homepage": "https://github.com/qwen-ai/qwen-code#readme"
}

@@ -1,108 +0,0 @@
/**
 * TypeScript SDK for programmatic access to qwen-code CLI
 *
 * @example
 * ```typescript
 * import { query } from '@qwen-code/sdk-typescript';
 *
 * const q = query({
 *   prompt: 'What files are in this directory?',
 *   options: { cwd: process.cwd() },
 * });
 *
 * for await (const message of q) {
 *   if (message.type === 'assistant') {
 *     console.log(message.message.content);
 *   }
 * }
 *
 * await q.close();
 * ```
 */

// Main API
export { query } from './query/createQuery.js';

/** @deprecated Use query() instead */
export { createQuery } from './query/createQuery.js';

export { Query } from './query/Query.js';

// Configuration types
export type {
  CreateQueryOptions,
  PermissionMode,
  PermissionCallback,
  ExternalMcpServerConfig,
  TransportOptions,
} from './types/config.js';

export type { QueryOptions } from './query/createQuery.js';

// Protocol types
export type {
  ContentBlock,
  TextBlock,
  ThinkingBlock,
  ToolUseBlock,
  ToolResultBlock,
  CLIUserMessage,
  CLIAssistantMessage,
  CLISystemMessage,
  CLIResultMessage,
  CLIPartialAssistantMessage,
  CLIMessage,
} from './types/protocol.js';

export {
  isCLIUserMessage,
  isCLIAssistantMessage,
  isCLISystemMessage,
  isCLIResultMessage,
  isCLIPartialAssistantMessage,
} from './types/protocol.js';

export type { JSONSchema } from './types/mcp.js';

export { AbortError, isAbortError } from './types/errors.js';

// Control Request Types
export {
  ControlRequestType,
  getAllControlRequestTypes,
  isValidControlRequestType,
} from './types/controlRequests.js';

// Transport
export { ProcessTransport } from './transport/ProcessTransport.js';
export type { Transport } from './transport/Transport.js';

// Utilities
export { Stream } from './utils/Stream.js';
export {
  serializeJsonLine,
  parseJsonLine,
  parseJsonLineSafe,
  isValidMessage,
  parseJsonLinesStream,
} from './utils/jsonLines.js';
export {
  findCliPath,
  resolveCliPath,
  prepareSpawnInfo,
} from './utils/cliPath.js';
export type { SpawnInfo } from './utils/cliPath.js';

// MCP helpers
export {
  createSdkMcpServer,
  createSimpleMcpServer,
} from './mcp/createSdkMcpServer.js';
export {
  tool,
  createTool,
  validateToolName,
  validateInputSchema,
} from './mcp/tool.js';

export type { ToolDefinition } from './types/config.js';

@@ -1,153 +0,0 @@
/**
 * SdkControlServerTransport - bridges MCP Server with Query's control plane
 *
 * Implements @modelcontextprotocol/sdk Transport interface to enable
 * SDK-embedded MCP servers. Messages flow bidirectionally:
 *
 *   MCP Server → send() → Query → control_request (mcp_message) → CLI
 *   CLI → control_request (mcp_message) → Query → handleMessage() → MCP Server
 */

import type { JSONRPCMessage } from '@modelcontextprotocol/sdk/types.js';

/**
 * Callback type for sending messages to Query
 */
export type SendToQueryCallback = (message: JSONRPCMessage) => Promise<void>;

/**
 * SdkControlServerTransport options
 */
export interface SdkControlServerTransportOptions {
  sendToQuery: SendToQueryCallback;
  serverName: string;
}

/**
 * Transport adapter that bridges MCP Server with Query's control plane
 */
export class SdkControlServerTransport {
  public sendToQuery: SendToQueryCallback;
  private serverName: string;
  private started = false;

  /**
   * Callbacks set by MCP Server
   */
  onmessage?: (message: JSONRPCMessage) => void;
  onerror?: (error: Error) => void;
  onclose?: () => void;

  constructor(options: SdkControlServerTransportOptions) {
    this.sendToQuery = options.sendToQuery;
    this.serverName = options.serverName;
  }

  /**
   * Start the transport
   */
  async start(): Promise<void> {
    this.started = true;
  }

  /**
   * Send message from MCP Server to CLI via Query's control plane
   *
   * @param message - JSON-RPC message from MCP Server
   */
  async send(message: JSONRPCMessage): Promise<void> {
    if (!this.started) {
      throw new Error(
        `SdkControlServerTransport (${this.serverName}) not started. Call start() first.`,
      );
    }

    try {
      // Send via Query's control plane
      await this.sendToQuery(message);
    } catch (error) {
      // Invoke error callback if set
      if (this.onerror) {
        this.onerror(error instanceof Error ? error : new Error(String(error)));
      }
      throw error;
    }
  }

  /**
   * Close the transport
   */
  async close(): Promise<void> {
    if (!this.started) {
      return; // Already closed
    }

    this.started = false;

    // Notify MCP Server
    if (this.onclose) {
      this.onclose();
    }
  }

  /**
   * Handle incoming message from CLI
   *
   * @param message - JSON-RPC message from CLI
   */
  handleMessage(message: JSONRPCMessage): void {
    if (!this.started) {
      console.warn(
        `[SdkControlServerTransport] Received message for closed transport (${this.serverName})`,
      );
      return;
    }

    if (this.onmessage) {
      this.onmessage(message);
    } else {
      console.warn(
        `[SdkControlServerTransport] No onmessage handler set for ${this.serverName}`,
      );
    }
  }

  /**
   * Handle incoming error from CLI
   *
   * @param error - Error from CLI
   */
  handleError(error: Error): void {
    if (this.onerror) {
      this.onerror(error);
    } else {
      console.error(
        `[SdkControlServerTransport] Error for ${this.serverName}:`,
        error,
      );
    }
  }

  /**
   * Check if transport is started
   */
  isStarted(): boolean {
    return this.started;
  }

  /**
   * Get server name
   */
  getServerName(): string {
    return this.serverName;
  }
}

/**
 * Create SdkControlServerTransport instance
 */
export function createSdkControlServerTransport(
  options: SdkControlServerTransportOptions,
): SdkControlServerTransport {
  return new SdkControlServerTransport(options);
}
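A minimal sketch of how this adapter was wired to an MCP server. `Server` is the real @modelcontextprotocol/sdk class; the `forwardToCli` body is elided and hypothetical (in the SDK it wrapped the message in an `mcp_message` control_request):

```typescript
import { Server } from '@modelcontextprotocol/sdk/server/index.js';
import type { JSONRPCMessage } from '@modelcontextprotocol/sdk/types.js';

const server = new Server(
  { name: 'database', version: '1.0.0' },
  { capabilities: { tools: {} } },
);

// Hypothetical stand-in for Query's control-plane writer.
const forwardToCli = async (_message: JSONRPCMessage): Promise<void> => {};

const transport = createSdkControlServerTransport({
  serverName: 'database',
  sendToQuery: forwardToCli,
});

await transport.start(); // must precede send(); otherwise send() throws
await server.connect(transport); // Server installs onmessage/onerror/onclose
```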

@@ -1,177 +0,0 @@
/**
 * Factory function to create SDK-embedded MCP servers
 *
 * Creates MCP Server instances that run in the user's Node.js process
 * and are proxied to the CLI via the control plane.
 */

import { Server } from '@modelcontextprotocol/sdk/server/index.js';
import {
  ListToolsRequestSchema,
  CallToolRequestSchema,
  CallToolResult,
} from '@modelcontextprotocol/sdk/types.js';
import type { ToolDefinition } from '../types/config.js';
import { formatToolResult, formatToolError } from './formatters.js';
import { validateToolName } from './tool.js';

/**
 * Create an SDK-embedded MCP server with custom tools
 *
 * The server runs in your Node.js process and is proxied to the CLI.
 *
 * @param name - Server name (must be unique)
 * @param version - Server version
 * @param tools - Array of tool definitions
 * @returns MCP Server instance
 *
 * @example
 * ```typescript
 * const server = createSdkMcpServer('database', '1.0.0', [
 *   tool({
 *     name: 'query_db',
 *     description: 'Query the database',
 *     inputSchema: {
 *       type: 'object',
 *       properties: { query: { type: 'string' } },
 *       required: ['query']
 *     },
 *     handler: async (input) => db.query(input.query)
 *   })
 * ]);
 * ```
 */
export function createSdkMcpServer(
  name: string,
  version: string,
  tools: ToolDefinition[],
): Server {
  // Validate server name
  if (!name || typeof name !== 'string') {
    throw new Error('MCP server name must be a non-empty string');
  }

  if (!version || typeof version !== 'string') {
    throw new Error('MCP server version must be a non-empty string');
  }

  if (!Array.isArray(tools)) {
    throw new Error('Tools must be an array');
  }

  // Validate tool names are unique
  const toolNames = new Set<string>();
  for (const tool of tools) {
    validateToolName(tool.name);

    if (toolNames.has(tool.name)) {
      throw new Error(
        `Duplicate tool name '${tool.name}' in MCP server '${name}'`,
      );
    }
    toolNames.add(tool.name);
  }

  // Create MCP Server instance
  const server = new Server(
    {
      name,
      version,
    },
    {
      capabilities: {
        tools: {},
      },
    },
  );

  // Create tool map for fast lookup
  const toolMap = new Map<string, ToolDefinition>();
  for (const tool of tools) {
    toolMap.set(tool.name, tool);
  }

  // Register list_tools handler
  server.setRequestHandler(ListToolsRequestSchema, async () => {
    return {
      tools: tools.map((tool) => ({
        name: tool.name,
        description: tool.description,
        inputSchema: tool.inputSchema,
      })),
    };
  });

  // Register call_tool handler
  server.setRequestHandler(CallToolRequestSchema, async (request) => {
    const { name: toolName, arguments: toolArgs } = request.params;

    // Find tool
    const tool = toolMap.get(toolName);
    if (!tool) {
      return formatToolError(
        new Error(`Tool '${toolName}' not found in server '${name}'`),
      ) as CallToolResult;
    }

    try {
      // Invoke tool handler
      const result = await tool.handler(toolArgs);

      // Format result
      return formatToolResult(result) as CallToolResult;
    } catch (error) {
      // Handle tool execution error
      return formatToolError(
        error instanceof Error
          ? error
          : new Error(`Tool '${toolName}' failed: ${String(error)}`),
      ) as CallToolResult;
    }
  });

  return server;
}

/**
 * Create MCP server with inline tool definitions
 *
 * @param name - Server name
 * @param version - Server version
 * @param toolDefinitions - Object mapping tool names to definitions
 * @returns MCP Server instance
 *
 * @example
 * ```typescript
 * const server = createSimpleMcpServer('utils', '1.0.0', {
 *   greeting: {
 *     description: 'Generate a greeting',
 *     inputSchema: {
 *       type: 'object',
 *       properties: { name: { type: 'string' } },
 *       required: ['name']
 *     },
 *     handler: async ({ name }) => `Hello, ${name}!`
 *   }
 * });
 * ```
 */
export function createSimpleMcpServer(
  name: string,
  version: string,
  toolDefinitions: Record<
    string,
    Omit<ToolDefinition, 'name'> & { name?: string }
  >,
): Server {
  const tools: ToolDefinition[] = Object.entries(toolDefinitions).map(
    ([toolName, def]) => ({
      name: def.name || toolName,
      description: def.description,
      inputSchema: def.inputSchema,
      handler: def.handler,
    }),
  );

  return createSdkMcpServer(name, version, tools);
}

@@ -1,247 +0,0 @@
/**
 * Tool result formatting utilities for MCP responses
 *
 * Converts various output types to MCP content blocks.
 */

/**
 * MCP content block types
 */
export type McpContentBlock =
  | { type: 'text'; text: string }
  | { type: 'image'; data: string; mimeType: string }
  | { type: 'resource'; uri: string; mimeType?: string; text?: string };

/**
 * Tool result structure
 */
export interface ToolResult {
  content: McpContentBlock[];
  isError?: boolean;
}

/**
 * Format tool result for MCP response
 *
 * Converts any value to MCP content blocks (strings, objects, errors, etc.)
 *
 * @param result - Tool handler output or error
 * @returns Formatted tool result
 *
 * @example
 * ```typescript
 * formatToolResult('Hello')
 * // → { content: [{ type: 'text', text: 'Hello' }] }
 *
 * formatToolResult({ temperature: 72 })
 * // → { content: [{ type: 'text', text: '{"temperature":72}' }] }
 * ```
 */
export function formatToolResult(result: unknown): ToolResult {
  // Handle Error objects
  if (result instanceof Error) {
    return {
      content: [
        {
          type: 'text',
          text: result.message || 'Unknown error',
        },
      ],
      isError: true,
    };
  }

  // Handle null/undefined
  if (result === null || result === undefined) {
    return {
      content: [
        {
          type: 'text',
          text: '',
        },
      ],
    };
  }

  // Handle string
  if (typeof result === 'string') {
    return {
      content: [
        {
          type: 'text',
          text: result,
        },
      ],
    };
  }

  // Handle number
  if (typeof result === 'number') {
    return {
      content: [
        {
          type: 'text',
          text: String(result),
        },
      ],
    };
  }

  // Handle boolean
  if (typeof result === 'boolean') {
    return {
      content: [
        {
          type: 'text',
          text: String(result),
        },
      ],
    };
  }

  // Handle object (including arrays)
  if (typeof result === 'object') {
    try {
      return {
        content: [
          {
            type: 'text',
            text: JSON.stringify(result, null, 2),
          },
        ],
      };
    } catch {
      // JSON.stringify failed
      return {
        content: [
          {
            type: 'text',
            text: String(result),
          },
        ],
      };
    }
  }

  // Fallback: convert to string
  return {
    content: [
      {
        type: 'text',
        text: String(result),
      },
    ],
  };
}

/**
 * Format error for MCP response
 *
 * @param error - Error object or string
 * @returns Tool result with error flag
 */
export function formatToolError(error: Error | string): ToolResult {
  const message = error instanceof Error ? error.message : error;

  return {
    content: [
      {
        type: 'text',
        text: message,
      },
    ],
    isError: true,
  };
}

/**
 * Format text content for MCP response
 *
 * @param text - Text content
 * @returns Tool result with text content
 */
export function formatTextResult(text: string): ToolResult {
  return {
    content: [
      {
        type: 'text',
        text,
      },
    ],
  };
}

/**
 * Format JSON content for MCP response
 *
 * @param data - Data to serialize as JSON
 * @returns Tool result with JSON text content
 */
export function formatJsonResult(data: unknown): ToolResult {
  return {
    content: [
      {
        type: 'text',
        text: JSON.stringify(data, null, 2),
      },
    ],
  };
}

/**
 * Merge multiple tool results into a single result
 *
 * @param results - Array of tool results
 * @returns Merged tool result
 */
export function mergeToolResults(results: ToolResult[]): ToolResult {
  const mergedContent: McpContentBlock[] = [];
  let hasError = false;

  for (const result of results) {
    mergedContent.push(...result.content);
    if (result.isError) {
      hasError = true;
    }
  }

  return {
    content: mergedContent,
    isError: hasError,
  };
}
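A quick illustration of the merge semantics (values invented): content blocks are concatenated in input order, and a single failing input marks the whole merged result as an error:

```typescript
const merged = mergeToolResults([
  formatTextResult('step 1 ok'),
  formatToolError('step 2 failed'),
]);
// merged.content => both text blocks, in input order
// merged.isError => true, because at least one input had isError set
```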

/**
 * Validate MCP content block
 *
 * @param block - Content block to validate
 * @returns True if valid
 */
export function isValidContentBlock(block: unknown): block is McpContentBlock {
  if (!block || typeof block !== 'object') {
    return false;
  }

  const blockObj = block as Record<string, unknown>;

  if (!blockObj.type || typeof blockObj.type !== 'string') {
    return false;
  }

  switch (blockObj.type) {
    case 'text':
      return typeof blockObj.text === 'string';

    case 'image':
      return (
        typeof blockObj.data === 'string' &&
        typeof blockObj.mimeType === 'string'
      );

    case 'resource':
      return typeof blockObj.uri === 'string';

    default:
      return false;
  }
}

@@ -1,140 +0,0 @@
/**
 * Tool definition helper for SDK-embedded MCP servers
 *
 * Provides type-safe tool definitions with generic input/output types.
 */

import type { ToolDefinition } from '../types/config.js';

/**
 * Create a type-safe tool definition
 *
 * Validates the tool definition and provides type inference for input/output types.
 *
 * @param def - Tool definition with handler
 * @returns The same tool definition (for type safety)
 *
 * @example
 * ```typescript
 * const weatherTool = tool<{ location: string }, { temperature: number }>({
 *   name: 'get_weather',
 *   description: 'Get weather for a location',
 *   inputSchema: {
 *     type: 'object',
 *     properties: {
 *       location: { type: 'string' }
 *     },
 *     required: ['location']
 *   },
 *   handler: async (input) => {
 *     return { temperature: await fetchWeather(input.location) };
 *   }
 * });
 * ```
 */
export function tool<TInput = unknown, TOutput = unknown>(
  def: ToolDefinition<TInput, TOutput>,
): ToolDefinition<TInput, TOutput> {
  // Validate tool definition
  if (!def.name || typeof def.name !== 'string') {
    throw new Error('Tool definition must have a name (string)');
  }

  if (!def.description || typeof def.description !== 'string') {
    throw new Error(
      `Tool definition for '${def.name}' must have a description (string)`,
    );
  }

  if (!def.inputSchema || typeof def.inputSchema !== 'object') {
    throw new Error(
      `Tool definition for '${def.name}' must have an inputSchema (object)`,
    );
  }

  if (!def.handler || typeof def.handler !== 'function') {
    throw new Error(
      `Tool definition for '${def.name}' must have a handler (function)`,
    );
  }

  // Return definition (pass-through for type safety)
  return def;
}

/**
 * Validate tool name
 *
 * Tool names must:
 * - Start with a letter
 * - Contain only letters, numbers, and underscores
 * - Be between 1 and 64 characters
 *
 * @param name - Tool name to validate
 * @throws Error if name is invalid
 */
export function validateToolName(name: string): void {
  if (!name) {
    throw new Error('Tool name cannot be empty');
  }

  if (name.length > 64) {
    throw new Error(
      `Tool name '${name}' is too long (max 64 characters): ${name.length}`,
    );
  }

  if (!/^[a-zA-Z][a-zA-Z0-9_]*$/.test(name)) {
    throw new Error(
      `Tool name '${name}' is invalid. Must start with a letter and contain only letters, numbers, and underscores.`,
    );
  }
}
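Illustrative calls against the rules above (the names are invented):

```typescript
validateToolName('get_weather');  // ok
validateToolName('2fast');        // throws: must start with a letter
validateToolName('bad-name');     // throws: hyphens are not permitted
validateToolName('x'.repeat(65)); // throws: max 64 characters
```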

/**
 * Validate tool input schema (JSON Schema compliance)
 *
 * @param schema - Input schema to validate
 * @throws Error if schema is invalid
 */
export function validateInputSchema(schema: unknown): void {
  if (!schema || typeof schema !== 'object') {
    throw new Error('Input schema must be an object');
  }

  const schemaObj = schema as Record<string, unknown>;

  if (!schemaObj.type) {
    throw new Error('Input schema must have a type field');
  }

  // For object schemas, validate properties
  if (schemaObj.type === 'object') {
    if (schemaObj.properties && typeof schemaObj.properties !== 'object') {
      throw new Error('Input schema properties must be an object');
    }

    if (schemaObj.required && !Array.isArray(schemaObj.required)) {
      throw new Error('Input schema required must be an array');
    }
  }
}

/**
 * Create tool definition with strict validation
 *
 * @param def - Tool definition
 * @returns Validated tool definition
 */
export function createTool<TInput = unknown, TOutput = unknown>(
  def: ToolDefinition<TInput, TOutput>,
): ToolDefinition<TInput, TOutput> {
  // Validate via tool() function
  const validated = tool(def);

  // Additional validation
  validateToolName(validated.name);
  validateInputSchema(validated.inputSchema);

  return validated;
}

@@ -1,895 +0,0 @@
/**
 * Query class - Main orchestrator for SDK
 *
 * Manages SDK workflow, routes messages, and handles lifecycle.
 * Implements AsyncIterator protocol for message consumption.
 */

import { randomUUID } from 'node:crypto';
import type {
  CLIMessage,
  CLIUserMessage,
  CLIControlRequest,
  CLIControlResponse,
  ControlCancelRequest,
  PermissionApproval,
  PermissionSuggestion,
} from '../types/protocol.js';
import {
  isCLIUserMessage,
  isCLIAssistantMessage,
  isCLISystemMessage,
  isCLIResultMessage,
  isCLIPartialAssistantMessage,
  isControlRequest,
  isControlResponse,
  isControlCancel,
} from '../types/protocol.js';
import type { Transport } from '../transport/Transport.js';
import type { CreateQueryOptions } from '../types/config.js';
import { Stream } from '../utils/Stream.js';
import { serializeJsonLine } from '../utils/jsonLines.js';
import { AbortError } from '../types/errors.js';
import type { JSONRPCMessage } from '@modelcontextprotocol/sdk/types.js';
import type { SdkControlServerTransport } from '../mcp/SdkControlServerTransport.js';
import { ControlRequestType } from '../types/controlRequests.js';

/**
 * Pending control request tracking
 */
interface PendingControlRequest {
  resolve: (response: Record<string, unknown> | null) => void;
  reject: (error: Error) => void;
  timeout: NodeJS.Timeout;
  abortController: AbortController;
}

/**
 * Hook configuration for SDK initialization
 */
interface HookRegistration {
  matcher: Record<string, unknown>;
  hookCallbackIds: string[];
}

/**
 * Transport with input stream control (e.g., ProcessTransport)
 */
interface TransportWithEndInput extends Transport {
  endInput(): void;
}

/**
 * Query class
 *
 * Main entry point for SDK users. Orchestrates communication with CLI,
 * routes messages, handles control plane, and manages lifecycle.
 */
export class Query implements AsyncIterable<CLIMessage> {
  private transport: Transport;
  private options: CreateQueryOptions;
  private sessionId: string;
  private inputStream: Stream<CLIMessage>;
  private abortController: AbortController;
  private pendingControlRequests: Map<string, PendingControlRequest> =
    new Map();
  private sdkMcpTransports: Map<string, SdkControlServerTransport> = new Map();
  private initialized: Promise<void> | null = null;
  private closed = false;
  private messageRouterStarted = false;

  // First result tracking for MCP servers
  private firstResultReceivedPromise?: Promise<void>;
  private firstResultReceivedResolve?: () => void;

  // Hook callbacks tracking
  private hookCallbacks = new Map<
    string,
    (
      input: unknown,
      toolUseId: string | null,
      options: { signal: AbortSignal },
    ) => Promise<unknown>
  >();
  private nextCallbackId = 0;

  // Single-turn mode flag
  private readonly isSingleTurn: boolean;

  constructor(transport: Transport, options: CreateQueryOptions) {
    this.transport = transport;
    this.options = options;
    this.sessionId = randomUUID();
    this.inputStream = new Stream<CLIMessage>();
    // Use provided abortController or create a new one
    this.abortController = options.abortController ?? new AbortController();
    this.isSingleTurn = options.singleTurn ?? false;

    // Setup first result tracking
    this.firstResultReceivedPromise = new Promise((resolve) => {
      this.firstResultReceivedResolve = resolve;
    });

    // Handle abort signal if controller is provided and already aborted or will be aborted
    if (this.abortController.signal.aborted) {
      // Already aborted - set error immediately
      this.inputStream.setError(new AbortError('Query aborted by user'));
      this.close().catch((err) => {
        console.error('[Query] Error during abort cleanup:', err);
      });
    } else {
      // Listen for abort events on the controller's signal
      this.abortController.signal.addEventListener('abort', () => {
        // Set abort error on the stream before closing
        this.inputStream.setError(new AbortError('Query aborted by user'));
        this.close().catch((err) => {
          console.error('[Query] Error during abort cleanup:', err);
        });
      });
    }

    // Initialize immediately (no lazy initialization)
    this.initialize();
  }

  /**
   * Initialize the query
   */
  private initialize(): void {
    // Initialize asynchronously but don't block constructor
    // Capture the promise immediately so other code can wait for initialization
    this.initialized = (async () => {
      try {
        // Start transport
        await this.transport.start();

        // Setup SDK-embedded MCP servers
        await this.setupSdkMcpServers();

        // Prepare hooks configuration
        let hooks: Record<string, HookRegistration[]> | undefined;
        if (this.options.hooks) {
          hooks = {};
          for (const [event, matchers] of Object.entries(this.options.hooks)) {
            if (matchers.length > 0) {
              hooks[event] = matchers.map((matcher) => {
                const callbackIds: string[] = [];
                for (const callback of matcher.hooks) {
                  const callbackId = `hook_${this.nextCallbackId++}`;
                  this.hookCallbacks.set(callbackId, callback);
                  callbackIds.push(callbackId);
                }
                return {
                  matcher: matcher.matcher,
                  hookCallbackIds: callbackIds,
                };
              });
            }
          }
        }

        // Start message router in background
        this.startMessageRouter();

        // Send initialize control request
        const sdkMcpServerNames = Array.from(this.sdkMcpTransports.keys());
        await this.sendControlRequest(ControlRequestType.INITIALIZE, {
          hooks: hooks ? Object.values(hooks).flat() : null,
          sdkMcpServers:
            sdkMcpServerNames.length > 0 ? sdkMcpServerNames : undefined,
        });

        // Note: Single-turn prompts are sent directly via transport in createQuery.ts
      } catch (error) {
        console.error('[Query] Initialization error:', error);
        throw error;
      }
    })();
  }

  /**
   * Setup SDK-embedded MCP servers
   */
  private async setupSdkMcpServers(): Promise<void> {
    if (!this.options.sdkMcpServers) {
      return;
    }

    // Validate no name conflicts with external MCP servers
    const externalNames = Object.keys(this.options.mcpServers ?? {});
    const sdkNames = Object.keys(this.options.sdkMcpServers);

    const conflicts = sdkNames.filter((name) => externalNames.includes(name));
    if (conflicts.length > 0) {
      throw new Error(
        `MCP server name conflicts between mcpServers and sdkMcpServers: ${conflicts.join(', ')}`,
      );
    }

    // Import SdkControlServerTransport (dynamic to avoid circular deps)
    const { SdkControlServerTransport } = await import(
      '../mcp/SdkControlServerTransport.js'
    );

    // Create SdkControlServerTransport for each server
    for (const [name, server] of Object.entries(this.options.sdkMcpServers)) {
      // Create transport that sends MCP messages via control plane
      const transport = new SdkControlServerTransport({
        serverName: name,
        sendToQuery: async (message: JSONRPCMessage) => {
          // Send MCP message to CLI via control request
          await this.sendControlRequest(ControlRequestType.MCP_MESSAGE, {
            server_name: name,
            message,
          });
        },
      });

      // Start transport
      await transport.start();

      // Connect server to transport
      await server.connect(transport);

      // Store transport for cleanup
      this.sdkMcpTransports.set(name, transport);
    }
  }

  /**
   * Start message router (background task)
   */
  private startMessageRouter(): void {
    if (this.messageRouterStarted) {
      return;
    }

    this.messageRouterStarted = true;

    // Route messages from transport to input stream
    (async () => {
      try {
        for await (const message of this.transport.readMessages()) {
          await this.routeMessage(message);

          // Stop if closed
          if (this.closed) {
            break;
          }
        }

        // Transport completed - check if aborted first
        if (this.abortController.signal.aborted) {
          this.inputStream.setError(new AbortError('Query aborted'));
        } else {
          this.inputStream.done();
        }
      } catch (error) {
        // Transport error - propagate to stream
        this.inputStream.setError(
          error instanceof Error ? error : new Error(String(error)),
        );
      }
    })().catch((err) => {
      console.error('[Query] Message router error:', err);
      this.inputStream.setError(
        err instanceof Error ? err : new Error(String(err)),
      );
    });
  }

  /**
   * Route incoming message
   */
  private async routeMessage(message: unknown): Promise<void> {
    // Check control messages first
    if (isControlRequest(message)) {
      // CLI asking SDK for something (permission, MCP message, hook callback)
      await this.handleControlRequest(message);
      return;
    }

    if (isControlResponse(message)) {
      // Response to SDK's control request
      this.handleControlResponse(message);
      return;
    }

    if (isControlCancel(message)) {
      // Cancel pending control request
      this.handleControlCancelRequest(message);
      return;
    }

    // Check data messages
    if (isCLISystemMessage(message)) {
      // SystemMessage - contains session info (cwd, tools, model, etc.) that should be passed to user
      this.inputStream.enqueue(message);
      return;
    }

    if (isCLIResultMessage(message)) {
      // Result message - trigger first result received
      if (this.firstResultReceivedResolve) {
        this.firstResultReceivedResolve();
      }
      // In single-turn mode, automatically close input after receiving result
      if (this.isSingleTurn && 'endInput' in this.transport) {
        (this.transport as TransportWithEndInput).endInput();
      }
      // Pass to user
      this.inputStream.enqueue(message);
      return;
    }

    if (
      isCLIAssistantMessage(message) ||
      isCLIUserMessage(message) ||
      isCLIPartialAssistantMessage(message)
    ) {
      // Pass to user
      this.inputStream.enqueue(message);
      return;
    }

    // Unknown message - log and pass through
    if (process.env['DEBUG_SDK']) {
      console.warn('[Query] Unknown message type:', message);
    }
    this.inputStream.enqueue(message as CLIMessage);
  }

  /**
   * Handle control request from CLI
   */
  private async handleControlRequest(
    request: CLIControlRequest,
  ): Promise<void> {
    const { request_id, request: payload } = request;

    // Create abort controller for this request
    const requestAbortController = new AbortController();

    try {
      let response: Record<string, unknown> | null = null;

      switch (payload.subtype) {
        case 'can_use_tool':
          response = (await this.handlePermissionRequest(
            payload.tool_name,
            payload.input as Record<string, unknown>,
            payload.permission_suggestions,
            requestAbortController.signal,
          )) as unknown as Record<string, unknown>;
          break;

        case 'mcp_message':
          response = await this.handleMcpMessage(
            payload.server_name,
            payload.message as unknown as JSONRPCMessage,
          );
          break;

        case 'hook_callback':
          response = await this.handleHookCallback(
            payload.callback_id,
            payload.input,
            payload.tool_use_id,
            requestAbortController.signal,
          );
          break;

        default:
          throw new Error(
            `Unknown control request subtype: ${payload.subtype}`,
          );
      }

      // Send success response
      await this.sendControlResponse(request_id, true, response);
    } catch (error) {
      // Send error response
      const errorMessage =
        error instanceof Error ? error.message : String(error);
      await this.sendControlResponse(request_id, false, errorMessage);
    }
  }

  /**
   * Handle permission request (can_use_tool)
   */
  private async handlePermissionRequest(
    toolName: string,
    toolInput: Record<string, unknown>,
    permissionSuggestions: PermissionSuggestion[] | null,
    signal: AbortSignal,
  ): Promise<PermissionApproval> {
    // Default: allow if no callback provided
    if (!this.options.canUseTool) {
      return { allowed: true };
    }

    try {
      // Invoke callback with timeout
      const timeoutMs = 30000; // 30 seconds
      const timeoutPromise = new Promise<never>((_, reject) => {
        setTimeout(
          () => reject(new Error('Permission callback timeout')),
          timeoutMs,
        );
      });

      // Call with signal and suggestions
      const result = await Promise.race([
        Promise.resolve(
          this.options.canUseTool(toolName, toolInput, {
            signal,
            suggestions: permissionSuggestions,
          }),
        ),
        timeoutPromise,
      ]);

      // Support both boolean and object return values
      if (typeof result === 'boolean') {
        return { allowed: result };
      }
      // Ensure result is a valid PermissionApproval
      return result as PermissionApproval;
    } catch (error) {
      // Timeout or error → deny (fail-safe)
      console.warn(
        '[Query] Permission callback error (denying by default):',
        error instanceof Error ? error.message : String(error),
      );
      return { allowed: false };
    }
  }
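For reference, a sketch of the caller-supplied callback this method invokes, assuming `PermissionCallback` matches the call site above; both return shapes shown here are accepted, and the allow-list is invented:

```typescript
import type { PermissionCallback } from '../types/config.js';

const canUseTool: PermissionCallback = async (toolName, input, { signal }) => {
  if (signal.aborted) return { allowed: false };
  if (toolName === 'read_file') return true; // boolean form
  return { allowed: false };                 // object form; deny by default
};
```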

  /**
   * Handle MCP message routing
   */
  private async handleMcpMessage(
    serverName: string,
    message: JSONRPCMessage,
  ): Promise<Record<string, unknown>> {
    // Get transport for this server
    const transport = this.sdkMcpTransports.get(serverName);
    if (!transport) {
      throw new Error(
        `MCP server '${serverName}' not found in SDK-embedded servers`,
      );
    }

    // Check if this is a request (has method and id) or notification
    const isRequest =
      'method' in message && 'id' in message && message.id !== null;

    if (isRequest) {
      // Request message - wait for response from MCP server
      const response = await this.handleMcpRequest(
        serverName,
        message,
        transport,
      );
      return { mcp_response: response };
    } else {
      // Notification or response - just route it
      transport.handleMessage(message);
      // Return acknowledgment for notifications
      return { mcp_response: { jsonrpc: '2.0', result: {}, id: 0 } };
    }
  }

  /**
   * Handle MCP request and wait for response
   */
  private handleMcpRequest(
    _serverName: string,
    message: JSONRPCMessage,
    transport: SdkControlServerTransport,
  ): Promise<JSONRPCMessage> {
    return new Promise((resolve, reject) => {
      const timeout = setTimeout(() => {
        reject(new Error('MCP request timeout'));
      }, 30000); // 30 seconds

      // Store message ID for matching
      const messageId = 'id' in message ? message.id : null;

      // Hook into transport to capture response
      const originalSend = transport.sendToQuery;
      transport.sendToQuery = async (responseMessage: JSONRPCMessage) => {
        if ('id' in responseMessage && responseMessage.id === messageId) {
          clearTimeout(timeout);
          // Restore original send
          transport.sendToQuery = originalSend;
          resolve(responseMessage);
        }
        // Forward to original handler
        return originalSend(responseMessage);
      };

      // Send message to MCP server
      transport.handleMessage(message);
    });
  }

  /**
   * Handle control response from CLI
   */
  private handleControlResponse(response: CLIControlResponse): void {
    const { response: payload } = response;
    const request_id = payload.request_id;

    const pending = this.pendingControlRequests.get(request_id);
    if (!pending) {
      console.warn(
        '[Query] Received response for unknown request:',
        request_id,
      );
      return;
    }

    // Clear timeout
    clearTimeout(pending.timeout);
    this.pendingControlRequests.delete(request_id);

    // Resolve or reject based on response type
    if (payload.subtype === 'success') {
      pending.resolve(payload.response as Record<string, unknown> | null);
    } else {
      // Extract error message from error field (can be string or object)
      const errorMessage =
        typeof payload.error === 'string'
          ? payload.error
          : (payload.error?.message ?? 'Unknown error');
      pending.reject(new Error(errorMessage));
    }
  }

  /**
   * Handle control cancel request from CLI
   */
  private handleControlCancelRequest(request: ControlCancelRequest): void {
    const { request_id } = request;

    if (!request_id) {
      console.warn('[Query] Received cancel request without request_id');
      return;
    }

    const pending = this.pendingControlRequests.get(request_id);
    if (pending) {
      // Abort the request
      pending.abortController.abort();

      // Clean up
      clearTimeout(pending.timeout);
      this.pendingControlRequests.delete(request_id);

      // Reject with abort error
      pending.reject(new AbortError('Request cancelled'));
    }
  }

  /**
   * Handle hook callback request
   */
  private async handleHookCallback(
    callbackId: string,
    input: unknown,
    toolUseId: string | null,
    signal: AbortSignal,
  ): Promise<Record<string, unknown>> {
    const callback = this.hookCallbacks.get(callbackId);
    if (!callback) {
      throw new Error(`No hook callback found for ID: ${callbackId}`);
    }

    // Invoke callback with signal
    const result = await callback(input, toolUseId, { signal });
    return result as Record<string, unknown>;
  }

  /**
   * Send control request to CLI
   */
  private async sendControlRequest(
    subtype: string,
    data: Record<string, unknown> = {},
  ): Promise<Record<string, unknown> | null> {
    const requestId = randomUUID();

    const request: CLIControlRequest = {
      type: 'control_request',
      request_id: requestId,
      request: {
        subtype: subtype as never, // Type assertion needed for dynamic subtype
        ...data,
      } as CLIControlRequest['request'],
    };

    // Create promise for response
    const responsePromise = new Promise<Record<string, unknown> | null>(
      (resolve, reject) => {
        const abortController = new AbortController();
        const timeout = setTimeout(() => {
          this.pendingControlRequests.delete(requestId);
          reject(new Error(`Control request timeout: ${subtype}`));
        }, 300000); // 300 seconds (5 minutes)

        this.pendingControlRequests.set(requestId, {
          resolve,
          reject,
          timeout,
          abortController,
        });
      },
    );

    // Send request
    this.transport.write(serializeJsonLine(request));

    // Wait for response
    return responsePromise;
  }

  /**
   * Send control response to CLI
   */
  private async sendControlResponse(
    requestId: string,
    success: boolean,
    responseOrError: Record<string, unknown> | null | string,
  ): Promise<void> {
    const response: CLIControlResponse = {
      type: 'control_response',
      response: success
        ? {
            subtype: 'success',
            request_id: requestId,
            response: responseOrError as Record<string, unknown> | null,
          }
        : {
            subtype: 'error',
            request_id: requestId,
            error: responseOrError as string,
          },
    };

    this.transport.write(serializeJsonLine(response));
  }

  /**
   * Close the query and cleanup resources
   *
   * Idempotent - safe to call multiple times.
   */
  async close(): Promise<void> {
    if (this.closed) {
      return; // Already closed
    }

    this.closed = true;

    // Cancel pending control requests
    for (const pending of this.pendingControlRequests.values()) {
      pending.abortController.abort();
      clearTimeout(pending.timeout);
    }
    this.pendingControlRequests.clear();

    // Clear hook callbacks
    this.hookCallbacks.clear();

    // Close transport
    await this.transport.close();

    // Complete input stream - check if aborted first
    if (!this.inputStream.hasError) {
      if (this.abortController.signal.aborted) {
        this.inputStream.setError(new AbortError('Query aborted'));
      } else {
        this.inputStream.done();
      }
    }

    // Cleanup MCP transports
    for (const transport of this.sdkMcpTransports.values()) {
      try {
        await transport.close();
      } catch (error) {
        console.error('[Query] Error closing MCP transport:', error);
      }
    }
    this.sdkMcpTransports.clear();
  }

  /**
   * AsyncIterator protocol: next()
   */
  async next(): Promise<IteratorResult<CLIMessage>> {
    // Wait for initialization to complete if still in progress
    if (this.initialized) {
      await this.initialized;
    }

    return this.inputStream.next();
  }

  /**
   * AsyncIterable protocol: Symbol.asyncIterator
   */
  [Symbol.asyncIterator](): AsyncIterator<CLIMessage> {
    return this;
  }

  /**
   * Send follow-up messages for multi-turn conversations
   *
   * @param messages - Async iterable of user messages to send
   * @throws Error if query is closed
   */
  async streamInput(messages: AsyncIterable<CLIUserMessage>): Promise<void> {
    if (this.closed) {
      throw new Error('Query is closed');
    }

    try {
      // Wait for initialization to complete before sending messages
      // This prevents "write after end" errors when streamInput is called
      // with an empty iterable before initialization finishes
      if (this.initialized) {
        await this.initialized;
      }

      // Send all messages
      for await (const message of messages) {
        // Check if aborted
        if (this.abortController.signal.aborted) {
          break;
        }
        this.transport.write(serializeJsonLine(message));
      }

      // In multi-turn mode with MCP servers, wait for first result
      // to ensure MCP servers have time to process before next input
      if (
        !this.isSingleTurn &&
        this.sdkMcpTransports.size > 0 &&
        this.firstResultReceivedPromise
      ) {
        const STREAM_CLOSE_TIMEOUT = 10000; // 10 seconds

        await Promise.race([
          this.firstResultReceivedPromise,
          new Promise<void>((resolve) => {
            setTimeout(() => {
              resolve();
            }, STREAM_CLOSE_TIMEOUT);
          }),
        ]);
      }

      this.endInput();
    } catch (error) {
      // Check if aborted - if so, set abort error on stream
      if (this.abortController.signal.aborted) {
        console.log('[Query] Aborted during input streaming');
        this.inputStream.setError(
          new AbortError('Query aborted during input streaming'),
        );
        return;
      }
      throw error;
    }
  }
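A minimal multi-turn sketch of driving this method; `someQuery` is assumed to be an existing Query instance, and the message bodies are abbreviated (real `CLIUserMessage` fields follow types/protocol.ts):

```typescript
async function* turns(): AsyncIterable<CLIUserMessage> {
  yield {
    type: 'user',
    message: { role: 'user', content: 'List the files in this project.' },
  } as CLIUserMessage;
  yield {
    type: 'user',
    message: { role: 'user', content: 'Now summarize the largest one.' },
  } as CLIUserMessage;
}

// Each message is written as one JSON line; input is closed afterwards
// (after the first-result grace period when SDK MCP servers are attached).
await someQuery.streamInput(turns());
```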
|
||||
|
||||
/**
|
||||
* End input stream (close stdin to CLI)
|
||||
*
|
||||
* @throws Error if query is closed
|
||||
*/
|
||||
endInput(): void {
|
||||
if (this.closed) {
|
||||
throw new Error('Query is closed');
|
||||
}
|
||||
|
||||
if (
|
||||
'endInput' in this.transport &&
|
||||
typeof this.transport.endInput === 'function'
|
||||
) {
|
||||
(this.transport as TransportWithEndInput).endInput();
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Interrupt the current operation
|
||||
*
|
||||
* @throws Error if query is closed
|
||||
*/
|
||||
async interrupt(): Promise<void> {
|
||||
if (this.closed) {
|
||||
throw new Error('Query is closed');
|
||||
}
|
||||
|
||||
await this.sendControlRequest(ControlRequestType.INTERRUPT);
|
||||
}
|
||||
|
||||
/**
|
||||
* Set the permission mode for tool execution
|
||||
*
|
||||
* @param mode - Permission mode ('default' | 'plan' | 'auto-edit' | 'yolo')
|
||||
* @throws Error if query is closed
|
||||
*/
|
||||
async setPermissionMode(mode: string): Promise<void> {
|
||||
if (this.closed) {
|
||||
throw new Error('Query is closed');
|
||||
}
|
||||
|
||||
await this.sendControlRequest(ControlRequestType.SET_PERMISSION_MODE, {
|
||||
mode,
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Set the model for the current query
|
||||
*
|
||||
* @param model - Model name (e.g., 'qwen-2.5-coder-32b-instruct')
|
||||
* @throws Error if query is closed
|
||||
*/
|
||||
async setModel(model: string): Promise<void> {
|
||||
if (this.closed) {
|
||||
throw new Error('Query is closed');
|
||||
}
|
||||
|
||||
await this.sendControlRequest(ControlRequestType.SET_MODEL, { model });
|
||||
}
|
||||
|
||||
/**
|
||||
* Get list of control commands supported by the CLI
|
||||
*
|
||||
* @returns Promise resolving to list of supported command names
|
||||
* @throws Error if query is closed
|
||||
*/
|
||||
async supportedCommands(): Promise<Record<string, unknown> | null> {
|
||||
if (this.closed) {
|
||||
throw new Error('Query is closed');
|
||||
}
|
||||
|
||||
return this.sendControlRequest(ControlRequestType.SUPPORTED_COMMANDS);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the status of MCP servers
|
||||
*
|
||||
* @returns Promise resolving to MCP server status information
|
||||
* @throws Error if query is closed
|
||||
*/
|
||||
async mcpServerStatus(): Promise<Record<string, unknown> | null> {
|
||||
if (this.closed) {
|
||||
throw new Error('Query is closed');
|
||||
}
|
||||
|
||||
return this.sendControlRequest(ControlRequestType.MCP_SERVER_STATUS);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the session ID for this query
|
||||
*
|
||||
* @returns UUID session identifier
|
||||
*/
|
||||
getSessionId(): string {
|
||||
return this.sessionId;
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if the query has been closed
|
||||
*
|
||||
* @returns true if query is closed, false otherwise
|
||||
*/
|
||||
isClosed(): boolean {
|
||||
return this.closed;
|
||||
}
|
||||
}
|
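Taken together, the iterator protocol and the control methods above form the class's public surface. A minimal consumption sketch follows, assuming the `query()` factory from the next file and a hypothetical `@qwen-code/sdk` entry point (the package name is illustrative, not confirmed by this diff); the `msg.type === 'assistant'` check mirrors the factory's own doc example:

```typescript
import { query } from '@qwen-code/sdk'; // hypothetical package entry point

const q = query({
  prompt: 'Summarize the TODOs in this repo',
  options: { cwd: process.cwd(), permissionMode: 'plan' },
});

// The instance is its own async iterator (next + Symbol.asyncIterator),
// so it can be consumed directly with for await.
for await (const msg of q) {
  if (msg.type === 'assistant') {
    console.log(msg.message.content);
  }
}

// Introspection helpers remain usable after iteration completes.
console.log('session', q.getSessionId(), 'closed:', q.isClosed());
```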
@@ -1,206 +0,0 @@
/**
 * Factory function for creating Query instances.
 */

import type { CLIUserMessage } from '../types/protocol.js';
import { serializeJsonLine } from '../utils/jsonLines.js';
import type {
  CreateQueryOptions,
  PermissionMode,
  PermissionCallback,
  ExternalMcpServerConfig,
} from '../types/config.js';
import { ProcessTransport } from '../transport/ProcessTransport.js';
import { parseExecutableSpec } from '../utils/cliPath.js';
import { Query } from './Query.js';

/**
 * Configuration options for creating a Query.
 */
export type QueryOptions = {
  cwd?: string;
  model?: string;
  pathToQwenExecutable?: string;
  env?: Record<string, string>;
  permissionMode?: PermissionMode;
  canUseTool?: PermissionCallback;
  mcpServers?: Record<string, ExternalMcpServerConfig>;
  sdkMcpServers?: Record<
    string,
    { connect: (transport: unknown) => Promise<void> }
  >;
  abortController?: AbortController;
  debug?: boolean;
  stderr?: (message: string) => void;
};

/**
 * Create a Query instance for interacting with the Qwen CLI.
 *
 * Supports both single-turn (string) and multi-turn (AsyncIterable) prompts.
 *
 * @example
 * ```typescript
 * const q = query({
 *   prompt: 'What files are in this directory?',
 *   options: { cwd: process.cwd() },
 * });
 *
 * for await (const msg of q) {
 *   if (msg.type === 'assistant') {
 *     console.log(msg.message.content);
 *   }
 * }
 * ```
 */
export function query({
  prompt,
  options = {},
}: {
  prompt: string | AsyncIterable<CLIUserMessage>;
  options?: QueryOptions;
}): Query {
  // Validate options and obtain normalized executable metadata
  const parsedExecutable = validateOptions(options);

  // Determine if this is a single-turn or multi-turn query
  // Single-turn: string prompt (simple Q&A)
  // Multi-turn: AsyncIterable prompt (streaming conversation)
  const isSingleTurn = typeof prompt === 'string';

  // Build CreateQueryOptions
  const queryOptions: CreateQueryOptions = {
    ...options,
    singleTurn: isSingleTurn,
  };

  // Resolve CLI specification while preserving explicit runtime directives
  const pathToQwenExecutable =
    options.pathToQwenExecutable ?? parsedExecutable.executablePath;

  // Use provided abortController or create a new one
  const abortController = options.abortController ?? new AbortController();

  // Create transport with abortController
  const transport = new ProcessTransport({
    pathToQwenExecutable,
    cwd: options.cwd,
    model: options.model,
    permissionMode: options.permissionMode,
    mcpServers: options.mcpServers,
    env: options.env,
    abortController,
    debug: options.debug,
    stderr: options.stderr,
  });

  // Build query options with abortController
  const finalQueryOptions: CreateQueryOptions = {
    ...queryOptions,
    abortController,
  };

  // Create Query
  const queryInstance = new Query(transport, finalQueryOptions);

  // Handle prompt based on type
  if (isSingleTurn) {
    // For single-turn queries, send the prompt directly via transport
    const stringPrompt = prompt as string;
    const message: CLIUserMessage = {
      type: 'user',
      session_id: queryInstance.getSessionId(),
      message: {
        role: 'user',
        content: stringPrompt,
      },
      parent_tool_use_id: null,
    };

    (async () => {
      try {
        await new Promise((resolve) => setTimeout(resolve, 0));
        transport.write(serializeJsonLine(message));
      } catch (err) {
        console.error('[query] Error sending single-turn prompt:', err);
      }
    })();
  } else {
    // For multi-turn queries, stream the input
    queryInstance
      .streamInput(prompt as AsyncIterable<CLIUserMessage>)
      .catch((err) => {
        console.error('[query] Error streaming input:', err);
      });
  }

  return queryInstance;
}

/**
 * Backward compatibility alias
 * @deprecated Use query() instead
 */
export const createQuery = query;

/**
 * Validate query configuration options and normalize CLI executable details.
 *
 * Performs strict validation for each supported option, including
 * permission mode, callbacks, AbortController usage, and executable spec.
 * Returns the parsed executable description so callers can retain
 * explicit runtime directives (e.g., `bun:/path/to/cli.js`) while still
 * benefiting from early validation and auto-detection fallbacks when the
 * specification is omitted.
 */
function validateOptions(
  options: QueryOptions,
): ReturnType<typeof parseExecutableSpec> {
  let parsedExecutable: ReturnType<typeof parseExecutableSpec>;

  // Validate permission mode if provided
  if (options.permissionMode) {
    const validModes = ['default', 'plan', 'auto-edit', 'yolo'];
    if (!validModes.includes(options.permissionMode)) {
      throw new Error(
        `Invalid permissionMode: ${options.permissionMode}. Valid values are: ${validModes.join(', ')}`,
      );
    }
  }

  // Validate canUseTool is a function if provided
  if (options.canUseTool && typeof options.canUseTool !== 'function') {
    throw new Error('canUseTool must be a function');
  }

  // Validate abortController is AbortController if provided
  if (
    options.abortController &&
    !(options.abortController instanceof AbortController)
  ) {
    throw new Error('abortController must be an AbortController instance');
  }

  // Validate executable path early to provide clear error messages
  try {
    parsedExecutable = parseExecutableSpec(options.pathToQwenExecutable);
  } catch (error) {
    const errorMessage = error instanceof Error ? error.message : String(error);
    throw new Error(`Invalid pathToQwenExecutable: ${errorMessage}`);
  }

  // Validate no MCP server name conflicts
  if (options.mcpServers && options.sdkMcpServers) {
    const externalNames = Object.keys(options.mcpServers);
    const sdkNames = Object.keys(options.sdkMcpServers);

    const conflicts = externalNames.filter((name) => sdkNames.includes(name));
    if (conflicts.length > 0) {
      throw new Error(
        `MCP server name conflicts between mcpServers and sdkMcpServers: ${conflicts.join(', ')}`,
      );
    }
  }

  return parsedExecutable;
}
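The synchronous guard rails in `validateOptions` are easiest to see by example. A sketch of two failure modes, reusing the hypothetical `@qwen-code/sdk` entry point from the earlier example (the `as never` cast exists only to defeat the compile-time check):

```typescript
import { query } from '@qwen-code/sdk'; // hypothetical entry point

try {
  // Rejected before any subprocess is spawned
  query({ prompt: 'hi', options: { permissionMode: 'unsafe' as never } });
} catch (err) {
  // Invalid permissionMode: unsafe. Valid values are: default, plan, auto-edit, yolo
  console.error(err);
}

try {
  // 'fs' is registered both externally and in-process
  query({
    prompt: 'hi',
    options: {
      mcpServers: { fs: { command: 'mcp-server-filesystem' } },
      sdkMcpServers: { fs: { connect: async () => {} } },
    },
  });
} catch (err) {
  // MCP server name conflicts between mcpServers and sdkMcpServers: fs
  console.error(err);
}
```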
@@ -1,480 +0,0 @@
/**
 * ProcessTransport - Subprocess-based transport for SDK-CLI communication
 *
 * Manages CLI subprocess lifecycle and provides IPC via stdin/stdout using the JSON Lines protocol.
 */

import { spawn, type ChildProcess } from 'node:child_process';
import * as readline from 'node:readline';
import type { Writable, Readable } from 'node:stream';
import type { TransportOptions } from '../types/config.js';
import type { Transport } from './Transport.js';
import { parseJsonLinesStream } from '../utils/jsonLines.js';
import { prepareSpawnInfo } from '../utils/cliPath.js';
import { AbortError } from '../types/errors.js';

/**
 * Exit listener type
 */
type ExitListener = {
  callback: (error?: Error) => void;
  handler: (code: number | null, signal: NodeJS.Signals | null) => void;
};

/**
 * ProcessTransport implementation
 *
 * Lifecycle:
 * 1. Created with options
 * 2. start() spawns subprocess
 * 3. isReady becomes true
 * 4. write() sends messages to stdin
 * 5. readMessages() yields messages from stdout
 * 6. close() gracefully shuts down (SIGTERM → SIGKILL)
 * 7. waitForExit() resolves when cleanup complete
 */
export class ProcessTransport implements Transport {
  private childProcess: ChildProcess | null = null;
  private options: TransportOptions;
  private _isReady = false;
  private _exitError: Error | null = null;
  private exitPromise: Promise<void> | null = null;
  private exitResolve: (() => void) | null = null;
  private cleanupCallbacks: Array<() => void> = [];
  private closed = false;
  private abortController: AbortController | null = null;
  private exitListeners: ExitListener[] = [];

  constructor(options: TransportOptions) {
    this.options = options;
  }

  /**
   * Start the transport by spawning CLI subprocess
   */
  async start(): Promise<void> {
    if (this.childProcess) {
      return; // Already started
    }

    // Use provided abortController or create a new one
    this.abortController =
      this.options.abortController ?? new AbortController();

    // Check if already aborted
    if (this.abortController.signal.aborted) {
      throw new AbortError('Transport start aborted');
    }

    const cliArgs = this.buildCliArguments();
    const cwd = this.options.cwd ?? process.cwd();
    const env = { ...process.env, ...this.options.env };

    // Setup abort handler
    this.abortController.signal.addEventListener('abort', () => {
      this.logForDebugging('Transport aborted by user');
      this._exitError = new AbortError('Operation aborted by user');
      this._isReady = false;
      void this.close();
    });

    // Create exit promise
    this.exitPromise = new Promise<void>((resolve) => {
      this.exitResolve = resolve;
    });

    try {
      // Detect executable type and prepare spawn info
      const spawnInfo = prepareSpawnInfo(this.options.pathToQwenExecutable);

      const stderrMode =
        this.options.debug || this.options.stderr ? 'pipe' : 'ignore';

      this.logForDebugging(
        `Spawning CLI (${spawnInfo.type}): ${spawnInfo.command} ${[...spawnInfo.args, ...cliArgs].join(' ')}`,
      );

      // Spawn CLI subprocess with appropriate command and args
      this.childProcess = spawn(
        spawnInfo.command,
        [...spawnInfo.args, ...cliArgs],
        {
          cwd,
          env,
          stdio: ['pipe', 'pipe', stderrMode],
          // Use AbortController signal
          signal: this.abortController.signal,
        },
      );

      // Handle stderr for debugging
      if (this.options.debug || this.options.stderr) {
        this.childProcess.stderr?.on('data', (data) => {
          this.logForDebugging(data.toString());
        });
      }

      // Setup event handlers
      this.setupEventHandlers();

      // Mark as ready
      this._isReady = true;

      // Register cleanup on parent process exit
      this.registerParentExitHandler();
    } catch (error) {
      const errorMessage =
        error instanceof Error ? error.message : String(error);
      throw new Error(`Failed to spawn CLI process: ${errorMessage}`);
    }
  }

  /**
   * Setup event handlers for child process
   */
  private setupEventHandlers(): void {
    if (!this.childProcess) return;

    // Handle process errors
    this.childProcess.on('error', (error) => {
      if (this.abortController?.signal.aborted) {
        this._exitError = new AbortError('CLI process aborted by user');
      } else {
        this._exitError = new Error(`CLI process error: ${error.message}`);
      }
      this._isReady = false;
      this.logForDebugging(`Process error: ${error.message}`);
    });

    // Handle process exit
    this.childProcess.on('exit', (code, signal) => {
      this._isReady = false;

      // Check if aborted
      if (this.abortController?.signal.aborted) {
        this._exitError = new AbortError('CLI process aborted by user');
      } else if (code !== null && code !== 0 && !this.closed) {
        this._exitError = new Error(`CLI process exited with code ${code}`);
        this.logForDebugging(`Process exited with code ${code}`);
      } else if (signal && !this.closed) {
        this._exitError = new Error(`CLI process killed by signal ${signal}`);
        this.logForDebugging(`Process killed by signal ${signal}`);
      }

      // Notify exit listeners
      const error = this._exitError;
      for (const listener of this.exitListeners) {
        try {
          listener.callback(error || undefined);
        } catch (err) {
          this.logForDebugging(`Exit listener error: ${err}`);
        }
      }

      // Resolve exit promise
      if (this.exitResolve) {
        this.exitResolve();
      }
    });
  }

  /**
   * Register cleanup handler on parent process exit
   */
  private registerParentExitHandler(): void {
    const cleanup = (): void => {
      if (this.childProcess && !this.childProcess.killed) {
        this.childProcess.kill('SIGKILL');
      }
    };

    process.on('exit', cleanup);
    this.cleanupCallbacks.push(() => {
      process.off('exit', cleanup);
    });
  }

  /**
   * Build CLI command-line arguments
   */
  private buildCliArguments(): string[] {
    const args: string[] = [
      '--input-format',
      'stream-json',
      '--output-format',
      'stream-json',
    ];

    // Add model if specified
    if (this.options.model) {
      args.push('--model', this.options.model);
    }

    // Add permission mode if specified
    if (this.options.permissionMode) {
      args.push('--approval-mode', this.options.permissionMode);
    }

    // Add MCP servers if specified
    if (this.options.mcpServers) {
      for (const [name, config] of Object.entries(this.options.mcpServers)) {
        args.push('--mcp-server', JSON.stringify({ name, ...config }));
      }
    }

    return args;
  }

  /**
   * Close the transport gracefully
   */
  async close(): Promise<void> {
    if (this.closed || !this.childProcess) {
      return; // Already closed or never started
    }

    this.closed = true;
    this._isReady = false;

    // Clean up exit listeners
    for (const { handler } of this.exitListeners) {
      this.childProcess?.off('exit', handler);
    }
    this.exitListeners = [];

    // Send SIGTERM for graceful shutdown
    this.childProcess.kill('SIGTERM');

    // Wait 5 seconds, then force kill if still alive
    const forceKillTimeout = setTimeout(() => {
      if (this.childProcess && !this.childProcess.killed) {
        this.childProcess.kill('SIGKILL');
      }
    }, 5000);

    // Wait for exit
    await this.waitForExit();

    // Clear timeout
    clearTimeout(forceKillTimeout);

    // Run cleanup callbacks
    for (const callback of this.cleanupCallbacks) {
      callback();
    }
    this.cleanupCallbacks = [];
  }

  /**
   * Wait for process to fully exit
   */
  async waitForExit(): Promise<void> {
    if (this.exitPromise) {
      await this.exitPromise;
    }
  }

  /**
   * Write a message to stdin
   */
  write(message: string): void {
    // Check abort status
    if (this.abortController?.signal.aborted) {
      throw new AbortError('Cannot write: operation aborted');
    }

    if (!this._isReady || !this.childProcess?.stdin) {
      throw new Error('Transport not ready for writing');
    }

    if (this.closed) {
      throw new Error('Cannot write to closed transport');
    }

    if (this.childProcess?.killed || this.childProcess?.exitCode !== null) {
      throw new Error('Cannot write to terminated process');
    }

    if (this._exitError) {
      throw new Error(
        `Cannot write to process that exited with error: ${this._exitError.message}`,
      );
    }

    if (process.env['DEBUG_SDK']) {
      this.logForDebugging(
        `[ProcessTransport] Writing to stdin: ${message.substring(0, 100)}`,
      );
    }

    try {
      const written = this.childProcess.stdin.write(message + '\n', (err) => {
        if (err) {
          // Throwing here would escape the surrounding try/catch (the
          // callback fires asynchronously), so record the failure instead.
          this._exitError = new Error(`Failed to write to stdin: ${err.message}`);
          this._isReady = false;
          this.logForDebugging(`Failed to write to stdin: ${err.message}`);
        }
      });
      if (!written && process.env['DEBUG_SDK']) {
        this.logForDebugging(
          '[ProcessTransport] Write buffer full, data queued',
        );
      }
    } catch (error) {
      this._isReady = false;
      throw new Error(
        `Failed to write to stdin: ${error instanceof Error ? error.message : String(error)}`,
      );
    }
  }

  /**
   * Read messages from stdout as async generator
   */
  async *readMessages(): AsyncGenerator<unknown, void, unknown> {
    if (!this.childProcess?.stdout) {
      throw new Error('Cannot read messages: process not started');
    }

    const rl = readline.createInterface({
      input: this.childProcess.stdout,
      crlfDelay: Infinity,
    });

    try {
      // Use JSON Lines parser
      for await (const message of parseJsonLinesStream(
        rl,
        'ProcessTransport',
      )) {
        yield message;
      }

      await this.waitForExit();
    } finally {
      rl.close();
    }
  }

  /**
   * Check if transport is ready for I/O
   */
  get isReady(): boolean {
    return this._isReady;
  }

  /**
   * Get exit error (if any)
   */
  get exitError(): Error | null {
    return this._exitError;
  }

  /**
   * Get child process (for testing)
   */
  get process(): ChildProcess | null {
    return this.childProcess;
  }

  /**
   * Get path to qwen executable
   */
  get pathToQwenExecutable(): string {
    return this.options.pathToQwenExecutable;
  }

  /**
   * Get CLI arguments
   */
  get cliArgs(): readonly string[] {
    return this.buildCliArguments();
  }

  /**
   * Get working directory
   */
  get cwd(): string {
    return this.options.cwd ?? process.cwd();
  }

  /**
   * Register a callback to be invoked when the process exits
   *
   * @param callback - Function to call on exit, receives error if abnormal exit
   * @returns Cleanup function to remove the listener
   */
  onExit(callback: (error?: Error) => void): () => void {
    if (!this.childProcess) {
      return () => {}; // No-op if process not started
    }

    const handler = (code: number | null, signal: NodeJS.Signals | null) => {
      let error: Error | undefined;

      if (this.abortController?.signal.aborted) {
        error = new AbortError('Process aborted by user');
      } else if (code !== null && code !== 0) {
        error = new Error(`Process exited with code ${code}`);
      } else if (signal) {
        error = new Error(`Process killed by signal ${signal}`);
      }

      callback(error);
    };

    this.childProcess.on('exit', handler);
    this.exitListeners.push({ callback, handler });

    // Return cleanup function
    return () => {
      if (this.childProcess) {
        this.childProcess.off('exit', handler);
      }
      const index = this.exitListeners.findIndex((l) => l.handler === handler);
      if (index !== -1) {
        this.exitListeners.splice(index, 1);
      }
    };
  }

  /**
   * End input stream (close stdin)
   * Useful when you want to signal no more input will be sent
   */
  endInput(): void {
    if (this.childProcess?.stdin) {
      this.childProcess.stdin.end();
    }
  }

  /**
   * Get direct access to stdin stream
   * Use with caution - prefer write() method for normal use
   *
   * @returns Writable stream for stdin, or undefined if not available
   */
  getInputStream(): Writable | undefined {
    return this.childProcess?.stdin || undefined;
  }

  /**
   * Get direct access to stdout stream
   * Use with caution - prefer readMessages() for normal use
   *
   * @returns Readable stream for stdout, or undefined if not available
   */
  getOutputStream(): Readable | undefined {
    return this.childProcess?.stdout || undefined;
  }

  /**
   * Log message for debugging (if debug enabled)
   */
  private logForDebugging(message: string): void {
    if (this.options.debug || process.env['DEBUG']) {
      process.stderr.write(`[ProcessTransport] ${message}\n`);
    }
    if (this.options.stderr) {
      this.options.stderr(message);
    }
  }
}
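For testing or low-level integrations, the transport can also be driven without a Query. A sketch under stated assumptions: `qwen` resolves on PATH, and the hand-rolled JSON Lines payload is purely illustrative (real messages come from `serializeJsonLine()` with a proper session ID):

```typescript
import { ProcessTransport } from './transport/ProcessTransport.js';

const transport = new ProcessTransport({
  pathToQwenExecutable: 'qwen', // assumes `qwen` is on PATH
  debug: true,
});

await transport.start();

// Illustrative payload only; the real wire format is produced by the SDK.
transport.write(
  JSON.stringify({ type: 'user', message: { role: 'user', content: 'hi' } }),
);
transport.endInput(); // signal no further input

// Yields parsed JSON objects until the subprocess exits.
for await (const msg of transport.readMessages()) {
  console.log('CLI says:', msg);
}

await transport.close();
```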
@@ -1,102 +0,0 @@
/**
 * Transport interface for SDK-CLI communication
 *
 * The Transport abstraction enables communication between SDK and CLI via different mechanisms:
 * - ProcessTransport: Local subprocess via stdin/stdout (initial implementation)
 * - HttpTransport: Remote CLI via HTTP (future)
 * - WebSocketTransport: Remote CLI via WebSocket (future)
 */

/**
 * Abstract Transport interface
 *
 * Provides bidirectional communication with lifecycle management.
 * Implements async generator pattern for reading messages with automatic backpressure.
 */
export interface Transport {
  /**
   * Initialize and start the transport.
   *
   * For ProcessTransport: spawns CLI subprocess
   * For HttpTransport: establishes HTTP connection
   * For WebSocketTransport: opens WebSocket connection
   *
   * Must be called before write() or readMessages().
   *
   * @throws Error if transport cannot be started
   */
  start(): Promise<void>;

  /**
   * Close the transport gracefully.
   *
   * For ProcessTransport: sends SIGTERM, waits 5s, then SIGKILL
   * For HttpTransport: sends close request, closes connection
   * For WebSocketTransport: sends close frame
   *
   * Idempotent - safe to call multiple times.
   */
  close(): Promise<void>;

  /**
   * Wait for transport to fully exit and cleanup.
   *
   * Resolves when all resources are cleaned up:
   * - Process has exited (ProcessTransport)
   * - Connection is closed (Http/WebSocketTransport)
   * - All cleanup callbacks have run
   *
   * @returns Promise that resolves when exit is complete
   */
  waitForExit(): Promise<void>;

  /**
   * Write a message to the transport.
   *
   * For ProcessTransport: writes to stdin
   * For HttpTransport: sends HTTP request
   * For WebSocketTransport: sends WebSocket message
   *
   * Message format: JSON Lines (one JSON object per line)
   *
   * @param message - Serialized JSON message (without trailing newline)
   * @throws Error if transport is not ready or closed
   */
  write(message: string): void;

  /**
   * Read messages from transport as async generator.
   *
   * Yields messages as they arrive, supporting natural backpressure via async iteration.
   * Generator completes when transport closes.
   *
   * For ProcessTransport: reads from stdout using readline
   * For HttpTransport: reads from chunked HTTP response
   * For WebSocketTransport: reads from WebSocket messages
   *
   * Message format: JSON Lines (one JSON object per line)
   * Malformed JSON lines are logged and skipped.
   *
   * @yields Parsed JSON messages
   * @throws Error if transport encounters fatal error
   */
  readMessages(): AsyncGenerator<unknown, void, unknown>;

  /**
   * Whether transport is ready for I/O operations.
   *
   * true: write() and readMessages() can be called
   * false: transport not started or has failed
   */
  readonly isReady: boolean;

  /**
   * Error that caused transport to exit unexpectedly (if any).
   *
   * null: transport exited normally or still running
   * Error: transport failed with this error
   *
   * Useful for diagnostics when transport closes unexpectedly.
   */
  readonly exitError: Error | null;
}
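The contract is small enough that an in-memory implementation fits in a few dozen lines. A loopback sketch suitable for unit tests, not part of the SDK, which echoes every written line back through `readMessages()`:

```typescript
import type { Transport } from './Transport.js';

// Minimal in-memory loopback transport: write() parses each JSON line and
// queues it; readMessages() drains the queue until close() is called.
export class LoopbackTransport implements Transport {
  private queue: unknown[] = [];
  private notify: (() => void) | null = null;
  private _closed = false;
  private _isReady = false;

  async start(): Promise<void> {
    this._isReady = true;
  }

  async close(): Promise<void> {
    this._closed = true;
    this._isReady = false;
    this.notify?.(); // wake the reader so it can observe the close
  }

  async waitForExit(): Promise<void> {
    // Nothing to clean up for an in-memory transport
  }

  write(message: string): void {
    if (!this._isReady) throw new Error('Transport not ready for writing');
    this.queue.push(JSON.parse(message)); // malformed JSON throws here
    this.notify?.();
  }

  async *readMessages(): AsyncGenerator<unknown, void, unknown> {
    while (!this._closed) {
      while (this.queue.length > 0) {
        yield this.queue.shift();
      }
      // Park until the next write() or close() wakes us
      await new Promise<void>((resolve) => (this.notify = resolve));
    }
  }

  get isReady(): boolean {
    return this._isReady;
  }

  get exitError(): Error | null {
    return null; // a loopback never exits abnormally
  }
}
```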
@@ -1,145 +0,0 @@
/**
 * Configuration types for SDK
 */

import type { ToolDefinition as ToolDef } from './mcp.js';
import type { PermissionMode } from './protocol.js';

export type { ToolDef as ToolDefinition };
export type { PermissionMode };

/**
 * Permission callback function
 * Called before each tool execution to determine if it should be allowed
 *
 * @param toolName - Name of the tool being executed
 * @param input - Input parameters for the tool
 * @param options - Additional options (signal for cancellation, suggestions)
 * @returns Promise<boolean|unknown> or boolean|unknown - true to allow, false to deny, or custom response
 */
export type PermissionCallback = (
  toolName: string,
  input: Record<string, unknown>,
  options?: {
    signal?: AbortSignal;
    suggestions?: unknown;
  },
) => Promise<boolean | unknown> | boolean | unknown;

/**
 * Hook callback function
 * Called at specific points in tool execution lifecycle
 *
 * @param input - Hook input data
 * @param toolUseId - Tool execution ID (null if not associated with a tool)
 * @param options - Options including abort signal
 * @returns Promise with hook result
 */
export type HookCallback = (
  input: unknown,
  toolUseId: string | null,
  options: { signal: AbortSignal },
) => Promise<unknown>;

/**
 * Hook matcher configuration
 */
export interface HookMatcher {
  matcher: Record<string, unknown>;
  hooks: HookCallback[];
}

/**
 * Hook configuration by event type
 */
export type HookConfig = {
  [event: string]: HookMatcher[];
};

/**
 * External MCP server configuration (spawned by CLI)
 */
export type ExternalMcpServerConfig = {
  /** Command to execute (e.g., 'mcp-server-filesystem') */
  command: string;
  /** Command-line arguments */
  args?: string[];
  /** Environment variables */
  env?: Record<string, string>;
};

/**
 * Options for creating a Query instance
 */
export type CreateQueryOptions = {
  // Basic configuration
  /** Working directory for CLI execution */
  cwd?: string;
  /** Model name (e.g., 'qwen-2.5-coder-32b-instruct') */
  model?: string;

  // Transport configuration
  /** Path to qwen executable (auto-detected if omitted) */
  pathToQwenExecutable?: string;
  /** Environment variables for CLI process */
  env?: Record<string, string>;

  // Permission control
  /** Permission mode ('default' | 'plan' | 'auto-edit' | 'yolo') */
  permissionMode?: PermissionMode;
  /** Callback invoked before each tool execution */
  canUseTool?: PermissionCallback;

  // Hook system
  /** Hook configuration for tool execution lifecycle */
  hooks?: HookConfig;

  // MCP server configuration
  /** External MCP servers (spawned by CLI) */
  mcpServers?: Record<string, ExternalMcpServerConfig>;
  /** SDK-embedded MCP servers (run in Node.js process) */
  sdkMcpServers?: Record<
    string,
    { connect: (transport: unknown) => Promise<void> }
  >; // Server from @modelcontextprotocol/sdk

  // Conversation mode
  /**
   * Single-turn mode: automatically close input after receiving result
   * Multi-turn mode: keep input open for follow-up messages
   * @default false (multi-turn)
   */
  singleTurn?: boolean;

  // Advanced options
  /** AbortController for cancellation support */
  abortController?: AbortController;
  /** Enable debug output (inherits stderr) */
  debug?: boolean;
  /** Callback for stderr output */
  stderr?: (message: string) => void;
};

/**
 * Transport options for ProcessTransport
 */
export type TransportOptions = {
  /** Path to qwen executable */
  pathToQwenExecutable: string;
  /** Working directory for CLI execution */
  cwd?: string;
  /** Model name */
  model?: string;
  /** Permission mode */
  permissionMode?: PermissionMode;
  /** External MCP servers */
  mcpServers?: Record<string, ExternalMcpServerConfig>;
  /** Environment variables */
  env?: Record<string, string>;
  /** AbortController for cancellation support */
  abortController?: AbortController;
  /** Enable debug output */
  debug?: boolean;
  /** Callback for stderr output */
  stderr?: (message: string) => void;
};
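A sketch of a populated options object. The tool name checked in `canUseTool` is an assumption made for illustration; the return semantics follow the `PermissionCallback` doc above (true to allow, false to deny):

```typescript
import type { CreateQueryOptions } from './types/config.js';

const options: CreateQueryOptions = {
  cwd: '/tmp/work',
  model: 'qwen-2.5-coder-32b-instruct',
  permissionMode: 'default',
  // Deny one tool by name, allow everything else; honor cancellation.
  canUseTool: (toolName, _input, opts) => {
    if (opts?.signal?.aborted) return false;
    return toolName !== 'run_shell_command'; // tool name is an assumption
  },
  mcpServers: {
    filesystem: {
      command: 'mcp-server-filesystem',
      args: ['--root', '/tmp/work'],
    },
  },
};
```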
@@ -1,50 +0,0 @@
/**
 * @license
 * Copyright 2025 Qwen Team
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * Control Request Types
 *
 * Centralized enum for all control request subtypes supported by the CLI.
 * This enum should be kept in sync with the controllers in:
 * - packages/cli/src/services/control/controllers/systemController.ts
 * - packages/cli/src/services/control/controllers/permissionController.ts
 * - packages/cli/src/services/control/controllers/mcpController.ts
 * - packages/cli/src/services/control/controllers/hookController.ts
 */
export enum ControlRequestType {
  // SystemController requests
  INITIALIZE = 'initialize',
  INTERRUPT = 'interrupt',
  SET_MODEL = 'set_model',
  SUPPORTED_COMMANDS = 'supported_commands',

  // PermissionController requests
  CAN_USE_TOOL = 'can_use_tool',
  SET_PERMISSION_MODE = 'set_permission_mode',

  // MCPController requests
  MCP_MESSAGE = 'mcp_message',
  MCP_SERVER_STATUS = 'mcp_server_status',

  // HookController requests
  HOOK_CALLBACK = 'hook_callback',
}

/**
 * Get all available control request types as a string array
 */
export function getAllControlRequestTypes(): string[] {
  return Object.values(ControlRequestType);
}

/**
 * Check if a string is a valid control request type
 */
export function isValidControlRequestType(
  type: string,
): type is ControlRequestType {
  return getAllControlRequestTypes().includes(type);
}
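A sketch of how a dispatcher might use the guard to narrow an untrusted subtype string before routing it (the import path is an assumption):

```typescript
import {
  ControlRequestType,
  isValidControlRequestType,
} from './types/controlRequests.js'; // path is an assumption

function dispatch(subtype: string): void {
  if (!isValidControlRequestType(subtype)) {
    throw new Error(`Unknown control request subtype: ${subtype}`);
  }
  // subtype is now typed as ControlRequestType
  if (subtype === ControlRequestType.INTERRUPT) {
    // ...route to the system controller
  }
}
```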
@@ -1,27 +0,0 @@
/**
 * Error types for SDK
 */

/**
 * Error thrown when an operation is aborted via AbortSignal
 */
export class AbortError extends Error {
  constructor(message = 'Operation aborted') {
    super(message);
    this.name = 'AbortError';
    Object.setPrototypeOf(this, AbortError.prototype);
  }
}

/**
 * Check if an error is an AbortError
 */
export function isAbortError(error: unknown): error is AbortError {
  return (
    error instanceof AbortError ||
    (typeof error === 'object' &&
      error !== null &&
      'name' in error &&
      error.name === 'AbortError')
  );
}
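Because `isAbortError` also matches plain objects whose `name` is `'AbortError'`, it covers DOM-style aborts (e.g., from `fetch` or `child_process`) as well as the SDK's own subclass. A sketch of the usual catch-block pattern (import path assumed):

```typescript
import { isAbortError } from './types/errors.js'; // path is an assumption

async function run(work: () => Promise<void>): Promise<void> {
  try {
    await work();
  } catch (error) {
    if (isAbortError(error)) {
      // Cancellation is expected; swallow it rather than propagating
      console.log('cancelled');
      return;
    }
    throw error; // genuine failures still surface
  }
}
```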
@@ -1,32 +0,0 @@
/**
 * MCP integration types for SDK
 */

/**
 * JSON Schema definition
 * Used for tool input validation
 */
export type JSONSchema = {
  type: string;
  properties?: Record<string, unknown>;
  required?: string[];
  description?: string;
  [key: string]: unknown;
};

/**
 * Tool definition for SDK-embedded MCP servers
 *
 * @template TInput - Type of tool input (inferred from handler)
 * @template TOutput - Type of tool output (inferred from handler return)
 */
export type ToolDefinition<TInput = unknown, TOutput = unknown> = {
  /** Unique tool name */
  name: string;
  /** Human-readable description (helps agent decide when to use it) */
  description: string;
  /** JSON Schema for input validation */
  inputSchema: JSONSchema;
  /** Async handler function that executes the tool */
  handler: (input: TInput) => Promise<TOutput>;
};
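A sketch of a concrete `ToolDefinition`, with the schema mirroring the generic input type (the tool itself is invented for illustration):

```typescript
import type { ToolDefinition } from './types/mcp.js';

// Handler input/output types flow from the generic parameters;
// the inputSchema describes the same shape for runtime validation.
const addTool: ToolDefinition<{ a: number; b: number }, { sum: number }> = {
  name: 'add',
  description: 'Add two numbers and return their sum',
  inputSchema: {
    type: 'object',
    properties: {
      a: { type: 'number' },
      b: { type: 'number' },
    },
    required: ['a', 'b'],
  },
  handler: async ({ a, b }) => ({ sum: a + b }),
};
```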
@@ -1,50 +0,0 @@
|
||||
/**
|
||||
* Protocol types for SDK-CLI communication
|
||||
*
|
||||
* Re-exports protocol types from CLI package to ensure SDK and CLI use identical types.
|
||||
*/
|
||||
|
||||
export type {
|
||||
ContentBlock,
|
||||
TextBlock,
|
||||
ThinkingBlock,
|
||||
ToolUseBlock,
|
||||
ToolResultBlock,
|
||||
CLIUserMessage,
|
||||
CLIAssistantMessage,
|
||||
CLISystemMessage,
|
||||
CLIResultMessage,
|
||||
CLIPartialAssistantMessage,
|
||||
CLIMessage,
|
||||
PermissionMode,
|
||||
PermissionSuggestion,
|
||||
PermissionApproval,
|
||||
HookRegistration,
|
||||
CLIControlInterruptRequest,
|
||||
CLIControlPermissionRequest,
|
||||
CLIControlInitializeRequest,
|
||||
CLIControlSetPermissionModeRequest,
|
||||
CLIHookCallbackRequest,
|
||||
CLIControlMcpMessageRequest,
|
||||
CLIControlSetModelRequest,
|
||||
CLIControlMcpStatusRequest,
|
||||
CLIControlSupportedCommandsRequest,
|
||||
ControlRequestPayload,
|
||||
CLIControlRequest,
|
||||
ControlResponse,
|
||||
ControlErrorResponse,
|
||||
CLIControlResponse,
|
||||
ControlCancelRequest,
|
||||
ControlMessage,
|
||||
} from '@qwen-code/qwen-code/protocol';
|
||||
|
||||
export {
|
||||
isCLIUserMessage,
|
||||
isCLIAssistantMessage,
|
||||
isCLISystemMessage,
|
||||
isCLIResultMessage,
|
||||
isCLIPartialAssistantMessage,
|
||||
isControlRequest,
|
||||
isControlResponse,
|
||||
isControlCancel,
|
||||
} from '@qwen-code/qwen-code/protocol';
|
||||
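A sketch of routing with the re-exported type guards; after each guard the message narrows to the corresponding concrete message type:

```typescript
import {
  isCLIAssistantMessage,
  isCLIResultMessage,
  type CLIMessage,
} from '@qwen-code/qwen-code/protocol';

// Route a raw protocol message; each guard narrows the union.
function route(msg: CLIMessage): void {
  if (isCLIAssistantMessage(msg)) {
    console.log('assistant turn');
  } else if (isCLIResultMessage(msg)) {
    console.log('final result');
  }
}
```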
Some files were not shown because too many files have changed in this diff.