Mirror of https://github.com/QwenLM/qwen-code.git (synced 2025-12-23 10:17:50 +00:00)

Compare commits: adjust-doc ... v0.1.2 (25 commits)
Commits in this range (SHA1; author and date were not captured):

- 908ac5e1b0
- ea4a7a2368
- 5386099559
- 495a9d6d92
- db58aaff3a
- 817218f1cf
- 7843de882a
- ced79cf4e3
- 33e22713a0
- 92245f0f00
- 4f35f7431a
- 84957bbb50
- c1164bdd7e
- f8be8a61c8
- c884dc080b
- 32a71986d5
- 6da6bc0dfd
- 7ccba75621
- e0e5fa5084
- 65cf80f4ab
- 1577dabf41
- 4328cd7f63
- 2a5577e5d7
- be633a80cc
- 5cf609c367
.github/workflows/release.yml (vendored, 52 changed lines)
@@ -101,15 +101,27 @@ jobs:
       - name: 'Get the version'
         id: 'version'
         run: |
-          VERSION_JSON=$(node scripts/get-release-version.js)
+          VERSION_ARGS=()
+          if [[ "${IS_NIGHTLY}" == "true" ]]; then
+            VERSION_ARGS+=(--type=nightly)
+          elif [[ "${IS_PREVIEW}" == "true" ]]; then
+            VERSION_ARGS+=(--type=preview)
+            if [[ -n "${MANUAL_VERSION}" ]]; then
+              VERSION_ARGS+=("--preview_version_override=${MANUAL_VERSION}")
+            fi
+          else
+            VERSION_ARGS+=(--type=stable)
+            if [[ -n "${MANUAL_VERSION}" ]]; then
+              VERSION_ARGS+=("--stable_version_override=${MANUAL_VERSION}")
+            fi
+          fi
+
+          VERSION_JSON=$(node scripts/get-release-version.js "${VERSION_ARGS[@]}")
           echo "RELEASE_TAG=$(echo "$VERSION_JSON" | jq -r .releaseTag)" >> "$GITHUB_OUTPUT"
           echo "RELEASE_VERSION=$(echo "$VERSION_JSON" | jq -r .releaseVersion)" >> "$GITHUB_OUTPUT"
           echo "NPM_TAG=$(echo "$VERSION_JSON" | jq -r .npmTag)" >> "$GITHUB_OUTPUT"

           # Get the previous tag for release notes generation
           CURRENT_TAG=$(echo "$VERSION_JSON" | jq -r .releaseTag)
           PREVIOUS_TAG=$(node scripts/get-previous-tag.js "$CURRENT_TAG" || echo "")
           echo "PREVIOUS_TAG=${PREVIOUS_TAG}" >> "$GITHUB_OUTPUT"
+          echo "PREVIOUS_RELEASE_TAG=$(echo "$VERSION_JSON" | jq -r .previousReleaseTag)" >> "$GITHUB_OUTPUT"
         env:
           GITHUB_TOKEN: '${{ secrets.GITHUB_TOKEN }}'
           IS_NIGHTLY: '${{ steps.vars.outputs.is_nightly }}'
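The step above relies on `scripts/get-release-version.js` printing a single JSON object for `jq` to pick fields from. The script itself is not part of this diff; the sketch below only restates the contract implied by the `jq -r` calls. The field names come from the workflow, everything else (the type name, example values) is assumed.

```ts
// Sketch of the JSON shape the workflow's jq calls expect.
// Only the four fields read above are grounded in the diff.
interface ReleaseVersionInfo {
  releaseTag: string;         // e.g. "v0.1.2"  -> RELEASE_TAG
  releaseVersion: string;     // e.g. "0.1.2"   -> RELEASE_VERSION
  npmTag: string;             // e.g. "latest"  -> NPM_TAG
  previousReleaseTag: string; //                -> PREVIOUS_RELEASE_TAG
}
```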
@@ -155,7 +167,11 @@ jobs:
           RELEASE_TAG: '${{ steps.version.outputs.RELEASE_TAG }}'
         run: |-
           git add package.json package-lock.json packages/*/package.json
-          git commit -m "chore(release): ${RELEASE_TAG}"
+          if git diff --staged --quiet; then
+            echo "No version changes to commit"
+          else
+            git commit -m "chore(release): ${RELEASE_TAG}"
+          fi
           if [[ "${IS_DRY_RUN}" == "false" ]]; then
             echo "Pushing release branch to remote..."
             git push --set-upstream origin "${BRANCH_NAME}" --follow-tags
@@ -163,9 +179,9 @@ jobs:
             echo "Dry run enabled. Skipping push."
           fi

-      - name: 'Build and Prepare Packages'
+      - name: 'Build Bundle and Prepare Package'
         run: |-
-          npm run build:packages
           npm run bundle
+          npm run prepare:package

       - name: 'Configure npm for publishing'
@@ -175,20 +191,10 @@ jobs:
           registry-url: 'https://registry.npmjs.org'
           scope: '@qwen-code'

-      - name: 'Publish @qwen-code/qwen-code-core'
-        run: |-
-          npm publish --workspace=@qwen-code/qwen-code-core --access public --tag=${{ steps.version.outputs.NPM_TAG }} ${{ steps.vars.outputs.is_dry_run == 'true' && '--dry-run' || '' }}
-        env:
-          NODE_AUTH_TOKEN: '${{ secrets.NPM_TOKEN }}'
-
-      - name: 'Install latest core package'
-        if: |-
-          ${{ steps.vars.outputs.is_dry_run == 'false' }}
-        run: 'npm install @qwen-code/qwen-code-core@${{ steps.version.outputs.RELEASE_VERSION }} --workspace=@qwen-code/qwen-code --save-exact'
-
       - name: 'Publish @qwen-code/qwen-code'
+        working-directory: 'dist'
         run: |-
-          npm publish --workspace=@qwen-code/qwen-code --access public --tag=${{ steps.version.outputs.NPM_TAG }} ${{ steps.vars.outputs.is_dry_run == 'true' && '--dry-run' || '' }}
+          npm publish --access public --tag=${{ steps.version.outputs.NPM_TAG }} ${{ steps.vars.outputs.is_dry_run == 'true' && '--dry-run' || '' }}
         env:
           NODE_AUTH_TOKEN: '${{ secrets.NPM_TOKEN }}'

@@ -199,13 +205,13 @@ jobs:
           GITHUB_TOKEN: '${{ secrets.GITHUB_TOKEN }}'
           RELEASE_BRANCH: '${{ steps.release_branch.outputs.BRANCH_NAME }}'
           RELEASE_TAG: '${{ steps.version.outputs.RELEASE_TAG }}'
-          PREVIOUS_TAG: '${{ steps.version.outputs.PREVIOUS_TAG }}'
+          PREVIOUS_RELEASE_TAG: '${{ steps.version.outputs.PREVIOUS_RELEASE_TAG }}'
         run: |-
           gh release create "${RELEASE_TAG}" \
-            bundle/gemini.js \
+            dist/cli.js \
             --target "$RELEASE_BRANCH" \
             --title "Release ${RELEASE_TAG}" \
-            --notes-start-tag "$PREVIOUS_TAG" \
+            --notes-start-tag "$PREVIOUS_RELEASE_TAG" \
             --generate-notes

       - name: 'Create Issue on Failure'
@@ -66,17 +66,6 @@ Slash commands provide meta-level control over the CLI itself.
   - **Description:** Display all directories added by `/directory add` and `--include-directories`.
   - **Usage:** `/directory show`

-- **`/directory`** (or **`/dir`**)
-  - **Description:** Manage workspace directories for multi-directory support.
-  - **Sub-commands:**
-    - **`add`**:
-      - **Description:** Add a directory to the workspace. The path can be absolute or relative to the current working directory. Moreover, the reference from home directory is supported as well.
-      - **Usage:** `/directory add <path1>,<path2>`
-      - **Note:** Disabled in restrictive sandbox profiles. If you're using that, use `--include-directories` when starting the session instead.
-    - **`show`**:
-      - **Description:** Display all directories added by `/directory add` and `--include-directories`.
-      - **Usage:** `/directory show`
-
 - **`/editor`**
   - **Description:** Open a dialog for selecting supported editors.
@@ -108,6 +97,20 @@ Slash commands provide meta-level control over the CLI itself.
   - **Description:** Reload the hierarchical instructional memory from all context files (default: `QWEN.md`) found in the configured locations (global, project/ancestors, and sub-directories). This updates the model with the latest context content.
   - **Note:** For more details on how context files contribute to hierarchical memory, see the [CLI Configuration documentation](./configuration.md#context-files-hierarchical-instructional-context).

+- **`/model`**
+  - **Description:** Switch the model for the current session. Opens a dialog to select from available models based on your authentication type.
+  - **Usage:** `/model`
+  - **Features:**
+    - Shows a dialog with all available models for your current authentication type
+    - Displays model descriptions and capabilities (e.g., vision support)
+    - Changes the model for the current session only
+    - Supports both Qwen models (via OAuth) and OpenAI models (via API key)
+  - **Available Models:**
+    - **Qwen Coder:** The latest Qwen Coder model from Alibaba Cloud ModelStudio (version: qwen3-coder-plus-2025-09-23)
+    - **Qwen Vision:** The latest Qwen Vision model from Alibaba Cloud ModelStudio (version: qwen3-vl-plus-2025-09-23) - supports image analysis
+    - **OpenAI Models:** Available when using OpenAI authentication (configured via `OPENAI_MODEL` environment variable)
+  - **Note:** Model selection is session-specific and does not persist across different Qwen Code sessions. To set a default model, use the `model.name` setting in your configuration.
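The note above points at `model.name` for a persistent default. A minimal `settings.json` sketch; the key comes from that note, and the value is an example model id that appears elsewhere in this compare view, not a recommendation:

```json
{
  "model": {
    "name": "qwen3-coder-plus"
  }
}
```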
 - **`/restore`**
   - **Description:** Restores the project files to the state they were in just before a tool was executed. This is particularly useful for undoing file edits made by a tool. If run without a tool call ID, it will list available checkpoints to restore from.
   - **Usage:** `/restore [tool_call_id]`
@@ -246,6 +246,14 @@ Settings are organized into categories. All settings should be placed within the
   - It must return function output as JSON on `stdout`, analogous to [`functionResponse.response.content`](https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/inference#functionresponse).
   - **Default:** `undefined`

+- **`tools.useRipgrep`** (boolean):
+  - **Description:** Use ripgrep for file content search instead of the fallback implementation. Provides faster search performance.
+  - **Default:** `true`
+
+- **`tools.useBuiltinRipgrep`** (boolean):
+  - **Description:** Use the bundled ripgrep binary. When set to `false`, the system-level `rg` command will be used instead. This setting is only effective when `tools.useRipgrep` is `true`.
+  - **Default:** `true`
+
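Taken together, the two new settings above cover three useful configurations: bundled ripgrep (both defaults), system `rg`, or the fallback search. A `settings.json` sketch that opts into the system-level `rg`, using exactly the keys documented in this hunk:

```json
{
  "tools": {
    "useRipgrep": true,
    "useBuiltinRipgrep": false
  }
}
```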
 #### `mcp`

 - **`mcp.serverCommand`** (string):
@@ -107,7 +107,7 @@ The `qwen-extension.json` file contains the configuration for the extension. The
 - `mcpServers`: A map of MCP servers to configure. The key is the name of the server, and the value is the server configuration. These servers will be loaded on startup just like MCP servers configured in a [`settings.json` file](./cli/configuration.md). If both an extension and a `settings.json` file configure an MCP server with the same name, the server defined in the `settings.json` file takes precedence.
   - Note that all MCP server configuration options are supported except for `trust`.
 - `contextFileName`: The name of the file that contains the context for the extension. This will be used to load the context from the extension directory. If this property is not used but a `QWEN.md` file is present in your extension directory, then that file will be loaded.
-- `excludeTools`: An array of tool names to exclude from the model. You can also specify command-specific restrictions for tools that support it, like the `run_shell_command` tool. For example, `"excludeTools": ["run_shell_command(rm -rf)"]` will block the `rm -rf` command. Note that this differs from the MCP server `excludeTools` functionality, which can be listed in the MCP server config.
+- `excludeTools`: An array of tool names to exclude from the model. You can also specify command-specific restrictions for tools that support it, like the `run_shell_command` tool. For example, `"excludeTools": ["run_shell_command(rm -rf)"]` will block the `rm -rf` command. Note that this differs from the MCP server `excludeTools` functionality, which can be listed in the MCP server config. **Important:** Tools specified in `excludeTools` will be disabled for the entire conversation context and will affect all subsequent queries in the current session.

 When Qwen Code starts, it loads all the extensions and merges their configurations. If there are any conflicts, the workspace configuration takes precedence.
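A minimal `qwen-extension.json` sketch combining the fields documented above. The `excludeTools` value is the exact example from the doc; the MCP server entry and its command are placeholders, not part of this diff:

```json
{
  "mcpServers": {
    "example-server": {
      "command": "node",
      "args": ["server.js"]
    }
  },
  "contextFileName": "QWEN.md",
  "excludeTools": ["run_shell_command(rm -rf)"]
}
```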
@@ -2,7 +2,7 @@ export default {
   subagents: 'Subagents',
   checkpointing: 'Checkpointing',
   sandbox: 'Sandbox Support',
-  'headless-mode': 'Headless Mode',
+  headless: 'Headless Mode',
   'welcome-back': 'Welcome Back',
   'token-caching': 'Token Caching',
 };
@@ -7,7 +7,7 @@
 import path from 'node:path';
 import { fileURLToPath } from 'node:url';
 import { createRequire } from 'node:module';
-import { writeFileSync } from 'node:fs';
+import { writeFileSync, rmSync } from 'node:fs';

 let esbuild;
 try {
@@ -22,6 +22,9 @@ const __dirname = path.dirname(__filename);
 const require = createRequire(import.meta.url);
 const pkg = require(path.resolve(__dirname, 'package.json'));

+// Clean dist directory (cross-platform)
+rmSync(path.resolve(__dirname, 'dist'), { recursive: true, force: true });
+
 const external = [
   '@lydell/node-pty',
   'node-pty',
@@ -30,16 +33,24 @@ const external = [
   '@lydell/node-pty-linux-x64',
   '@lydell/node-pty-win32-arm64',
   '@lydell/node-pty-win32-x64',
+  'tiktoken',
 ];

 esbuild
   .build({
     entryPoints: ['packages/cli/index.ts'],
     bundle: true,
-    outfile: 'bundle/gemini.js',
+    outfile: 'dist/cli.js',
     platform: 'node',
     format: 'esm',
+    target: 'node20',
     external,
+    packages: 'bundle',
+    inject: [path.resolve(__dirname, 'scripts/esbuild-shims.js')],
+    banner: {
+      js: `// Force strict mode and setup for ESM
+"use strict";`,
+    },
     alias: {
       'is-in-ci': path.resolve(
         __dirname,
@@ -48,17 +59,20 @@ esbuild
     },
     define: {
       'process.env.CLI_VERSION': JSON.stringify(pkg.version),
-    },
-    banner: {
-      js: `import { createRequire } from 'module'; const require = createRequire(import.meta.url); globalThis.__filename = require('url').fileURLToPath(import.meta.url); globalThis.__dirname = require('path').dirname(globalThis.__filename);`,
+      // Make global available for compatibility
+      global: 'globalThis',
     },
     loader: { '.node': 'file' },
     metafile: true,
     write: true,
+    keepNames: true,
   })
   .then(({ metafile }) => {
     if (process.env.DEV === 'true') {
-      writeFileSync('./bundle/esbuild.json', JSON.stringify(metafile, null, 2));
+      writeFileSync('./dist/esbuild.json', JSON.stringify(metafile, null, 2));
    }
   })
-  .catch(() => process.exit(1));
+  .catch((error) => {
+    console.error('esbuild build failed:', error);
+    process.exitCode = 1;
+  });
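Because the build now writes its metafile to `dist/esbuild.json` when `DEV=true`, the bundle contents can be inspected with esbuild's own analyzer. A standalone sketch (not part of the diff; it assumes `DEV=true node esbuild.config.js` has already produced the metafile, and it uses esbuild's standard `analyzeMetafile` API):

```ts
// inspect-bundle.ts: summarize what ended up in dist/cli.js.
import { readFileSync } from 'node:fs';
import * as esbuild from 'esbuild';

const metafile = JSON.parse(readFileSync('./dist/esbuild.json', 'utf8'));
// analyzeMetafile returns a human-readable, per-input size breakdown.
const report = await esbuild.analyzeMetafile(metafile, { verbose: false });
console.log(report);
```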
@@ -12,24 +12,12 @@ import prettierConfig from 'eslint-config-prettier';
 import importPlugin from 'eslint-plugin-import';
 import vitest from '@vitest/eslint-plugin';
 import globals from 'globals';
 import licenseHeader from 'eslint-plugin-license-header';
-import path from 'node:path';
-import url from 'node:url';
-
-// --- ESM way to get __dirname ---
-const __filename = url.fileURLToPath(import.meta.url);
-const __dirname = path.dirname(__filename);
-// --- ---
-
-// Determine the monorepo root (assuming eslint.config.js is at the root)
-const projectRoot = __dirname;

 export default tseslint.config(
   {
     // Global ignores
     ignores: [
       'node_modules/*',
+      'eslint.config.js',
       'packages/**/dist/**',
       'bundle/**',
       'package/bundle/**',
@@ -222,6 +210,21 @@ export default tseslint.config(
       '@typescript-eslint/no-require-imports': 'off',
     },
   },
+  // extra settings for core package scripts
+  {
+    files: ['packages/core/scripts/**/*.js'],
+    languageOptions: {
+      globals: {
+        ...globals.node,
+        process: 'readonly',
+        console: 'readonly',
+      },
+    },
+    rules: {
+      'no-restricted-syntax': 'off',
+      '@typescript-eslint/no-require-imports': 'off',
+    },
+  },
   // Prettier config must be last
   prettierConfig,
   // extra settings for scripts that we run directly with node
@@ -92,7 +92,7 @@ describe('edit', () => {
     expect(newFileContent).toBe(expectedContent);
   });

-  it('should fail safely when old_string is not found', async () => {
+  it.skip('should fail safely when old_string is not found', async () => {
     const rig = new TestRig();
     await rig.setup('should fail safely when old_string is not found');
     const fileName = 'no_match.txt';
@@ -19,7 +19,7 @@ describe('Interactive file system', () => {
   });

   it.skipIf(process.platform === 'win32')(
-    'should perform a read-then-write sequence',
+    'should perform a read-then-write sequence in interactive mode',
     async () => {
       const fileName = 'version.txt';
       await rig.setup('interactive-read-then-write');
@@ -36,10 +36,10 @@ describe('JSON output', () => {
   });

   it('should return a JSON error for enforced auth mismatch before running', async () => {
-    process.env['GOOGLE_GENAI_USE_GCA'] = 'true';
+    process.env['OPENAI_API_KEY'] = 'test-key';
     await rig.setup('json-output-auth-mismatch', {
       settings: {
-        security: { auth: { enforcedType: 'gemini-api-key' } },
+        security: { auth: { enforcedType: 'qwen-oauth' } },
       },
     });

@@ -50,7 +50,7 @@ describe('JSON output', () => {
     } catch (e) {
       thrown = e as Error;
     } finally {
-      delete process.env['GOOGLE_GENAI_USE_GCA'];
+      delete process.env['OPENAI_API_KEY'];
     }

     expect(thrown).toBeDefined();
@@ -80,10 +80,8 @@ describe('JSON output', () => {
     expect(payload.error.type).toBe('Error');
     expect(payload.error.code).toBe(1);
     expect(payload.error.message).toContain(
-      'configured auth type is gemini-api-key',
-    );
-    expect(payload.error.message).toContain(
-      'current auth type is oauth-personal',
+      'configured auth type is qwen-oauth',
     );
+    expect(payload.error.message).toContain('current auth type is openai');
   });
 });
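The assertions above pin down the shape of the JSON error payload the CLI emits on an enforced-auth mismatch. Restated as a type, as a sketch: only `type`, `code`, and `message` are grounded in the assertions, and the type name is ours:

```ts
interface JsonOutputError {
  error: {
    type: string;    // asserted to be 'Error'
    code: number;    // asserted to be 1
    message: string; // names both the configured and the current auth type
  };
}
```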
@@ -9,7 +9,6 @@ import { mkdirSync, writeFileSync, readFileSync } from 'node:fs';
 import { join, dirname } from 'node:path';
 import { fileURLToPath } from 'node:url';
 import { env } from 'node:process';
-import { DEFAULT_QWEN_MODEL } from '../packages/core/src/config/models.js';
 import fs from 'node:fs';
 import { EOL } from 'node:os';
 import * as pty from '@lydell/node-pty';
@@ -148,7 +147,7 @@ export class TestRig {
   _interactiveOutput = '';

   constructor() {
-    this.bundlePath = join(__dirname, '..', 'bundle/gemini.js');
+    this.bundlePath = join(__dirname, '..', 'dist/cli.js');
     this.testDir = null;
   }

@@ -182,7 +181,6 @@ export class TestRig {
         otlpEndpoint: '',
         outfile: telemetryPath,
       },
-      model: DEFAULT_QWEN_MODEL,
       sandbox: env.GEMINI_SANDBOX !== 'false' ? env.GEMINI_SANDBOX : false,
       ...options.settings, // Allow tests to override/add settings
     };
@@ -12,13 +12,12 @@ describe('todo_write', () => {
     const rig = new TestRig();
     await rig.setup('should be able to create and manage a todo list');

-    const prompt = `I want to implement a new feature to track user preferences. Here are the tasks:
-1. Create a user preferences model
-2. Add API endpoints for preferences
-3. Implement frontend components
-4. Write tests for the new functionality
+    const prompt = `Please create a todo list with these three simple tasks:
+1. Buy milk
+2. Walk the dog
+3. Read a book

-Please create a todo list for these tasks.`;
+Use the todo_write tool to create this list.`;

     const result = await rig.run(prompt);

@@ -50,83 +49,21 @@ Please create a todo list for these tasks.`;

     expect(todoArgs.todos).toBeDefined();
     expect(Array.isArray(todoArgs.todos)).toBe(true);
-    expect(todoArgs.todos.length).toBe(4);
+    expect(todoArgs.todos.length).toBeGreaterThanOrEqual(3);

     // Check that all todos have the correct structure
     for (const todo of todoArgs.todos) {
       expect(todo.id).toBeDefined();
       expect(todo.content).toBeDefined();
-      expect(['pending', 'in_progress', 'completed']).toContain(todo.status);
+      expect(['pending', 'in_progress', 'completed', 'cancelled']).toContain(
+        todo.status,
+      );
     }

-    // Log success info if verbose
-    if (process.env['VERBOSE'] === 'true') {
-      console.log('Todo list created successfully');
-    }
-  });
-
-  it('should be able to update todo status', async () => {
-    const rig = new TestRig();
-    await rig.setup('should be able to update todo status');
-
-    // First create a todo list
-    const initialPrompt = `Create a todo list with these tasks:
-1. Set up project structure
-2. Implement authentication
-3. Add database migrations`;
-
-    await rig.run(initialPrompt);
-    await rig.waitForToolCall('todo_write');
-
-    // Now update the todo list by marking one as in progress
-    const updatePrompt = `I've started working on implementing authentication. Please update the todo list to reflect that.`;
-
-    const result = await rig.run(updatePrompt);
-
-    const foundToolCall = await rig.waitForToolCall('todo_write');
-
-    // Add debugging information
-    if (!foundToolCall) {
-      printDebugInfo(rig, result);
-    }
-
-    expect(
-      foundToolCall,
-      'Expected to find a todo_write tool call',
-    ).toBeTruthy();
-
-    // Validate model output - will throw if no output
-    validateModelOutput(result, null, 'Todo update test');
-
-    // Check that the tool was called with updated parameters
-    const toolLogs = rig.readToolLogs();
-    const todoWriteCalls = toolLogs.filter(
-      (t) => t.toolRequest.name === 'todo_write',
-    );
-
-    expect(todoWriteCalls.length).toBeGreaterThan(0);
-
-    // Parse the arguments to verify the update
-    const todoArgs = JSON.parse(
-      todoWriteCalls[todoWriteCalls.length - 1].toolRequest.args,
-    );
-
-    expect(todoArgs.todos).toBeDefined();
-    expect(Array.isArray(todoArgs.todos)).toBe(true);
-    // The model might create a new list with just the task it's working on
-    // or it might update the existing list. Let's check that we have at least one todo
-    expect(todoArgs.todos.length).toBeGreaterThanOrEqual(1);
-
-    // Check that all todos have the correct structure
-    for (const todo of todoArgs.todos) {
-      expect(todo.id).toBeDefined();
-      expect(todo.content).toBeDefined();
-      expect(['pending', 'in_progress', 'completed']).toContain(todo.status);
-    }
-
     // Log success info if verbose
     if (process.env['VERBOSE'] === 'true') {
-      console.log('Todo list updated successfully');
+      console.log(`Created ${todoArgs.todos.length} todos`);
     }
   });
 });
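The structural checks in this test imply a todo item shape. A sketch: the field names and the status union come straight from the assertions, while the type names and the `string` field types are assumptions:

```ts
type TodoStatus = 'pending' | 'in_progress' | 'completed' | 'cancelled';

interface TodoItem {
  id: string;      // asserted to be defined
  content: string; // asserted to be defined
  status: TodoStatus;
}

interface TodoWriteArgs {
  todos: TodoItem[]; // the test now only requires >= 3 items
}
```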
package-lock.json (generated, 486 changed lines)
@@ -1,12 +1,12 @@
 {
   "name": "@qwen-code/qwen-code",
-  "version": "0.0.14",
+  "version": "0.1.2",
   "lockfileVersion": 3,
   "requires": true,
   "packages": {
     "": {
       "name": "@qwen-code/qwen-code",
-      "version": "0.0.14",
+      "version": "0.1.2",
       "workspaces": [
         "packages/*"
       ],
@@ -15,7 +15,7 @@
         "simple-git": "^3.28.0"
       },
       "bin": {
-        "qwen": "bundle/gemini.js"
+        "qwen": "dist/cli.js"
       },
       "devDependencies": {
         "@types/marked": "^5.0.2",
@@ -1501,28 +1501,6 @@
         "node": "^18.14.0 || ^20.0.0 || ^22.0.0 || >=24.0.0"
       }
     },
-    "node_modules/@joshua.litt/get-ripgrep": {
-      "version": "0.0.2",
-      "resolved": "https://registry.npmjs.org/@joshua.litt/get-ripgrep/-/get-ripgrep-0.0.2.tgz",
-      "integrity": "sha512-cSHA+H+HEkOXeiCxrNvGj/pgv2Y0bfp4GbH3R87zr7Vob2pDUZV3BkUL9ucHMoDFID4GteSy5z5niN/lF9QeuQ==",
-      "dependencies": {
-        "@lvce-editor/verror": "^1.6.0",
-        "execa": "^9.5.2",
-        "extract-zip": "^2.0.1",
-        "fs-extra": "^11.3.0",
-        "got": "^14.4.5",
-        "path-exists": "^5.0.0",
-        "xdg-basedir": "^5.1.0"
-      }
-    },
-    "node_modules/@joshua.litt/get-ripgrep/node_modules/path-exists": {
-      "version": "5.0.0",
-      "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-5.0.0.tgz",
-      "integrity": "sha512-RjhtfwJOxzcFmNOi6ltcbcu4Iu+FL3zEj83dk4kAS+fVpTxXLO1b38RvJgT/0QwvV/L3aY9TAnyv0EOqW4GoMQ==",
-      "engines": {
-        "node": "^12.20.0 || ^14.13.1 || >=16.0.0"
-      }
-    },
     "node_modules/@jridgewell/gen-mapping": {
       "version": "0.3.8",
       "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.8.tgz",
@@ -1720,12 +1698,6 @@
       "integrity": "sha512-GaHYm+c0O9MjZRu0ongGBRbinu8gVAMd2UZjji6jVmqKtZluZnptXGWhz1E8j8D2HJ3f/yMxKAUC0b+57wncIw==",
       "license": "MIT"
     },
-    "node_modules/@lvce-editor/verror": {
-      "version": "1.7.0",
-      "resolved": "https://registry.npmjs.org/@lvce-editor/verror/-/verror-1.7.0.tgz",
-      "integrity": "sha512-+LGuAEIC2L7pbvkyAQVWM2Go0dAy+UWEui28g07zNtZsCBhm+gusBK8PNwLJLV5Jay+TyUYuwLIbJdjLLzqEBg==",
-      "license": "MIT"
-    },
     "node_modules/@lydell/node-pty": {
       "version": "1.1.0",
       "resolved": "https://registry.npmjs.org/@lydell/node-pty/-/node-pty-1.1.0.tgz",
@@ -3084,12 +3056,6 @@
       "dev": true,
       "license": "MIT"
     },
-    "node_modules/@sec-ant/readable-stream": {
-      "version": "0.4.1",
-      "resolved": "https://registry.npmjs.org/@sec-ant/readable-stream/-/readable-stream-0.4.1.tgz",
-      "integrity": "sha512-831qok9r2t8AlxLko40y2ebgSDhenenCatLVeW/uBtnHPyhHOvG0C7TvfgecV+wHzIm5KUICgzmVpWS+IMEAeg==",
-      "license": "MIT"
-    },
     "node_modules/@secretlint/config-creator": {
       "version": "10.2.2",
       "resolved": "https://registry.npmjs.org/@secretlint/config-creator/-/config-creator-10.2.2.tgz",
@@ -3308,42 +3274,6 @@
       "dev": true,
       "license": "MIT"
     },
-    "node_modules/@sindresorhus/is": {
-      "version": "7.0.2",
-      "resolved": "https://registry.npmjs.org/@sindresorhus/is/-/is-7.0.2.tgz",
-      "integrity": "sha512-d9xRovfKNz1SKieM0qJdO+PQonjnnIfSNWfHYnBSJ9hkjm0ZPw6HlxscDXYstp3z+7V2GOFHc+J0CYrYTjqCJw==",
-      "license": "MIT",
-      "engines": {
-        "node": ">=18"
-      },
-      "funding": {
-        "url": "https://github.com/sindresorhus/is?sponsor=1"
-      }
-    },
-    "node_modules/@sindresorhus/merge-streams": {
-      "version": "4.0.0",
-      "resolved": "https://registry.npmjs.org/@sindresorhus/merge-streams/-/merge-streams-4.0.0.tgz",
-      "integrity": "sha512-tlqY9xq5ukxTUZBmoOp+m61cqwQD5pHJtFY3Mn8CA8ps6yghLH/Hw8UPdqg4OLmFW3IFlcXnQNmo/dh8HzXYIQ==",
-      "license": "MIT",
-      "engines": {
-        "node": ">=18"
-      },
-      "funding": {
-        "url": "https://github.com/sponsors/sindresorhus"
-      }
-    },
-    "node_modules/@szmarczak/http-timer": {
-      "version": "5.0.1",
-      "resolved": "https://registry.npmjs.org/@szmarczak/http-timer/-/http-timer-5.0.1.tgz",
-      "integrity": "sha512-+PmQX0PiAYPMeVYe237LJAYvOMYW1j2rH5YROyS3b4CTVJum34HfRvKvAzozHAQG0TnHNdUfY9nCeUyRAs//cw==",
-      "license": "MIT",
-      "dependencies": {
-        "defer-to-connect": "^2.0.1"
-      },
-      "engines": {
-        "node": ">=14.16"
-      }
-    },
     "node_modules/@testing-library/dom": {
       "version": "10.4.1",
       "resolved": "https://registry.npmjs.org/@testing-library/dom/-/dom-10.4.1.tgz",
@@ -3679,12 +3609,6 @@
       "integrity": "sha512-pUY3cKH/Nm2yYrEmDlPR1mR7yszjGx4DrwPjQ702C4/D5CwHuZTgZdIdwPkRbcuhs7BAh2L5rg3CL5cbRiGTCQ==",
       "license": "MIT"
     },
-    "node_modules/@types/http-cache-semantics": {
-      "version": "4.0.4",
-      "resolved": "https://registry.npmjs.org/@types/http-cache-semantics/-/http-cache-semantics-4.0.4.tgz",
-      "integrity": "sha512-1m0bIFVc7eJWyve9S0RnuRgcQqF/Xd5QsUZAZeQFr1Q3/p9JWoQQEqmVy+DPTNpGXwhgIetAoYF8JSc33q29QA==",
-      "license": "MIT"
-    },
     "node_modules/@types/http-errors": {
       "version": "2.0.5",
       "resolved": "https://registry.npmjs.org/@types/http-errors/-/http-errors-2.0.5.tgz",
@@ -5685,33 +5609,6 @@
         "node": ">=8"
       }
     },
-    "node_modules/cacheable-lookup": {
-      "version": "7.0.0",
-      "resolved": "https://registry.npmjs.org/cacheable-lookup/-/cacheable-lookup-7.0.0.tgz",
-      "integrity": "sha512-+qJyx4xiKra8mZrcwhjMRMUhD5NR1R8esPkzIYxX96JiecFoxAXFuz/GpR3+ev4PE1WamHip78wV0vcmPQtp8w==",
-      "license": "MIT",
-      "engines": {
-        "node": ">=14.16"
-      }
-    },
-    "node_modules/cacheable-request": {
-      "version": "12.0.1",
-      "resolved": "https://registry.npmjs.org/cacheable-request/-/cacheable-request-12.0.1.tgz",
-      "integrity": "sha512-Yo9wGIQUaAfIbk+qY0X4cDQgCosecfBe3V9NSyeY4qPC2SAkbCS4Xj79VP8WOzitpJUZKc/wsRCYF5ariDIwkg==",
-      "license": "MIT",
-      "dependencies": {
-        "@types/http-cache-semantics": "^4.0.4",
-        "get-stream": "^9.0.1",
-        "http-cache-semantics": "^4.1.1",
-        "keyv": "^4.5.4",
-        "mimic-response": "^4.0.0",
-        "normalize-url": "^8.0.1",
-        "responselike": "^3.0.0"
-      },
-      "engines": {
-        "node": ">=18"
-      }
-    },
     "node_modules/call-bind": {
       "version": "1.0.8",
       "resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.8.tgz",
@@ -6632,7 +6529,9 @@
       "version": "6.0.0",
       "resolved": "https://registry.npmjs.org/decompress-response/-/decompress-response-6.0.0.tgz",
       "integrity": "sha512-aW35yZM6Bb/4oJlZncMH2LCoZtJXTRxES17vE3hoRiowU2kWHaJKFkSBDnDR+cm9J+9QhXmREyIfv0pji9ejCQ==",
+      "dev": true,
       "license": "MIT",
+      "optional": true,
       "dependencies": {
         "mimic-response": "^3.1.0"
       },
@@ -6647,7 +6546,9 @@
       "version": "3.1.0",
       "resolved": "https://registry.npmjs.org/mimic-response/-/mimic-response-3.1.0.tgz",
      "integrity": "sha512-z0yWI+4FDrrweS8Zmt4Ej5HdJmky15+L2e6Wgn3+iK5fWzb6T3fhNFq2+MeTRb064c6Wr4N/wv0DzQTjNzHNGQ==",
+      "dev": true,
       "license": "MIT",
+      "optional": true,
       "engines": {
         "node": ">=10"
       },
@@ -6718,15 +6619,6 @@
         "url": "https://github.com/sponsors/sindresorhus"
       }
     },
-    "node_modules/defer-to-connect": {
-      "version": "2.0.1",
-      "resolved": "https://registry.npmjs.org/defer-to-connect/-/defer-to-connect-2.0.1.tgz",
-      "integrity": "sha512-4tvttepXG1VaYGrRibk5EwJd1t4udunSOVMdLSAL6mId1ix438oPwPZMALY41FCijukO1L0twNcGsdzS7dHgDg==",
-      "license": "MIT",
-      "engines": {
-        "node": ">=10"
-      }
-    },
     "node_modules/define-data-property": {
       "version": "1.1.4",
       "resolved": "https://registry.npmjs.org/define-data-property/-/define-data-property-1.1.4.tgz",
@@ -7805,44 +7697,6 @@
         "node": ">=20.0.0"
       }
     },
-    "node_modules/execa": {
-      "version": "9.6.0",
-      "resolved": "https://registry.npmjs.org/execa/-/execa-9.6.0.tgz",
-      "integrity": "sha512-jpWzZ1ZhwUmeWRhS7Qv3mhpOhLfwI+uAX4e5fOcXqwMR7EcJ0pj2kV1CVzHVMX/LphnKWD3LObjZCoJ71lKpHw==",
-      "license": "MIT",
-      "dependencies": {
-        "@sindresorhus/merge-streams": "^4.0.0",
-        "cross-spawn": "^7.0.6",
-        "figures": "^6.1.0",
-        "get-stream": "^9.0.0",
-        "human-signals": "^8.0.1",
-        "is-plain-obj": "^4.1.0",
-        "is-stream": "^4.0.1",
-        "npm-run-path": "^6.0.0",
-        "pretty-ms": "^9.2.0",
-        "signal-exit": "^4.1.0",
-        "strip-final-newline": "^4.0.0",
-        "yoctocolors": "^2.1.1"
-      },
-      "engines": {
-        "node": "^18.19.0 || >=20.5.0"
-      },
-      "funding": {
-        "url": "https://github.com/sindresorhus/execa?sponsor=1"
-      }
-    },
-    "node_modules/execa/node_modules/is-stream": {
-      "version": "4.0.1",
-      "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-4.0.1.tgz",
-      "integrity": "sha512-Dnz92NInDqYckGEUJv689RbRiTSEHCQ7wOVeALbkOz999YpqT46yMRIGtSNl2iCL1waAZSx40+h59NV/EwzV/A==",
-      "license": "MIT",
-      "engines": {
-        "node": ">=18"
-      },
-      "funding": {
-        "url": "https://github.com/sponsors/sindresorhus"
-      }
-    },
     "node_modules/expand-template": {
       "version": "2.0.3",
       "resolved": "https://registry.npmjs.org/expand-template/-/expand-template-2.0.3.tgz",
@@ -8087,21 +7941,6 @@
         "pend": "~1.2.0"
       }
     },
-    "node_modules/figures": {
-      "version": "6.1.0",
-      "resolved": "https://registry.npmjs.org/figures/-/figures-6.1.0.tgz",
-      "integrity": "sha512-d+l3qxjSesT4V7v2fh+QnmFnUWv9lSpjarhShNTgBOfA0ttejbQUAlHLitbjkoRiDulW0OPoQPYIGhIC8ohejg==",
-      "license": "MIT",
-      "dependencies": {
-        "is-unicode-supported": "^2.0.0"
-      },
-      "engines": {
-        "node": ">=18"
-      },
-      "funding": {
-        "url": "https://github.com/sponsors/sindresorhus"
-      }
-    },
     "node_modules/file-entry-cache": {
       "version": "8.0.0",
       "resolved": "https://registry.npmjs.org/file-entry-cache/-/file-entry-cache-8.0.0.tgz",
@@ -8273,15 +8112,6 @@
         "node": ">= 6"
       }
     },
-    "node_modules/form-data-encoder": {
-      "version": "4.1.0",
-      "resolved": "https://registry.npmjs.org/form-data-encoder/-/form-data-encoder-4.1.0.tgz",
-      "integrity": "sha512-G6NsmEW15s0Uw9XnCg+33H3ViYRyiM0hMrMhhqQOR8NFc5GhYrI+6I3u7OTw7b91J2g8rtvMBZJDbcGb2YUniw==",
-      "license": "MIT",
-      "engines": {
-        "node": ">= 18"
-      }
-    },
     "node_modules/form-data/node_modules/mime-types": {
       "version": "2.1.35",
       "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz",
@@ -8331,6 +8161,7 @@
       "version": "11.3.1",
       "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-11.3.1.tgz",
       "integrity": "sha512-eXvGGwZ5CL17ZSwHWd3bbgk7UUpF6IFHtP57NYYakPvHOs8GDgDe5KJI36jIJzDkJ6eJjuzRA8eBQb6SkKue0g==",
+      "dev": true,
       "license": "MIT",
       "dependencies": {
         "graceful-fs": "^4.2.0",
@@ -8345,6 +8176,7 @@
       "version": "2.0.1",
       "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.1.tgz",
       "integrity": "sha512-gptHNQghINnc/vTGIk0SOFGFNXw7JVrlRUtConJRlvaw6DuX0wO5Jeko9sWrMBhh+PsYAZ7oXAiOnf/UKogyiw==",
+      "dev": true,
       "license": "MIT",
       "engines": {
         "node": ">= 10.0.0"
@@ -8499,34 +8331,6 @@
         "node": ">= 0.4"
       }
     },
-    "node_modules/get-stream": {
-      "version": "9.0.1",
-      "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-9.0.1.tgz",
-      "integrity": "sha512-kVCxPF3vQM/N0B1PmoqVUqgHP+EeVjmZSQn+1oCRPxd2P21P2F19lIgbR3HBosbB1PUhOAoctJnfEn2GbN2eZA==",
-      "license": "MIT",
-      "dependencies": {
-        "@sec-ant/readable-stream": "^0.4.1",
-        "is-stream": "^4.0.1"
-      },
-      "engines": {
-        "node": ">=18"
-      },
-      "funding": {
-        "url": "https://github.com/sponsors/sindresorhus"
-      }
-    },
-    "node_modules/get-stream/node_modules/is-stream": {
-      "version": "4.0.1",
-      "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-4.0.1.tgz",
-      "integrity": "sha512-Dnz92NInDqYckGEUJv689RbRiTSEHCQ7wOVeALbkOz999YpqT46yMRIGtSNl2iCL1waAZSx40+h59NV/EwzV/A==",
-      "license": "MIT",
-      "engines": {
-        "node": ">=18"
-      },
-      "funding": {
-        "url": "https://github.com/sponsors/sindresorhus"
-      }
-    },
     "node_modules/get-symbol-description": {
       "version": "1.1.0",
       "resolved": "https://registry.npmjs.org/get-symbol-description/-/get-symbol-description-1.1.0.tgz",
@@ -8807,43 +8611,6 @@
         "url": "https://github.com/sponsors/ljharb"
       }
     },
-    "node_modules/got": {
-      "version": "14.4.8",
-      "resolved": "https://registry.npmjs.org/got/-/got-14.4.8.tgz",
-      "integrity": "sha512-vxwU4HuR0BIl+zcT1LYrgBjM+IJjNElOjCzs0aPgHorQyr/V6H6Y73Sn3r3FOlUffvWD+Q5jtRuGWaXkU8Jbhg==",
-      "license": "MIT",
-      "dependencies": {
-        "@sindresorhus/is": "^7.0.1",
-        "@szmarczak/http-timer": "^5.0.1",
-        "cacheable-lookup": "^7.0.0",
-        "cacheable-request": "^12.0.1",
-        "decompress-response": "^6.0.0",
-        "form-data-encoder": "^4.0.2",
-        "http2-wrapper": "^2.2.1",
-        "lowercase-keys": "^3.0.0",
-        "p-cancelable": "^4.0.1",
-        "responselike": "^3.0.0",
-        "type-fest": "^4.26.1"
-      },
-      "engines": {
-        "node": ">=20"
-      },
-      "funding": {
-        "url": "https://github.com/sindresorhus/got?sponsor=1"
-      }
-    },
-    "node_modules/got/node_modules/type-fest": {
-      "version": "4.41.0",
-      "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-4.41.0.tgz",
-      "integrity": "sha512-TeTSQ6H5YHvpqVwBRcnLDCBnDOHWYu7IvGbHT6N8AOymcr9PJGjc1GTtiWZTYg0NCgYwvnYWEkVChQAr9bjfwA==",
-      "license": "(MIT OR CC0-1.0)",
-      "engines": {
-        "node": ">=16"
-      },
-      "funding": {
-        "url": "https://github.com/sponsors/sindresorhus"
-      }
-    },
     "node_modules/graceful-fs": {
       "version": "4.2.11",
       "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.11.tgz",
@@ -9076,12 +8843,6 @@
         "entities": "^4.4.0"
       }
     },
-    "node_modules/http-cache-semantics": {
-      "version": "4.2.0",
-      "resolved": "https://registry.npmjs.org/http-cache-semantics/-/http-cache-semantics-4.2.0.tgz",
-      "integrity": "sha512-dTxcvPXqPvXBQpq5dUr6mEMJX4oIEFv6bwom3FDwKRDsuIjjJGANqhBuoAn9c1RQJIdAKav33ED65E2ys+87QQ==",
-      "license": "BSD-2-Clause"
-    },
     "node_modules/http-errors": {
       "version": "2.0.0",
       "resolved": "https://registry.npmjs.org/http-errors/-/http-errors-2.0.0.tgz",
@@ -9121,19 +8882,6 @@
         "node": ">= 14"
       }
     },
-    "node_modules/http2-wrapper": {
-      "version": "2.2.1",
-      "resolved": "https://registry.npmjs.org/http2-wrapper/-/http2-wrapper-2.2.1.tgz",
-      "integrity": "sha512-V5nVw1PAOgfI3Lmeaj2Exmeg7fenjhRUgz1lPSezy1CuhPYbgQtbQj4jZfEAEMlaL+vupsvhjqCyjzob0yxsmQ==",
-      "license": "MIT",
-      "dependencies": {
-        "quick-lru": "^5.1.1",
-        "resolve-alpn": "^1.2.0"
-      },
-      "engines": {
-        "node": ">=10.19.0"
-      }
-    },
     "node_modules/https-proxy-agent": {
       "version": "7.0.6",
       "resolved": "https://registry.npmjs.org/https-proxy-agent/-/https-proxy-agent-7.0.6.tgz",
@@ -9147,15 +8895,6 @@
         "node": ">= 14"
       }
     },
-    "node_modules/human-signals": {
-      "version": "8.0.1",
-      "resolved": "https://registry.npmjs.org/human-signals/-/human-signals-8.0.1.tgz",
-      "integrity": "sha512-eKCa6bwnJhvxj14kZk5NCPc6Hb6BdsU9DZcOnmQKSnO1VKrfV0zCvtttPZUsBvjmNDn8rpcJfpwSYnHBjc95MQ==",
-      "license": "Apache-2.0",
-      "engines": {
-        "node": ">=18.18.0"
-      }
-    },
     "node_modules/husky": {
       "version": "9.1.7",
       "resolved": "https://registry.npmjs.org/husky/-/husky-9.1.7.tgz",
@@ -9967,18 +9706,6 @@
         "url": "https://github.com/sponsors/sindresorhus"
       }
     },
-    "node_modules/is-plain-obj": {
-      "version": "4.1.0",
-      "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-4.1.0.tgz",
-      "integrity": "sha512-+Pgi+vMuUNkJyExiMBt5IlFoMyKnr5zhJ4Uspz58WOhBF5QoIZkFyNHIbBAtHwzVAgk5RtndVNsDRN61/mmDqg==",
-      "license": "MIT",
-      "engines": {
-        "node": ">=12"
-      },
-      "funding": {
-        "url": "https://github.com/sponsors/sindresorhus"
-      }
-    },
     "node_modules/is-potential-custom-element-name": {
       "version": "1.0.1",
       "resolved": "https://registry.npmjs.org/is-potential-custom-element-name/-/is-potential-custom-element-name-1.0.1.tgz",
@@ -10103,18 +9830,6 @@
         "url": "https://github.com/sponsors/ljharb"
       }
     },
-    "node_modules/is-unicode-supported": {
-      "version": "2.1.0",
-      "resolved": "https://registry.npmjs.org/is-unicode-supported/-/is-unicode-supported-2.1.0.tgz",
-      "integrity": "sha512-mE00Gnza5EEB3Ds0HfMyllZzbBrmLOX3vfWoj9A9PEnTfratQ/BcaJOuMhnkhjXvb2+FkY3VuHqtAGpTPmglFQ==",
-      "license": "MIT",
-      "engines": {
-        "node": ">=18"
-      },
-      "funding": {
-        "url": "https://github.com/sponsors/sindresorhus"
-      }
-    },
     "node_modules/is-weakmap": {
       "version": "2.0.2",
       "resolved": "https://registry.npmjs.org/is-weakmap/-/is-weakmap-2.0.2.tgz",
@@ -10392,6 +10107,7 @@
       "version": "3.0.1",
       "resolved": "https://registry.npmjs.org/json-buffer/-/json-buffer-3.0.1.tgz",
       "integrity": "sha512-4bV5BfR2mqfQTJm+V5tPPdf+ZpuhiIvTuAB5g8kcrXOZpTT/QwwVRWBywX1ozr6lEuPdbHxwaJlm9G6mI2sfSQ==",
+      "dev": true,
       "license": "MIT"
     },
     "node_modules/json-parse-better-errors": {
@@ -10448,6 +10164,7 @@
       "version": "6.2.0",
       "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.2.0.tgz",
      "integrity": "sha512-FGuPw30AdOIUTRMC2OMRtQV+jkVj2cfPqSeWXv1NEAJ1qZ5zb1X6z1mFhbfOB/iy3ssJCD+3KuZ8r8C3uVFlAg==",
+      "dev": true,
       "license": "MIT",
       "dependencies": {
         "universalify": "^2.0.0"
@@ -10460,6 +10177,7 @@
       "version": "2.0.1",
       "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.1.tgz",
       "integrity": "sha512-gptHNQghINnc/vTGIk0SOFGFNXw7JVrlRUtConJRlvaw6DuX0wO5Jeko9sWrMBhh+PsYAZ7oXAiOnf/UKogyiw==",
+      "dev": true,
       "license": "MIT",
       "engines": {
         "node": ">= 10.0.0"
@@ -10574,6 +10292,7 @@
       "version": "4.5.4",
       "resolved": "https://registry.npmjs.org/keyv/-/keyv-4.5.4.tgz",
       "integrity": "sha512-oxVHkHR/EJf2CNXnWxRLW6mg7JyCCUcG0DtEGmL2ctUo1PNTin1PUil+r/+4r5MpVgC/fn1kjsx7mjSujKqIpw==",
+      "dev": true,
       "license": "MIT",
       "dependencies": {
         "json-buffer": "3.0.1"
@@ -11053,18 +10772,6 @@
       "dev": true,
       "license": "MIT"
     },
-    "node_modules/lowercase-keys": {
-      "version": "3.0.0",
-      "resolved": "https://registry.npmjs.org/lowercase-keys/-/lowercase-keys-3.0.0.tgz",
-      "integrity": "sha512-ozCC6gdQ+glXOQsveKD0YsDy8DSQFjDTz4zyzEHNV5+JP5D62LmfDZ6o1cycFx9ouG940M5dE8C8CTewdj2YWQ==",
-      "license": "MIT",
-      "engines": {
-        "node": "^12.20.0 || ^14.13.1 || >=16.0.0"
-      },
-      "funding": {
-        "url": "https://github.com/sponsors/sindresorhus"
-      }
-    },
     "node_modules/lowlight": {
       "version": "3.3.0",
       "resolved": "https://registry.npmjs.org/lowlight/-/lowlight-3.3.0.tgz",
@@ -11305,18 +11012,6 @@
         "url": "https://github.com/sponsors/sindresorhus"
       }
     },
-    "node_modules/mimic-response": {
-      "version": "4.0.0",
-      "resolved": "https://registry.npmjs.org/mimic-response/-/mimic-response-4.0.0.tgz",
-      "integrity": "sha512-e5ISH9xMYU0DzrT+jl8q2ze9D6eWBto+I8CNpe+VI+K2J/F/k3PdkdTdz4wvGVH4NTpo+NRYTVIuMQEMMcsLqg==",
-      "license": "MIT",
-      "engines": {
-        "node": "^12.20.0 || ^14.13.1 || >=16.0.0"
-      },
-      "funding": {
-        "url": "https://github.com/sponsors/sindresorhus"
-      }
-    },
     "node_modules/minimatch": {
       "version": "3.1.2",
       "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz",
@@ -11657,18 +11352,6 @@
         "node": ">=0.10.0"
       }
     },
-    "node_modules/normalize-url": {
-      "version": "8.0.2",
-      "resolved": "https://registry.npmjs.org/normalize-url/-/normalize-url-8.0.2.tgz",
-      "integrity": "sha512-Ee/R3SyN4BuynXcnTaekmaVdbDAEiNrHqjQIA37mHU8G9pf7aaAD4ZX3XjBLo6rsdcxA/gtkcNYZLt30ACgynw==",
-      "license": "MIT",
-      "engines": {
-        "node": ">=14.16"
-      },
-      "funding": {
-        "url": "https://github.com/sponsors/sindresorhus"
-      }
-    },
     "node_modules/npm-normalize-package-bin": {
       "version": "4.0.0",
       "resolved": "https://registry.npmjs.org/npm-normalize-package-bin/-/npm-normalize-package-bin-4.0.0.tgz",
@@ -11950,46 +11633,6 @@
         "node": "^18.17.0 || >=20.5.0"
       }
     },
-    "node_modules/npm-run-path": {
-      "version": "6.0.0",
-      "resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-6.0.0.tgz",
-      "integrity": "sha512-9qny7Z9DsQU8Ou39ERsPU4OZQlSTP47ShQzuKZ6PRXpYLtIFgl/DEBYEXKlvcEa+9tHVcK8CF81Y2V72qaZhWA==",
-      "license": "MIT",
-      "dependencies": {
-        "path-key": "^4.0.0",
-        "unicorn-magic": "^0.3.0"
-      },
-      "engines": {
-        "node": ">=18"
-      },
-      "funding": {
-        "url": "https://github.com/sponsors/sindresorhus"
-      }
-    },
-    "node_modules/npm-run-path/node_modules/path-key": {
-      "version": "4.0.0",
-      "resolved": "https://registry.npmjs.org/path-key/-/path-key-4.0.0.tgz",
-      "integrity": "sha512-haREypq7xkM7ErfgIyA0z+Bj4AGKlMSdlQE2jvJo6huWD1EdkKYV+G/T4nq0YEF2vgTT8kqMFKo1uHn950r4SQ==",
-      "license": "MIT",
-      "engines": {
-        "node": ">=12"
-      },
-      "funding": {
-        "url": "https://github.com/sponsors/sindresorhus"
-      }
-    },
-    "node_modules/npm-run-path/node_modules/unicorn-magic": {
-      "version": "0.3.0",
-      "resolved": "https://registry.npmjs.org/unicorn-magic/-/unicorn-magic-0.3.0.tgz",
-      "integrity": "sha512-+QBBXBCvifc56fsbuxZQ6Sic3wqqc3WWaqxs58gvJrcOuN83HGTCwz3oS5phzU9LthRNE9VrJCFCLUgHeeFnfA==",
-      "license": "MIT",
-      "engines": {
-        "node": ">=18"
-      },
-      "funding": {
-        "url": "https://github.com/sponsors/sindresorhus"
-      }
-    },
     "node_modules/nth-check": {
       "version": "2.1.1",
       "resolved": "https://registry.npmjs.org/nth-check/-/nth-check-2.1.1.tgz",
@@ -12255,15 +11898,6 @@
         "url": "https://github.com/sponsors/ljharb"
       }
     },
-    "node_modules/p-cancelable": {
-      "version": "4.0.1",
-      "resolved": "https://registry.npmjs.org/p-cancelable/-/p-cancelable-4.0.1.tgz",
-      "integrity": "sha512-wBowNApzd45EIKdO1LaU+LrMBwAcjfPaYtVzV3lmfM3gf8Z4CHZsiIqlM8TZZ8okYvh5A1cP6gTfCRQtwUpaUg==",
-      "license": "MIT",
-      "engines": {
-        "node": ">=14.16"
-      }
-    },
     "node_modules/p-limit": {
       "version": "3.1.0",
       "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-3.1.0.tgz",
@@ -12375,18 +12009,6 @@
         "url": "https://github.com/sponsors/sindresorhus"
       }
     },
-    "node_modules/parse-ms": {
-      "version": "4.0.0",
-      "resolved": "https://registry.npmjs.org/parse-ms/-/parse-ms-4.0.0.tgz",
-      "integrity": "sha512-TXfryirbmq34y8QBwgqCVLi+8oA3oWx2eAnSn62ITyEhEYaWRlVZ2DvMM9eZbMs/RfxPu/PK/aBLyGj4IrqMHw==",
-      "license": "MIT",
-      "engines": {
-        "node": ">=18"
-      },
-      "funding": {
-        "url": "https://github.com/sponsors/sindresorhus"
-      }
-    },
     "node_modules/parse-semver": {
       "version": "1.1.1",
       "resolved": "https://registry.npmjs.org/parse-semver/-/parse-semver-1.1.1.tgz",
@@ -12773,21 +12395,6 @@
       "dev": true,
       "license": "MIT"
     },
-    "node_modules/pretty-ms": {
-      "version": "9.2.0",
-      "resolved": "https://registry.npmjs.org/pretty-ms/-/pretty-ms-9.2.0.tgz",
-      "integrity": "sha512-4yf0QO/sllf/1zbZWYnvWw3NxCQwLXKzIj0G849LSufP15BXKM0rbD2Z3wVnkMfjdn/CB0Dpp444gYAACdsplg==",
-      "license": "MIT",
-      "dependencies": {
-        "parse-ms": "^4.0.0"
-      },
-      "engines": {
-        "node": ">=18"
-      },
-      "funding": {
-        "url": "https://github.com/sponsors/sindresorhus"
-      }
-    },
     "node_modules/process": {
       "version": "0.11.10",
       "resolved": "https://registry.npmjs.org/process/-/process-0.11.10.tgz",
@@ -12967,18 +12574,6 @@
       ],
       "license": "MIT"
     },
-    "node_modules/quick-lru": {
-      "version": "5.1.1",
-      "resolved": "https://registry.npmjs.org/quick-lru/-/quick-lru-5.1.1.tgz",
-      "integrity": "sha512-WuyALRjWPDGtt/wzJiadO5AXY+8hZ80hVpe6MyivgraREW751X3SbhRvG3eLKOYN+8VEvqLcf3wdnt44Z4S4SA==",
-      "license": "MIT",
-      "engines": {
-        "node": ">=10"
-      },
-      "funding": {
-        "url": "https://github.com/sponsors/sindresorhus"
-      }
-    },
     "node_modules/qwen-code-vscode-ide-companion": {
       "resolved": "packages/vscode-ide-companion",
       "link": true
@@ -13431,12 +13026,6 @@
         "url": "https://github.com/sponsors/ljharb"
       }
     },
-    "node_modules/resolve-alpn": {
-      "version": "1.2.1",
-      "resolved": "https://registry.npmjs.org/resolve-alpn/-/resolve-alpn-1.2.1.tgz",
-      "integrity": "sha512-0a1F4l73/ZFZOakJnQ3FvkJ2+gSTQWz/r2KE5OdDY0TxPm5h4GkqkWWfM47T7HsbnOtcJVEF4epCVy6u7Q3K+g==",
-      "license": "MIT"
-    },
     "node_modules/resolve-from": {
       "version": "4.0.0",
       "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-4.0.0.tgz",
@@ -13457,21 +13046,6 @@
         "url": "https://github.com/privatenumber/resolve-pkg-maps?sponsor=1"
       }
     },
-    "node_modules/responselike": {
-      "version": "3.0.0",
-      "resolved": "https://registry.npmjs.org/responselike/-/responselike-3.0.0.tgz",
-      "integrity": "sha512-40yHxbNcl2+rzXvZuVkrYohathsSJlMTXKryG5y8uciHv1+xDLHQpgjG64JUO9nrEq2jGLH6IZ8BcZyw3wrweg==",
-      "license": "MIT",
-      "dependencies": {
-        "lowercase-keys": "^3.0.0"
-      },
-      "engines": {
-        "node": ">=14.16"
-      },
-      "funding": {
-        "url": "https://github.com/sponsors/sindresorhus"
-      }
-    },
     "node_modules/restore-cursor": {
       "version": "4.0.0",
       "resolved": "https://registry.npmjs.org/restore-cursor/-/restore-cursor-4.0.0.tgz",
@@ -14507,18 +14081,6 @@
         "node": ">=4"
       }
     },
-    "node_modules/strip-final-newline": {
-      "version": "4.0.0",
-      "resolved": "https://registry.npmjs.org/strip-final-newline/-/strip-final-newline-4.0.0.tgz",
-      "integrity": "sha512-aulFJcD6YK8V1G7iRB5tigAP4TsHBZZrOV8pjV++zdUwmeV8uzbY7yn6h9MswN62adStNZFuCIx4haBnRuMDaw==",
-      "license": "MIT",
-      "engines": {
-        "node": ">=18"
-      },
-      "funding": {
-        "url": "https://github.com/sponsors/sindresorhus"
-      }
-    },
     "node_modules/strip-json-comments": {
       "version": "3.1.1",
       "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz",
@@ -16366,18 +15928,6 @@
         "url": "https://github.com/sponsors/sindresorhus"
       }
     },
-    "node_modules/yoctocolors": {
-      "version": "2.1.1",
-      "resolved": "https://registry.npmjs.org/yoctocolors/-/yoctocolors-2.1.1.tgz",
-      "integrity": "sha512-GQHQqAopRhwU8Kt1DDM8NjibDXHC8eoh1erhGAJPEyveY9qqVeXvVikNKrDz69sHowPMorbPUrH/mx8c50eiBQ==",
-      "license": "MIT",
-      "engines": {
-        "node": ">=18"
-      },
-      "funding": {
-        "url": "https://github.com/sponsors/sindresorhus"
-      }
-    },
     "node_modules/yoctocolors-cjs": {
       "version": "2.1.2",
       "resolved": "https://registry.npmjs.org/yoctocolors-cjs/-/yoctocolors-cjs-2.1.2.tgz",
@@ -16474,7 +16024,7 @@
     },
     "packages/cli": {
       "name": "@qwen-code/qwen-code",
-      "version": "0.0.14",
+      "version": "0.1.2",
       "dependencies": {
         "@google/genai": "1.16.0",
         "@iarna/toml": "^2.2.5",
@@ -16589,10 +16139,10 @@
     },
     "packages/core": {
       "name": "@qwen-code/qwen-code-core",
-      "version": "0.0.14",
+      "version": "0.1.2",
       "hasInstallScript": true,
       "dependencies": {
         "@google/genai": "1.16.0",
-        "@joshua.litt/get-ripgrep": "^0.0.2",
         "@modelcontextprotocol/sdk": "^1.11.0",
         "@opentelemetry/api": "^1.9.0",
         "@opentelemetry/exporter-logs-otlp-grpc": "^0.203.0",
@@ -16728,7 +16278,7 @@
     },
     "packages/test-utils": {
       "name": "@qwen-code/qwen-code-test-utils",
-      "version": "0.0.14",
+      "version": "0.1.2",
       "dev": true,
       "license": "Apache-2.0",
       "devDependencies": {
@@ -16740,7 +16290,7 @@
     },
     "packages/vscode-ide-companion": {
       "name": "qwen-code-vscode-ide-companion",
-      "version": "0.0.14",
+      "version": "0.1.2",
       "license": "LICENSE",
       "dependencies": {
         "@modelcontextprotocol/sdk": "^1.15.1",
@@ -1,6 +1,6 @@
 {
   "name": "@qwen-code/qwen-code",
-  "version": "0.0.14",
+  "version": "0.1.2",
   "engines": {
     "node": ">=20.0.0"
   },
@@ -13,7 +13,7 @@
     "url": "git+https://github.com/QwenLM/qwen-code.git"
   },
   "config": {
-    "sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.0.14"
+    "sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.1.2"
   },
   "scripts": {
     "start": "cross-env node scripts/start.js",
@@ -63,10 +63,10 @@
     }
   },
   "bin": {
-    "qwen": "bundle/gemini.js"
+    "qwen": "dist/cli.js"
   },
   "files": [
-    "bundle/",
+    "dist/",
     "README.md",
     "LICENSE"
   ],

@@ -1,6 +1,6 @@
 {
   "name": "@qwen-code/qwen-code",
-  "version": "0.0.14",
+  "version": "0.1.2",
   "description": "Qwen Code",
   "repository": {
     "type": "git",
@@ -25,7 +25,7 @@
     "dist"
   ],
   "config": {
-    "sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.0.14"
+    "sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.1.2"
   },
   "dependencies": {
     "@google/genai": "1.16.0",
@@ -18,60 +18,26 @@ vi.mock('./settings.js', () => ({
 describe('validateAuthMethod', () => {
   beforeEach(() => {
     vi.resetModules();
-    vi.stubEnv('GEMINI_API_KEY', undefined);
-    vi.stubEnv('GOOGLE_CLOUD_PROJECT', undefined);
-    vi.stubEnv('GOOGLE_CLOUD_LOCATION', undefined);
-    vi.stubEnv('GOOGLE_API_KEY', undefined);
   });

   afterEach(() => {
     vi.unstubAllEnvs();
   });

-  it('should return null for LOGIN_WITH_GOOGLE', () => {
-    expect(validateAuthMethod(AuthType.LOGIN_WITH_GOOGLE)).toBeNull();
+  it('should return null for USE_OPENAI', () => {
+    process.env['OPENAI_API_KEY'] = 'fake-key';
+    expect(validateAuthMethod(AuthType.USE_OPENAI)).toBeNull();
   });

-  it('should return null for CLOUD_SHELL', () => {
-    expect(validateAuthMethod(AuthType.CLOUD_SHELL)).toBeNull();
+  it('should return an error message for USE_OPENAI if OPENAI_API_KEY is not set', () => {
+    delete process.env['OPENAI_API_KEY'];
+    expect(validateAuthMethod(AuthType.USE_OPENAI)).toBe(
+      'OPENAI_API_KEY environment variable not found. You can enter it interactively or add it to your .env file.',
+    );
   });

-  describe('USE_GEMINI', () => {
-    it('should return null if GEMINI_API_KEY is set', () => {
-      vi.stubEnv('GEMINI_API_KEY', 'test-key');
-      expect(validateAuthMethod(AuthType.USE_GEMINI)).toBeNull();
-    });
-
-    it('should return an error message if GEMINI_API_KEY is not set', () => {
-      vi.stubEnv('GEMINI_API_KEY', undefined);
-      expect(validateAuthMethod(AuthType.USE_GEMINI)).toBe(
-        'GEMINI_API_KEY environment variable not found. Add that to your environment and try again (no reload needed if using .env)!',
-      );
-    });
-  });
-
-  describe('USE_VERTEX_AI', () => {
-    it('should return null if GOOGLE_CLOUD_PROJECT and GOOGLE_CLOUD_LOCATION are set', () => {
-      vi.stubEnv('GOOGLE_CLOUD_PROJECT', 'test-project');
-      vi.stubEnv('GOOGLE_CLOUD_LOCATION', 'test-location');
-      expect(validateAuthMethod(AuthType.USE_VERTEX_AI)).toBeNull();
-    });
-
-    it('should return null if GOOGLE_API_KEY is set', () => {
-      vi.stubEnv('GOOGLE_API_KEY', 'test-api-key');
-      expect(validateAuthMethod(AuthType.USE_VERTEX_AI)).toBeNull();
-    });
-
-    it('should return an error message if no required environment variables are set', () => {
-      vi.stubEnv('GOOGLE_CLOUD_PROJECT', undefined);
-      vi.stubEnv('GOOGLE_CLOUD_LOCATION', undefined);
-      expect(validateAuthMethod(AuthType.USE_VERTEX_AI)).toBe(
-        'When using Vertex AI, you must specify either:\n' +
-          '• GOOGLE_CLOUD_PROJECT and GOOGLE_CLOUD_LOCATION environment variables.\n' +
-          '• GOOGLE_API_KEY environment variable (if using express mode).\n' +
-          'Update your environment and try again (no reload needed if using .env)!',
-      );
-    });
+  it('should return null for QWEN_OAUTH', () => {
+    expect(validateAuthMethod(AuthType.QWEN_OAUTH)).toBeNull();
   });

   it('should return an error message for an invalid auth method', () => {
@@ -8,39 +8,13 @@ import { AuthType } from '@qwen-code/qwen-code-core';
import { loadEnvironment, loadSettings } from './settings.js';

export function validateAuthMethod(authMethod: string): string | null {
loadEnvironment(loadSettings().merged);
if (
authMethod === AuthType.LOGIN_WITH_GOOGLE ||
authMethod === AuthType.CLOUD_SHELL
) {
return null;
}

if (authMethod === AuthType.USE_GEMINI) {
if (!process.env['GEMINI_API_KEY']) {
return 'GEMINI_API_KEY environment variable not found. Add that to your environment and try again (no reload needed if using .env)!';
}
return null;
}

if (authMethod === AuthType.USE_VERTEX_AI) {
const hasVertexProjectLocationConfig =
!!process.env['GOOGLE_CLOUD_PROJECT'] &&
!!process.env['GOOGLE_CLOUD_LOCATION'];
const hasGoogleApiKey = !!process.env['GOOGLE_API_KEY'];
if (!hasVertexProjectLocationConfig && !hasGoogleApiKey) {
return (
'When using Vertex AI, you must specify either:\n' +
'• GOOGLE_CLOUD_PROJECT and GOOGLE_CLOUD_LOCATION environment variables.\n' +
'• GOOGLE_API_KEY environment variable (if using express mode).\n' +
'Update your environment and try again (no reload needed if using .env)!'
);
}
return null;
}
const settings = loadSettings();
loadEnvironment(settings.merged);

if (authMethod === AuthType.USE_OPENAI) {
if (!process.env['OPENAI_API_KEY']) {
const hasApiKey =
process.env['OPENAI_API_KEY'] || settings.merged.security?.auth?.apiKey;
if (!hasApiKey) {
return 'OPENAI_API_KEY environment variable not found. You can enter it interactively or add it to your .env file.';
}
return null;
@@ -54,15 +28,3 @@ export function validateAuthMethod(authMethod: string): string | null {

return 'Invalid auth method selected.';
}

export const setOpenAIApiKey = (apiKey: string): void => {
process.env['OPENAI_API_KEY'] = apiKey;
};

export const setOpenAIBaseUrl = (baseUrl: string): void => {
process.env['OPENAI_BASE_URL'] = baseUrl;
};

export const setOpenAIModel = (model: string): void => {
process.env['OPENAI_MODEL'] = model;
};

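For context, a minimal sketch of how a caller might use the reworked validateAuthMethod, which now also honors an API key persisted under security.auth.apiKey in settings; the wrapper function here is hypothetical, not part of the diff:

// Illustrative only: validate the chosen method before attempting auth.
import { AuthType } from '@qwen-code/qwen-code-core';
import { validateAuthMethod } from './auth.js';

function ensureAuthConfigured(method: AuthType): void {
  // Returns null when the method is usable (env var or settings-based key),
  // or a human-readable error message otherwise.
  const error = validateAuthMethod(method);
  if (error) {
    throw new Error(error);
  }
}
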
@@ -2051,7 +2051,7 @@ describe('loadCliConfig extensions', () => {
});

describe('loadCliConfig model selection', () => {
it('selects a model from settings.json if provided', async () => {
it.skip('selects a model from settings.json if provided', async () => {
process.argv = ['node', 'script.js'];
const argv = await parseArguments({} as Settings);
const config = await loadCliConfig(
@@ -2072,7 +2072,7 @@ describe('loadCliConfig model selection', () => {
expect(config.getModel()).toBe('qwen3-coder-plus');
});

it('uses the default gemini model if nothing is set', async () => {
it.skip('uses the default gemini model if nothing is set', async () => {
process.argv = ['node', 'script.js']; // No model set.
const argv = await parseArguments({} as Settings);
const config = await loadCliConfig(
@@ -2399,6 +2399,73 @@ describe('loadCliConfig useRipgrep', () => {
});
});

describe('loadCliConfig useBuiltinRipgrep', () => {
const originalArgv = process.argv;

beforeEach(() => {
vi.resetAllMocks();
vi.mocked(os.homedir).mockReturnValue('/mock/home/user');
vi.stubEnv('GEMINI_API_KEY', 'test-api-key');
});

afterEach(() => {
process.argv = originalArgv;
vi.unstubAllEnvs();
vi.restoreAllMocks();
});

it('should be true by default when useBuiltinRipgrep is not set in settings', async () => {
process.argv = ['node', 'script.js'];
const argv = await parseArguments({} as Settings);
const settings: Settings = {};
const config = await loadCliConfig(
settings,
[],
new ExtensionEnablementManager(
ExtensionStorage.getUserExtensionsDir(),
argv.extensions,
),
'test-session',
argv,
);
expect(config.getUseBuiltinRipgrep()).toBe(true);
});

it('should be false when useBuiltinRipgrep is set to false in settings', async () => {
process.argv = ['node', 'script.js'];
const argv = await parseArguments({} as Settings);
const settings: Settings = { tools: { useBuiltinRipgrep: false } };
const config = await loadCliConfig(
settings,
[],
new ExtensionEnablementManager(
ExtensionStorage.getUserExtensionsDir(),
argv.extensions,
),
'test-session',
argv,
);
expect(config.getUseBuiltinRipgrep()).toBe(false);
});

it('should be true when useBuiltinRipgrep is explicitly set to true in settings', async () => {
process.argv = ['node', 'script.js'];
const argv = await parseArguments({} as Settings);
const settings: Settings = { tools: { useBuiltinRipgrep: true } };
const config = await loadCliConfig(
settings,
[],
new ExtensionEnablementManager(
ExtensionStorage.getUserExtensionsDir(),
argv.extensions,
),
'test-session',
argv,
);
expect(config.getUseBuiltinRipgrep()).toBe(true);
});
});

describe('screenReader configuration', () => {
const originalArgv = process.argv;

@@ -13,7 +13,6 @@ import { extensionsCommand } from '../commands/extensions.js';
import {
ApprovalMode,
Config,
DEFAULT_QWEN_MODEL,
DEFAULT_QWEN_EMBEDDING_MODEL,
DEFAULT_MEMORY_FILE_FILTERING_OPTIONS,
EditTool,
@@ -194,14 +193,13 @@ export async function parseArguments(settings: Settings): Promise<CliArgs> {
})
.option('proxy', {
type: 'string',
description:
'Proxy for gemini client, like schema://user:password@host:port',
description: 'Proxy for Qwen Code, like schema://user:password@host:port',
})
.deprecateOption(
'proxy',
'Use the "proxy" setting in settings.json instead. This flag will be removed in a future version.',
)
.command('$0 [query..]', 'Launch Gemini CLI', (yargsInstance: Argv) =>
.command('$0 [query..]', 'Launch Qwen Code CLI', (yargsInstance: Argv) =>
yargsInstance
.positional('query', {
description:
@@ -669,13 +667,11 @@ export async function loadCliConfig(
);
}

const defaultModel = DEFAULT_QWEN_MODEL;
const resolvedModel: string =
const resolvedModel =
argv.model ||
process.env['OPENAI_MODEL'] ||
process.env['QWEN_MODEL'] ||
settings.model?.name ||
defaultModel;
settings.model?.name;

const sandboxConfig = await loadSandboxConfig(settings, argv);
const screenReader =
@@ -739,8 +735,14 @@ export async function loadCliConfig(
generationConfig: {
...(settings.model?.generationConfig || {}),
model: resolvedModel,
apiKey: argv.openaiApiKey || process.env['OPENAI_API_KEY'],
baseUrl: argv.openaiBaseUrl || process.env['OPENAI_BASE_URL'],
apiKey:
argv.openaiApiKey ||
process.env['OPENAI_API_KEY'] ||
settings.security?.auth?.apiKey,
baseUrl:
argv.openaiBaseUrl ||
process.env['OPENAI_BASE_URL'] ||
settings.security?.auth?.baseUrl,
enableOpenAILogging:
(typeof argv.openaiLogging === 'undefined'
? settings.model?.enableOpenAILogging
@@ -758,6 +760,7 @@ export async function loadCliConfig(
interactive,
trustedFolder,
useRipgrep: settings.tools?.useRipgrep,
useBuiltinRipgrep: settings.tools?.useBuiltinRipgrep,
shouldUseNodePtyShell: settings.tools?.shell?.enableInteractiveShell,
skipNextSpeakerCheck: settings.model?.skipNextSpeakerCheck,
enablePromptCompletion: settings.general?.enablePromptCompletion ?? false,

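The resolution order introduced above (CLI flag, then environment variable, then persisted settings) can be summarized with a sketch like the following; the helper name is hypothetical and only mirrors the precedence used in loadCliConfig:

// Hypothetical helper: CLI argument > environment variable > settings value.
function resolveWithPrecedence(
  cliValue: string | undefined,
  envValue: string | undefined,
  settingsValue: string | undefined,
): string | undefined {
  return cliValue || envValue || settingsValue;
}

// e.g. apiKey = resolveWithPrecedence(argv.openaiApiKey,
//   process.env['OPENAI_API_KEY'], settings.security?.auth?.apiKey);
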
@@ -66,6 +66,8 @@ import {
loadEnvironment,
migrateDeprecatedSettings,
SettingScope,
SETTINGS_VERSION,
SETTINGS_VERSION_KEY,
} from './settings.js';
import { FatalConfigError, QWEN_DIR } from '@qwen-code/qwen-code-core';

@@ -94,6 +96,7 @@ vi.mock('fs', async (importOriginal) => {
existsSync: vi.fn(),
readFileSync: vi.fn(),
writeFileSync: vi.fn(),
renameSync: vi.fn(),
mkdirSync: vi.fn(),
realpathSync: (p: string) => p,
};
@@ -171,11 +174,15 @@ describe('Settings Loading and Merging', () => {
getSystemSettingsPath(),
'utf-8',
);
expect(settings.system.settings).toEqual(systemSettingsContent);
expect(settings.system.settings).toEqual({
...systemSettingsContent,
[SETTINGS_VERSION_KEY]: SETTINGS_VERSION,
});
expect(settings.user.settings).toEqual({});
expect(settings.workspace.settings).toEqual({});
expect(settings.merged).toEqual({
...systemSettingsContent,
[SETTINGS_VERSION_KEY]: SETTINGS_VERSION,
});
});

@@ -207,10 +214,14 @@ describe('Settings Loading and Merging', () => {
expectedUserSettingsPath,
'utf-8',
);
expect(settings.user.settings).toEqual(userSettingsContent);
expect(settings.user.settings).toEqual({
...userSettingsContent,
[SETTINGS_VERSION_KEY]: SETTINGS_VERSION,
});
expect(settings.workspace.settings).toEqual({});
expect(settings.merged).toEqual({
...userSettingsContent,
[SETTINGS_VERSION_KEY]: SETTINGS_VERSION,
});
});

@@ -241,9 +252,13 @@ describe('Settings Loading and Merging', () => {
'utf-8',
);
expect(settings.user.settings).toEqual({});
expect(settings.workspace.settings).toEqual(workspaceSettingsContent);
expect(settings.workspace.settings).toEqual({
...workspaceSettingsContent,
[SETTINGS_VERSION_KEY]: SETTINGS_VERSION,
});
expect(settings.merged).toEqual({
...workspaceSettingsContent,
[SETTINGS_VERSION_KEY]: SETTINGS_VERSION,
});
});

@@ -304,10 +319,20 @@ describe('Settings Loading and Merging', () => {

const settings = loadSettings(MOCK_WORKSPACE_DIR);

expect(settings.system.settings).toEqual(systemSettingsContent);
expect(settings.user.settings).toEqual(userSettingsContent);
expect(settings.workspace.settings).toEqual(workspaceSettingsContent);
expect(settings.system.settings).toEqual({
...systemSettingsContent,
[SETTINGS_VERSION_KEY]: SETTINGS_VERSION,
});
expect(settings.user.settings).toEqual({
...userSettingsContent,
[SETTINGS_VERSION_KEY]: SETTINGS_VERSION,
});
expect(settings.workspace.settings).toEqual({
...workspaceSettingsContent,
[SETTINGS_VERSION_KEY]: SETTINGS_VERSION,
});
expect(settings.merged).toEqual({
[SETTINGS_VERSION_KEY]: SETTINGS_VERSION,
ui: {
theme: 'system-theme',
},
@@ -361,6 +386,7 @@ describe('Settings Loading and Merging', () => {
const settings = loadSettings(MOCK_WORKSPACE_DIR);

expect(settings.merged).toEqual({
[SETTINGS_VERSION_KEY]: SETTINGS_VERSION,
ui: {
theme: 'legacy-dark',
},
@@ -413,6 +439,132 @@ describe('Settings Loading and Merging', () => {
expect((settings.merged as TestSettings)['allowedTools']).toBeUndefined();
});

it('should add version field to migrated settings file', () => {
(mockFsExistsSync as Mock).mockImplementation(
(p: fs.PathLike) => p === USER_SETTINGS_PATH,
);
const legacySettingsContent = {
theme: 'dark',
model: 'qwen-coder',
};
(fs.readFileSync as Mock).mockImplementation(
(p: fs.PathOrFileDescriptor) => {
if (p === USER_SETTINGS_PATH)
return JSON.stringify(legacySettingsContent);
return '{}';
},
);

loadSettings(MOCK_WORKSPACE_DIR);

// Verify that fs.writeFileSync was called with migrated settings including version
expect(fs.writeFileSync).toHaveBeenCalled();
const writeCall = (fs.writeFileSync as Mock).mock.calls[0];
const writtenContent = JSON.parse(writeCall[1] as string);
expect(writtenContent[SETTINGS_VERSION_KEY]).toBe(SETTINGS_VERSION);
});

it('should not re-migrate settings that have version field', () => {
(mockFsExistsSync as Mock).mockImplementation(
(p: fs.PathLike) => p === USER_SETTINGS_PATH,
);
const migratedSettingsContent = {
[SETTINGS_VERSION_KEY]: SETTINGS_VERSION,
ui: {
theme: 'dark',
},
model: {
name: 'qwen-coder',
},
};
(fs.readFileSync as Mock).mockImplementation(
(p: fs.PathOrFileDescriptor) => {
if (p === USER_SETTINGS_PATH)
return JSON.stringify(migratedSettingsContent);
return '{}';
},
);

loadSettings(MOCK_WORKSPACE_DIR);

// Verify that fs.renameSync and fs.writeFileSync were NOT called
// (because no migration was needed)
expect(fs.renameSync).not.toHaveBeenCalled();
expect(fs.writeFileSync).not.toHaveBeenCalled();
});

it('should add version field to V2 settings without version and write to disk', () => {
(mockFsExistsSync as Mock).mockImplementation(
(p: fs.PathLike) => p === USER_SETTINGS_PATH,
);
// V2 format but no version field
const v2SettingsWithoutVersion = {
ui: {
theme: 'dark',
},
model: {
name: 'qwen-coder',
},
};
(fs.readFileSync as Mock).mockImplementation(
(p: fs.PathOrFileDescriptor) => {
if (p === USER_SETTINGS_PATH)
return JSON.stringify(v2SettingsWithoutVersion);
return '{}';
},
);

loadSettings(MOCK_WORKSPACE_DIR);

// Verify that fs.writeFileSync was called (to add version)
// but NOT fs.renameSync (no backup needed, just adding version)
expect(fs.renameSync).not.toHaveBeenCalled();
expect(fs.writeFileSync).toHaveBeenCalledTimes(1);

const writeCall = (fs.writeFileSync as Mock).mock.calls[0];
const writtenPath = writeCall[0];
const writtenContent = JSON.parse(writeCall[1] as string);

expect(writtenPath).toBe(USER_SETTINGS_PATH);
expect(writtenContent[SETTINGS_VERSION_KEY]).toBe(SETTINGS_VERSION);
expect(writtenContent.ui?.theme).toBe('dark');
expect(writtenContent.model?.name).toBe('qwen-coder');
});

it('should correctly handle partially migrated settings without version field', () => {
(mockFsExistsSync as Mock).mockImplementation(
(p: fs.PathLike) => p === USER_SETTINGS_PATH,
);
// Edge case: model already in V2 format (object), but autoAccept in V1 format
const partiallyMigratedContent = {
model: {
name: 'qwen-coder',
},
autoAccept: false, // V1 key
};
(fs.readFileSync as Mock).mockImplementation(
(p: fs.PathOrFileDescriptor) => {
if (p === USER_SETTINGS_PATH)
return JSON.stringify(partiallyMigratedContent);
return '{}';
},
);

loadSettings(MOCK_WORKSPACE_DIR);

// Verify that the migrated settings preserve the model object correctly
expect(fs.writeFileSync).toHaveBeenCalled();
const writeCall = (fs.writeFileSync as Mock).mock.calls[0];
const writtenContent = JSON.parse(writeCall[1] as string);

// Model should remain as an object, not double-nested
expect(writtenContent.model).toEqual({ name: 'qwen-coder' });
// autoAccept should be migrated to tools.autoAccept
expect(writtenContent.tools?.autoAccept).toBe(false);
// Version field should be added
expect(writtenContent[SETTINGS_VERSION_KEY]).toBe(SETTINGS_VERSION);
});

it('should correctly merge and migrate legacy array properties from multiple scopes', () => {
(mockFsExistsSync as Mock).mockReturnValue(true);
const legacyUserSettings = {
@@ -515,11 +667,24 @@ describe('Settings Loading and Merging', () => {

const settings = loadSettings(MOCK_WORKSPACE_DIR);

expect(settings.systemDefaults.settings).toEqual(systemDefaultsContent);
expect(settings.system.settings).toEqual(systemSettingsContent);
expect(settings.user.settings).toEqual(userSettingsContent);
expect(settings.workspace.settings).toEqual(workspaceSettingsContent);
expect(settings.systemDefaults.settings).toEqual({
...systemDefaultsContent,
[SETTINGS_VERSION_KEY]: SETTINGS_VERSION,
});
expect(settings.system.settings).toEqual({
...systemSettingsContent,
[SETTINGS_VERSION_KEY]: SETTINGS_VERSION,
});
expect(settings.user.settings).toEqual({
...userSettingsContent,
[SETTINGS_VERSION_KEY]: SETTINGS_VERSION,
});
expect(settings.workspace.settings).toEqual({
...workspaceSettingsContent,
[SETTINGS_VERSION_KEY]: SETTINGS_VERSION,
});
expect(settings.merged).toEqual({
[SETTINGS_VERSION_KEY]: SETTINGS_VERSION,
context: {
fileName: 'WORKSPACE_CONTEXT.md',
includeDirectories: [
@@ -866,8 +1031,14 @@ describe('Settings Loading and Merging', () => {

const settings = loadSettings(MOCK_WORKSPACE_DIR);

expect(settings.user.settings).toEqual(userSettingsContent);
expect(settings.workspace.settings).toEqual(workspaceSettingsContent);
expect(settings.user.settings).toEqual({
...userSettingsContent,
[SETTINGS_VERSION_KEY]: SETTINGS_VERSION,
});
expect(settings.workspace.settings).toEqual({
...workspaceSettingsContent,
[SETTINGS_VERSION_KEY]: SETTINGS_VERSION,
});
expect(settings.merged.mcpServers).toEqual({
'user-server': {
command: 'user-command',
@@ -1696,9 +1867,13 @@ describe('Settings Loading and Merging', () => {
'utf-8',
);
expect(settings.system.path).toBe(MOCK_ENV_SYSTEM_SETTINGS_PATH);
expect(settings.system.settings).toEqual(systemSettingsContent);
expect(settings.system.settings).toEqual({
...systemSettingsContent,
[SETTINGS_VERSION_KEY]: SETTINGS_VERSION,
});
expect(settings.merged).toEqual({
...systemSettingsContent,
[SETTINGS_VERSION_KEY]: SETTINGS_VERSION,
});
});
});
@@ -2248,6 +2423,44 @@ describe('Settings Loading and Merging', () => {
customWittyPhrases: ['test phrase'],
});
});

it('should remove version field when migrating to V1', () => {
const v2Settings = {
[SETTINGS_VERSION_KEY]: SETTINGS_VERSION,
ui: {
theme: 'dark',
},
model: {
name: 'qwen-coder',
},
};
const v1Settings = migrateSettingsToV1(v2Settings);

// Version field should not be present in V1 settings
expect(v1Settings[SETTINGS_VERSION_KEY]).toBeUndefined();
// Other fields should be properly migrated
expect(v1Settings).toEqual({
theme: 'dark',
model: 'qwen-coder',
});
});

it('should handle version field in unrecognized properties', () => {
const v2Settings = {
[SETTINGS_VERSION_KEY]: SETTINGS_VERSION,
general: {
vimMode: true,
},
someUnrecognizedKey: 'value',
};
const v1Settings = migrateSettingsToV1(v2Settings);

// Version field should be filtered out
expect(v1Settings[SETTINGS_VERSION_KEY]).toBeUndefined();
// Unrecognized keys should be preserved
expect(v1Settings['someUnrecognizedKey']).toBe('value');
expect(v1Settings['vimMode']).toBe(true);
});
});

describe('loadEnvironment', () => {
@@ -2368,6 +2581,73 @@ describe('Settings Loading and Merging', () => {
};
expect(needsMigration(settings)).toBe(false);
});

describe('with version field', () => {
it('should return false when version field indicates current or newer version', () => {
const settingsWithVersion = {
[SETTINGS_VERSION_KEY]: SETTINGS_VERSION,
theme: 'dark', // Even though this is a V1 key, version field takes precedence
};
expect(needsMigration(settingsWithVersion)).toBe(false);
});

it('should return false when version field indicates a newer version', () => {
const settingsWithNewerVersion = {
[SETTINGS_VERSION_KEY]: SETTINGS_VERSION + 1,
theme: 'dark',
};
expect(needsMigration(settingsWithNewerVersion)).toBe(false);
});

it('should return true when version field indicates an older version', () => {
const settingsWithOldVersion = {
[SETTINGS_VERSION_KEY]: SETTINGS_VERSION - 1,
theme: 'dark',
};
expect(needsMigration(settingsWithOldVersion)).toBe(true);
});

it('should use fallback logic when version field is not a number', () => {
const settingsWithInvalidVersion = {
[SETTINGS_VERSION_KEY]: 'not-a-number',
theme: 'dark',
};
expect(needsMigration(settingsWithInvalidVersion)).toBe(true);
});

it('should use fallback logic when version field is missing', () => {
const settingsWithoutVersion = {
theme: 'dark',
};
expect(needsMigration(settingsWithoutVersion)).toBe(true);
});
});

describe('edge case: partially migrated settings', () => {
it('should return true for partially migrated settings without version field', () => {
// This simulates the dangerous edge case: model already in V2 format,
// but other fields in V1 format
const partiallyMigrated = {
model: {
name: 'qwen-coder',
},
autoAccept: false, // V1 key
};
expect(needsMigration(partiallyMigrated)).toBe(true);
});

it('should return false for partially migrated settings WITH version field', () => {
// With version field, we trust that it's been properly migrated
const partiallyMigratedWithVersion = {
[SETTINGS_VERSION_KEY]: SETTINGS_VERSION,
model: {
name: 'qwen-coder',
},
autoAccept: false, // This would look like V1 but version says it's V2
};
expect(needsMigration(partiallyMigratedWithVersion)).toBe(false);
});
});
});

describe('migrateDeprecatedSettings', () => {

@@ -56,6 +56,10 @@ export const DEFAULT_EXCLUDED_ENV_VARS = ['DEBUG', 'DEBUG_MODE'];

const MIGRATE_V2_OVERWRITE = true;

// Settings version to track migration state
export const SETTINGS_VERSION = 2;
export const SETTINGS_VERSION_KEY = '$version';

const MIGRATION_MAP: Record<string, string> = {
accessibility: 'ui.accessibility',
allowedTools: 'tools.allowed',
@@ -216,8 +220,16 @@ function setNestedProperty(
}

export function needsMigration(settings: Record<string, unknown>): boolean {
// A file needs migration if it contains any top-level key that is moved to a
// nested location in V2.
// Check version field first - if present and matches current version, no migration needed
if (SETTINGS_VERSION_KEY in settings) {
const version = settings[SETTINGS_VERSION_KEY];
if (typeof version === 'number' && version >= SETTINGS_VERSION) {
return false;
}
}

// Fallback to legacy detection: A file needs migration if it contains any
// top-level key that is moved to a nested location in V2.
const hasV1Keys = Object.entries(MIGRATION_MAP).some(([v1Key, v2Path]) => {
if (v1Key === v2Path || !(v1Key in settings)) {
return false;
@@ -250,6 +262,21 @@ function migrateSettingsToV2(

for (const [oldKey, newPath] of Object.entries(MIGRATION_MAP)) {
if (flatKeys.has(oldKey)) {
// Safety check: If this key is a V2 container (like 'model') and it's
// already an object, it's likely already in V2 format. Skip migration
// to prevent double-nesting (e.g., model.name.name).
if (
KNOWN_V2_CONTAINERS.has(oldKey) &&
typeof flatSettings[oldKey] === 'object' &&
flatSettings[oldKey] !== null &&
!Array.isArray(flatSettings[oldKey])
) {
// This is already a V2 container, carry it over as-is
v2Settings[oldKey] = flatSettings[oldKey];
flatKeys.delete(oldKey);
continue;
}

setNestedProperty(v2Settings, newPath, flatSettings[oldKey]);
flatKeys.delete(oldKey);
}
@@ -287,6 +314,9 @@ function migrateSettingsToV2(
}
}

// Set version field to indicate this is a V2 settings file
v2Settings[SETTINGS_VERSION_KEY] = SETTINGS_VERSION;

return v2Settings;
}

@@ -336,6 +366,11 @@ export function migrateSettingsToV1(

// Carry over any unrecognized keys
for (const remainingKey of v2Keys) {
// Skip the version field - it's only for V2 format
if (remainingKey === SETTINGS_VERSION_KEY) {
continue;
}

const value = v2Settings[remainingKey];
if (value === undefined) {
continue;
@@ -621,6 +656,22 @@ export function loadSettings(
}
settingsObject = migratedSettings;
}
} else if (!(SETTINGS_VERSION_KEY in settingsObject)) {
// No migration needed, but version field is missing - add it for future optimizations
settingsObject[SETTINGS_VERSION_KEY] = SETTINGS_VERSION;
if (MIGRATE_V2_OVERWRITE) {
try {
fs.writeFileSync(
filePath,
JSON.stringify(settingsObject, null, 2),
'utf-8',
);
} catch (e) {
console.error(
`Error adding version to settings file: ${getErrorMessage(e)}`,
);
}
}
}
return { settings: settingsObject as Settings, rawJson: content };
}

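A small sketch of the version gate above: files stamped with $version at or beyond the current SETTINGS_VERSION skip migration, and everything else falls back to key-based detection. The literal inputs here mirror the behaviors exercised by the tests earlier in this diff:

// Illustrative inputs for needsMigration (with SETTINGS_VERSION === 2):
needsMigration({ $version: 2, theme: 'dark' });   // false - version field wins
needsMigration({ $version: 1, theme: 'dark' });   // true  - older version
needsMigration({ theme: 'dark' });                // true  - V1 key, no version
needsMigration({ $version: 'x', theme: 'dark' }); // true  - non-numeric, fallback
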
@@ -847,6 +847,16 @@ const SETTINGS_SCHEMA = {
'Use ripgrep for file content search instead of the fallback implementation. Provides faster search performance.',
showInDialog: true,
},
useBuiltinRipgrep: {
type: 'boolean',
label: 'Use Builtin Ripgrep',
category: 'Tools',
requiresRestart: false,
default: true,
description:
'Use the bundled ripgrep binary. When set to false, the system-level "rg" command will be used instead. This setting is only effective when useRipgrep is true.',
showInDialog: true,
},
enableToolOutputTruncation: {
type: 'boolean',
label: 'Enable Tool Output Truncation',
@@ -991,6 +1001,24 @@ const SETTINGS_SCHEMA = {
description: 'Whether to use an external authentication flow.',
showInDialog: false,
},
apiKey: {
type: 'string',
label: 'API Key',
category: 'Security',
requiresRestart: true,
default: undefined as string | undefined,
description: 'API key for OpenAI compatible authentication.',
showInDialog: false,
},
baseUrl: {
type: 'string',
label: 'Base URL',
category: 'Security',
requiresRestart: true,
default: undefined as string | undefined,
description: 'Base URL for OpenAI compatible API.',
showInDialog: false,
},
},
},
},

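As an illustration of the schema entries above, a settings object covering the new keys could look like the following sketch; the values are examples, not defaults:

// Hypothetical settings contents, typed loosely for illustration:
const exampleSettings = {
  tools: {
    useRipgrep: true,         // enable ripgrep-based search
    useBuiltinRipgrep: false, // prefer the system-level "rg" binary
  },
  security: {
    auth: {
      apiKey: 'sk-...',                     // placeholder; never commit real keys
      baseUrl: 'https://api.openai.com/v1', // OpenAI-compatible endpoint
    },
  },
};
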
@@ -17,11 +17,7 @@ import dns from 'node:dns';
import { randomUUID } from 'node:crypto';
import { start_sandbox } from './utils/sandbox.js';
import type { DnsResolutionOrder, LoadedSettings } from './config/settings.js';
import {
loadSettings,
migrateDeprecatedSettings,
SettingScope,
} from './config/settings.js';
import { loadSettings, migrateDeprecatedSettings } from './config/settings.js';
import { themeManager } from './ui/themes/theme-manager.js';
import { getStartupWarnings } from './utils/startupWarnings.js';
import { getUserStartupWarnings } from './utils/userStartupWarnings.js';
@@ -233,17 +229,6 @@ export async function main() {
validateDnsResolutionOrder(settings.merged.advanced?.dnsResolutionOrder),
);

// Set a default auth type if one isn't set.
if (!settings.merged.security?.auth?.selectedType) {
if (process.env['CLOUD_SHELL'] === 'true') {
settings.setValue(
SettingScope.User,
'selectedAuthType',
AuthType.CLOUD_SHELL,
);
}
}

// Load custom themes from settings
themeManager.loadCustomThemes(settings.merged.ui?.customThemes);

@@ -402,7 +387,11 @@ export async function main() {
let input = config.getQuestion();
const startupWarnings = [
...(await getStartupWarnings()),
...(await getUserStartupWarnings()),
...(await getUserStartupWarnings({
workspaceRoot: process.cwd(),
useRipgrep: settings.merged.tools?.useRipgrep ?? true,
useBuiltinRipgrep: settings.merged.tools?.useBuiltinRipgrep ?? true,
})),
];

// Render UI, passing necessary config values. Check that there is no command line question.

@@ -555,7 +555,7 @@ export const AppContainer = (props: AppContainerProps) => {
historyManager.addItem(
{
type: MessageType.INFO,
text: 'Refreshing hierarchical memory (GEMINI.md or other context files)...',
text: 'Refreshing hierarchical memory (QWEN.md or other context files)...',
},
Date.now(),
);

@@ -8,12 +8,7 @@ import type React from 'react';
import { useState } from 'react';
import { AuthType } from '@qwen-code/qwen-code-core';
import { Box, Text } from 'ink';
import {
setOpenAIApiKey,
setOpenAIBaseUrl,
setOpenAIModel,
validateAuthMethod,
} from '../../config/auth.js';
import { validateAuthMethod } from '../../config/auth.js';
import { type LoadedSettings, SettingScope } from '../../config/settings.js';
import { Colors } from '../colors.js';
import { useKeypress } from '../hooks/useKeypress.js';
@@ -21,7 +16,15 @@ import { OpenAIKeyPrompt } from '../components/OpenAIKeyPrompt.js';
import { RadioButtonSelect } from '../components/shared/RadioButtonSelect.js';

interface AuthDialogProps {
onSelect: (authMethod: AuthType | undefined, scope: SettingScope) => void;
onSelect: (
authMethod: AuthType | undefined,
scope: SettingScope,
credentials?: {
apiKey?: string;
baseUrl?: string;
model?: string;
},
) => void;
settings: LoadedSettings;
initialErrorMessage?: string | null;
}
@@ -70,11 +73,7 @@ export function AuthDialog({
return item.value === defaultAuthType;
}

if (process.env['GEMINI_API_KEY']) {
return item.value === AuthType.USE_GEMINI;
}

return item.value === AuthType.LOGIN_WITH_GOOGLE;
return item.value === AuthType.QWEN_OAUTH;
}),
);

@@ -101,11 +100,12 @@ export function AuthDialog({
baseUrl: string,
model: string,
) => {
setOpenAIApiKey(apiKey);
setOpenAIBaseUrl(baseUrl);
setOpenAIModel(model);
setShowOpenAIKeyPrompt(false);
onSelect(AuthType.USE_OPENAI, SettingScope.User);
onSelect(AuthType.USE_OPENAI, SettingScope.User, {
apiKey,
baseUrl,
model,
});
};

const handleOpenAIKeyCancel = () => {

@@ -6,12 +6,11 @@

import { useState, useCallback, useEffect } from 'react';
import type { LoadedSettings, SettingScope } from '../../config/settings.js';
import { AuthType, type Config } from '@qwen-code/qwen-code-core';
import type { AuthType, Config } from '@qwen-code/qwen-code-core';
import {
clearCachedCredentialFile,
getErrorMessage,
} from '@qwen-code/qwen-code-core';
import { runExitCleanup } from '../../utils/cleanup.js';
import { AuthState } from '../types.js';
import { validateAuthMethod } from '../../config/auth.js';

@@ -30,23 +29,24 @@ export function validateAuthMethodWithSettings(
}

export const useAuthCommand = (settings: LoadedSettings, config: Config) => {
// If no auth type is selected, start in Updating state (shows auth dialog)
const unAuthenticated =
settings.merged.security?.auth?.selectedType === undefined;

const [authState, setAuthState] = useState<AuthState>(
settings.merged.security?.auth?.selectedType === undefined
? AuthState.Updating
: AuthState.Unauthenticated,
unAuthenticated ? AuthState.Updating : AuthState.Unauthenticated,
);

const [authError, setAuthError] = useState<string | null>(null);

const [isAuthenticating, setIsAuthenticating] = useState(false);
const [isAuthDialogOpen, setIsAuthDialogOpen] = useState(false);
const [isAuthDialogOpen, setIsAuthDialogOpen] = useState(unAuthenticated);

const onAuthError = useCallback(
(error: string | null) => {
setAuthError(error);
if (error) {
setAuthState(AuthState.Updating);
setIsAuthDialogOpen(true);
}
},
[setAuthError, setAuthState],
@@ -87,24 +87,49 @@ export const useAuthCommand = (settings: LoadedSettings, config: Config) => {

// Handle auth selection from dialog
const handleAuthSelect = useCallback(
async (authType: AuthType | undefined, scope: SettingScope) => {
async (
authType: AuthType | undefined,
scope: SettingScope,
credentials?: {
apiKey?: string;
baseUrl?: string;
model?: string;
},
) => {
if (authType) {
await clearCachedCredentialFile();

settings.setValue(scope, 'security.auth.selectedType', authType);
// Save OpenAI credentials if provided
if (credentials) {
// Update Config's internal generationConfig before calling refreshAuth
// This ensures refreshAuth has access to the new credentials
config.updateCredentials({
apiKey: credentials.apiKey,
baseUrl: credentials.baseUrl,
model: credentials.model,
});

if (
authType === AuthType.LOGIN_WITH_GOOGLE &&
config.isBrowserLaunchSuppressed()
) {
await runExitCleanup();
console.log(`
----------------------------------------------------------------
Logging in with Google... Please restart Gemini CLI to continue.
----------------------------------------------------------------
`);
process.exit(0);
// Also set environment variables for compatibility with other parts of the code
if (credentials.apiKey) {
settings.setValue(
scope,
'security.auth.apiKey',
credentials.apiKey,
);
}
if (credentials.baseUrl) {
settings.setValue(
scope,
'security.auth.baseUrl',
credentials.baseUrl,
);
}
if (credentials.model) {
settings.setValue(scope, 'model.name', credentials.model);
}
}

settings.setValue(scope, 'security.auth.selectedType', authType);
}

setIsAuthDialogOpen(false);

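In effect, the reworked handleAuthSelect pushes dialog-supplied OpenAI credentials into the live Config and persists them into settings before selecting the auth type. A condensed sketch of that flow, with the call site hypothetical and the placeholder values illustrative:

// Condensed flow, for illustration only:
await handleAuthSelect(AuthType.USE_OPENAI, SettingScope.User, {
  apiKey: 'sk-...',                     // placeholder key
  baseUrl: 'https://api.openai.com/v1',
  model: 'qwen3-coder-plus',
});
// -> config.updateCredentials({ apiKey, baseUrl, model })
// -> settings.setValue(scope, 'security.auth.apiKey', ...)
// -> settings.setValue(scope, 'security.auth.baseUrl', ...)
// -> settings.setValue(scope, 'model.name', ...)
// -> settings.setValue(scope, 'security.auth.selectedType', authType)
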
@@ -11,6 +11,7 @@ import { createMockCommandContext from '../../test-utils/mockCommandContext.js
import { getCliVersion } from '../../utils/version.js';
import { GIT_COMMIT_INFO } from '../../generated/git-commit.js';
import { formatMemoryUsage } from '../utils/formatters.js';
import { AuthType } from '@qwen-code/qwen-code-core';

// Mock dependencies
vi.mock('open');
@@ -26,7 +27,6 @@ vi.mock('@qwen-code/qwen-code-core', async (importOriginal) => {
getDetectedIdeDisplayName: vi.fn().mockReturnValue('VSCode'),
}),
},
sessionId: 'test-session-id',
};
});
vi.mock('node:process', () => ({
@@ -58,6 +58,16 @@ describe('bugCommand', () => {
getModel: () => 'qwen3-coder-plus',
getBugCommand: () => undefined,
getIdeMode: () => true,
getSessionId: () => 'test-session-id',
},
settings: {
merged: {
security: {
auth: {
selectedType: undefined,
},
},
},
},
},
});
@@ -71,6 +81,7 @@ describe('bugCommand', () => {
* **Session ID:** test-session-id
* **Operating System:** test-platform v20.0.0
* **Sandbox Environment:** test
* **Auth Type:**
* **Model Version:** qwen3-coder-plus
* **Memory Usage:** 100 MB
* **IDE Client:** VSCode
@@ -91,6 +102,16 @@ describe('bugCommand', () => {
getModel: () => 'qwen3-coder-plus',
getBugCommand: () => ({ urlTemplate: customTemplate }),
getIdeMode: () => true,
getSessionId: () => 'test-session-id',
},
settings: {
merged: {
security: {
auth: {
selectedType: undefined,
},
},
},
},
},
});
@@ -104,6 +125,7 @@ describe('bugCommand', () => {
* **Session ID:** test-session-id
* **Operating System:** test-platform v20.0.0
* **Sandbox Environment:** test
* **Auth Type:**
* **Model Version:** qwen3-coder-plus
* **Memory Usage:** 100 MB
* **IDE Client:** VSCode
@@ -114,4 +136,50 @@ describe('bugCommand', () => {

expect(open).toHaveBeenCalledWith(expectedUrl);
});

it('should include Base URL when auth type is OpenAI', async () => {
const mockContext = createMockCommandContext({
services: {
config: {
getModel: () => 'qwen3-coder-plus',
getBugCommand: () => undefined,
getIdeMode: () => true,
getSessionId: () => 'test-session-id',
getContentGeneratorConfig: () => ({
baseUrl: 'https://api.openai.com/v1',
}),
},
settings: {
merged: {
security: {
auth: {
selectedType: AuthType.USE_OPENAI,
},
},
},
},
},
});

if (!bugCommand.action) throw new Error('Action is not defined');
await bugCommand.action(mockContext, 'OpenAI bug');

const expectedInfo = `
* **CLI Version:** 0.1.0
* **Git Commit:** ${GIT_COMMIT_INFO}
* **Session ID:** test-session-id
* **Operating System:** test-platform v20.0.0
* **Sandbox Environment:** test
* **Auth Type:** ${AuthType.USE_OPENAI}
* **Base URL:** https://api.openai.com/v1
* **Model Version:** qwen3-coder-plus
* **Memory Usage:** 100 MB
* **IDE Client:** VSCode
`;
const expectedUrl =
'https://github.com/QwenLM/qwen-code/issues/new?template=bug_report.yml&title=OpenAI%20bug&info=' +
encodeURIComponent(expectedInfo);

expect(open).toHaveBeenCalledWith(expectedUrl);
});
});

@@ -15,7 +15,7 @@ import { MessageType } from '../types.js';
import { GIT_COMMIT_INFO } from '../../generated/git-commit.js';
import { formatMemoryUsage } from '../utils/formatters.js';
import { getCliVersion } from '../../utils/version.js';
import { IdeClient, sessionId } from '@qwen-code/qwen-code-core';
import { IdeClient, AuthType } from '@qwen-code/qwen-code-core';

export const bugCommand: SlashCommand = {
name: 'bug',
@@ -38,13 +38,24 @@ export const bugCommand: SlashCommand = {
const cliVersion = await getCliVersion();
const memoryUsage = formatMemoryUsage(process.memoryUsage().rss);
const ideClient = await getIdeClientName(context);
const selectedAuthType =
context.services.settings.merged.security?.auth?.selectedType || '';
const baseUrl =
selectedAuthType === AuthType.USE_OPENAI
? config?.getContentGeneratorConfig()?.baseUrl
: undefined;

let info = `
* **CLI Version:** ${cliVersion}
* **Git Commit:** ${GIT_COMMIT_INFO}
* **Session ID:** ${sessionId}
* **Session ID:** ${config?.getSessionId() || 'unknown'}
* **Operating System:** ${osVersion}
* **Sandbox Environment:** ${sandboxEnv}
* **Auth Type:** ${selectedAuthType}`;
if (baseUrl) {
info += `\n* **Base URL:** ${baseUrl}`;
}
info += `
* **Model Version:** ${modelVersion}
* **Memory Usage:** ${memoryUsage}
`;

@@ -139,8 +139,8 @@ describe('chatCommand', () => {
.match(/(\d{4}-\d{2}-\d{2})T(\d{2}:\d{2}:\d{2})/);
const formattedDate = isoDate ? `${isoDate[1]} ${isoDate[2]}` : '';
expect(content).toContain(formattedDate);
const index1 = content.indexOf('- \u001b[36mtest1\u001b[0m');
const index2 = content.indexOf('- \u001b[36mtest2\u001b[0m');
const index1 = content.indexOf('- test1');
const index2 = content.indexOf('- test2');
expect(index1).toBeGreaterThanOrEqual(0);
expect(index2).toBeGreaterThan(index1);
});

@@ -89,9 +89,9 @@ const listCommand: SlashCommand = {
const isoString = chat.mtime.toISOString();
const match = isoString.match(/(\d{4}-\d{2}-\d{2})T(\d{2}:\d{2}:\d{2})/);
const formattedDate = match ? `${match[1]} ${match[2]}` : 'Invalid Date';
message += ` - \u001b[36m${paddedName}\u001b[0m \u001b[90m(saved on ${formattedDate})\u001b[0m\n`;
message += ` - ${paddedName} (saved on ${formattedDate})\n`;
}
message += `\n\u001b[90mNote: Newest last, oldest first\u001b[0m`;
message += `\nNote: Newest last, oldest first`;
return {
type: 'message',
messageType: 'info',

@@ -20,6 +20,7 @@ import {
MCPServerStatus,
getErrorMessage,
MCPOAuthTokenStorage,
MCPOAuthProvider,
} from '@qwen-code/qwen-code-core';
import { appEvents, AppEvent } from '../../utils/events.js';
import { MessageType, type HistoryItemMcpStatus } from '../types.js';
@@ -93,9 +94,6 @@ const authCommand: SlashCommand = {
Date.now(),
);

// Import dynamically to avoid circular dependencies
const { MCPOAuthProvider } = await import('@qwen-code/qwen-code-core');

let oauthConfig = server.oauth;
if (!oauthConfig) {
oauthConfig = { enabled: false };

@@ -130,7 +130,7 @@ export function OpenAIKeyPrompt({
}

// Handle regular character input
if (key.sequence && !key.ctrl && !key.meta && !key.name) {
if (key.sequence && !key.ctrl && !key.meta) {
// Filter control characters
const cleanInput = key.sequence
.split('')

@@ -12,6 +12,7 @@ import type {
Config,
} from '@qwen-code/qwen-code-core';
import { renderWithProviders } from '../../../test-utils/render.js';
import type { LoadedSettings } from '../../../config/settings.js';

describe('ToolConfirmationMessage', () => {
const mockConfig = {
@@ -187,4 +188,63 @@ describe('ToolConfirmationMessage', () => {
});
});
});

describe('external editor option', () => {
const editConfirmationDetails: ToolCallConfirmationDetails = {
type: 'edit',
title: 'Confirm Edit',
fileName: 'test.txt',
filePath: '/test.txt',
fileDiff: '...diff...',
originalContent: 'a',
newContent: 'b',
onConfirm: vi.fn(),
};

it('should show "Modify with external editor" when preferredEditor is set', () => {
const mockConfig = {
isTrustedFolder: () => true,
getIdeMode: () => false,
} as unknown as Config;

const { lastFrame } = renderWithProviders(
<ToolConfirmationMessage
confirmationDetails={editConfirmationDetails}
config={mockConfig}
availableTerminalHeight={30}
terminalWidth={80}
/>,
{
settings: {
merged: { general: { preferredEditor: 'vscode' } },
} as unknown as LoadedSettings,
},
);

expect(lastFrame()).toContain('Modify with external editor');
});

it('should NOT show "Modify with external editor" when preferredEditor is not set', () => {
const mockConfig = {
isTrustedFolder: () => true,
getIdeMode: () => false,
} as unknown as Config;

const { lastFrame } = renderWithProviders(
<ToolConfirmationMessage
confirmationDetails={editConfirmationDetails}
config={mockConfig}
availableTerminalHeight={30}
terminalWidth={80}
/>,
{
settings: {
merged: { general: {} },
} as unknown as LoadedSettings,
},
);

expect(lastFrame()).not.toContain('Modify with external editor');
});
});
});

@@ -15,12 +15,14 @@ import type {
ToolExecuteConfirmationDetails,
ToolMcpConfirmationDetails,
Config,
EditorType,
} from '@qwen-code/qwen-code-core';
import { IdeClient, ToolConfirmationOutcome } from '@qwen-code/qwen-code-core';
import type { RadioSelectItem } from '../shared/RadioButtonSelect.js';
import { RadioButtonSelect } from '../shared/RadioButtonSelect.js';
import { MaxSizedBox } from '../shared/MaxSizedBox.js';
import { useKeypress } from '../../hooks/useKeypress.js';
import { useSettings } from '../../contexts/SettingsContext.js';
import { theme } from '../../semantic-colors.js';

export interface ToolConfirmationMessageProps {
@@ -45,6 +47,11 @@ export const ToolConfirmationMessage: React.FC<
const { onConfirm } = confirmationDetails;
const childWidth = terminalWidth - 2; // 2 for padding

const settings = useSettings();
const preferredEditor = settings.merged.general?.preferredEditor as
| EditorType
| undefined;

const [ideClient, setIdeClient] = useState<IdeClient | null>(null);
const [isDiffingEnabled, setIsDiffingEnabled] = useState(false);

@@ -199,7 +206,7 @@ export const ToolConfirmationMessage: React.FC<
key: 'Yes, allow always',
});
}
if (!config.getIdeMode() || !isDiffingEnabled) {
if ((!config.getIdeMode() || !isDiffingEnabled) && preferredEditor) {
options.push({
label: 'Modify with external editor',
value: ToolConfirmationOutcome.ModifyWithEditor,

@@ -23,7 +23,7 @@ export const ToolsList: React.FC<ToolsListProps> = ({
}) => (
<Box flexDirection="column" marginBottom={1}>
<Text bold color={theme.text.primary}>
Available Gemini CLI tools:
Available Qwen Code CLI tools:
</Text>
<Box height={1} />
{tools.length > 0 ? (

@@ -1,7 +1,7 @@
// Vitest Snapshot v1, https://vitest.dev/guide/snapshot.html

exports[`<ToolsList /> > renders correctly with descriptions 1`] = `
"Available Gemini CLI tools:
"Available Qwen Code CLI tools:

- Test Tool One (test-tool-one)
This is the first test tool.
@@ -16,14 +16,14 @@ exports[`<ToolsList /> > renders correctly with descriptions 1`] = `
`;

exports[`<ToolsList /> > renders correctly with no tools 1`] = `
"Available Gemini CLI tools:
"Available Qwen Code CLI tools:

No tools available
"
`;

exports[`<ToolsList /> > renders correctly without descriptions 1`] = `
"Available Gemini CLI tools:
"Available Qwen Code CLI tools:

- Test Tool One
- Test Tool Two

@@ -109,7 +109,7 @@ describe('useEditorSettings', () => {

expect(mockLoadedSettings.setValue).toHaveBeenCalledWith(
scope,
'preferredEditor',
'general.preferredEditor',
editorType,
);

@@ -139,7 +139,7 @@ describe('useEditorSettings', () => {

expect(mockLoadedSettings.setValue).toHaveBeenCalledWith(
scope,
'preferredEditor',
'general.preferredEditor',
undefined,
);

@@ -170,7 +170,7 @@ describe('useEditorSettings', () => {

expect(mockLoadedSettings.setValue).toHaveBeenCalledWith(
scope,
'preferredEditor',
'general.preferredEditor',
editorType,
);

@@ -199,7 +199,7 @@ describe('useEditorSettings', () => {

expect(mockLoadedSettings.setValue).toHaveBeenCalledWith(
scope,
'preferredEditor',
'general.preferredEditor',
editorType,
);

@@ -45,7 +45,7 @@ export const useEditorSettings = (
}

try {
loadedSettings.setValue(scope, 'preferredEditor', editorType);
loadedSettings.setValue(scope, 'general.preferredEditor', editorType);
addItem(
{
type: MessageType.INFO,

@@ -20,12 +20,14 @@ export const AVAILABLE_MODELS_QWEN: AvailableModel[] = [
{
id: MAINLINE_CODER,
label: MAINLINE_CODER,
description: 'Optimized for code generation and understanding',
description:
'The latest Qwen Coder model from Alibaba Cloud ModelStudio (version: qwen3-coder-plus-2025-09-23)',
},
{
id: MAINLINE_VLM,
label: MAINLINE_VLM,
description: 'Vision model with multimodal capabilities',
description:
'The latest Qwen Vision model from Alibaba Cloud ModelStudio (version: qwen3-vl-plus-2025-09-23)',
isVision: true,
},
];

@@ -22,12 +22,22 @@ vi.mock('os', async (importOriginal) => {
describe('getUserStartupWarnings', () => {
let testRootDir: string;
let homeDir: string;
let startupOptions: {
workspaceRoot: string;
useRipgrep: boolean;
useBuiltinRipgrep: boolean;
};

beforeEach(async () => {
testRootDir = await fs.mkdtemp(path.join(os.tmpdir(), 'warnings-test-'));
homeDir = path.join(testRootDir, 'home');
await fs.mkdir(homeDir, { recursive: true });
vi.mocked(os.homedir).mockReturnValue(homeDir);
startupOptions = {
workspaceRoot: testRootDir,
useRipgrep: true,
useBuiltinRipgrep: true,
};
});

afterEach(async () => {
@@ -37,7 +47,10 @@ describe('getUserStartupWarnings', () => {

describe('home directory check', () => {
it('should return a warning when running in home directory', async () => {
const warnings = await getUserStartupWarnings(homeDir);
const warnings = await getUserStartupWarnings({
...startupOptions,
workspaceRoot: homeDir,
});
expect(warnings).toContainEqual(
expect.stringContaining('home directory'),
);
@@ -46,7 +59,10 @@ describe('getUserStartupWarnings', () => {
it('should not return a warning when running in a project directory', async () => {
const projectDir = path.join(testRootDir, 'project');
await fs.mkdir(projectDir);
const warnings = await getUserStartupWarnings(projectDir);
const warnings = await getUserStartupWarnings({
...startupOptions,
workspaceRoot: projectDir,
});
expect(warnings).not.toContainEqual(
expect.stringContaining('home directory'),
);
@@ -56,7 +72,10 @@ describe('getUserStartupWarnings', () => {
describe('root directory check', () => {
it('should return a warning when running in a root directory', async () => {
const rootDir = path.parse(testRootDir).root;
const warnings = await getUserStartupWarnings(rootDir);
const warnings = await getUserStartupWarnings({
...startupOptions,
workspaceRoot: rootDir,
});
expect(warnings).toContainEqual(
expect.stringContaining('root directory'),
);
@@ -68,7 +87,10 @@ describe('getUserStartupWarnings', () => {
it('should not return a warning when running in a non-root directory', async () => {
const projectDir = path.join(testRootDir, 'project');
await fs.mkdir(projectDir);
const warnings = await getUserStartupWarnings(projectDir);
const warnings = await getUserStartupWarnings({
...startupOptions,
workspaceRoot: projectDir,
});
expect(warnings).not.toContainEqual(
expect.stringContaining('root directory'),
);
@@ -78,7 +100,10 @@ describe('getUserStartupWarnings', () => {
describe('error handling', () => {
it('should handle errors when checking directory', async () => {
const nonExistentPath = path.join(testRootDir, 'non-existent');
const warnings = await getUserStartupWarnings(nonExistentPath);
const warnings = await getUserStartupWarnings({
...startupOptions,
workspaceRoot: nonExistentPath,
});
const expectedWarning =
'Could not verify the current directory due to a file system error.';
expect(warnings).toEqual([expectedWarning, expectedWarning]);

@@ -7,19 +7,26 @@
|
||||
import fs from 'node:fs/promises';
|
||||
import * as os from 'node:os';
|
||||
import path from 'node:path';
|
||||
import { canUseRipgrep } from '@qwen-code/qwen-code-core';
|
||||
|
||||
type WarningCheckOptions = {
|
||||
workspaceRoot: string;
|
||||
useRipgrep: boolean;
|
||||
useBuiltinRipgrep: boolean;
|
||||
};
|
||||
|
||||
type WarningCheck = {
|
||||
id: string;
|
||||
check: (workspaceRoot: string) => Promise<string | null>;
|
||||
check: (options: WarningCheckOptions) => Promise<string | null>;
|
||||
};
|
||||
|
||||
// Individual warning checks
|
||||
const homeDirectoryCheck: WarningCheck = {
|
||||
id: 'home-directory',
|
||||
check: async (workspaceRoot: string) => {
|
||||
check: async (options: WarningCheckOptions) => {
|
||||
try {
|
||||
const [workspaceRealPath, homeRealPath] = await Promise.all([
|
||||
fs.realpath(workspaceRoot),
|
||||
fs.realpath(options.workspaceRoot),
|
||||
fs.realpath(os.homedir()),
|
||||
]);
|
||||
|
||||
@@ -35,9 +42,9 @@ const homeDirectoryCheck: WarningCheck = {
|
||||
|
||||
const rootDirectoryCheck: WarningCheck = {
|
||||
id: 'root-directory',
|
||||
check: async (workspaceRoot: string) => {
|
||||
check: async (options: WarningCheckOptions) => {
|
||||
try {
|
||||
const workspaceRealPath = await fs.realpath(workspaceRoot);
|
||||
const workspaceRealPath = await fs.realpath(options.workspaceRoot);
|
||||
const errorMessage =
|
||||
'Warning: You are running Qwen Code in the root directory. Your entire folder structure will be used for context. It is strongly recommended to run in a project-specific directory.';
|
||||
|
||||
@@ -53,17 +60,33 @@ const rootDirectoryCheck: WarningCheck = {
   },
 };

+const ripgrepAvailabilityCheck: WarningCheck = {
+  id: 'ripgrep-availability',
+  check: async (options: WarningCheckOptions) => {
+    if (!options.useRipgrep) {
+      return null;
+    }
+
+    const isAvailable = await canUseRipgrep(options.useBuiltinRipgrep);
+    if (!isAvailable) {
+      return 'Ripgrep not available: Please install ripgrep globally to enable faster file content search. Falling back to built-in grep.';
+    }
+    return null;
+  },
+};
+
 // All warning checks
 const WARNING_CHECKS: readonly WarningCheck[] = [
   homeDirectoryCheck,
   rootDirectoryCheck,
+  ripgrepAvailabilityCheck,
 ];

 export async function getUserStartupWarnings(
-  workspaceRoot: string = process.cwd(),
+  options: WarningCheckOptions,
 ): Promise<string[]> {
   const results = await Promise.all(
-    WARNING_CHECKS.map((check) => check.check(workspaceRoot)),
+    WARNING_CHECKS.map((check) => check.check(options)),
   );
   return results.filter((msg) => msg !== null);
 }
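With this change, getUserStartupWarnings accepts a single options object instead of a bare workspace path, so every check, including the new ripgrep one, receives the same inputs. A minimal sketch of the new call shape, mirroring the updated tests above (the import path and flag values here are illustrative):

```ts
import { getUserStartupWarnings } from './utils/userStartupWarnings.js';

// All three WarningCheckOptions fields are required; the two flags control
// whether the ripgrep-availability check runs and which binary it probes.
const warnings = await getUserStartupWarnings({
  workspaceRoot: process.cwd(),
  useRipgrep: true, // run the ripgrep-availability check
  useBuiltinRipgrep: true, // probe the vendored binary, not a system install
});
warnings.forEach((warning) => console.warn(warning));
```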
@@ -105,34 +105,6 @@ describe('validateNonInterActiveAuth', () => {
     expect(processExitSpy).toHaveBeenCalledWith(1);
   });

-  it('uses LOGIN_WITH_GOOGLE if GOOGLE_GENAI_USE_GCA is set', async () => {
-    process.env['GOOGLE_GENAI_USE_GCA'] = 'true';
-    const nonInteractiveConfig = {
-      refreshAuth: refreshAuthMock,
-    } as unknown as Config;
-    await validateNonInteractiveAuth(
-      undefined,
-      undefined,
-      nonInteractiveConfig,
-      mockSettings,
-    );
-    expect(refreshAuthMock).toHaveBeenCalledWith(AuthType.LOGIN_WITH_GOOGLE);
-  });
-
-  it('uses USE_GEMINI if GEMINI_API_KEY is set', async () => {
-    process.env['GEMINI_API_KEY'] = 'fake-key';
-    const nonInteractiveConfig = {
-      refreshAuth: refreshAuthMock,
-    } as unknown as Config;
-    await validateNonInteractiveAuth(
-      undefined,
-      undefined,
-      nonInteractiveConfig,
-      mockSettings,
-    );
-    expect(refreshAuthMock).toHaveBeenCalledWith(AuthType.USE_GEMINI);
-  });
-
   it('uses USE_OPENAI if OPENAI_API_KEY is set', async () => {
     process.env['OPENAI_API_KEY'] = 'fake-openai-key';
     const nonInteractiveConfig = {
@@ -168,104 +140,6 @@ describe('validateNonInterActiveAuth', () => {
     expect(refreshAuthMock).toHaveBeenCalledWith(AuthType.QWEN_OAUTH);
   });

-  it('uses USE_VERTEX_AI if GOOGLE_GENAI_USE_VERTEXAI is true (with GOOGLE_CLOUD_PROJECT and GOOGLE_CLOUD_LOCATION)', async () => {
-    process.env['GOOGLE_GENAI_USE_VERTEXAI'] = 'true';
-    process.env['GOOGLE_CLOUD_PROJECT'] = 'test-project';
-    process.env['GOOGLE_CLOUD_LOCATION'] = 'us-central1';
-    const nonInteractiveConfig = {
-      refreshAuth: refreshAuthMock,
-    } as unknown as Config;
-    await validateNonInteractiveAuth(
-      undefined,
-      undefined,
-      nonInteractiveConfig,
-      mockSettings,
-    );
-    expect(refreshAuthMock).toHaveBeenCalledWith(AuthType.USE_VERTEX_AI);
-  });
-
-  it('uses USE_VERTEX_AI if GOOGLE_GENAI_USE_VERTEXAI is true and GOOGLE_API_KEY is set', async () => {
-    process.env['GOOGLE_GENAI_USE_VERTEXAI'] = 'true';
-    process.env['GOOGLE_API_KEY'] = 'vertex-api-key';
-    const nonInteractiveConfig = {
-      refreshAuth: refreshAuthMock,
-    } as unknown as Config;
-    await validateNonInteractiveAuth(
-      undefined,
-      undefined,
-      nonInteractiveConfig,
-      mockSettings,
-    );
-    expect(refreshAuthMock).toHaveBeenCalledWith(AuthType.USE_VERTEX_AI);
-  });
-
-  it('uses LOGIN_WITH_GOOGLE if GOOGLE_GENAI_USE_GCA is set, even with other env vars', async () => {
-    process.env['GOOGLE_GENAI_USE_GCA'] = 'true';
-    process.env['GEMINI_API_KEY'] = 'fake-key';
-    process.env['GOOGLE_GENAI_USE_VERTEXAI'] = 'true';
-    process.env['GOOGLE_CLOUD_PROJECT'] = 'test-project';
-    process.env['GOOGLE_CLOUD_LOCATION'] = 'us-central1';
-    const nonInteractiveConfig = {
-      refreshAuth: refreshAuthMock,
-    } as unknown as Config;
-    await validateNonInteractiveAuth(
-      undefined,
-      undefined,
-      nonInteractiveConfig,
-      mockSettings,
-    );
-    expect(refreshAuthMock).toHaveBeenCalledWith(AuthType.LOGIN_WITH_GOOGLE);
-  });
-
-  it('uses USE_VERTEX_AI if both GEMINI_API_KEY and GOOGLE_GENAI_USE_VERTEXAI are set', async () => {
-    process.env['GEMINI_API_KEY'] = 'fake-key';
-    process.env['GOOGLE_GENAI_USE_VERTEXAI'] = 'true';
-    process.env['GOOGLE_CLOUD_PROJECT'] = 'test-project';
-    process.env['GOOGLE_CLOUD_LOCATION'] = 'us-central1';
-    const nonInteractiveConfig = {
-      refreshAuth: refreshAuthMock,
-    } as unknown as Config;
-    await validateNonInteractiveAuth(
-      undefined,
-      undefined,
-      nonInteractiveConfig,
-      mockSettings,
-    );
-    expect(refreshAuthMock).toHaveBeenCalledWith(AuthType.USE_VERTEX_AI);
-  });
-
-  it('uses USE_GEMINI if GOOGLE_GENAI_USE_VERTEXAI is false, GEMINI_API_KEY is set, and project/location are available', async () => {
-    process.env['GOOGLE_GENAI_USE_VERTEXAI'] = 'false';
-    process.env['GEMINI_API_KEY'] = 'fake-key';
-    process.env['GOOGLE_CLOUD_PROJECT'] = 'test-project';
-    process.env['GOOGLE_CLOUD_LOCATION'] = 'us-central1';
-    const nonInteractiveConfig = {
-      refreshAuth: refreshAuthMock,
-    } as unknown as Config;
-    await validateNonInteractiveAuth(
-      undefined,
-      undefined,
-      nonInteractiveConfig,
-      mockSettings,
-    );
-    expect(refreshAuthMock).toHaveBeenCalledWith(AuthType.USE_GEMINI);
-  });
-
-  it('uses configuredAuthType if provided', async () => {
-    // Set required env var for USE_GEMINI
-    process.env['GEMINI_API_KEY'] = 'fake-key';
-    const nonInteractiveConfig = {
-      refreshAuth: refreshAuthMock,
-    } as unknown as Config;
-    await validateNonInteractiveAuth(
-      AuthType.USE_GEMINI,
-      undefined,
-      nonInteractiveConfig,
-      mockSettings,
-    );
-    expect(refreshAuthMock).toHaveBeenCalledWith(AuthType.USE_GEMINI);
-  });
-
   it('exits if validateAuthMethod returns error', async () => {
     // Mock validateAuthMethod to return error
     vi.spyOn(auth, 'validateAuthMethod').mockReturnValue('Auth error!');
@@ -317,26 +191,25 @@ describe('validateNonInterActiveAuth', () => {
   });

   it('uses enforcedAuthType if provided', async () => {
-    mockSettings.merged.security!.auth!.enforcedType = AuthType.USE_GEMINI;
-    mockSettings.merged.security!.auth!.selectedType = AuthType.USE_GEMINI;
-    // Set required env var for USE_GEMINI to ensure enforcedAuthType takes precedence
-    process.env['GEMINI_API_KEY'] = 'fake-key';
+    mockSettings.merged.security!.auth!.enforcedType = AuthType.USE_OPENAI;
+    mockSettings.merged.security!.auth!.selectedType = AuthType.USE_OPENAI;
+    // Set required env var for USE_OPENAI to ensure enforcedAuthType takes precedence
+    process.env['OPENAI_API_KEY'] = 'fake-key';
     const nonInteractiveConfig = {
       refreshAuth: refreshAuthMock,
     } as unknown as Config;
     await validateNonInteractiveAuth(
-      AuthType.USE_GEMINI,
+      AuthType.USE_OPENAI,
       undefined,
       nonInteractiveConfig,
       mockSettings,
     );
-    expect(refreshAuthMock).toHaveBeenCalledWith(AuthType.USE_GEMINI);
+    expect(refreshAuthMock).toHaveBeenCalledWith(AuthType.USE_OPENAI);
   });

   it('exits if currentAuthType does not match enforcedAuthType', async () => {
-    mockSettings.merged.security!.auth!.enforcedType =
-      AuthType.LOGIN_WITH_GOOGLE;
-    process.env['GOOGLE_GENAI_USE_VERTEXAI'] = 'true';
+    mockSettings.merged.security!.auth!.enforcedType = AuthType.QWEN_OAUTH;
+    process.env['OPENAI_API_KEY'] = 'fake-key';
     const nonInteractiveConfig = {
       refreshAuth: refreshAuthMock,
       getOutputFormat: vi.fn().mockReturnValue(OutputFormat.TEXT),
@@ -346,7 +219,7 @@ describe('validateNonInterActiveAuth', () => {
     } as unknown as Config;
     try {
       await validateNonInteractiveAuth(
-        AuthType.USE_GEMINI,
+        AuthType.USE_OPENAI,
         undefined,
         nonInteractiveConfig,
         mockSettings,
@@ -356,7 +229,7 @@ describe('validateNonInterActiveAuth', () => {
       expect((e as Error).message).toContain('process.exit(1) called');
     }
     expect(consoleErrorSpy).toHaveBeenCalledWith(
-      'The configured auth type is oauth-personal, but the current auth type is vertex-ai. Please re-authenticate with the correct type.',
+      'The configured auth type is qwen-oauth, but the current auth type is openai. Please re-authenticate with the correct type.',
     );
     expect(processExitSpy).toHaveBeenCalledWith(1);
   });
@@ -394,8 +267,8 @@ describe('validateNonInterActiveAuth', () => {
   });

   it('prints JSON error when enforced auth mismatches current auth and exits with code 1', async () => {
-    mockSettings.merged.security!.auth!.enforcedType = AuthType.USE_GEMINI;
-    process.env['GOOGLE_GENAI_USE_GCA'] = 'true';
+    mockSettings.merged.security!.auth!.enforcedType = AuthType.QWEN_OAUTH;
+    process.env['OPENAI_API_KEY'] = 'fake-key';

     const nonInteractiveConfig = {
       refreshAuth: refreshAuthMock,
@@ -424,14 +297,14 @@ describe('validateNonInterActiveAuth', () => {
       expect(payload.error.type).toBe('Error');
       expect(payload.error.code).toBe(1);
       expect(payload.error.message).toContain(
-        'The configured auth type is gemini-api-key, but the current auth type is oauth-personal.',
+        'The configured auth type is qwen-oauth, but the current auth type is openai.',
       );
     }
   });

   it('prints JSON error when validateAuthMethod fails and exits with code 1', async () => {
     vi.spyOn(auth, 'validateAuthMethod').mockReturnValue('Auth error!');
-    process.env['GEMINI_API_KEY'] = 'fake-key';
+    process.env['OPENAI_API_KEY'] = 'fake-key';

     const nonInteractiveConfig = {
       refreshAuth: refreshAuthMock,
@@ -444,7 +317,7 @@ describe('validateNonInterActiveAuth', () => {
     let thrown: Error | undefined;
     try {
       await validateNonInteractiveAuth(
-        AuthType.USE_GEMINI,
+        AuthType.USE_OPENAI,
         undefined,
         nonInteractiveConfig,
         mockSettings,
@@ -12,18 +12,13 @@ import { type LoadedSettings } from './config/settings.js';
 import { handleError } from './utils/errors.js';

 function getAuthTypeFromEnv(): AuthType | undefined {
-  if (process.env['GOOGLE_GENAI_USE_GCA'] === 'true') {
-    return AuthType.LOGIN_WITH_GOOGLE;
-  }
-  if (process.env['GOOGLE_GENAI_USE_VERTEXAI'] === 'true') {
-    return AuthType.USE_VERTEX_AI;
-  }
-  if (process.env['GEMINI_API_KEY']) {
-    return AuthType.USE_GEMINI;
-  }
   if (process.env['OPENAI_API_KEY']) {
     return AuthType.USE_OPENAI;
   }
+  if (process.env['QWEN_OAUTH']) {
+    return AuthType.QWEN_OAUTH;
+  }

   return undefined;
 }
@@ -47,7 +42,7 @@ export async function validateNonInteractiveAuth(
     enforcedType || getAuthTypeFromEnv() || configuredAuthType;

   if (!effectiveAuthType) {
-    const message = `Please set an Auth method in your ${USER_SETTINGS_PATH} or specify one of the following environment variables before running: GEMINI_API_KEY, OPENAI_API_KEY, GOOGLE_GENAI_USE_VERTEXAI, GOOGLE_GENAI_USE_GCA`;
+    const message = `Please set an Auth method in your ${USER_SETTINGS_PATH} or specify one of the following environment variables before running: QWEN_OAUTH, OPENAI_API_KEY`;
     throw new Error(message);
   }
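Note the resolution order in validateNonInteractiveAuth: a settings-enforced type wins over environment variables, which in turn win over the previously configured type. A simplified standalone sketch of that precedence chain (types reduced to string literals; the real function also validates the method and calls refreshAuth):

```ts
type EnvAuthType = 'openai' | 'qwen-oauth';

// Mirrors the resolution order above: enforced > environment > configured.
// The env checks match the simplified getAuthTypeFromEnv shown in the diff.
function resolveAuthType(
  enforcedType?: EnvAuthType,
  configuredAuthType?: EnvAuthType,
): EnvAuthType | undefined {
  let fromEnv: EnvAuthType | undefined;
  if (process.env['OPENAI_API_KEY']) {
    fromEnv = 'openai';
  } else if (process.env['QWEN_OAUTH']) {
    fromEnv = 'qwen-oauth';
  }
  return enforcedType || fromEnv || configuredAuthType;
}
```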
@@ -1,6 +1,6 @@
 {
   "name": "@qwen-code/qwen-code-core",
-  "version": "0.0.14",
+  "version": "0.1.2",
   "description": "Qwen Code Core",
   "repository": {
     "type": "git",
@@ -14,14 +14,16 @@
     "format": "prettier --write .",
     "test": "vitest run",
     "test:ci": "vitest run",
-    "typecheck": "tsc --noEmit"
+    "typecheck": "tsc --noEmit",
+    "postinstall": "node scripts/postinstall.js"
   },
   "files": [
-    "dist"
+    "dist",
+    "vendor",
+    "scripts/postinstall.js"
   ],
   "dependencies": {
     "@google/genai": "1.16.0",
-    "@joshua.litt/get-ripgrep": "^0.0.2",
     "@modelcontextprotocol/sdk": "^1.11.0",
     "@opentelemetry/api": "^1.9.0",
     "@opentelemetry/exporter-logs-otlp-grpc": "^0.203.0",
packages/core/scripts/postinstall.js (new file, 85 lines)
@@ -0,0 +1,85 @@
+#!/usr/bin/env node
+
+/**
+ * @license
+ * Copyright 2025 Qwen
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+import { execSync } from 'node:child_process';
+import { fileURLToPath } from 'node:url';
+import path from 'node:path';
+import fs from 'node:fs';
+
+const __filename = fileURLToPath(import.meta.url);
+const __dirname = path.dirname(__filename);
+
+// Get the package root directory
+const packageRoot = path.join(__dirname, '..');
+const vendorDir = path.join(packageRoot, 'vendor', 'ripgrep');
+
+/**
+ * Remove quarantine attribute and set executable permissions on macOS/Linux
+ */
+function setupRipgrepBinaries() {
+  if (!fs.existsSync(vendorDir)) {
+    console.log('Vendor directory not found, skipping ripgrep setup');
+    return;
+  }
+
+  const platform = process.platform;
+  const arch = process.arch;
+
+  // Determine the binary directory based on platform and architecture
+  let binaryDir;
+  if (platform === 'darwin' || platform === 'linux') {
+    const archStr = arch === 'x64' || arch === 'arm64' ? arch : null;
+    if (archStr) {
+      binaryDir = path.join(vendorDir, `${archStr}-${platform}`);
+    }
+  } else if (platform === 'win32') {
+    // Windows doesn't need these fixes
+    return;
+  }
+
+  if (!binaryDir || !fs.existsSync(binaryDir)) {
+    console.log(
+      `Binary directory not found for ${platform}-${arch}, skipping ripgrep setup`,
+    );
+    return;
+  }
+
+  const rgBinary = path.join(binaryDir, 'rg');
+
+  if (!fs.existsSync(rgBinary)) {
+    console.log(`Ripgrep binary not found at ${rgBinary}`);
+    return;
+  }
+
+  try {
+    // Set executable permissions
+    fs.chmodSync(rgBinary, 0o755);
+    console.log(`✓ Set executable permissions on ${rgBinary}`);
+
+    // On macOS, remove quarantine attribute
+    if (platform === 'darwin') {
+      try {
+        execSync(`xattr -d com.apple.quarantine "${rgBinary}"`, {
+          stdio: 'pipe',
+        });
+        console.log(`✓ Removed quarantine attribute from ${rgBinary}`);
+      } catch (error) {
+        // Quarantine attribute might not exist, which is fine
+        if (error.message && !error.message.includes('No such xattr')) {
+          console.warn(
+            `Warning: Could not remove quarantine attribute: ${error.message}`,
+          );
+        }
+      }
+    }
+  } catch (error) {
+    console.error(`Error setting up ripgrep binary: ${error.message}`);
+  }
+}
+
+setupRipgrepBinaries();
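npm runs this script automatically after the package is installed, because of the postinstall entry added to package.json above. To confirm that the vendored binary came through with usable permissions, a hypothetical smoke check along these lines should print the ripgrep version (assumes Linux or macOS and a standard node_modules layout; the directory naming mirrors the script):

```ts
import { execFileSync } from 'node:child_process';
import path from 'node:path';

// Hypothetical smoke check (Linux/macOS): invoke the vendored rg once and
// print its version string. The layout mirrors postinstall.js above.
const rgPath = path.join(
  'node_modules/@qwen-code/qwen-code-core/vendor/ripgrep',
  `${process.arch}-${process.platform}`, // e.g. arm64-darwin, x64-linux
  'rg',
);
console.log(execFileSync(rgPath, ['--version'], { encoding: 'utf8' }));
```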
@@ -16,6 +16,7 @@ import {
   QwenLogger,
 } from '../telemetry/index.js';
 import type { ContentGeneratorConfig } from '../core/contentGenerator.js';
+import { DEFAULT_DASHSCOPE_BASE_URL } from '../core/openaiContentGenerator/constants.js';
 import {
   AuthType,
   createContentGeneratorConfig,
@@ -38,7 +39,8 @@ vi.mock('fs', async (importOriginal) => {
 import { ShellTool } from '../tools/shell.js';
 import { ReadFileTool } from '../tools/read-file.js';
 import { GrepTool } from '../tools/grep.js';
-import { RipGrepTool, canUseRipgrep } from '../tools/ripGrep.js';
+import { canUseRipgrep } from '../utils/ripgrepUtils.js';
+import { RipGrepTool } from '../tools/ripGrep.js';
 import { logRipgrepFallback } from '../telemetry/loggers.js';
 import { RipgrepFallbackEvent } from '../telemetry/types.js';
 import { ToolRegistry } from '../tools/tool-registry.js';
@@ -75,9 +77,11 @@ vi.mock('../tools/ls');
 vi.mock('../tools/read-file');
 vi.mock('../tools/grep.js');
 vi.mock('../tools/ripGrep.js', () => ({
-  canUseRipgrep: vi.fn(),
   RipGrepTool: class MockRipGrepTool {},
 }));
+vi.mock('../utils/ripgrepUtils.js', () => ({
+  canUseRipgrep: vi.fn(),
+}));
 vi.mock('../tools/glob');
 vi.mock('../tools/edit');
 vi.mock('../tools/shell');
@@ -150,6 +154,11 @@ vi.mock('../core/tokenLimits.js', () => ({

 describe('Server Config (config.ts)', () => {
   const MODEL = 'qwen3-coder-plus';
+
+  // Default mock for canUseRipgrep to return true (tests that care about ripgrep will override this)
+  beforeEach(() => {
+    vi.mocked(canUseRipgrep).mockResolvedValue(true);
+  });
   const SANDBOX: SandboxConfig = {
     command: 'docker',
     image: 'qwen-code-sandbox',
@@ -247,6 +256,7 @@ describe('Server Config (config.ts)', () => {
       authType,
       {
         model: MODEL,
+        baseUrl: DEFAULT_DASHSCOPE_BASE_URL,
       },
     );
     // Verify that contentGeneratorConfig is updated
@@ -573,6 +583,40 @@ describe('Server Config (config.ts)', () => {
     });
   });

+  describe('UseBuiltinRipgrep Configuration', () => {
+    it('should default useBuiltinRipgrep to true when not provided', () => {
+      const config = new Config(baseParams);
+      expect(config.getUseBuiltinRipgrep()).toBe(true);
+    });
+
+    it('should set useBuiltinRipgrep to false when provided as false', () => {
+      const paramsWithBuiltinRipgrep: ConfigParameters = {
+        ...baseParams,
+        useBuiltinRipgrep: false,
+      };
+      const config = new Config(paramsWithBuiltinRipgrep);
+      expect(config.getUseBuiltinRipgrep()).toBe(false);
+    });
+
+    it('should set useBuiltinRipgrep to true when explicitly provided as true', () => {
+      const paramsWithBuiltinRipgrep: ConfigParameters = {
+        ...baseParams,
+        useBuiltinRipgrep: true,
+      };
+      const config = new Config(paramsWithBuiltinRipgrep);
+      expect(config.getUseBuiltinRipgrep()).toBe(true);
+    });
+
+    it('should default useBuiltinRipgrep to true when undefined', () => {
+      const paramsWithUndefinedBuiltinRipgrep: ConfigParameters = {
+        ...baseParams,
+        useBuiltinRipgrep: undefined,
+      };
+      const config = new Config(paramsWithUndefinedBuiltinRipgrep);
+      expect(config.getUseBuiltinRipgrep()).toBe(true);
+    });
+  });
+
   describe('createToolRegistry', () => {
     it('should register a tool if coreTools contains an argument-specific pattern', async () => {
       const params: ConfigParameters = {
@@ -820,10 +864,60 @@ describe('setApprovalMode with folder trust', () => {

     expect(wasRipGrepRegistered).toBe(true);
     expect(wasGrepRegistered).toBe(false);
     expect(logRipgrepFallback).not.toHaveBeenCalled();
+    expect(canUseRipgrep).toHaveBeenCalledWith(true);
   });

-  it('should register GrepTool as a fallback when useRipgrep is true but it is not available', async () => {
+  it('should register RipGrepTool with system ripgrep when useBuiltinRipgrep is false', async () => {
+    (canUseRipgrep as Mock).mockResolvedValue(true);
+    const config = new Config({
+      ...baseParams,
+      useRipgrep: true,
+      useBuiltinRipgrep: false,
+    });
+    await config.initialize();
+
+    const calls = (ToolRegistry.prototype.registerTool as Mock).mock.calls;
+    const wasRipGrepRegistered = calls.some(
+      (call) => call[0] instanceof vi.mocked(RipGrepTool),
+    );
+    const wasGrepRegistered = calls.some(
+      (call) => call[0] instanceof vi.mocked(GrepTool),
+    );
+
+    expect(wasRipGrepRegistered).toBe(true);
+    expect(wasGrepRegistered).toBe(false);
+    expect(canUseRipgrep).toHaveBeenCalledWith(false);
+  });
+
+  it('should fall back to GrepTool and log error when useBuiltinRipgrep is false but system ripgrep is not available', async () => {
+    (canUseRipgrep as Mock).mockResolvedValue(false);
+    const config = new Config({
+      ...baseParams,
+      useRipgrep: true,
+      useBuiltinRipgrep: false,
+    });
+    await config.initialize();
+
+    const calls = (ToolRegistry.prototype.registerTool as Mock).mock.calls;
+    const wasRipGrepRegistered = calls.some(
+      (call) => call[0] instanceof vi.mocked(RipGrepTool),
+    );
+    const wasGrepRegistered = calls.some(
+      (call) => call[0] instanceof vi.mocked(GrepTool),
+    );
+
+    expect(wasRipGrepRegistered).toBe(false);
+    expect(wasGrepRegistered).toBe(true);
+    expect(canUseRipgrep).toHaveBeenCalledWith(false);
+    expect(logRipgrepFallback).toHaveBeenCalledWith(
+      config,
+      expect.any(RipgrepFallbackEvent),
+    );
+    const event = (logRipgrepFallback as Mock).mock.calls[0][1];
+    expect(event.error).toContain('Ripgrep is not available');
+  });
+
+  it('should fall back to GrepTool and log error when useRipgrep is true and builtin ripgrep is not available', async () => {
     (canUseRipgrep as Mock).mockResolvedValue(false);
     const config = new Config({ ...baseParams, useRipgrep: true });
     await config.initialize();
@@ -838,15 +932,16 @@ describe('setApprovalMode with folder trust', () => {

     expect(wasRipGrepRegistered).toBe(false);
     expect(wasGrepRegistered).toBe(true);
+    expect(canUseRipgrep).toHaveBeenCalledWith(true);
     expect(logRipgrepFallback).toHaveBeenCalledWith(
       config,
       expect.any(RipgrepFallbackEvent),
     );
     const event = (logRipgrepFallback as Mock).mock.calls[0][1];
-    expect(event.error).toBeUndefined();
+    expect(event.error).toContain('Ripgrep is not available');
   });

-  it('should register GrepTool as a fallback when canUseRipgrep throws an error', async () => {
+  it('should fall back to GrepTool and log error when canUseRipgrep throws an error', async () => {
     const error = new Error('ripGrep check failed');
     (canUseRipgrep as Mock).mockRejectedValue(error);
     const config = new Config({ ...baseParams, useRipgrep: true });
@@ -885,7 +980,6 @@ describe('setApprovalMode with folder trust', () => {

     expect(wasRipGrepRegistered).toBe(false);
     expect(wasGrepRegistered).toBe(true);
-    expect(canUseRipgrep).not.toHaveBeenCalled();
     expect(logRipgrepFallback).not.toHaveBeenCalled();
   });
 });
@@ -49,7 +49,8 @@ import { LSTool } from '../tools/ls.js';
 import { MemoryTool, setGeminiMdFilename } from '../tools/memoryTool.js';
 import { ReadFileTool } from '../tools/read-file.js';
 import { ReadManyFilesTool } from '../tools/read-many-files.js';
-import { canUseRipgrep, RipGrepTool } from '../tools/ripGrep.js';
+import { canUseRipgrep } from '../utils/ripgrepUtils.js';
+import { RipGrepTool } from '../tools/ripGrep.js';
 import { ShellTool } from '../tools/shell.js';
 import { SmartEditTool } from '../tools/smart-edit.js';
 import { TaskTool } from '../tools/task.js';
@@ -87,8 +88,9 @@ import {
   DEFAULT_FILE_FILTERING_OPTIONS,
   DEFAULT_MEMORY_FILE_FILTERING_OPTIONS,
 } from './constants.js';
-import { DEFAULT_QWEN_EMBEDDING_MODEL } from './models.js';
+import { DEFAULT_QWEN_EMBEDDING_MODEL, DEFAULT_QWEN_MODEL } from './models.js';
 import { Storage } from './storage.js';
+import { DEFAULT_DASHSCOPE_BASE_URL } from '../core/openaiContentGenerator/constants.js';

 // Re-export types
 export type { AnyToolInvocation, FileFilteringOptions, MCPOAuthConfig };
@@ -242,7 +244,7 @@ export interface ConfigParameters {
   fileDiscoveryService?: FileDiscoveryService;
   includeDirectories?: string[];
   bugCommand?: BugCommandSettings;
-  model: string;
+  model?: string;
   extensionContextFilePaths?: string[];
   maxSessionTurns?: number;
   sessionTokenLimit?: number;
@@ -265,6 +267,7 @@ export interface ConfigParameters {
   interactive?: boolean;
   trustedFolder?: boolean;
   useRipgrep?: boolean;
+  useBuiltinRipgrep?: boolean;
   shouldUseNodePtyShell?: boolean;
   skipNextSpeakerCheck?: boolean;
   shellExecutionConfig?: ShellExecutionConfig;
@@ -288,7 +291,7 @@ export class Config {
   private fileSystemService: FileSystemService;
   private contentGeneratorConfig!: ContentGeneratorConfig;
   private contentGenerator!: ContentGenerator;
-  private readonly _generationConfig: ContentGeneratorConfig;
+  private _generationConfig: Partial<ContentGeneratorConfig>;
   private readonly embeddingModel: string;
   private readonly sandbox: SandboxConfig | undefined;
   private readonly targetDir: string;
@@ -353,6 +356,7 @@ export class Config {
   private readonly interactive: boolean;
   private readonly trustedFolder: boolean | undefined;
   private readonly useRipgrep: boolean;
+  private readonly useBuiltinRipgrep: boolean;
   private readonly shouldUseNodePtyShell: boolean;
   private readonly skipNextSpeakerCheck: boolean;
   private shellExecutionConfig: ShellExecutionConfig;
@@ -439,8 +443,10 @@ export class Config {
     this._generationConfig = {
       model: params.model,
       ...(params.generationConfig || {}),
+      baseUrl: params.generationConfig?.baseUrl || DEFAULT_DASHSCOPE_BASE_URL,
     };
-    this.contentGeneratorConfig = this._generationConfig;
+    this.contentGeneratorConfig = this
+      ._generationConfig as ContentGeneratorConfig;
     this.cliVersion = params.cliVersion;

     this.loadMemoryFromIncludeDirectories =
@@ -448,13 +454,12 @@ export class Config {
     this.chatCompression = params.chatCompression;
     this.interactive = params.interactive ?? false;
     this.trustedFolder = params.trustedFolder;
-    this.shouldUseNodePtyShell = params.shouldUseNodePtyShell ?? false;
-    this.skipNextSpeakerCheck = params.skipNextSpeakerCheck ?? false;
     this.skipLoopDetection = params.skipLoopDetection ?? false;

     // Web search
     this.tavilyApiKey = params.tavilyApiKey;
     this.useRipgrep = params.useRipgrep ?? true;
+    this.useBuiltinRipgrep = params.useBuiltinRipgrep ?? true;
+    this.shouldUseNodePtyShell = params.shouldUseNodePtyShell ?? false;
+    this.skipNextSpeakerCheck = params.skipNextSpeakerCheck ?? true;
     this.shellExecutionConfig = {
@@ -519,6 +524,26 @@ export class Config {
     return this.contentGenerator;
   }

+  /**
+   * Updates the credentials in the generation config.
+   * This is needed when credentials are set after Config construction.
+   */
+  updateCredentials(credentials: {
+    apiKey?: string;
+    baseUrl?: string;
+    model?: string;
+  }): void {
+    if (credentials.apiKey) {
+      this._generationConfig.apiKey = credentials.apiKey;
+    }
+    if (credentials.baseUrl) {
+      this._generationConfig.baseUrl = credentials.baseUrl;
+    }
+    if (credentials.model) {
+      this._generationConfig.model = credentials.model;
+    }
+  }
+
   async refreshAuth(authMethod: AuthType) {
     // Vertex and Genai have incompatible encryption and sending history with
     // throughtSignature from Genai to Vertex will fail, we need to strip them
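A plausible call site for the new updateCredentials method is right after an authentication flow completes, once a Config instance already exists. The snippet below is illustrative only; the surrounding flow and the concrete values are assumptions, not taken from this diff:

```ts
declare const config: Config; // an already-constructed Config instance
declare const freshAccessToken: string; // e.g. from a completed OAuth flow

// Each field is optional; fields that are absent leave the existing
// generation config untouched.
config.updateCredentials({
  apiKey: freshAccessToken,
  baseUrl: 'https://api.example.com/v1', // placeholder endpoint
  model: 'qwen3-coder-plus',
});
```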
@@ -586,7 +611,7 @@ export class Config {
   }

   getModel(): string {
-    return this.contentGeneratorConfig.model;
+    return this.contentGeneratorConfig?.model || DEFAULT_QWEN_MODEL;
   }

   async setModel(
@@ -964,6 +989,10 @@ export class Config {
     return this.useRipgrep;
   }

+  getUseBuiltinRipgrep(): boolean {
+    return this.useBuiltinRipgrep;
+  }
+
   getShouldUseNodePtyShell(): boolean {
     return this.shouldUseNodePtyShell;
   }
@@ -1091,13 +1120,18 @@ export class Config {
     let useRipgrep = false;
     let errorString: undefined | string = undefined;
     try {
-      useRipgrep = await canUseRipgrep();
+      useRipgrep = await canUseRipgrep(this.getUseBuiltinRipgrep());
     } catch (error: unknown) {
       errorString = String(error);
     }
     if (useRipgrep) {
       registerCoreTool(RipGrepTool, this);
     } else {
+      errorString =
+        errorString ||
+        'Ripgrep is not available. Please install ripgrep globally.';
+
+      // Log for telemetry
       logRipgrepFallback(this, new RipgrepFallbackEvent(errorString));
       registerCoreTool(GrepTool, this);
     }
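Taken together, search-tool registration now has three outcomes: RipGrepTool when the probe succeeds, GrepTool with a logged fallback when the probe fails or throws, and GrepTool with no probe at all when useRipgrep is false. A condensed sketch of that decision, simplified from createToolRegistry (the boolean argument selects the vendored versus system binary):

```ts
import { canUseRipgrep } from '@qwen-code/qwen-code-core';

// Condensed decision table for search-tool registration:
//   useRipgrep=false      -> grep (the probe never runs)
//   probe resolves true   -> ripgrep
//   probe false or throws -> grep, with the failure logged for telemetry
async function pickSearchTool(
  useRipgrep: boolean,
  useBuiltinRipgrep: boolean,
): Promise<'ripgrep' | 'grep'> {
  if (!useRipgrep) return 'grep';
  try {
    return (await canUseRipgrep(useBuiltinRipgrep)) ? 'ripgrep' : 'grep';
  } catch {
    return 'grep';
  }
}
```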
@@ -69,7 +69,7 @@ I've found some existing telemetry code. Let me mark the first todo as in_progre
|
||||
## Software Engineering Tasks
|
||||
When requested to perform tasks like fixing bugs, adding features, refactoring, or explaining code, follow this iterative approach:
|
||||
- **Plan:** After understanding the user's request, create an initial plan based on your existing knowledge and any immediately obvious context. Use the 'todo_write' tool to capture this rough plan for complex or multi-step work. Don't wait for complete understanding - start with what you know.
|
||||
- **Implement:** Begin implementing the plan while gathering additional context as needed. Use 'search_file_content', 'glob', 'read_file', and 'read_many_files' tools strategically when you encounter specific unknowns during implementation. Use the available tools (e.g., 'edit', 'write_file' 'run_shell_command' ...) to act on the plan, strictly adhering to the project's established conventions (detailed under 'Core Mandates').
|
||||
- **Implement:** Begin implementing the plan while gathering additional context as needed. Use 'grep_search', 'glob', 'read_file', and 'read_many_files' tools strategically when you encounter specific unknowns during implementation. Use the available tools (e.g., 'edit', 'write_file' 'run_shell_command' ...) to act on the plan, strictly adhering to the project's established conventions (detailed under 'Core Mandates').
|
||||
- **Adapt:** As you discover new information or encounter obstacles, update your plan and todos accordingly. Mark todos as in_progress when starting and completed when finishing each task. Add new todos if the scope expands. Refine your approach based on what you learn.
|
||||
- **Verify (Tests):** If applicable and feasible, verify the changes using the project's testing procedures. Identify the correct test commands and frameworks by examining 'README' files, build/package configuration (e.g., 'package.json'), or existing test execution patterns. NEVER assume standard test commands.
|
||||
- **Verify (Standards):** VERY IMPORTANT: After making code changes, execute the project-specific build, linting and type-checking commands (e.g., 'tsc', 'npm run lint', 'ruff check .') that you have identified for this project (or obtained from the user). This ensures code quality and adherence to standards. If unsure about these commands, you can ask the user if they'd like you to run them and if so how to.
|
||||
@@ -288,7 +288,7 @@ I've found some existing telemetry code. Let me mark the first todo as in_progre
|
||||
## Software Engineering Tasks
|
||||
When requested to perform tasks like fixing bugs, adding features, refactoring, or explaining code, follow this iterative approach:
|
||||
- **Plan:** After understanding the user's request, create an initial plan based on your existing knowledge and any immediately obvious context. Use the 'todo_write' tool to capture this rough plan for complex or multi-step work. Don't wait for complete understanding - start with what you know.
|
||||
- **Implement:** Begin implementing the plan while gathering additional context as needed. Use 'search_file_content', 'glob', 'read_file', and 'read_many_files' tools strategically when you encounter specific unknowns during implementation. Use the available tools (e.g., 'edit', 'write_file' 'run_shell_command' ...) to act on the plan, strictly adhering to the project's established conventions (detailed under 'Core Mandates').
|
||||
- **Implement:** Begin implementing the plan while gathering additional context as needed. Use 'grep_search', 'glob', 'read_file', and 'read_many_files' tools strategically when you encounter specific unknowns during implementation. Use the available tools (e.g., 'edit', 'write_file' 'run_shell_command' ...) to act on the plan, strictly adhering to the project's established conventions (detailed under 'Core Mandates').
|
||||
- **Adapt:** As you discover new information or encounter obstacles, update your plan and todos accordingly. Mark todos as in_progress when starting and completed when finishing each task. Add new todos if the scope expands. Refine your approach based on what you learn.
|
||||
- **Verify (Tests):** If applicable and feasible, verify the changes using the project's testing procedures. Identify the correct test commands and frameworks by examining 'README' files, build/package configuration (e.g., 'package.json'), or existing test execution patterns. NEVER assume standard test commands.
|
||||
- **Verify (Standards):** VERY IMPORTANT: After making code changes, execute the project-specific build, linting and type-checking commands (e.g., 'tsc', 'npm run lint', 'ruff check .') that you have identified for this project (or obtained from the user). This ensures code quality and adherence to standards. If unsure about these commands, you can ask the user if they'd like you to run them and if so how to.
|
||||
@@ -517,7 +517,7 @@ I've found some existing telemetry code. Let me mark the first todo as in_progre
|
||||
## Software Engineering Tasks
|
||||
When requested to perform tasks like fixing bugs, adding features, refactoring, or explaining code, follow this iterative approach:
|
||||
- **Plan:** After understanding the user's request, create an initial plan based on your existing knowledge and any immediately obvious context. Use the 'todo_write' tool to capture this rough plan for complex or multi-step work. Don't wait for complete understanding - start with what you know.
|
||||
- **Implement:** Begin implementing the plan while gathering additional context as needed. Use 'search_file_content', 'glob', 'read_file', and 'read_many_files' tools strategically when you encounter specific unknowns during implementation. Use the available tools (e.g., 'edit', 'write_file' 'run_shell_command' ...) to act on the plan, strictly adhering to the project's established conventions (detailed under 'Core Mandates').
|
||||
- **Implement:** Begin implementing the plan while gathering additional context as needed. Use 'grep_search', 'glob', 'read_file', and 'read_many_files' tools strategically when you encounter specific unknowns during implementation. Use the available tools (e.g., 'edit', 'write_file' 'run_shell_command' ...) to act on the plan, strictly adhering to the project's established conventions (detailed under 'Core Mandates').
|
||||
- **Adapt:** As you discover new information or encounter obstacles, update your plan and todos accordingly. Mark todos as in_progress when starting and completed when finishing each task. Add new todos if the scope expands. Refine your approach based on what you learn.
|
||||
- **Verify (Tests):** If applicable and feasible, verify the changes using the project's testing procedures. Identify the correct test commands and frameworks by examining 'README' files, build/package configuration (e.g., 'package.json'), or existing test execution patterns. NEVER assume standard test commands.
|
||||
- **Verify (Standards):** VERY IMPORTANT: After making code changes, execute the project-specific build, linting and type-checking commands (e.g., 'tsc', 'npm run lint', 'ruff check .') that you have identified for this project (or obtained from the user). This ensures code quality and adherence to standards. If unsure about these commands, you can ask the user if they'd like you to run them and if so how to.
|
||||
@@ -731,7 +731,7 @@ I've found some existing telemetry code. Let me mark the first todo as in_progre
|
||||
## Software Engineering Tasks
|
||||
When requested to perform tasks like fixing bugs, adding features, refactoring, or explaining code, follow this iterative approach:
|
||||
- **Plan:** After understanding the user's request, create an initial plan based on your existing knowledge and any immediately obvious context. Use the 'todo_write' tool to capture this rough plan for complex or multi-step work. Don't wait for complete understanding - start with what you know.
|
||||
- **Implement:** Begin implementing the plan while gathering additional context as needed. Use 'search_file_content', 'glob', 'read_file', and 'read_many_files' tools strategically when you encounter specific unknowns during implementation. Use the available tools (e.g., 'edit', 'write_file' 'run_shell_command' ...) to act on the plan, strictly adhering to the project's established conventions (detailed under 'Core Mandates').
|
||||
- **Implement:** Begin implementing the plan while gathering additional context as needed. Use 'grep_search', 'glob', 'read_file', and 'read_many_files' tools strategically when you encounter specific unknowns during implementation. Use the available tools (e.g., 'edit', 'write_file' 'run_shell_command' ...) to act on the plan, strictly adhering to the project's established conventions (detailed under 'Core Mandates').
|
||||
- **Adapt:** As you discover new information or encounter obstacles, update your plan and todos accordingly. Mark todos as in_progress when starting and completed when finishing each task. Add new todos if the scope expands. Refine your approach based on what you learn.
|
||||
- **Verify (Tests):** If applicable and feasible, verify the changes using the project's testing procedures. Identify the correct test commands and frameworks by examining 'README' files, build/package configuration (e.g., 'package.json'), or existing test execution patterns. NEVER assume standard test commands.
|
||||
- **Verify (Standards):** VERY IMPORTANT: After making code changes, execute the project-specific build, linting and type-checking commands (e.g., 'tsc', 'npm run lint', 'ruff check .') that you have identified for this project (or obtained from the user). This ensures code quality and adherence to standards. If unsure about these commands, you can ask the user if they'd like you to run them and if so how to.
|
||||
@@ -945,7 +945,7 @@ I've found some existing telemetry code. Let me mark the first todo as in_progre
|
||||
## Software Engineering Tasks
|
||||
When requested to perform tasks like fixing bugs, adding features, refactoring, or explaining code, follow this iterative approach:
|
||||
- **Plan:** After understanding the user's request, create an initial plan based on your existing knowledge and any immediately obvious context. Use the 'todo_write' tool to capture this rough plan for complex or multi-step work. Don't wait for complete understanding - start with what you know.
|
||||
- **Implement:** Begin implementing the plan while gathering additional context as needed. Use 'search_file_content', 'glob', 'read_file', and 'read_many_files' tools strategically when you encounter specific unknowns during implementation. Use the available tools (e.g., 'edit', 'write_file' 'run_shell_command' ...) to act on the plan, strictly adhering to the project's established conventions (detailed under 'Core Mandates').
|
||||
- **Implement:** Begin implementing the plan while gathering additional context as needed. Use 'grep_search', 'glob', 'read_file', and 'read_many_files' tools strategically when you encounter specific unknowns during implementation. Use the available tools (e.g., 'edit', 'write_file' 'run_shell_command' ...) to act on the plan, strictly adhering to the project's established conventions (detailed under 'Core Mandates').
|
||||
- **Adapt:** As you discover new information or encounter obstacles, update your plan and todos accordingly. Mark todos as in_progress when starting and completed when finishing each task. Add new todos if the scope expands. Refine your approach based on what you learn.
|
||||
- **Verify (Tests):** If applicable and feasible, verify the changes using the project's testing procedures. Identify the correct test commands and frameworks by examining 'README' files, build/package configuration (e.g., 'package.json'), or existing test execution patterns. NEVER assume standard test commands.
|
||||
- **Verify (Standards):** VERY IMPORTANT: After making code changes, execute the project-specific build, linting and type-checking commands (e.g., 'tsc', 'npm run lint', 'ruff check .') that you have identified for this project (or obtained from the user). This ensures code quality and adherence to standards. If unsure about these commands, you can ask the user if they'd like you to run them and if so how to.
|
||||
@@ -1159,7 +1159,7 @@ I've found some existing telemetry code. Let me mark the first todo as in_progre
|
||||
## Software Engineering Tasks
|
||||
When requested to perform tasks like fixing bugs, adding features, refactoring, or explaining code, follow this iterative approach:
|
||||
- **Plan:** After understanding the user's request, create an initial plan based on your existing knowledge and any immediately obvious context. Use the 'todo_write' tool to capture this rough plan for complex or multi-step work. Don't wait for complete understanding - start with what you know.
|
||||
- **Implement:** Begin implementing the plan while gathering additional context as needed. Use 'search_file_content', 'glob', 'read_file', and 'read_many_files' tools strategically when you encounter specific unknowns during implementation. Use the available tools (e.g., 'edit', 'write_file' 'run_shell_command' ...) to act on the plan, strictly adhering to the project's established conventions (detailed under 'Core Mandates').
|
||||
- **Implement:** Begin implementing the plan while gathering additional context as needed. Use 'grep_search', 'glob', 'read_file', and 'read_many_files' tools strategically when you encounter specific unknowns during implementation. Use the available tools (e.g., 'edit', 'write_file' 'run_shell_command' ...) to act on the plan, strictly adhering to the project's established conventions (detailed under 'Core Mandates').
|
||||
- **Adapt:** As you discover new information or encounter obstacles, update your plan and todos accordingly. Mark todos as in_progress when starting and completed when finishing each task. Add new todos if the scope expands. Refine your approach based on what you learn.
|
||||
- **Verify (Tests):** If applicable and feasible, verify the changes using the project's testing procedures. Identify the correct test commands and frameworks by examining 'README' files, build/package configuration (e.g., 'package.json'), or existing test execution patterns. NEVER assume standard test commands.
|
||||
- **Verify (Standards):** VERY IMPORTANT: After making code changes, execute the project-specific build, linting and type-checking commands (e.g., 'tsc', 'npm run lint', 'ruff check .') that you have identified for this project (or obtained from the user). This ensures code quality and adherence to standards. If unsure about these commands, you can ask the user if they'd like you to run them and if so how to.
|
||||
@@ -1373,7 +1373,7 @@ I've found some existing telemetry code. Let me mark the first todo as in_progre
|
||||
## Software Engineering Tasks
|
||||
When requested to perform tasks like fixing bugs, adding features, refactoring, or explaining code, follow this iterative approach:
|
||||
- **Plan:** After understanding the user's request, create an initial plan based on your existing knowledge and any immediately obvious context. Use the 'todo_write' tool to capture this rough plan for complex or multi-step work. Don't wait for complete understanding - start with what you know.
|
||||
- **Implement:** Begin implementing the plan while gathering additional context as needed. Use 'search_file_content', 'glob', 'read_file', and 'read_many_files' tools strategically when you encounter specific unknowns during implementation. Use the available tools (e.g., 'edit', 'write_file' 'run_shell_command' ...) to act on the plan, strictly adhering to the project's established conventions (detailed under 'Core Mandates').
|
||||
- **Implement:** Begin implementing the plan while gathering additional context as needed. Use 'grep_search', 'glob', 'read_file', and 'read_many_files' tools strategically when you encounter specific unknowns during implementation. Use the available tools (e.g., 'edit', 'write_file' 'run_shell_command' ...) to act on the plan, strictly adhering to the project's established conventions (detailed under 'Core Mandates').
|
||||
- **Adapt:** As you discover new information or encounter obstacles, update your plan and todos accordingly. Mark todos as in_progress when starting and completed when finishing each task. Add new todos if the scope expands. Refine your approach based on what you learn.
|
||||
- **Verify (Tests):** If applicable and feasible, verify the changes using the project's testing procedures. Identify the correct test commands and frameworks by examining 'README' files, build/package configuration (e.g., 'package.json'), or existing test execution patterns. NEVER assume standard test commands.
|
||||
- **Verify (Standards):** VERY IMPORTANT: After making code changes, execute the project-specific build, linting and type-checking commands (e.g., 'tsc', 'npm run lint', 'ruff check .') that you have identified for this project (or obtained from the user). This ensures code quality and adherence to standards. If unsure about these commands, you can ask the user if they'd like you to run them and if so how to.
|
||||
@@ -1587,7 +1587,7 @@ I've found some existing telemetry code. Let me mark the first todo as in_progre
|
||||
## Software Engineering Tasks
|
||||
When requested to perform tasks like fixing bugs, adding features, refactoring, or explaining code, follow this iterative approach:
|
||||
- **Plan:** After understanding the user's request, create an initial plan based on your existing knowledge and any immediately obvious context. Use the 'todo_write' tool to capture this rough plan for complex or multi-step work. Don't wait for complete understanding - start with what you know.
|
||||
- **Implement:** Begin implementing the plan while gathering additional context as needed. Use 'search_file_content', 'glob', 'read_file', and 'read_many_files' tools strategically when you encounter specific unknowns during implementation. Use the available tools (e.g., 'edit', 'write_file' 'run_shell_command' ...) to act on the plan, strictly adhering to the project's established conventions (detailed under 'Core Mandates').
|
||||
- **Implement:** Begin implementing the plan while gathering additional context as needed. Use 'grep_search', 'glob', 'read_file', and 'read_many_files' tools strategically when you encounter specific unknowns during implementation. Use the available tools (e.g., 'edit', 'write_file' 'run_shell_command' ...) to act on the plan, strictly adhering to the project's established conventions (detailed under 'Core Mandates').
|
||||
- **Adapt:** As you discover new information or encounter obstacles, update your plan and todos accordingly. Mark todos as in_progress when starting and completed when finishing each task. Add new todos if the scope expands. Refine your approach based on what you learn.
|
||||
- **Verify (Tests):** If applicable and feasible, verify the changes using the project's testing procedures. Identify the correct test commands and frameworks by examining 'README' files, build/package configuration (e.g., 'package.json'), or existing test execution patterns. NEVER assume standard test commands.
|
||||
- **Verify (Standards):** VERY IMPORTANT: After making code changes, execute the project-specific build, linting and type-checking commands (e.g., 'tsc', 'npm run lint', 'ruff check .') that you have identified for this project (or obtained from the user). This ensures code quality and adherence to standards. If unsure about these commands, you can ask the user if they'd like you to run them and if so how to.
|
||||
@@ -1801,7 +1801,7 @@ I've found some existing telemetry code. Let me mark the first todo as in_progre
|
||||
## Software Engineering Tasks
|
||||
When requested to perform tasks like fixing bugs, adding features, refactoring, or explaining code, follow this iterative approach:
|
||||
- **Plan:** After understanding the user's request, create an initial plan based on your existing knowledge and any immediately obvious context. Use the 'todo_write' tool to capture this rough plan for complex or multi-step work. Don't wait for complete understanding - start with what you know.
|
||||
- **Implement:** Begin implementing the plan while gathering additional context as needed. Use 'search_file_content', 'glob', 'read_file', and 'read_many_files' tools strategically when you encounter specific unknowns during implementation. Use the available tools (e.g., 'edit', 'write_file' 'run_shell_command' ...) to act on the plan, strictly adhering to the project's established conventions (detailed under 'Core Mandates').
|
||||
- **Implement:** Begin implementing the plan while gathering additional context as needed. Use 'grep_search', 'glob', 'read_file', and 'read_many_files' tools strategically when you encounter specific unknowns during implementation. Use the available tools (e.g., 'edit', 'write_file' 'run_shell_command' ...) to act on the plan, strictly adhering to the project's established conventions (detailed under 'Core Mandates').
|
||||
- **Adapt:** As you discover new information or encounter obstacles, update your plan and todos accordingly. Mark todos as in_progress when starting and completed when finishing each task. Add new todos if the scope expands. Refine your approach based on what you learn.
|
||||
- **Verify (Tests):** If applicable and feasible, verify the changes using the project's testing procedures. Identify the correct test commands and frameworks by examining 'README' files, build/package configuration (e.g., 'package.json'), or existing test execution patterns. NEVER assume standard test commands.
|
||||
- **Verify (Standards):** VERY IMPORTANT: After making code changes, execute the project-specific build, linting and type-checking commands (e.g., 'tsc', 'npm run lint', 'ruff check .') that you have identified for this project (or obtained from the user). This ensures code quality and adherence to standards. If unsure about these commands, you can ask the user if they'd like you to run them and if so how to.
|
||||
@@ -2015,7 +2015,7 @@ I've found some existing telemetry code. Let me mark the first todo as in_progre
|
||||
## Software Engineering Tasks
|
||||
When requested to perform tasks like fixing bugs, adding features, refactoring, or explaining code, follow this iterative approach:
|
||||
- **Plan:** After understanding the user's request, create an initial plan based on your existing knowledge and any immediately obvious context. Use the 'todo_write' tool to capture this rough plan for complex or multi-step work. Don't wait for complete understanding - start with what you know.
|
||||
- **Implement:** Begin implementing the plan while gathering additional context as needed. Use 'search_file_content', 'glob', 'read_file', and 'read_many_files' tools strategically when you encounter specific unknowns during implementation. Use the available tools (e.g., 'edit', 'write_file' 'run_shell_command' ...) to act on the plan, strictly adhering to the project's established conventions (detailed under 'Core Mandates').
|
||||
- **Implement:** Begin implementing the plan while gathering additional context as needed. Use 'grep_search', 'glob', 'read_file', and 'read_many_files' tools strategically when you encounter specific unknowns during implementation. Use the available tools (e.g., 'edit', 'write_file' 'run_shell_command' ...) to act on the plan, strictly adhering to the project's established conventions (detailed under 'Core Mandates').
|
||||
- **Adapt:** As you discover new information or encounter obstacles, update your plan and todos accordingly. Mark todos as in_progress when starting and completed when finishing each task. Add new todos if the scope expands. Refine your approach based on what you learn.
- **Verify (Tests):** If applicable and feasible, verify the changes using the project's testing procedures. Identify the correct test commands and frameworks by examining 'README' files, build/package configuration (e.g., 'package.json'), or existing test execution patterns. NEVER assume standard test commands.
- **Verify (Standards):** VERY IMPORTANT: After making code changes, execute the project-specific build, linting and type-checking commands (e.g., 'tsc', 'npm run lint', 'ruff check .') that you have identified for this project (or obtained from the user). This ensures code quality and adherence to standards. If unsure about these commands, you can ask the user if they'd like you to run them and if so how to.
@@ -2252,7 +2252,7 @@ I've found some existing telemetry code. Let me mark the first todo as in_progre
## Software Engineering Tasks
When requested to perform tasks like fixing bugs, adding features, refactoring, or explaining code, follow this iterative approach:
- **Plan:** After understanding the user's request, create an initial plan based on your existing knowledge and any immediately obvious context. Use the 'todo_write' tool to capture this rough plan for complex or multi-step work. Don't wait for complete understanding - start with what you know.
- **Implement:** Begin implementing the plan while gathering additional context as needed. Use 'search_file_content', 'glob', 'read_file', and 'read_many_files' tools strategically when you encounter specific unknowns during implementation. Use the available tools (e.g., 'edit', 'write_file' 'run_shell_command' ...) to act on the plan, strictly adhering to the project's established conventions (detailed under 'Core Mandates').
- **Implement:** Begin implementing the plan while gathering additional context as needed. Use 'grep_search', 'glob', 'read_file', and 'read_many_files' tools strategically when you encounter specific unknowns during implementation. Use the available tools (e.g., 'edit', 'write_file' 'run_shell_command' ...) to act on the plan, strictly adhering to the project's established conventions (detailed under 'Core Mandates').
- **Adapt:** As you discover new information or encounter obstacles, update your plan and todos accordingly. Mark todos as in_progress when starting and completed when finishing each task. Add new todos if the scope expands. Refine your approach based on what you learn.
- **Verify (Tests):** If applicable and feasible, verify the changes using the project's testing procedures. Identify the correct test commands and frameworks by examining 'README' files, build/package configuration (e.g., 'package.json'), or existing test execution patterns. NEVER assume standard test commands.
- **Verify (Standards):** VERY IMPORTANT: After making code changes, execute the project-specific build, linting and type-checking commands (e.g., 'tsc', 'npm run lint', 'ruff check .') that you have identified for this project (or obtained from the user). This ensures code quality and adherence to standards. If unsure about these commands, you can ask the user if they'd like you to run them and if so how to.
@@ -2549,7 +2549,7 @@ I've found some existing telemetry code. Let me mark the first todo as in_progre
## Software Engineering Tasks
When requested to perform tasks like fixing bugs, adding features, refactoring, or explaining code, follow this iterative approach:
- **Plan:** After understanding the user's request, create an initial plan based on your existing knowledge and any immediately obvious context. Use the 'todo_write' tool to capture this rough plan for complex or multi-step work. Don't wait for complete understanding - start with what you know.
- **Implement:** Begin implementing the plan while gathering additional context as needed. Use 'search_file_content', 'glob', 'read_file', and 'read_many_files' tools strategically when you encounter specific unknowns during implementation. Use the available tools (e.g., 'edit', 'write_file' 'run_shell_command' ...) to act on the plan, strictly adhering to the project's established conventions (detailed under 'Core Mandates').
- **Implement:** Begin implementing the plan while gathering additional context as needed. Use 'grep_search', 'glob', 'read_file', and 'read_many_files' tools strategically when you encounter specific unknowns during implementation. Use the available tools (e.g., 'edit', 'write_file' 'run_shell_command' ...) to act on the plan, strictly adhering to the project's established conventions (detailed under 'Core Mandates').
- **Adapt:** As you discover new information or encounter obstacles, update your plan and todos accordingly. Mark todos as in_progress when starting and completed when finishing each task. Add new todos if the scope expands. Refine your approach based on what you learn.
- **Verify (Tests):** If applicable and feasible, verify the changes using the project's testing procedures. Identify the correct test commands and frameworks by examining 'README' files, build/package configuration (e.g., 'package.json'), or existing test execution patterns. NEVER assume standard test commands.
- **Verify (Standards):** VERY IMPORTANT: After making code changes, execute the project-specific build, linting and type-checking commands (e.g., 'tsc', 'npm run lint', 'ruff check .') that you have identified for this project (or obtained from the user). This ensures code quality and adherence to standards. If unsure about these commands, you can ask the user if they'd like you to run them and if so how to.
@@ -2786,7 +2786,7 @@ I've found some existing telemetry code. Let me mark the first todo as in_progre
## Software Engineering Tasks
When requested to perform tasks like fixing bugs, adding features, refactoring, or explaining code, follow this iterative approach:
- **Plan:** After understanding the user's request, create an initial plan based on your existing knowledge and any immediately obvious context. Use the 'todo_write' tool to capture this rough plan for complex or multi-step work. Don't wait for complete understanding - start with what you know.
- **Implement:** Begin implementing the plan while gathering additional context as needed. Use 'search_file_content', 'glob', 'read_file', and 'read_many_files' tools strategically when you encounter specific unknowns during implementation. Use the available tools (e.g., 'edit', 'write_file' 'run_shell_command' ...) to act on the plan, strictly adhering to the project's established conventions (detailed under 'Core Mandates').
- **Implement:** Begin implementing the plan while gathering additional context as needed. Use 'grep_search', 'glob', 'read_file', and 'read_many_files' tools strategically when you encounter specific unknowns during implementation. Use the available tools (e.g., 'edit', 'write_file' 'run_shell_command' ...) to act on the plan, strictly adhering to the project's established conventions (detailed under 'Core Mandates').
- **Adapt:** As you discover new information or encounter obstacles, update your plan and todos accordingly. Mark todos as in_progress when starting and completed when finishing each task. Add new todos if the scope expands. Refine your approach based on what you learn.
- **Verify (Tests):** If applicable and feasible, verify the changes using the project's testing procedures. Identify the correct test commands and frameworks by examining 'README' files, build/package configuration (e.g., 'package.json'), or existing test execution patterns. NEVER assume standard test commands.
- **Verify (Standards):** VERY IMPORTANT: After making code changes, execute the project-specific build, linting and type-checking commands (e.g., 'tsc', 'npm run lint', 'ruff check .') that you have identified for this project (or obtained from the user). This ensures code quality and adherence to standards. If unsure about these commands, you can ask the user if they'd like you to run them and if so how to.
@@ -3079,7 +3079,7 @@ I've found some existing telemetry code. Let me mark the first todo as in_progre
## Software Engineering Tasks
When requested to perform tasks like fixing bugs, adding features, refactoring, or explaining code, follow this iterative approach:
- **Plan:** After understanding the user's request, create an initial plan based on your existing knowledge and any immediately obvious context. Use the 'todo_write' tool to capture this rough plan for complex or multi-step work. Don't wait for complete understanding - start with what you know.
- **Implement:** Begin implementing the plan while gathering additional context as needed. Use 'search_file_content', 'glob', 'read_file', and 'read_many_files' tools strategically when you encounter specific unknowns during implementation. Use the available tools (e.g., 'edit', 'write_file' 'run_shell_command' ...) to act on the plan, strictly adhering to the project's established conventions (detailed under 'Core Mandates').
- **Implement:** Begin implementing the plan while gathering additional context as needed. Use 'grep_search', 'glob', 'read_file', and 'read_many_files' tools strategically when you encounter specific unknowns during implementation. Use the available tools (e.g., 'edit', 'write_file' 'run_shell_command' ...) to act on the plan, strictly adhering to the project's established conventions (detailed under 'Core Mandates').
- **Adapt:** As you discover new information or encounter obstacles, update your plan and todos accordingly. Mark todos as in_progress when starting and completed when finishing each task. Add new todos if the scope expands. Refine your approach based on what you learn.
- **Verify (Tests):** If applicable and feasible, verify the changes using the project's testing procedures. Identify the correct test commands and frameworks by examining 'README' files, build/package configuration (e.g., 'package.json'), or existing test execution patterns. NEVER assume standard test commands.
- **Verify (Standards):** VERY IMPORTANT: After making code changes, execute the project-specific build, linting and type-checking commands (e.g., 'tsc', 'npm run lint', 'ruff check .') that you have identified for this project (or obtained from the user). This ensures code quality and adherence to standards. If unsure about these commands, you can ask the user if they'd like you to run them and if so how to.
@@ -3293,7 +3293,7 @@ I've found some existing telemetry code. Let me mark the first todo as in_progre
## Software Engineering Tasks
When requested to perform tasks like fixing bugs, adding features, refactoring, or explaining code, follow this iterative approach:
- **Plan:** After understanding the user's request, create an initial plan based on your existing knowledge and any immediately obvious context. Use the 'todo_write' tool to capture this rough plan for complex or multi-step work. Don't wait for complete understanding - start with what you know.
- **Implement:** Begin implementing the plan while gathering additional context as needed. Use 'search_file_content', 'glob', 'read_file', and 'read_many_files' tools strategically when you encounter specific unknowns during implementation. Use the available tools (e.g., 'edit', 'write_file' 'run_shell_command' ...) to act on the plan, strictly adhering to the project's established conventions (detailed under 'Core Mandates').
- **Implement:** Begin implementing the plan while gathering additional context as needed. Use 'grep_search', 'glob', 'read_file', and 'read_many_files' tools strategically when you encounter specific unknowns during implementation. Use the available tools (e.g., 'edit', 'write_file' 'run_shell_command' ...) to act on the plan, strictly adhering to the project's established conventions (detailed under 'Core Mandates').
- **Adapt:** As you discover new information or encounter obstacles, update your plan and todos accordingly. Mark todos as in_progress when starting and completed when finishing each task. Add new todos if the scope expands. Refine your approach based on what you learn.
- **Verify (Tests):** If applicable and feasible, verify the changes using the project's testing procedures. Identify the correct test commands and frameworks by examining 'README' files, build/package configuration (e.g., 'package.json'), or existing test execution patterns. NEVER assume standard test commands.
- **Verify (Standards):** VERY IMPORTANT: After making code changes, execute the project-specific build, linting and type-checking commands (e.g., 'tsc', 'npm run lint', 'ruff check .') that you have identified for this project (or obtained from the user). This ensures code quality and adherence to standards. If unsure about these commands, you can ask the user if they'd like you to run them and if so how to.

@@ -16,11 +16,11 @@ import {

import type { Content, GenerateContentResponse, Part } from '@google/genai';
import {
  findCompressSplitPoint,
  isThinkingDefault,
  isThinkingSupported,
  GeminiClient,
} from './client.js';
import { findCompressSplitPoint } from '../services/chatCompressionService.js';
import {
  AuthType,
  type ContentGenerator,
@@ -42,7 +42,6 @@ import { setSimulate429 } from '../utils/testUtils.js';
import { tokenLimit } from './tokenLimits.js';
import { ideContextStore } from '../ide/ideContext.js';
import { uiTelemetryService } from '../telemetry/uiTelemetry.js';
import { QwenLogger } from '../telemetry/index.js';

// Mock fs module to prevent actual file system operations during tests
const mockFileSystem = new Map<string, string>();
@@ -101,6 +100,22 @@ vi.mock('../utils/errorReporting', () => ({ reportError: vi.fn() }));
vi.mock('../utils/nextSpeakerChecker', () => ({
  checkNextSpeaker: vi.fn().mockResolvedValue(null),
}));
vi.mock('../utils/environmentContext', () => ({
  getEnvironmentContext: vi
    .fn()
    .mockResolvedValue([{ text: 'Mocked env context' }]),
  getInitialChatHistory: vi.fn(async (_config, extraHistory) => [
    {
      role: 'user',
      parts: [{ text: 'Mocked env context' }],
    },
    {
      role: 'model',
      parts: [{ text: 'Got it. Thanks for the context!' }],
    },
    ...(extraHistory ?? []),
  ]),
}));
vi.mock('../utils/generateContentResponseUtilities', () => ({
  getResponseText: (result: GenerateContentResponse) =>
    result.candidates?.[0]?.content?.parts?.map((part) => part.text).join('') ||
@@ -136,6 +151,10 @@ vi.mock('../ide/ideContext.js');
vi.mock('../telemetry/uiTelemetry.js', () => ({
  uiTelemetryService: mockUiTelemetryService,
}));
vi.mock('../telemetry/loggers.js', () => ({
  logChatCompression: vi.fn(),
  logNextSpeakerCheck: vi.fn(),
}));

/**
 * Array.fromAsync ponyfill, which will be available in es 2024.
@@ -619,7 +638,8 @@ describe('Gemini Client (client.ts)', () => {
  });

  it('logs a telemetry event when compressing', async () => {
    vi.spyOn(QwenLogger.prototype, 'logChatCompressionEvent');
    const { logChatCompression } = await import('../telemetry/loggers.js');
    vi.mocked(logChatCompression).mockClear();

    const MOCKED_TOKEN_LIMIT = 1000;
    const MOCKED_CONTEXT_PERCENTAGE_THRESHOLD = 0.5;
@@ -627,19 +647,37 @@ describe('Gemini Client (client.ts)', () => {
    vi.spyOn(client['config'], 'getChatCompression').mockReturnValue({
      contextPercentageThreshold: MOCKED_CONTEXT_PERCENTAGE_THRESHOLD,
    });
    const history = [{ role: 'user', parts: [{ text: '...history...' }] }];
    // Need multiple history items so there's something to compress
    const history = [
      { role: 'user', parts: [{ text: '...history 1...' }] },
      { role: 'model', parts: [{ text: '...history 2...' }] },
      { role: 'user', parts: [{ text: '...history 3...' }] },
      { role: 'model', parts: [{ text: '...history 4...' }] },
    ];
    mockGetHistory.mockReturnValue(history);

    // Token count needs to be ABOVE the threshold to trigger compression
    const originalTokenCount =
      MOCKED_TOKEN_LIMIT * MOCKED_CONTEXT_PERCENTAGE_THRESHOLD;
      MOCKED_TOKEN_LIMIT * MOCKED_CONTEXT_PERCENTAGE_THRESHOLD + 1;

    vi.mocked(uiTelemetryService.getLastPromptTokenCount).mockReturnValue(
      originalTokenCount,
    );

    // We need to control the estimated new token count.
    // We mock startChat to return a chat with a known history.
    // Mock the summary response from the chat
    const summaryText = 'This is a summary.';
    mockGenerateContentFn.mockResolvedValue({
      candidates: [
        {
          content: {
            role: 'model',
            parts: [{ text: summaryText }],
          },
        },
      ],
    } as unknown as GenerateContentResponse);

    // Mock startChat to complete the compression flow
    const splitPoint = findCompressSplitPoint(history, 0.7);
    const historyToKeep = history.slice(splitPoint);
    const newCompressedHistory: Content[] = [
@@ -659,52 +697,36 @@ describe('Gemini Client (client.ts)', () => {
      .fn()
      .mockResolvedValue(mockNewChat as GeminiChat);

    const totalChars = newCompressedHistory.reduce(
      (total, content) => total + JSON.stringify(content).length,
      0,
    );
    const newTokenCount = Math.floor(totalChars / 4);

    // Mock the summary response from the chat
    mockGenerateContentFn.mockResolvedValue({
      candidates: [
        {
          content: {
            role: 'model',
            parts: [{ text: summaryText }],
          },
        },
      ],
    } as unknown as GenerateContentResponse);

    await client.tryCompressChat('prompt-id-3', false);

    expect(QwenLogger.prototype.logChatCompressionEvent).toHaveBeenCalledWith(
    expect(logChatCompression).toHaveBeenCalledWith(
      expect.anything(),
      expect.objectContaining({
        tokens_before: originalTokenCount,
        tokens_after: newTokenCount,
      }),
    );
    expect(uiTelemetryService.setLastPromptTokenCount).toHaveBeenCalledWith(
      newTokenCount,
    );
    expect(uiTelemetryService.setLastPromptTokenCount).toHaveBeenCalledTimes(
      1,
    );
    expect(uiTelemetryService.setLastPromptTokenCount).toHaveBeenCalled();
  });

  it('should trigger summarization if token count is at threshold with contextPercentageThreshold setting', async () => {
  it('should trigger summarization if token count is above threshold with contextPercentageThreshold setting', async () => {
    const MOCKED_TOKEN_LIMIT = 1000;
    const MOCKED_CONTEXT_PERCENTAGE_THRESHOLD = 0.5;
    vi.mocked(tokenLimit).mockReturnValue(MOCKED_TOKEN_LIMIT);
    vi.spyOn(client['config'], 'getChatCompression').mockReturnValue({
      contextPercentageThreshold: MOCKED_CONTEXT_PERCENTAGE_THRESHOLD,
    });
    const history = [{ role: 'user', parts: [{ text: '...history...' }] }];
    // Need multiple history items so there's something to compress
    const history = [
      { role: 'user', parts: [{ text: '...history 1...' }] },
      { role: 'model', parts: [{ text: '...history 2...' }] },
      { role: 'user', parts: [{ text: '...history 3...' }] },
      { role: 'model', parts: [{ text: '...history 4...' }] },
    ];
    mockGetHistory.mockReturnValue(history);

    // Token count needs to be ABOVE the threshold to trigger compression
    const originalTokenCount =
      MOCKED_TOKEN_LIMIT * MOCKED_CONTEXT_PERCENTAGE_THRESHOLD;
      MOCKED_TOKEN_LIMIT * MOCKED_CONTEXT_PERCENTAGE_THRESHOLD + 1;

    vi.mocked(uiTelemetryService.getLastPromptTokenCount).mockReturnValue(
      originalTokenCount,
@@ -864,7 +886,13 @@ describe('Gemini Client (client.ts)', () => {
  });

  it('should always trigger summarization when force is true, regardless of token count', async () => {
    const history = [{ role: 'user', parts: [{ text: '...history...' }] }];
    // Need multiple history items so there's something to compress
    const history = [
      { role: 'user', parts: [{ text: '...history 1...' }] },
      { role: 'model', parts: [{ text: '...history 2...' }] },
      { role: 'user', parts: [{ text: '...history 3...' }] },
      { role: 'model', parts: [{ text: '...history 4...' }] },
    ];
    mockGetHistory.mockReturnValue(history);

    const originalTokenCount = 100; // Well below threshold, but > estimated new count

@@ -25,13 +25,11 @@ import {
import type { ContentGenerator } from './contentGenerator.js';
import { GeminiChat } from './geminiChat.js';
import {
  getCompressionPrompt,
  getCoreSystemPrompt,
  getCustomSystemPrompt,
  getPlanModeSystemReminder,
  getSubagentSystemReminder,
} from './prompts.js';
import { tokenLimit } from './tokenLimits.js';
import {
  CompressionStatus,
  GeminiEventType,
@@ -42,6 +40,11 @@ import {

// Services
import { type ChatRecordingService } from '../services/chatRecordingService.js';
import {
  ChatCompressionService,
  COMPRESSION_PRESERVE_THRESHOLD,
  COMPRESSION_TOKEN_THRESHOLD,
} from '../services/chatCompressionService.js';
import { LoopDetectionService } from '../services/loopDetectionService.js';

// Tools
@@ -50,21 +53,18 @@ import { TaskTool } from '../tools/task.js';
// Telemetry
import {
  NextSpeakerCheckEvent,
  logChatCompression,
  logNextSpeakerCheck,
  makeChatCompressionEvent,
  uiTelemetryService,
} from '../telemetry/index.js';

// Utilities
import {
  getDirectoryContextString,
  getEnvironmentContext,
  getInitialChatHistory,
} from '../utils/environmentContext.js';
import { reportError } from '../utils/errorReporting.js';
import { getErrorMessage } from '../utils/errors.js';
import { checkNextSpeaker } from '../utils/nextSpeakerChecker.js';
import { flatMapTextParts, getResponseText } from '../utils/partUtils.js';
import { flatMapTextParts } from '../utils/partUtils.js';
import { retryWithBackoff } from '../utils/retry.js';

// IDE integration
@@ -85,68 +85,8 @@ export function isThinkingDefault(model: string) {
  return model.startsWith('gemini-2.5') || model === DEFAULT_GEMINI_MODEL_AUTO;
}

/**
 * Returns the index of the oldest item to keep when compressing. May return
 * contents.length which indicates that everything should be compressed.
 *
 * Exported for testing purposes.
 */
export function findCompressSplitPoint(
  contents: Content[],
  fraction: number,
): number {
  if (fraction <= 0 || fraction >= 1) {
    throw new Error('Fraction must be between 0 and 1');
  }

  const charCounts = contents.map((content) => JSON.stringify(content).length);
  const totalCharCount = charCounts.reduce((a, b) => a + b, 0);
  const targetCharCount = totalCharCount * fraction;

  let lastSplitPoint = 0; // 0 is always valid (compress nothing)
  let cumulativeCharCount = 0;
  for (let i = 0; i < contents.length; i++) {
    const content = contents[i];
    if (
      content.role === 'user' &&
      !content.parts?.some((part) => !!part.functionResponse)
    ) {
      if (cumulativeCharCount >= targetCharCount) {
        return i;
      }
      lastSplitPoint = i;
    }
    cumulativeCharCount += charCounts[i];
  }

  // We found no split points after targetCharCount.
  // Check if it's safe to compress everything.
  const lastContent = contents[contents.length - 1];
  if (
    lastContent?.role === 'model' &&
    !lastContent?.parts?.some((part) => part.functionCall)
  ) {
    return contents.length;
  }

  // Can't compress everything so just compress at last splitpoint.
  return lastSplitPoint;
}
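
A minimal usage sketch of the function above (an editorial illustration, not part of the diff; the history values are hypothetical). With fraction 0.7, the returned index is the first plain user message at or past 70% of the serialized history, so roughly the last 30% of the conversation survives compression:

import type { Content } from '@google/genai';
import { findCompressSplitPoint } from '../services/chatCompressionService.js';

const history: Content[] = [
  { role: 'user', parts: [{ text: 'old question' }] },
  { role: 'model', parts: [{ text: 'old answer' }] },
  { role: 'user', parts: [{ text: 'recent question' }] },
  { role: 'model', parts: [{ text: 'recent answer' }] },
];

const splitPoint = findCompressSplitPoint(history, 0.7); // index of the oldest item to keep
const historyToCompress = history.slice(0, splitPoint); // summarized away
const historyToKeep = history.slice(splitPoint); // preserved verbatim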

const MAX_TURNS = 100;

/**
 * Threshold for compression token count as a fraction of the model's token limit.
 * If the chat history exceeds this threshold, it will be compressed.
 */
const COMPRESSION_TOKEN_THRESHOLD = 0.7;

/**
 * The fraction of the latest chat history to keep. A value of 0.3
 * means that only the last 30% of the chat history will be kept after compression.
 */
const COMPRESSION_PRESERVE_THRESHOLD = 0.3;
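
A worked example of how the two constants interact (hypothetical numbers, mirroring the tryCompressChat logic below):

const limit = 1000; // tokenLimit(model), assumed for illustration
const triggersAbove = COMPRESSION_TOKEN_THRESHOLD * limit; // compression starts past 700 tokens
const splitFraction = 1 - COMPRESSION_PRESERVE_THRESHOLD; // 0.7, the fraction passed to findCompressSplitPoint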

export class GeminiClient {
  private chat?: GeminiChat;
  private readonly generateContentConfig: GenerateContentConfig = {
@@ -243,23 +183,13 @@ export class GeminiClient {
  async startChat(extraHistory?: Content[]): Promise<GeminiChat> {
    this.forceFullIdeContext = true;
    this.hasFailedCompressionAttempt = false;
    const envParts = await getEnvironmentContext(this.config);

    const toolRegistry = this.config.getToolRegistry();
    const toolDeclarations = toolRegistry.getFunctionDeclarations();
    const tools: Tool[] = [{ functionDeclarations: toolDeclarations }];

    const history: Content[] = [
      {
        role: 'user',
        parts: envParts,
      },
      {
        role: 'model',
        parts: [{ text: 'Got it. Thanks for the context!' }],
      },
      ...(extraHistory ?? []),
    ];
    const history = await getInitialChatHistory(this.config, extraHistory);

    try {
      const userMemory = this.config.getUserMemory();
      const model = this.config.getModel();
@@ -503,14 +433,15 @@ export class GeminiClient {
      userMemory,
      this.config.getModel(),
    );
    const environment = await getEnvironmentContext(this.config);
    const initialHistory = await getInitialChatHistory(this.config);

    // Create a mock request content to count total tokens
    const mockRequestContent = [
      {
        role: 'system' as const,
        parts: [{ text: systemPrompt }, ...environment],
        parts: [{ text: systemPrompt }],
      },
      ...initialHistory,
      ...currentHistory,
    ];

@@ -732,127 +663,37 @@ export class GeminiClient {
    prompt_id: string,
    force: boolean = false,
  ): Promise<ChatCompressionInfo> {
    const model = this.config.getModel();
    const compressionService = new ChatCompressionService();

    const curatedHistory = this.getChat().getHistory(true);
    const { newHistory, info } = await compressionService.compress(
      this.getChat(),
      prompt_id,
      force,
      this.config.getModel(),
      this.config,
      this.hasFailedCompressionAttempt,
    );

    // Regardless of `force`, don't do anything if the history is empty.
    if (
      curatedHistory.length === 0 ||
      (this.hasFailedCompressionAttempt && !force)
    // Handle compression result
    if (info.compressionStatus === CompressionStatus.COMPRESSED) {
      // Success: update chat with new compressed history
      if (newHistory) {
        this.chat = await this.startChat(newHistory);
        this.forceFullIdeContext = true;
      }
    } else if (
      info.compressionStatus ===
        CompressionStatus.COMPRESSION_FAILED_INFLATED_TOKEN_COUNT ||
      info.compressionStatus ===
        CompressionStatus.COMPRESSION_FAILED_EMPTY_SUMMARY
    ) {
      return {
        originalTokenCount: 0,
        newTokenCount: 0,
        compressionStatus: CompressionStatus.NOOP,
      };
    }

    const originalTokenCount = uiTelemetryService.getLastPromptTokenCount();

    const contextPercentageThreshold =
      this.config.getChatCompression()?.contextPercentageThreshold;

    // Don't compress if not forced and we are under the limit.
    if (!force) {
      const threshold =
        contextPercentageThreshold ?? COMPRESSION_TOKEN_THRESHOLD;
      if (originalTokenCount < threshold * tokenLimit(model)) {
        return {
          originalTokenCount,
          newTokenCount: originalTokenCount,
          compressionStatus: CompressionStatus.NOOP,
        };
      // Track failed attempts (only mark as failed if not forced)
      if (!force) {
        this.hasFailedCompressionAttempt = true;
      }
    }

    const splitPoint = findCompressSplitPoint(
      curatedHistory,
      1 - COMPRESSION_PRESERVE_THRESHOLD,
    );

    const historyToCompress = curatedHistory.slice(0, splitPoint);
    const historyToKeep = curatedHistory.slice(splitPoint);

    const summaryResponse = await this.config
      .getContentGenerator()
      .generateContent(
        {
          model,
          contents: [
            ...historyToCompress,
            {
              role: 'user',
              parts: [
                {
                  text: 'First, reason in your scratchpad. Then, generate the <state_snapshot>.',
                },
              ],
            },
          ],
          config: {
            systemInstruction: { text: getCompressionPrompt() },
          },
        },
        prompt_id,
      );
    const summary = getResponseText(summaryResponse) ?? '';

    const chat = await this.startChat([
      {
        role: 'user',
        parts: [{ text: summary }],
      },
      {
        role: 'model',
        parts: [{ text: 'Got it. Thanks for the additional context!' }],
      },
      ...historyToKeep,
    ]);
    this.forceFullIdeContext = true;

    // Estimate token count 1 token ≈ 4 characters
    const newTokenCount = Math.floor(
      chat
        .getHistory()
        .reduce((total, content) => total + JSON.stringify(content).length, 0) /
        4,
    );

    logChatCompression(
      this.config,
      makeChatCompressionEvent({
        tokens_before: originalTokenCount,
        tokens_after: newTokenCount,
      }),
    );

    if (newTokenCount > originalTokenCount) {
      this.hasFailedCompressionAttempt = !force && true;
      return {
        originalTokenCount,
        newTokenCount,
        compressionStatus:
          CompressionStatus.COMPRESSION_FAILED_INFLATED_TOKEN_COUNT,
      };
    } else {
      this.chat = chat; // Chat compression successful, set new state.
      uiTelemetryService.setLastPromptTokenCount(newTokenCount);
    }

    logChatCompression(
      this.config,
      makeChatCompressionEvent({
        tokens_before: originalTokenCount,
        tokens_after: newTokenCount,
      }),
    );

    return {
      originalTokenCount,
      newTokenCount,
      compressionStatus: CompressionStatus.COMPRESSED,
    };
    return info;
  }
}
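
The net effect of this hunk, sketched under the service API introduced in chatCompressionService.ts below (illustrative only): the compression policy and summarization move into the service, and the client merely applies the result.

const { newHistory, info } = await new ChatCompressionService().compress(
  chat, promptId, force, model, config, hasFailedBefore,
);
if (info.compressionStatus === CompressionStatus.COMPRESSED && newHistory) {
  // restart the chat on the compressed history
} else if (info.compressionStatus !== CompressionStatus.NOOP && !force) {
  // remember the failure so the next non-forced attempt is skipped
}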

@@ -4,13 +4,9 @@
 * SPDX-License-Identifier: Apache-2.0
 */

import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
import { describe, it, expect, vi } from 'vitest';
import type { ContentGenerator } from './contentGenerator.js';
import {
  createContentGenerator,
  AuthType,
  createContentGeneratorConfig,
} from './contentGenerator.js';
import { createContentGenerator, AuthType } from './contentGenerator.js';
import { createCodeAssistContentGenerator } from '../code_assist/codeAssist.js';
import { GoogleGenAI } from '@google/genai';
import type { Config } from '../config/config.js';
@@ -110,83 +106,3 @@ describe('createContentGenerator', () => {
    );
  });
});

describe('createContentGeneratorConfig', () => {
  const mockConfig = {
    getModel: vi.fn().mockReturnValue('gemini-pro'),
    setModel: vi.fn(),
    flashFallbackHandler: vi.fn(),
    getProxy: vi.fn(),
    getEnableOpenAILogging: vi.fn().mockReturnValue(false),
    getSamplingParams: vi.fn().mockReturnValue(undefined),
    getContentGeneratorTimeout: vi.fn().mockReturnValue(undefined),
    getContentGeneratorMaxRetries: vi.fn().mockReturnValue(undefined),
    getContentGeneratorDisableCacheControl: vi.fn().mockReturnValue(undefined),
    getContentGeneratorSamplingParams: vi.fn().mockReturnValue(undefined),
    getCliVersion: vi.fn().mockReturnValue('1.0.0'),
  } as unknown as Config;

  beforeEach(() => {
    // Reset modules to re-evaluate imports and environment variables
    vi.resetModules();
    vi.clearAllMocks();
  });

  afterEach(() => {
    vi.unstubAllEnvs();
  });

  it('should configure for Gemini using GEMINI_API_KEY when set', async () => {
    vi.stubEnv('GEMINI_API_KEY', 'env-gemini-key');
    const config = await createContentGeneratorConfig(
      mockConfig,
      AuthType.USE_GEMINI,
    );
    expect(config.apiKey).toBe('env-gemini-key');
    expect(config.vertexai).toBe(false);
  });

  it('should not configure for Gemini if GEMINI_API_KEY is empty', async () => {
    vi.stubEnv('GEMINI_API_KEY', '');
    const config = await createContentGeneratorConfig(
      mockConfig,
      AuthType.USE_GEMINI,
    );
    expect(config.apiKey).toBeUndefined();
    expect(config.vertexai).toBeUndefined();
  });

  it('should configure for Vertex AI using GOOGLE_API_KEY when set', async () => {
    vi.stubEnv('GOOGLE_API_KEY', 'env-google-key');
    const config = await createContentGeneratorConfig(
      mockConfig,
      AuthType.USE_VERTEX_AI,
    );
    expect(config.apiKey).toBe('env-google-key');
    expect(config.vertexai).toBe(true);
  });

  it('should configure for Vertex AI using GCP project and location when set', async () => {
    vi.stubEnv('GOOGLE_API_KEY', undefined);
    vi.stubEnv('GOOGLE_CLOUD_PROJECT', 'env-gcp-project');
    vi.stubEnv('GOOGLE_CLOUD_LOCATION', 'env-gcp-location');
    const config = await createContentGeneratorConfig(
      mockConfig,
      AuthType.USE_VERTEX_AI,
    );
    expect(config.vertexai).toBe(true);
    expect(config.apiKey).toBeUndefined();
  });

  it('should not configure for Vertex AI if required env vars are empty', async () => {
    vi.stubEnv('GOOGLE_API_KEY', '');
    vi.stubEnv('GOOGLE_CLOUD_PROJECT', '');
    vi.stubEnv('GOOGLE_CLOUD_LOCATION', '');
    const config = await createContentGeneratorConfig(
      mockConfig,
      AuthType.USE_VERTEX_AI,
    );
    expect(config.apiKey).toBeUndefined();
    expect(config.vertexai).toBeUndefined();
  });
});

@@ -14,8 +14,8 @@ import type {
} from '@google/genai';
import { GoogleGenAI } from '@google/genai';
import { createCodeAssistContentGenerator } from '../code_assist/codeAssist.js';
import type { Config } from '../config/config.js';
import { DEFAULT_QWEN_MODEL } from '../config/models.js';
import type { Config } from '../config/config.js';

import type { UserTierId } from '../code_assist/types.js';
import { InstallationManager } from '../utils/installationManager.js';
@@ -82,53 +82,37 @@ export function createContentGeneratorConfig(
  authType: AuthType | undefined,
  generationConfig?: Partial<ContentGeneratorConfig>,
): ContentGeneratorConfig {
  const geminiApiKey = process.env['GEMINI_API_KEY'] || undefined;
  const googleApiKey = process.env['GOOGLE_API_KEY'] || undefined;
  const googleCloudProject = process.env['GOOGLE_CLOUD_PROJECT'] || undefined;
  const googleCloudLocation = process.env['GOOGLE_CLOUD_LOCATION'] || undefined;

  const newContentGeneratorConfig: ContentGeneratorConfig = {
  const newContentGeneratorConfig: Partial<ContentGeneratorConfig> = {
    ...(generationConfig || {}),
    model: generationConfig?.model || DEFAULT_QWEN_MODEL,
    authType,
    proxy: config?.getProxy(),
  };

  // If we are using Google auth or we are in Cloud Shell, there is nothing else to validate for now
  if (
    authType === AuthType.LOGIN_WITH_GOOGLE ||
    authType === AuthType.CLOUD_SHELL
  ) {
    return newContentGeneratorConfig;
  }

  if (authType === AuthType.USE_GEMINI && geminiApiKey) {
    newContentGeneratorConfig.apiKey = geminiApiKey;
    newContentGeneratorConfig.vertexai = false;

    return newContentGeneratorConfig;
  }

  if (
    authType === AuthType.USE_VERTEX_AI &&
    (googleApiKey || (googleCloudProject && googleCloudLocation))
  ) {
    newContentGeneratorConfig.apiKey = googleApiKey;
    newContentGeneratorConfig.vertexai = true;

    return newContentGeneratorConfig;
  }

  if (authType === AuthType.QWEN_OAUTH) {
    // For Qwen OAuth, we'll handle the API key dynamically in createContentGenerator
    // Set a special marker to indicate this is Qwen OAuth
    newContentGeneratorConfig.apiKey = 'QWEN_OAUTH_DYNAMIC_TOKEN';
    newContentGeneratorConfig.model = DEFAULT_QWEN_MODEL;

    return newContentGeneratorConfig;
    return {
      ...newContentGeneratorConfig,
      model: DEFAULT_QWEN_MODEL,
      apiKey: 'QWEN_OAUTH_DYNAMIC_TOKEN',
    } as ContentGeneratorConfig;
  }

  return newContentGeneratorConfig;
  if (authType === AuthType.USE_OPENAI) {
    if (!newContentGeneratorConfig.apiKey) {
      throw new Error('OpenAI API key is required');
    }

    return {
      ...newContentGeneratorConfig,
      model: newContentGeneratorConfig?.model || 'qwen3-coder-plus',
    } as ContentGeneratorConfig;
  }

  return {
    ...newContentGeneratorConfig,
    model: newContentGeneratorConfig?.model || DEFAULT_QWEN_MODEL,
  } as ContentGeneratorConfig;
}
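
A hedged usage sketch of the resolution order above (return shapes abbreviated; config and AuthType come from the surrounding module):

const qwen = createContentGeneratorConfig(config, AuthType.QWEN_OAUTH);
// => { ..., model: DEFAULT_QWEN_MODEL, apiKey: 'QWEN_OAUTH_DYNAMIC_TOKEN' }

const openai = createContentGeneratorConfig(config, AuthType.USE_OPENAI, {
  apiKey: 'sk-example', // required: the new USE_OPENAI branch throws without it
});
// => { ..., model: 'qwen3-coder-plus' unless generationConfig.model is set }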

export async function createContentGenerator(

@@ -1,2 +1,8 @@
export const DEFAULT_TIMEOUT = 120000;
export const DEFAULT_MAX_RETRIES = 3;

export const DEFAULT_OPENAI_BASE_URL = 'https://api.openai.com/v1';
export const DEFAULT_DASHSCOPE_BASE_URL =
  'https://dashscope.aliyuncs.com/compatible-mode/v1';
export const DEFAULT_DEEPSEEK_BASE_URL = 'https://api.deepseek.com/v1';
export const DEFAULT_OPEN_ROUTER_BASE_URL = 'https://openrouter.ai/api/v1';

@@ -7,6 +7,7 @@
import { describe, it, expect, beforeEach } from 'vitest';
import { OpenAIContentConverter } from './converter.js';
import type { StreamingToolCallParser } from './streamingToolCallParser.js';
import type { GenerateContentParameters, Content } from '@google/genai';

describe('OpenAIContentConverter', () => {
  let converter: OpenAIContentConverter;
@@ -68,4 +69,77 @@ describe('OpenAIContentConverter', () => {
      expect(parser.getBuffer(0)).toBe('');
    });
  });

  describe('convertGeminiRequestToOpenAI', () => {
    const createRequestWithFunctionResponse = (
      response: Record<string, unknown>,
    ): GenerateContentParameters => {
      const contents: Content[] = [
        {
          role: 'model',
          parts: [
            {
              functionCall: {
                id: 'call_1',
                name: 'shell',
                args: {},
              },
            },
          ],
        },
        {
          role: 'user',
          parts: [
            {
              functionResponse: {
                id: 'call_1',
                name: 'shell',
                response,
              },
            },
          ],
        },
      ];
      return {
        model: 'models/test',
        contents,
      };
    };

    it('should extract raw output from function response objects', () => {
      const request = createRequestWithFunctionResponse({
        output: 'Raw output text',
      });

      const messages = converter.convertGeminiRequestToOpenAI(request);
      const toolMessage = messages.find((message) => message.role === 'tool');

      expect(toolMessage).toBeDefined();
      expect(toolMessage?.content).toBe('Raw output text');
    });

    it('should prioritize error field when present', () => {
      const request = createRequestWithFunctionResponse({
        error: 'Command failed',
      });

      const messages = converter.convertGeminiRequestToOpenAI(request);
      const toolMessage = messages.find((message) => message.role === 'tool');

      expect(toolMessage).toBeDefined();
      expect(toolMessage?.content).toBe('Command failed');
    });

    it('should stringify non-string responses', () => {
      const request = createRequestWithFunctionResponse({
        data: { value: 42 },
      });

      const messages = converter.convertGeminiRequestToOpenAI(request);
      const toolMessage = messages.find((message) => message.role === 'tool');

      expect(toolMessage).toBeDefined();
      expect(toolMessage?.content).toBe('{"data":{"value":42}}');
    });
  });
});

@@ -276,10 +276,7 @@ export class OpenAIContentConverter {
        messages.push({
          role: 'tool' as const,
          tool_call_id: funcResponse.id || '',
          content:
            typeof funcResponse.response === 'string'
              ? funcResponse.response
              : JSON.stringify(funcResponse.response),
          content: this.extractFunctionResponseContent(funcResponse.response),
        });
      }
      return;
@@ -359,6 +356,36 @@ export class OpenAIContentConverter {
    return { textParts, functionCalls, functionResponses, mediaParts };
  }

  private extractFunctionResponseContent(response: unknown): string {
    if (response === null || response === undefined) {
      return '';
    }

    if (typeof response === 'string') {
      return response;
    }

    if (typeof response === 'object') {
      const responseObject = response as Record<string, unknown>;
      const output = responseObject['output'];
      if (typeof output === 'string') {
        return output;
      }

      const error = responseObject['error'];
      if (typeof error === 'string') {
        return error;
      }
    }

    try {
      const serialized = JSON.stringify(response);
      return serialized ?? String(response);
    } catch {
      return String(response);
    }
  }
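
A sketch of the precedence the helper implements (it is private, so this is illustrative rather than a public API):

this.extractFunctionResponseContent({ output: 'Raw output text' }); // 'Raw output text'
this.extractFunctionResponseContent({ error: 'Command failed' }); // 'Command failed' (only when no string output)
this.extractFunctionResponseContent({ data: { value: 42 } }); // '{"data":{"value":42}}'
this.extractFunctionResponseContent(undefined); // ''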

  /**
   * Determine media type from MIME type
   */

@@ -2,7 +2,11 @@ import OpenAI from 'openai';
import type { Config } from '../../../config/config.js';
import type { ContentGeneratorConfig } from '../../contentGenerator.js';
import { AuthType } from '../../contentGenerator.js';
import { DEFAULT_TIMEOUT, DEFAULT_MAX_RETRIES } from '../constants.js';
import {
  DEFAULT_TIMEOUT,
  DEFAULT_MAX_RETRIES,
  DEFAULT_DASHSCOPE_BASE_URL,
} from '../constants.js';
import { tokenLimit } from '../../tokenLimits.js';
import type {
  OpenAICompatibleProvider,
@@ -53,7 +57,7 @@ export class DashScopeOpenAICompatibleProvider
  buildClient(): OpenAI {
    const {
      apiKey,
      baseUrl,
      baseUrl = DEFAULT_DASHSCOPE_BASE_URL,
      timeout = DEFAULT_TIMEOUT,
      maxRetries = DEFAULT_MAX_RETRIES,
    } = this.contentGeneratorConfig;
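
The destructuring default above is what puts the new constant into effect; a minimal sketch (destructuring defaults apply only when the property is undefined):

const { baseUrl = DEFAULT_DASHSCOPE_BASE_URL } = {} as Partial<ContentGeneratorConfig>;
// baseUrl === 'https://dashscope.aliyuncs.com/compatible-mode/v1' when unset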

@@ -153,6 +153,9 @@ export enum CompressionStatus {
  /** The compression failed due to an error counting tokens */
  COMPRESSION_FAILED_TOKEN_COUNT_ERROR,

  /** The compression failed due to receiving an empty or null summary */
  COMPRESSION_FAILED_EMPTY_SUMMARY,

  /** The compression was not necessary and no action was taken */
  NOOP,
}
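
A sketch of how a caller can branch on the enum, including the new member (mirrors the client.ts handling earlier in this diff; `info` is assumed to be a ChatCompressionInfo):

switch (info.compressionStatus) {
  case CompressionStatus.COMPRESSED:
    break; // swap in the compressed history
  case CompressionStatus.COMPRESSION_FAILED_EMPTY_SUMMARY:
  case CompressionStatus.COMPRESSION_FAILED_INFLATED_TOKEN_COUNT:
    break; // surfaced to the caller as a NOOP, as client.ts does above
  default:
    break; // NOOP and token-count errors: leave the chat untouched
}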

@@ -48,6 +48,7 @@ export * from './utils/systemEncoding.js';
export * from './utils/textUtils.js';
export * from './utils/formatters.js';
export * from './utils/generateContentResponseUtilities.js';
export * from './utils/ripgrepUtils.js';
export * from './utils/filesearch/fileSearch.js';
export * from './utils/errorParsing.js';
export * from './utils/workspaceContext.js';

@@ -8,7 +8,7 @@ import { OpenAIContentGenerator } from '../core/openaiContentGenerator/index.js'
import { DashScopeOpenAICompatibleProvider } from '../core/openaiContentGenerator/provider/dashscope.js';
import type { IQwenOAuth2Client } from './qwenOAuth2.js';
import { SharedTokenManager } from './sharedTokenManager.js';
import type { Config } from '../config/config.js';
import { type Config } from '../config/config.js';
import type {
  GenerateContentParameters,
  GenerateContentResponse,
@@ -18,10 +18,7 @@ import type {
  EmbedContentResponse,
} from '@google/genai';
import type { ContentGeneratorConfig } from '../core/contentGenerator.js';

// Default fallback base URL if no endpoint is provided
const DEFAULT_QWEN_BASE_URL =
  'https://dashscope.aliyuncs.com/compatible-mode/v1';
import { DEFAULT_DASHSCOPE_BASE_URL } from '../core/openaiContentGenerator/constants.js';

/**
 * Qwen Content Generator that uses Qwen OAuth tokens with automatic refresh
@@ -58,7 +55,7 @@ export class QwenContentGenerator extends OpenAIContentGenerator {
   * Get the current endpoint URL with proper protocol and /v1 suffix
   */
  private getCurrentEndpoint(resourceUrl?: string): string {
    const baseEndpoint = resourceUrl || DEFAULT_QWEN_BASE_URL;
    const baseEndpoint = resourceUrl || DEFAULT_DASHSCOPE_BASE_URL;
    const suffix = '/v1';

    // Normalize the URL: add protocol if missing, ensure /v1 suffix
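
Hypothetical examples of the normalization the comment describes (the implementation itself is elided from this hunk, so exact behavior is an assumption):

this.getCurrentEndpoint(); // 'https://dashscope.aliyuncs.com/compatible-mode/v1'
this.getCurrentEndpoint('portal.qwen.ai'); // protocol added and '/v1' appended
this.getCurrentEndpoint('https://example.com/v1'); // already normalized, returned as-is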

372
packages/core/src/services/chatCompressionService.test.ts
Normal file
@@ -0,0 +1,372 @@
/**
 * @license
 * Copyright 2025 Google LLC
 * SPDX-License-Identifier: Apache-2.0
 */

import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
import {
  ChatCompressionService,
  findCompressSplitPoint,
} from './chatCompressionService.js';
import type { Content, GenerateContentResponse } from '@google/genai';
import { CompressionStatus } from '../core/turn.js';
import { uiTelemetryService } from '../telemetry/uiTelemetry.js';
import { tokenLimit } from '../core/tokenLimits.js';
import type { GeminiChat } from '../core/geminiChat.js';
import type { Config } from '../config/config.js';
import { getInitialChatHistory } from '../utils/environmentContext.js';
import type { ContentGenerator } from '../core/contentGenerator.js';

vi.mock('../telemetry/uiTelemetry.js');
vi.mock('../core/tokenLimits.js');
vi.mock('../telemetry/loggers.js');
vi.mock('../utils/environmentContext.js');

describe('findCompressSplitPoint', () => {
  it('should throw an error for non-positive numbers', () => {
    expect(() => findCompressSplitPoint([], 0)).toThrow(
      'Fraction must be between 0 and 1',
    );
  });

  it('should throw an error for a fraction greater than or equal to 1', () => {
    expect(() => findCompressSplitPoint([], 1)).toThrow(
      'Fraction must be between 0 and 1',
    );
  });

  it('should handle an empty history', () => {
    expect(findCompressSplitPoint([], 0.5)).toBe(0);
  });

  it('should handle a fraction in the middle', () => {
    const history: Content[] = [
      { role: 'user', parts: [{ text: 'This is the first message.' }] }, // JSON length: 66 (19%)
      { role: 'model', parts: [{ text: 'This is the second message.' }] }, // JSON length: 68 (40%)
      { role: 'user', parts: [{ text: 'This is the third message.' }] }, // JSON length: 66 (60%)
      { role: 'model', parts: [{ text: 'This is the fourth message.' }] }, // JSON length: 68 (80%)
      { role: 'user', parts: [{ text: 'This is the fifth message.' }] }, // JSON length: 65 (100%)
    ];
    expect(findCompressSplitPoint(history, 0.5)).toBe(4);
  });

  it('should handle a fraction of last index', () => {
    const history: Content[] = [
      { role: 'user', parts: [{ text: 'This is the first message.' }] }, // JSON length: 66 (19%)
      { role: 'model', parts: [{ text: 'This is the second message.' }] }, // JSON length: 68 (40%)
      { role: 'user', parts: [{ text: 'This is the third message.' }] }, // JSON length: 66 (60%)
      { role: 'model', parts: [{ text: 'This is the fourth message.' }] }, // JSON length: 68 (80%)
      { role: 'user', parts: [{ text: 'This is the fifth message.' }] }, // JSON length: 65 (100%)
    ];
    expect(findCompressSplitPoint(history, 0.9)).toBe(4);
  });

  it('should handle a fraction of after last index', () => {
    const history: Content[] = [
      { role: 'user', parts: [{ text: 'This is the first message.' }] }, // JSON length: 66 (24%)
      { role: 'model', parts: [{ text: 'This is the second message.' }] }, // JSON length: 68 (50%)
      { role: 'user', parts: [{ text: 'This is the third message.' }] }, // JSON length: 66 (74%)
      { role: 'model', parts: [{ text: 'This is the fourth message.' }] }, // JSON length: 68 (100%)
    ];
    expect(findCompressSplitPoint(history, 0.8)).toBe(4);
  });

  it('should return earlier splitpoint if no valid ones are after threshhold', () => {
    const history: Content[] = [
      { role: 'user', parts: [{ text: 'This is the first message.' }] },
      { role: 'model', parts: [{ text: 'This is the second message.' }] },
      { role: 'user', parts: [{ text: 'This is the third message.' }] },
      { role: 'model', parts: [{ functionCall: { name: 'foo', args: {} } }] },
    ];
    // Can't return 4 because the previous item has a function call.
    expect(findCompressSplitPoint(history, 0.99)).toBe(2);
  });

  it('should handle a history with only one item', () => {
    const historyWithEmptyParts: Content[] = [
      { role: 'user', parts: [{ text: 'Message 1' }] },
    ];
    expect(findCompressSplitPoint(historyWithEmptyParts, 0.5)).toBe(0);
  });

  it('should handle history with weird parts', () => {
    const historyWithEmptyParts: Content[] = [
      { role: 'user', parts: [{ text: 'Message 1' }] },
      {
        role: 'model',
        parts: [{ fileData: { fileUri: 'derp', mimeType: 'text/plain' } }],
      },
      { role: 'user', parts: [{ text: 'Message 2' }] },
    ];
    expect(findCompressSplitPoint(historyWithEmptyParts, 0.5)).toBe(2);
  });
});

describe('ChatCompressionService', () => {
  let service: ChatCompressionService;
  let mockChat: GeminiChat;
  let mockConfig: Config;
  const mockModel = 'gemini-pro';
  const mockPromptId = 'test-prompt-id';

  beforeEach(() => {
    service = new ChatCompressionService();
    mockChat = {
      getHistory: vi.fn(),
    } as unknown as GeminiChat;
    mockConfig = {
      getChatCompression: vi.fn(),
      getContentGenerator: vi.fn(),
    } as unknown as Config;

    vi.mocked(tokenLimit).mockReturnValue(1000);
    vi.mocked(uiTelemetryService.getLastPromptTokenCount).mockReturnValue(500);
    vi.mocked(getInitialChatHistory).mockImplementation(
      async (_config, extraHistory) => extraHistory || [],
    );
  });

  afterEach(() => {
    vi.restoreAllMocks();
  });

  it('should return NOOP if history is empty', async () => {
    vi.mocked(mockChat.getHistory).mockReturnValue([]);
    const result = await service.compress(
      mockChat,
      mockPromptId,
      false,
      mockModel,
      mockConfig,
      false,
    );
    expect(result.info.compressionStatus).toBe(CompressionStatus.NOOP);
    expect(result.newHistory).toBeNull();
  });

  it('should return NOOP if previously failed and not forced', async () => {
    vi.mocked(mockChat.getHistory).mockReturnValue([
      { role: 'user', parts: [{ text: 'hi' }] },
    ]);
    const result = await service.compress(
      mockChat,
      mockPromptId,
      false,
      mockModel,
      mockConfig,
      true,
    );
    expect(result.info.compressionStatus).toBe(CompressionStatus.NOOP);
    expect(result.newHistory).toBeNull();
  });

  it('should return NOOP if under token threshold and not forced', async () => {
    vi.mocked(mockChat.getHistory).mockReturnValue([
      { role: 'user', parts: [{ text: 'hi' }] },
    ]);
    vi.mocked(uiTelemetryService.getLastPromptTokenCount).mockReturnValue(600);
    vi.mocked(tokenLimit).mockReturnValue(1000);
    // Threshold is 0.7 * 1000 = 700. 600 < 700, so NOOP.

    const result = await service.compress(
      mockChat,
      mockPromptId,
      false,
      mockModel,
      mockConfig,
      false,
    );
    expect(result.info.compressionStatus).toBe(CompressionStatus.NOOP);
    expect(result.newHistory).toBeNull();
  });

  it('should compress if over token threshold', async () => {
    const history: Content[] = [
      { role: 'user', parts: [{ text: 'msg1' }] },
      { role: 'model', parts: [{ text: 'msg2' }] },
      { role: 'user', parts: [{ text: 'msg3' }] },
      { role: 'model', parts: [{ text: 'msg4' }] },
    ];
    vi.mocked(mockChat.getHistory).mockReturnValue(history);
    vi.mocked(uiTelemetryService.getLastPromptTokenCount).mockReturnValue(800);
    vi.mocked(tokenLimit).mockReturnValue(1000);
    const mockGenerateContent = vi.fn().mockResolvedValue({
      candidates: [
        {
          content: {
            parts: [{ text: 'Summary' }],
          },
        },
      ],
    } as unknown as GenerateContentResponse);
    vi.mocked(mockConfig.getContentGenerator).mockReturnValue({
      generateContent: mockGenerateContent,
    } as unknown as ContentGenerator);

    const result = await service.compress(
      mockChat,
      mockPromptId,
      false,
      mockModel,
      mockConfig,
      false,
    );

    expect(result.info.compressionStatus).toBe(CompressionStatus.COMPRESSED);
    expect(result.newHistory).not.toBeNull();
    expect(result.newHistory![0].parts![0].text).toBe('Summary');
    expect(mockGenerateContent).toHaveBeenCalled();
  });

  it('should force compress even if under threshold', async () => {
    const history: Content[] = [
      { role: 'user', parts: [{ text: 'msg1' }] },
      { role: 'model', parts: [{ text: 'msg2' }] },
      { role: 'user', parts: [{ text: 'msg3' }] },
      { role: 'model', parts: [{ text: 'msg4' }] },
    ];
    vi.mocked(mockChat.getHistory).mockReturnValue(history);
    vi.mocked(uiTelemetryService.getLastPromptTokenCount).mockReturnValue(100);
    vi.mocked(tokenLimit).mockReturnValue(1000);

    const mockGenerateContent = vi.fn().mockResolvedValue({
      candidates: [
        {
          content: {
            parts: [{ text: 'Summary' }],
          },
        },
      ],
    } as unknown as GenerateContentResponse);
    vi.mocked(mockConfig.getContentGenerator).mockReturnValue({
      generateContent: mockGenerateContent,
    } as unknown as ContentGenerator);

    const result = await service.compress(
      mockChat,
      mockPromptId,
      true, // forced
      mockModel,
      mockConfig,
      false,
    );

    expect(result.info.compressionStatus).toBe(CompressionStatus.COMPRESSED);
    expect(result.newHistory).not.toBeNull();
  });

  it('should return FAILED if new token count is inflated', async () => {
    const history: Content[] = [
      { role: 'user', parts: [{ text: 'msg1' }] },
      { role: 'model', parts: [{ text: 'msg2' }] },
    ];
    vi.mocked(mockChat.getHistory).mockReturnValue(history);
    vi.mocked(uiTelemetryService.getLastPromptTokenCount).mockReturnValue(10);
    vi.mocked(tokenLimit).mockReturnValue(1000);

    const longSummary = 'a'.repeat(1000); // Long summary to inflate token count
    const mockGenerateContent = vi.fn().mockResolvedValue({
      candidates: [
        {
          content: {
            parts: [{ text: longSummary }],
          },
        },
      ],
    } as unknown as GenerateContentResponse);
    vi.mocked(mockConfig.getContentGenerator).mockReturnValue({
      generateContent: mockGenerateContent,
    } as unknown as ContentGenerator);

    const result = await service.compress(
      mockChat,
      mockPromptId,
      true,
      mockModel,
      mockConfig,
      false,
    );

    expect(result.info.compressionStatus).toBe(
      CompressionStatus.COMPRESSION_FAILED_INFLATED_TOKEN_COUNT,
    );
    expect(result.newHistory).toBeNull();
  });

  it('should return FAILED if summary is empty string', async () => {
    const history: Content[] = [
      { role: 'user', parts: [{ text: 'msg1' }] },
      { role: 'model', parts: [{ text: 'msg2' }] },
    ];
    vi.mocked(mockChat.getHistory).mockReturnValue(history);
    vi.mocked(uiTelemetryService.getLastPromptTokenCount).mockReturnValue(100);
    vi.mocked(tokenLimit).mockReturnValue(1000);

    const mockGenerateContent = vi.fn().mockResolvedValue({
      candidates: [
        {
          content: {
            parts: [{ text: '' }], // Empty summary
          },
        },
      ],
    } as unknown as GenerateContentResponse);
    vi.mocked(mockConfig.getContentGenerator).mockReturnValue({
      generateContent: mockGenerateContent,
    } as unknown as ContentGenerator);

    const result = await service.compress(
      mockChat,
      mockPromptId,
      true,
      mockModel,
      mockConfig,
      false,
    );

    expect(result.info.compressionStatus).toBe(
      CompressionStatus.COMPRESSION_FAILED_EMPTY_SUMMARY,
    );
    expect(result.newHistory).toBeNull();
    expect(result.info.originalTokenCount).toBe(100);
    expect(result.info.newTokenCount).toBe(100);
  });

  it('should return FAILED if summary is only whitespace', async () => {
    const history: Content[] = [
      { role: 'user', parts: [{ text: 'msg1' }] },
      { role: 'model', parts: [{ text: 'msg2' }] },
    ];
    vi.mocked(mockChat.getHistory).mockReturnValue(history);
    vi.mocked(uiTelemetryService.getLastPromptTokenCount).mockReturnValue(100);
    vi.mocked(tokenLimit).mockReturnValue(1000);

    const mockGenerateContent = vi.fn().mockResolvedValue({
      candidates: [
        {
          content: {
            parts: [{ text: ' \n\t ' }], // Only whitespace
          },
        },
      ],
    } as unknown as GenerateContentResponse);
    vi.mocked(mockConfig.getContentGenerator).mockReturnValue({
      generateContent: mockGenerateContent,
    } as unknown as ContentGenerator);

    const result = await service.compress(
      mockChat,
      mockPromptId,
      true,
      mockModel,
      mockConfig,
      false,
    );

    expect(result.info.compressionStatus).toBe(
      CompressionStatus.COMPRESSION_FAILED_EMPTY_SUMMARY,
    );
    expect(result.newHistory).toBeNull();
  });
});
|
||||
packages/core/src/services/chatCompressionService.ts (new file, 235 lines)

@@ -0,0 +1,235 @@
/**
 * @license
 * Copyright 2025 Google LLC
 * SPDX-License-Identifier: Apache-2.0
 */

import type { Content } from '@google/genai';
import type { Config } from '../config/config.js';
import type { GeminiChat } from '../core/geminiChat.js';
import { type ChatCompressionInfo, CompressionStatus } from '../core/turn.js';
import { uiTelemetryService } from '../telemetry/uiTelemetry.js';
import { tokenLimit } from '../core/tokenLimits.js';
import { getCompressionPrompt } from '../core/prompts.js';
import { getResponseText } from '../utils/partUtils.js';
import { logChatCompression } from '../telemetry/loggers.js';
import { makeChatCompressionEvent } from '../telemetry/types.js';
import { getInitialChatHistory } from '../utils/environmentContext.js';

/**
 * Threshold for compression token count as a fraction of the model's token limit.
 * If the chat history exceeds this threshold, it will be compressed.
 */
export const COMPRESSION_TOKEN_THRESHOLD = 0.7;

/**
 * The fraction of the latest chat history to keep. A value of 0.3
 * means that only the last 30% of the chat history will be kept after compression.
 */
export const COMPRESSION_PRESERVE_THRESHOLD = 0.3;

/**
 * Returns the index of the oldest item to keep when compressing. May return
 * contents.length, which indicates that everything should be compressed.
 *
 * Exported for testing purposes.
 */
export function findCompressSplitPoint(
  contents: Content[],
  fraction: number,
): number {
  if (fraction <= 0 || fraction >= 1) {
    throw new Error('Fraction must be between 0 and 1');
  }

  const charCounts = contents.map((content) => JSON.stringify(content).length);
  const totalCharCount = charCounts.reduce((a, b) => a + b, 0);
  const targetCharCount = totalCharCount * fraction;

  let lastSplitPoint = 0; // 0 is always valid (compress nothing)
  let cumulativeCharCount = 0;
  for (let i = 0; i < contents.length; i++) {
    const content = contents[i];
    if (
      content.role === 'user' &&
      !content.parts?.some((part) => !!part.functionResponse)
    ) {
      if (cumulativeCharCount >= targetCharCount) {
        return i;
      }
      lastSplitPoint = i;
    }
    cumulativeCharCount += charCounts[i];
  }

  // We found no split points after targetCharCount.
  // Check if it's safe to compress everything.
  const lastContent = contents[contents.length - 1];
  if (
    lastContent?.role === 'model' &&
    !lastContent?.parts?.some((part) => part.functionCall)
  ) {
    return contents.length;
  }

  // Can't compress everything, so just compress at the last split point.
  return lastSplitPoint;
}

export class ChatCompressionService {
  async compress(
    chat: GeminiChat,
    promptId: string,
    force: boolean,
    model: string,
    config: Config,
    hasFailedCompressionAttempt: boolean,
  ): Promise<{ newHistory: Content[] | null; info: ChatCompressionInfo }> {
    const curatedHistory = chat.getHistory(true);

    // Regardless of `force`, don't do anything if the history is empty.
    if (
      curatedHistory.length === 0 ||
      (hasFailedCompressionAttempt && !force)
    ) {
      return {
        newHistory: null,
        info: {
          originalTokenCount: 0,
          newTokenCount: 0,
          compressionStatus: CompressionStatus.NOOP,
        },
      };
    }

    const originalTokenCount = uiTelemetryService.getLastPromptTokenCount();

    const contextPercentageThreshold =
      config.getChatCompression()?.contextPercentageThreshold;

    // Don't compress if not forced and we are under the limit.
    if (!force) {
      const threshold =
        contextPercentageThreshold ?? COMPRESSION_TOKEN_THRESHOLD;
      if (originalTokenCount < threshold * tokenLimit(model)) {
        return {
          newHistory: null,
          info: {
            originalTokenCount,
            newTokenCount: originalTokenCount,
            compressionStatus: CompressionStatus.NOOP,
          },
        };
      }
    }

    const splitPoint = findCompressSplitPoint(
      curatedHistory,
      1 - COMPRESSION_PRESERVE_THRESHOLD,
    );

    const historyToCompress = curatedHistory.slice(0, splitPoint);
    const historyToKeep = curatedHistory.slice(splitPoint);

    if (historyToCompress.length === 0) {
      return {
        newHistory: null,
        info: {
          originalTokenCount,
          newTokenCount: originalTokenCount,
          compressionStatus: CompressionStatus.NOOP,
        },
      };
    }

    const summaryResponse = await config.getContentGenerator().generateContent(
      {
        model,
        contents: [
          ...historyToCompress,
          {
            role: 'user',
            parts: [
              {
                text: 'First, reason in your scratchpad. Then, generate the <state_snapshot>.',
              },
            ],
          },
        ],
        config: {
          systemInstruction: getCompressionPrompt(),
        },
      },
      promptId,
    );
    const summary = getResponseText(summaryResponse) ?? '';
    const isSummaryEmpty = !summary || summary.trim().length === 0;

    let newTokenCount = originalTokenCount;
    let extraHistory: Content[] = [];

    if (!isSummaryEmpty) {
      extraHistory = [
        {
          role: 'user',
          parts: [{ text: summary }],
        },
        {
          role: 'model',
          parts: [{ text: 'Got it. Thanks for the additional context!' }],
        },
        ...historyToKeep,
      ];

      // Use a shared utility to construct the initial history for an accurate token count.
      const fullNewHistory = await getInitialChatHistory(config, extraHistory);

      // Estimate the token count: 1 token ≈ 4 characters.
      newTokenCount = Math.floor(
        fullNewHistory.reduce(
          (total, content) => total + JSON.stringify(content).length,
          0,
        ) / 4,
      );
    }

    logChatCompression(
      config,
      makeChatCompressionEvent({
        tokens_before: originalTokenCount,
        tokens_after: newTokenCount,
      }),
    );

    if (isSummaryEmpty) {
      return {
        newHistory: null,
        info: {
          originalTokenCount,
          newTokenCount: originalTokenCount,
          compressionStatus: CompressionStatus.COMPRESSION_FAILED_EMPTY_SUMMARY,
        },
      };
    } else if (newTokenCount > originalTokenCount) {
      return {
        newHistory: null,
        info: {
          originalTokenCount,
          newTokenCount,
          compressionStatus:
            CompressionStatus.COMPRESSION_FAILED_INFLATED_TOKEN_COUNT,
        },
      };
    } else {
      uiTelemetryService.setLastPromptTokenCount(newTokenCount);
      return {
        newHistory: extraHistory,
        info: {
          originalTokenCount,
          newTokenCount,
          compressionStatus: CompressionStatus.COMPRESSED,
        },
      };
    }
  }
}
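For orientation, a minimal sketch of how the pieces of this new service fit together. It assumes only the exports shown in the file above; the history contents and the 32k model limit are illustrative values, not taken from the diff:

// Illustrative sketch only: exercises the split-point and threshold logic above.
import type { Content } from '@google/genai';
import {
  findCompressSplitPoint,
  COMPRESSION_PRESERVE_THRESHOLD,
  COMPRESSION_TOKEN_THRESHOLD,
} from './chatCompressionService.js';

const history: Content[] = [
  { role: 'user', parts: [{ text: 'a long first question ...' }] },
  { role: 'model', parts: [{ text: 'a long first answer ...' }] },
  { role: 'user', parts: [{ text: 'short follow-up' }] },
  { role: 'model', parts: [{ text: 'short reply' }] },
];

// Split at the first plain user message (no functionResponse parts) that sits
// past 70% of the serialized character weight; keep roughly the last 30%.
const splitPoint = findCompressSplitPoint(
  history,
  1 - COMPRESSION_PRESERVE_THRESHOLD, // 0.7
);
const historyToCompress = history.slice(0, splitPoint);
const historyToKeep = history.slice(splitPoint);

// When not forced, compression only triggers once the last prompt's token
// count reaches the threshold fraction of the model's limit, e.g. for a
// hypothetical 32k-token model: 0.7 * 32_000 = 22_400 tokens.
const MODEL_TOKEN_LIMIT = 32_000; // illustrative value
const shouldCompress = (lastPromptTokenCount: number): boolean =>
  lastPromptTokenCount >= COMPRESSION_TOKEN_THRESHOLD * MODEL_TOKEN_LIMIT;

// The post-compression size is then estimated from the serialized history at
// roughly one token per four characters, mirroring the service above.
const estimateTokens = (contents: Content[]): number =>
  Math.floor(
    contents.reduce((total, c) => total + JSON.stringify(c).length, 0) / 4,
  );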
@@ -32,7 +32,6 @@ import { GeminiChat } from '../core/geminiChat.js';
 import { executeToolCall } from '../core/nonInteractiveToolExecutor.js';
 import type { ToolRegistry } from '../tools/tool-registry.js';
 import { type AnyDeclarativeTool } from '../tools/tools.js';
-import { getEnvironmentContext } from '../utils/environmentContext.js';
 import { ContextState, SubAgentScope } from './subagent.js';
 import type {
   ModelConfig,
@@ -44,7 +43,20 @@ import { SubagentTerminateMode } from './types.js';

 vi.mock('../core/geminiChat.js');
 vi.mock('../core/contentGenerator.js');
-vi.mock('../utils/environmentContext.js');
+vi.mock('../utils/environmentContext.js', () => ({
+  getEnvironmentContext: vi.fn().mockResolvedValue([{ text: 'Env Context' }]),
+  getInitialChatHistory: vi.fn(async (_config, extraHistory) => [
+    {
+      role: 'user',
+      parts: [{ text: 'Env Context' }],
+    },
+    {
+      role: 'model',
+      parts: [{ text: 'Got it. Thanks for the context!' }],
+    },
+    ...(extraHistory ?? []),
+  ]),
+}));
 vi.mock('../core/nonInteractiveToolExecutor.js');
 vi.mock('../ide/ide-client.js');
 vi.mock('../core/client.js');
@@ -174,9 +186,6 @@ describe('subagent.ts', () => {
   beforeEach(async () => {
     vi.clearAllMocks();

-    vi.mocked(getEnvironmentContext).mockResolvedValue([
-      { text: 'Env Context' },
-    ]);
     vi.mocked(createContentGenerator).mockResolvedValue({
       getGenerativeModel: vi.fn(),
       // eslint-disable-next-line @typescript-eslint/no-explicit-any

@@ -16,7 +16,7 @@ import type {
   ToolConfirmationOutcome,
   ToolCallConfirmationDetails,
 } from '../tools/tools.js';
-import { getEnvironmentContext } from '../utils/environmentContext.js';
+import { getInitialChatHistory } from '../utils/environmentContext.js';
 import type {
   Content,
   Part,
@@ -807,11 +807,7 @@ export class SubAgentScope {
       );
     }

-    const envParts = await getEnvironmentContext(this.runtimeContext);
-    const envHistory: Content[] = [
-      { role: 'user', parts: envParts },
-      { role: 'model', parts: [{ text: 'Got it. Thanks for the context!' }] },
-    ];
+    const envHistory = await getInitialChatHistory(this.runtimeContext);

     const start_history = [
       ...envHistory,
@@ -131,16 +131,14 @@ describe('ExitPlanModeTool', () => {
       }

       const result = await invocation.execute(signal);
-      const expectedLlmMessage =
-        'User has approved your plan. You can now start coding. Start with updating your todo list if applicable.';

-      expect(result).toEqual({
-        llmContent: expectedLlmMessage,
-        returnDisplay: {
-          type: 'plan_summary',
-          message: 'User approved the plan.',
-          plan: params.plan,
-        },
+      expect(result.llmContent).toContain(
+        'User has approved your plan. You can now start coding',
+      );
+      expect(result.returnDisplay).toEqual({
+        type: 'plan_summary',
+        message: 'User approved the plan.',
+        plan: params.plan,
       });

       expect(mockConfig.setApprovalMode).toHaveBeenCalledWith(
@@ -188,15 +186,12 @@ describe('ExitPlanModeTool', () => {

       const result = await invocation.execute(signal);

-      expect(result).toEqual({
-        llmContent: JSON.stringify({
-          success: false,
-          plan: params.plan,
-          error: 'Plan execution was not approved. Remaining in plan mode.',
-        }),
-        returnDisplay:
-          'Plan execution was not approved. Remaining in plan mode.',
-      });
+      expect(result.llmContent).toBe(
+        'Plan execution was not approved. Remaining in plan mode.',
+      );
+      expect(result.returnDisplay).toBe(
+        'Plan execution was not approved. Remaining in plan mode.',
+      );

       expect(mockConfig.setApprovalMode).toHaveBeenCalledWith(
         ApprovalMode.PLAN,
@@ -215,50 +210,6 @@ describe('ExitPlanModeTool', () => {
       );
     });

-    it('should handle execution errors gracefully', async () => {
-      const params: ExitPlanModeParams = {
-        plan: 'Test plan',
-      };
-
-      const invocation = tool.build(params);
-      const confirmation = await invocation.shouldConfirmExecute(
-        new AbortController().signal,
-      );
-      if (confirmation) {
-        // Don't approve the plan so we go through the rejection path
-        await confirmation.onConfirm(ToolConfirmationOutcome.Cancel);
-      }
-
-      // Create a spy to simulate an error during the execution
-      const consoleSpy = vi
-        .spyOn(console, 'error')
-        .mockImplementation(() => {});
-
-      // Mock JSON.stringify to throw an error in the rejection path
-      const originalStringify = JSON.stringify;
-      vi.spyOn(JSON, 'stringify').mockImplementationOnce(() => {
-        throw new Error('JSON stringify error');
-      });
-
-      const result = await invocation.execute(new AbortController().signal);
-
-      expect(result).toEqual({
-        llmContent: JSON.stringify({
-          success: false,
-          error: 'Failed to present plan. Detail: JSON stringify error',
-        }),
-        returnDisplay: 'Error presenting plan: JSON stringify error',
-      });
-
-      expect(consoleSpy).toHaveBeenCalledWith(
-        '[ExitPlanModeTool] Error executing exit_plan_mode: JSON stringify error',
-      );
-
-      // Restore original JSON.stringify
-      JSON.stringify = originalStringify;
-      consoleSpy.mockRestore();
-    });
-
     it('should return empty tool locations', () => {
       const params: ExitPlanModeParams = {
         plan: 'Test plan',
@@ -115,17 +115,12 @@ class ExitPlanModeToolInvocation extends BaseToolInvocation<
       const rejectionMessage =
         'Plan execution was not approved. Remaining in plan mode.';
       return {
-        llmContent: JSON.stringify({
-          success: false,
-          plan,
-          error: rejectionMessage,
-        }),
+        llmContent: rejectionMessage,
         returnDisplay: rejectionMessage,
       };
     }

-    const llmMessage =
-      'User has approved your plan. You can now start coding. Start with updating your todo list if applicable.';
+    const llmMessage = `User has approved your plan. You can now start coding. Start with updating your todo list if applicable.`;
     const displayMessage = 'User approved the plan.';

     return {
@@ -142,11 +137,11 @@ class ExitPlanModeToolInvocation extends BaseToolInvocation<
       console.error(
         `[ExitPlanModeTool] Error executing exit_plan_mode: ${errorMessage}`,
       );

+      const errorLlmContent = `Failed to present plan: ${errorMessage}`;
+
       return {
-        llmContent: JSON.stringify({
-          success: false,
-          error: `Failed to present plan. Detail: ${errorMessage}`,
-        }),
+        llmContent: errorLlmContent,
         returnDisplay: `Error presenting plan: ${errorMessage}`,
       };
     }
@@ -241,9 +241,7 @@ describe('MemoryTool', () => {
         expectedFsArgument,
       );
       const successMessage = `Okay, I've remembered that in global memory: "${params.fact}"`;
-      expect(result.llmContent).toBe(
-        JSON.stringify({ success: true, message: successMessage }),
-      );
+      expect(result.llmContent).toBe(successMessage);
       expect(result.returnDisplay).toBe(successMessage);
     });

@@ -271,9 +269,7 @@ describe('MemoryTool', () => {
         expectedFsArgument,
       );
       const successMessage = `Okay, I've remembered that in project memory: "${params.fact}"`;
-      expect(result.llmContent).toBe(
-        JSON.stringify({ success: true, message: successMessage }),
-      );
+      expect(result.llmContent).toBe(successMessage);
       expect(result.returnDisplay).toBe(successMessage);
     });

@@ -298,10 +294,7 @@ describe('MemoryTool', () => {
       const result = await invocation.execute(mockAbortSignal);

       expect(result.llmContent).toBe(
-        JSON.stringify({
-          success: false,
-          error: `Failed to save memory. Detail: ${underlyingError.message}`,
-        }),
+        `Error saving memory: ${underlyingError.message}`,
       );
       expect(result.returnDisplay).toBe(
         `Error saving memory: ${underlyingError.message}`,
@@ -319,6 +312,8 @@ describe('MemoryTool', () => {
       expect(result.llmContent).toContain(
         'Please specify where to save this memory',
       );
+      expect(result.llmContent).toContain('Global:');
+      expect(result.llmContent).toContain('Project:');
       expect(result.returnDisplay).toContain('Global:');
       expect(result.returnDisplay).toContain('Project:');
     });
@@ -309,7 +309,7 @@ Preview of changes to be made to GLOBAL memory:
     if (!fact || typeof fact !== 'string' || fact.trim() === '') {
       const errorMessage = 'Parameter "fact" must be a non-empty string.';
       return {
-        llmContent: JSON.stringify({ success: false, error: errorMessage }),
+        llmContent: `Error: ${errorMessage}`,
         returnDisplay: `Error: ${errorMessage}`,
       };
     }
@@ -324,10 +324,7 @@ Global: ${globalPath} (shared across all projects)
 Project: ${projectPath} (current project only)`;

       return {
-        llmContent: JSON.stringify({
-          success: false,
-          error: 'Please specify where to save this memory',
-        }),
+        llmContent: errorMessage,
         returnDisplay: errorMessage,
       };
     }
@@ -344,10 +341,7 @@ Project: ${projectPath} (current project only)`;
         await fs.writeFile(memoryFilePath, modified_content, 'utf-8');
         const successMessage = `Okay, I've updated the ${scope} memory file with your modifications.`;
         return {
-          llmContent: JSON.stringify({
-            success: true,
-            message: successMessage,
-          }),
+          llmContent: successMessage,
           returnDisplay: successMessage,
         };
       } else {
@@ -359,10 +353,7 @@ Project: ${projectPath} (current project only)`;
         });
         const successMessage = `Okay, I've remembered that in ${scope} memory: "${fact}"`;
         return {
-          llmContent: JSON.stringify({
-            success: true,
-            message: successMessage,
-          }),
+          llmContent: successMessage,
           returnDisplay: successMessage,
         };
       }
@@ -372,11 +363,9 @@ Project: ${projectPath} (current project only)`;
       console.error(
         `[MemoryTool] Error executing save_memory for fact "${fact}" in ${scope}: ${errorMessage}`,
       );

       return {
-        llmContent: JSON.stringify({
-          success: false,
-          error: `Failed to save memory. Detail: ${errorMessage}`,
-        }),
+        llmContent: `Error saving memory: ${errorMessage}`,
         returnDisplay: `Error saving memory: ${errorMessage}`,
         error: {
           message: errorMessage,
@@ -14,7 +14,7 @@ import {
   type Mock,
 } from 'vitest';
 import type { RipGrepToolParams } from './ripGrep.js';
-import { canUseRipgrep, RipGrepTool, ensureRgPath } from './ripGrep.js';
+import { RipGrepTool } from './ripGrep.js';
 import path from 'node:path';
 import fs from 'node:fs/promises';
 import os, { EOL } from 'node:os';
@@ -22,24 +22,12 @@ import type { Config } from '../config/config.js';
 import { createMockWorkspaceContext } from '../test-utils/mockWorkspaceContext.js';
 import type { ChildProcess } from 'node:child_process';
 import { spawn } from 'node:child_process';
-import { downloadRipGrep } from '@joshua.litt/get-ripgrep';
-import { fileExists } from '../utils/fileUtils.js';
+import { ensureRipgrepPath } from '../utils/ripgrepUtils.js';
 import { DEFAULT_FILE_FILTERING_OPTIONS } from '../config/constants.js';

-// Mock dependencies for canUseRipgrep
-vi.mock('@joshua.litt/get-ripgrep', () => ({
-  downloadRipGrep: vi.fn(),
-}));
-vi.mock('../utils/fileUtils.js', async (importOriginal) => {
-  const actual = await importOriginal<typeof import('../utils/fileUtils.js')>();
-  return {
-    ...actual,
-    fileExists: vi.fn(),
-  };
-});
-vi.mock('../config/storage.js', () => ({
-  Storage: {
-    getGlobalBinDir: vi.fn().mockReturnValue('/mock/bin/dir'),
-  },
+// Mock ripgrepUtils
+vi.mock('../utils/ripgrepUtils.js', () => ({
+  ensureRipgrepPath: vi.fn(),
 }));

 // Mock child_process for ripgrep calls
@@ -49,108 +37,23 @@ vi.mock('child_process', () => ({

 const mockSpawn = vi.mocked(spawn);

-describe('canUseRipgrep', () => {
-  beforeEach(() => {
-    vi.clearAllMocks();
-  });
-
-  it('should return true if ripgrep already exists', async () => {
-    (fileExists as Mock).mockResolvedValue(true);
-    const result = await canUseRipgrep();
-    expect(result).toBe(true);
-    expect(fileExists).toHaveBeenCalledWith(path.join('/mock/bin/dir', 'rg'));
-    expect(downloadRipGrep).not.toHaveBeenCalled();
-  });
-
-  it('should download ripgrep and return true if it does not exist initially', async () => {
-    (fileExists as Mock)
-      .mockResolvedValueOnce(false)
-      .mockResolvedValueOnce(true);
-    (downloadRipGrep as Mock).mockResolvedValue(undefined);
-
-    const result = await canUseRipgrep();
-
-    expect(result).toBe(true);
-    expect(fileExists).toHaveBeenCalledTimes(2);
-    expect(downloadRipGrep).toHaveBeenCalledWith('/mock/bin/dir');
-  });
-
-  it('should return false if download fails and file does not exist', async () => {
-    (fileExists as Mock).mockResolvedValue(false);
-    (downloadRipGrep as Mock).mockResolvedValue(undefined);
-
-    const result = await canUseRipgrep();
-
-    expect(result).toBe(false);
-    expect(fileExists).toHaveBeenCalledTimes(2);
-    expect(downloadRipGrep).toHaveBeenCalledWith('/mock/bin/dir');
-  });
-
-  it('should propagate errors from downloadRipGrep', async () => {
-    const error = new Error('Download failed');
-    (fileExists as Mock).mockResolvedValue(false);
-    (downloadRipGrep as Mock).mockRejectedValue(error);
-
-    await expect(canUseRipgrep()).rejects.toThrow(error);
-    expect(fileExists).toHaveBeenCalledTimes(1);
-    expect(downloadRipGrep).toHaveBeenCalledWith('/mock/bin/dir');
-  });
-});
-
-describe('ensureRgPath', () => {
-  beforeEach(() => {
-    vi.clearAllMocks();
-  });
-
-  it('should return rg path if ripgrep already exists', async () => {
-    (fileExists as Mock).mockResolvedValue(true);
-    const rgPath = await ensureRgPath();
-    expect(rgPath).toBe(path.join('/mock/bin/dir', 'rg'));
-    expect(fileExists).toHaveBeenCalledOnce();
-    expect(downloadRipGrep).not.toHaveBeenCalled();
-  });
-
-  it('should return rg path if ripgrep is downloaded successfully', async () => {
-    (fileExists as Mock)
-      .mockResolvedValueOnce(false)
-      .mockResolvedValueOnce(true);
-    (downloadRipGrep as Mock).mockResolvedValue(undefined);
-    const rgPath = await ensureRgPath();
-    expect(rgPath).toBe(path.join('/mock/bin/dir', 'rg'));
-    expect(downloadRipGrep).toHaveBeenCalledOnce();
-    expect(fileExists).toHaveBeenCalledTimes(2);
-  });
-
-  it('should throw an error if ripgrep cannot be used after download attempt', async () => {
-    (fileExists as Mock).mockResolvedValue(false);
-    (downloadRipGrep as Mock).mockResolvedValue(undefined);
-    await expect(ensureRgPath()).rejects.toThrow('Cannot use ripgrep.');
-    expect(downloadRipGrep).toHaveBeenCalledOnce();
-    expect(fileExists).toHaveBeenCalledTimes(2);
-  });
-
-  it('should propagate errors from downloadRipGrep', async () => {
-    const error = new Error('Download failed');
-    (fileExists as Mock).mockResolvedValue(false);
-    (downloadRipGrep as Mock).mockRejectedValue(error);
-
-    await expect(ensureRgPath()).rejects.toThrow(error);
-    expect(fileExists).toHaveBeenCalledTimes(1);
-    expect(downloadRipGrep).toHaveBeenCalledWith('/mock/bin/dir');
-  });
-});
-
 // Helper function to create mock spawn implementations
 function createMockSpawn(
   options: {
     outputData?: string;
     exitCode?: number;
     signal?: string;
+    onCall?: (
+      command: string,
+      args: readonly string[],
+      spawnOptions?: unknown,
+    ) => void;
   } = {},
 ) {
-  const { outputData, exitCode = 0, signal } = options;
+  const { outputData, exitCode = 0, signal, onCall } = options;

-  return () => {
+  return (command: string, args: readonly string[], spawnOptions?: unknown) => {
+    onCall?.(command, args, spawnOptions);
     const mockProcess = {
       stdout: {
         on: vi.fn(),
@@ -191,20 +94,29 @@
 describe('RipGrepTool', () => {
   let tempRootDir: string;
   let grepTool: RipGrepTool;
+  let fileExclusionsMock: { getGlobExcludes: () => string[] };
   const abortSignal = new AbortController().signal;

   const mockConfig = {
     getTargetDir: () => tempRootDir,
     getWorkspaceContext: () => createMockWorkspaceContext(tempRootDir),
+    getWorkingDir: () => tempRootDir,
     getDebugMode: () => false,
+    getUseBuiltinRipgrep: () => true,
   } as unknown as Config;

   beforeEach(async () => {
     vi.clearAllMocks();
-    (downloadRipGrep as Mock).mockResolvedValue(undefined);
-    (fileExists as Mock).mockResolvedValue(true);
-    mockSpawn.mockClear();
+    (ensureRipgrepPath as Mock).mockResolvedValue('/mock/path/to/rg');
+    mockSpawn.mockReset();
     tempRootDir = await fs.mkdtemp(path.join(os.tmpdir(), 'grep-tool-root-'));
+    fileExclusionsMock = {
+      getGlobExcludes: vi.fn().mockReturnValue([]),
+    };
+    Object.assign(mockConfig, {
+      getFileExclusions: () => fileExclusionsMock,
+      getFileFilteringOptions: () => DEFAULT_FILE_FILTERING_OPTIONS,
+    });
     grepTool = new RipGrepTool(mockConfig);

     // Create some test files and directories
@@ -242,11 +154,11 @@ describe('RipGrepTool', () => {
       expect(grepTool.validateToolParams(params)).toBeNull();
     });

-    it('should return null for valid params (pattern, path, and include)', () => {
+    it('should return null for valid params (pattern, path, and glob)', () => {
       const params: RipGrepToolParams = {
         pattern: 'hello',
         path: '.',
-        include: '*.txt',
+        glob: '*.txt',
       };
       expect(grepTool.validateToolParams(params)).toBeNull();
     });
@@ -258,9 +170,11 @@ describe('RipGrepTool', () => {
       );
     });

-    it('should return null for what would be an invalid regex pattern', () => {
+    it('should surface an error for invalid regex pattern', () => {
       const params: RipGrepToolParams = { pattern: '[[' };
-      expect(grepTool.validateToolParams(params)).toBeNull();
+      expect(grepTool.validateToolParams(params)).toContain(
+        'Invalid regular expression pattern: [[',
+      );
     });

     it('should return error if path does not exist', () => {
@@ -299,13 +213,11 @@ describe('RipGrepTool', () => {
       expect(result.llmContent).toContain(
         'Found 3 matches for pattern "world" in the workspace directory',
       );
-      expect(result.llmContent).toContain('File: fileA.txt');
-      expect(result.llmContent).toContain('L1: hello world');
-      expect(result.llmContent).toContain('L2: second line with world');
+      expect(result.llmContent).toContain('fileA.txt:1:hello world');
+      expect(result.llmContent).toContain('fileA.txt:2:second line with world');
       expect(result.llmContent).toContain(
-        `File: ${path.join('sub', 'fileC.txt')}`,
+        'sub/fileC.txt:1:another world in sub dir',
       );
-      expect(result.llmContent).toContain('L1: another world in sub dir');
       expect(result.returnDisplay).toBe('Found 3 matches');
     });
@@ -324,12 +236,33 @@ describe('RipGrepTool', () => {
       expect(result.llmContent).toContain(
         'Found 1 match for pattern "world" in path "sub"',
       );
-      expect(result.llmContent).toContain('File: fileC.txt'); // Path relative to 'sub'
-      expect(result.llmContent).toContain('L1: another world in sub dir');
+      expect(result.llmContent).toContain(
+        'fileC.txt:1:another world in sub dir',
+      );
       expect(result.returnDisplay).toBe('Found 1 match');
     });

-    it('should find matches with an include glob', async () => {
+    it('should use target directory when path is not provided', async () => {
+      mockSpawn.mockImplementationOnce(
+        createMockSpawn({
+          outputData: `fileA.txt:1:hello world${EOL}`,
+          exitCode: 0,
+          onCall: (_, args) => {
+            // Should search in the target directory (tempRootDir)
+            expect(args[args.length - 1]).toBe(tempRootDir);
+          },
+        }),
+      );
+
+      const params: RipGrepToolParams = { pattern: 'world' };
+      const invocation = grepTool.build(params);
+      const result = await invocation.execute(abortSignal);
+      expect(result.llmContent).toContain(
+        'Found 1 match for pattern "world" in the workspace directory',
+      );
+    });
+
+    it('should find matches with a glob filter', async () => {
       // Setup specific mock for this test
       mockSpawn.mockImplementationOnce(
         createMockSpawn({
@@ -338,20 +271,19 @@ describe('RipGrepTool', () => {
         }),
       );

-      const params: RipGrepToolParams = { pattern: 'hello', include: '*.js' };
+      const params: RipGrepToolParams = { pattern: 'hello', glob: '*.js' };
       const invocation = grepTool.build(params);
       const result = await invocation.execute(abortSignal);
       expect(result.llmContent).toContain(
         'Found 1 match for pattern "hello" in the workspace directory (filter: "*.js"):',
       );
-      expect(result.llmContent).toContain('File: fileB.js');
       expect(result.llmContent).toContain(
-        'L2: function baz() { return "hello"; }',
+        'fileB.js:2:function baz() { return "hello"; }',
       );
       expect(result.returnDisplay).toBe('Found 1 match');
     });

-    it('should find matches with an include glob and path', async () => {
+    it('should find matches with a glob filter and path', async () => {
       await fs.writeFile(
         path.join(tempRootDir, 'sub', 'another.js'),
         'const greeting = "hello";',
@@ -396,18 +328,115 @@ describe('RipGrepTool', () => {
       const params: RipGrepToolParams = {
         pattern: 'hello',
         path: 'sub',
-        include: '*.js',
+        glob: '*.js',
       };
       const invocation = grepTool.build(params);
       const result = await invocation.execute(abortSignal);
       expect(result.llmContent).toContain(
         'Found 1 match for pattern "hello" in path "sub" (filter: "*.js")',
       );
-      expect(result.llmContent).toContain('File: another.js');
-      expect(result.llmContent).toContain('L1: const greeting = "hello";');
+      expect(result.llmContent).toContain(
+        'another.js:1:const greeting = "hello";',
+      );
       expect(result.returnDisplay).toBe('Found 1 match');
     });

+    it('should pass .qwenignore to ripgrep when respected', async () => {
+      await fs.writeFile(
+        path.join(tempRootDir, '.qwenignore'),
+        'ignored.txt\n',
+      );
+      mockSpawn.mockImplementationOnce(
+        createMockSpawn({
+          exitCode: 1,
+          onCall: (_, args) => {
+            expect(args).toContain('--ignore-file');
+            expect(args).toContain(path.join(tempRootDir, '.qwenignore'));
+          },
+        }),
+      );
+
+      const params: RipGrepToolParams = { pattern: 'secret' };
+      const invocation = grepTool.build(params);
+      const result = await invocation.execute(abortSignal);
+      expect(result.llmContent).toContain(
+        'No matches found for pattern "secret" in the workspace directory.',
+      );
+      expect(result.returnDisplay).toBe('No matches found');
+    });
+
+    it('should include .qwenignore matches when disabled in config', async () => {
+      await fs.writeFile(path.join(tempRootDir, '.qwenignore'), 'kept.txt\n');
+      await fs.writeFile(path.join(tempRootDir, 'kept.txt'), 'keep me');
+      Object.assign(mockConfig, {
+        getFileFilteringOptions: () => ({
+          respectGitIgnore: true,
+          respectQwenIgnore: false,
+        }),
+      });
+
+      mockSpawn.mockImplementationOnce(
+        createMockSpawn({
+          outputData: `kept.txt:1:keep me${EOL}`,
+          exitCode: 0,
+          onCall: (_, args) => {
+            expect(args).not.toContain('--ignore-file');
+            expect(args).not.toContain(path.join(tempRootDir, '.qwenignore'));
+          },
+        }),
+      );
+
+      const params: RipGrepToolParams = { pattern: 'keep' };
+      const invocation = grepTool.build(params);
+      const result = await invocation.execute(abortSignal);
+      expect(result.llmContent).toContain(
+        'Found 1 match for pattern "keep" in the workspace directory:',
+      );
+      expect(result.llmContent).toContain('kept.txt:1:keep me');
+      expect(result.returnDisplay).toBe('Found 1 match');
+    });
+
+    it('should disable gitignore when configured', async () => {
+      Object.assign(mockConfig, {
+        getFileFilteringOptions: () => ({
+          respectGitIgnore: false,
+          respectQwenIgnore: true,
+        }),
+      });
+
+      mockSpawn.mockImplementationOnce(
+        createMockSpawn({
+          exitCode: 1,
+          onCall: (_, args) => {
+            expect(args).toContain('--no-ignore-vcs');
+          },
+        }),
+      );
+
+      const params: RipGrepToolParams = { pattern: 'ignored' };
+      const invocation = grepTool.build(params);
+      await invocation.execute(abortSignal);
+    });
+
+    it('should truncate llm content when exceeding maximum length', async () => {
+      const longMatch = 'fileA.txt:1:' + 'a'.repeat(25_000);
+
+      mockSpawn.mockImplementationOnce(
+        createMockSpawn({
+          outputData: `${longMatch}${EOL}`,
+          exitCode: 0,
+        }),
+      );
+
+      const params: RipGrepToolParams = { pattern: 'a+' };
+      const invocation = grepTool.build(params);
+      const result = await invocation.execute(abortSignal);
+
+      expect(String(result.llmContent).length).toBeLessThanOrEqual(20_000);
+      expect(result.llmContent).toMatch(/\[\d+ lines? truncated\] \.\.\./);
+      expect(result.returnDisplay).toContain('truncated');
+    });
+
     it('should return "No matches found" when pattern does not exist', async () => {
       // Setup specific mock for no matches
       mockSpawn.mockImplementationOnce(
@@ -425,19 +454,10 @@ describe('RipGrepTool', () => {
       expect(result.returnDisplay).toBe('No matches found');
     });

-    it('should return an error from ripgrep for invalid regex pattern', async () => {
-      mockSpawn.mockImplementationOnce(
-        createMockSpawn({
-          exitCode: 2,
-        }),
-      );
-
+    it('should throw validation error for invalid regex pattern', async () => {
       const params: RipGrepToolParams = { pattern: '[[' };
-      const invocation = grepTool.build(params);
-      const result = await invocation.execute(abortSignal);
-      expect(result.llmContent).toContain('ripgrep exited with code 2');
-      expect(result.returnDisplay).toContain(
-        'Error: ripgrep exited with code 2',
+      expect(() => grepTool.build(params)).toThrow(
+        'Invalid regular expression pattern: [[',
       );
     });
@@ -484,8 +504,7 @@ describe('RipGrepTool', () => {
       expect(result.llmContent).toContain(
         'Found 1 match for pattern "foo.*bar" in the workspace directory:',
       );
-      expect(result.llmContent).toContain('File: fileB.js');
-      expect(result.llmContent).toContain('L1: const foo = "bar";');
+      expect(result.llmContent).toContain('fileB.js:1:const foo = "bar";');
     });

     it('should be case-insensitive by default (JS fallback)', async () => {
@@ -535,11 +554,9 @@ describe('RipGrepTool', () => {
       expect(result.llmContent).toContain(
         'Found 2 matches for pattern "HELLO" in the workspace directory:',
       );
-      expect(result.llmContent).toContain('File: fileA.txt');
-      expect(result.llmContent).toContain('L1: hello world');
-      expect(result.llmContent).toContain('File: fileB.js');
+      expect(result.llmContent).toContain('fileA.txt:1:hello world');
       expect(result.llmContent).toContain(
-        'L2: function baz() { return "hello"; }',
+        'fileB.js:2:function baz() { return "hello"; }',
       );
     });
@@ -551,205 +568,22 @@ describe('RipGrepTool', () => {
     });

     it('should throw an error if ripgrep is not available', async () => {
-      // Make ensureRgPath throw
-      (fileExists as Mock).mockResolvedValue(false);
-      (downloadRipGrep as Mock).mockResolvedValue(undefined);
+      // Make ensureRipgrepBinary throw
+      (ensureRipgrepPath as Mock).mockRejectedValue(
+        new Error('Ripgrep binary not found'),
+      );

       const params: RipGrepToolParams = { pattern: 'world' };
       const invocation = grepTool.build(params);

       expect(await invocation.execute(abortSignal)).toStrictEqual({
-        llmContent: 'Error during grep search operation: Cannot use ripgrep.',
-        returnDisplay: 'Error: Cannot use ripgrep.',
+        llmContent:
+          'Error during grep search operation: Ripgrep binary not found',
+        returnDisplay: 'Error: Ripgrep binary not found',
       });
     });
   });

-  describe('multi-directory workspace', () => {
-    it('should search across all workspace directories when no path is specified', async () => {
-      // Create additional directory with test files
-      const secondDir = await fs.mkdtemp(
-        path.join(os.tmpdir(), 'grep-tool-second-'),
-      );
-      await fs.writeFile(
-        path.join(secondDir, 'other.txt'),
-        'hello from second directory\nworld in second',
-      );
-      await fs.writeFile(
-        path.join(secondDir, 'another.js'),
-        'function world() { return "test"; }',
-      );
-
-      // Create a mock config with multiple directories
-      const multiDirConfig = {
-        getTargetDir: () => tempRootDir,
-        getWorkspaceContext: () =>
-          createMockWorkspaceContext(tempRootDir, [secondDir]),
-        getDebugMode: () => false,
-      } as unknown as Config;
-
-      // Setup specific mock for this test - multi-directory search for 'world'
-      // Mock will be called twice - once for each directory
-      let callCount = 0;
-      mockSpawn.mockImplementation(() => {
-        callCount++;
-        const mockProcess = {
-          stdout: {
-            on: vi.fn(),
-            removeListener: vi.fn(),
-          },
-          stderr: {
-            on: vi.fn(),
-            removeListener: vi.fn(),
-          },
-          on: vi.fn(),
-          removeListener: vi.fn(),
-          kill: vi.fn(),
-        };
-
-        setTimeout(() => {
-          const stdoutDataHandler = mockProcess.stdout.on.mock.calls.find(
-            (call) => call[0] === 'data',
-          )?.[1];
-
-          const closeHandler = mockProcess.on.mock.calls.find(
-            (call) => call[0] === 'close',
-          )?.[1];
-
-          let outputData = '';
-          if (callCount === 1) {
-            // First directory (tempRootDir)
-            outputData =
-              [
-                'fileA.txt:1:hello world',
-                'fileA.txt:2:second line with world',
-                'sub/fileC.txt:1:another world in sub dir',
-              ].join(EOL) + EOL;
-          } else if (callCount === 2) {
-            // Second directory (secondDir)
-            outputData =
-              [
-                'other.txt:2:world in second',
-                'another.js:1:function world() { return "test"; }',
-              ].join(EOL) + EOL;
-          }
-
-          if (stdoutDataHandler && outputData) {
-            stdoutDataHandler(Buffer.from(outputData));
-          }
-
-          if (closeHandler) {
-            closeHandler(0);
-          }
-        }, 0);
-
-        return mockProcess as unknown as ChildProcess;
-      });
-
-      const multiDirGrepTool = new RipGrepTool(multiDirConfig);
-      const params: RipGrepToolParams = { pattern: 'world' };
-      const invocation = multiDirGrepTool.build(params);
-      const result = await invocation.execute(abortSignal);
-
-      // Should find matches in both directories
-      expect(result.llmContent).toContain(
-        'Found 5 matches for pattern "world"',
-      );
-
-      // Matches from first directory
-      expect(result.llmContent).toContain('fileA.txt');
-      expect(result.llmContent).toContain('L1: hello world');
-      expect(result.llmContent).toContain('L2: second line with world');
-      expect(result.llmContent).toContain('fileC.txt');
-      expect(result.llmContent).toContain('L1: another world in sub dir');
-
-      // Matches from both directories
-      expect(result.llmContent).toContain('other.txt');
-      expect(result.llmContent).toContain('L2: world in second');
-      expect(result.llmContent).toContain('another.js');
-      expect(result.llmContent).toContain('L1: function world()');
-
-      // Clean up
-      await fs.rm(secondDir, { recursive: true, force: true });
-      mockSpawn.mockClear();
-    });
-
-    it('should search only specified path within workspace directories', async () => {
-      // Create additional directory
-      const secondDir = await fs.mkdtemp(
-        path.join(os.tmpdir(), 'grep-tool-second-'),
-      );
-      await fs.mkdir(path.join(secondDir, 'sub'));
-      await fs.writeFile(
-        path.join(secondDir, 'sub', 'test.txt'),
-        'hello from second sub directory',
-      );
-
-      // Create a mock config with multiple directories
-      const multiDirConfig = {
-        getTargetDir: () => tempRootDir,
-        getWorkspaceContext: () =>
-          createMockWorkspaceContext(tempRootDir, [secondDir]),
-        getDebugMode: () => false,
-      } as unknown as Config;
-
-      // Setup specific mock for this test - searching in 'sub' should only return matches from that directory
-      mockSpawn.mockImplementationOnce(() => {
-        const mockProcess = {
-          stdout: {
-            on: vi.fn(),
-            removeListener: vi.fn(),
-          },
-          stderr: {
-            on: vi.fn(),
-            removeListener: vi.fn(),
-          },
-          on: vi.fn(),
-          removeListener: vi.fn(),
-          kill: vi.fn(),
-        };
-
-        setTimeout(() => {
-          const onData = mockProcess.stdout.on.mock.calls.find(
-            (call) => call[0] === 'data',
-          )?.[1];
-          const onClose = mockProcess.on.mock.calls.find(
-            (call) => call[0] === 'close',
-          )?.[1];
-
-          if (onData) {
-            onData(Buffer.from(`fileC.txt:1:another world in sub dir${EOL}`));
-          }
-          if (onClose) {
-            onClose(0);
-          }
-        }, 0);
-
-        return mockProcess as unknown as ChildProcess;
-      });
-
-      const multiDirGrepTool = new RipGrepTool(multiDirConfig);
-
-      // Search only in the 'sub' directory of the first workspace
-      const params: RipGrepToolParams = { pattern: 'world', path: 'sub' };
-      const invocation = multiDirGrepTool.build(params);
-      const result = await invocation.execute(abortSignal);
-
-      // Should only find matches in the specified sub directory
-      expect(result.llmContent).toContain(
-        'Found 1 match for pattern "world" in path "sub"',
-      );
-      expect(result.llmContent).toContain('File: fileC.txt');
-      expect(result.llmContent).toContain('L1: another world in sub dir');
-
-      // Should not contain matches from second directory
-      expect(result.llmContent).not.toContain('test.txt');
-
-      // Clean up
-      await fs.rm(secondDir, { recursive: true, force: true });
-    });
-  });
-
   describe('abort signal handling', () => {
     it('should handle AbortSignal during search', async () => {
       const controller = new AbortController();
@@ -1165,8 +999,8 @@ describe('RipGrepTool', () => {
     });
   });

-  describe('include pattern filtering', () => {
-    it('should handle multiple file extensions in include pattern', async () => {
+  describe('glob pattern filtering', () => {
+    it('should handle multiple file extensions in glob pattern', async () => {
       await fs.writeFile(
         path.join(tempRootDir, 'test.ts'),
         'typescript content',
@@ -1178,7 +1012,7 @@ describe('RipGrepTool', () => {
       );
       await fs.writeFile(path.join(tempRootDir, 'test.txt'), 'text content');

-      // Setup specific mock for this test - include pattern should filter to only ts/tsx files
+      // Setup specific mock for this test - glob pattern should filter to only ts/tsx files
       mockSpawn.mockImplementationOnce(() => {
         const mockProcess = {
           stdout: {
@@ -1219,7 +1053,7 @@ describe('RipGrepTool', () => {

       const params: RipGrepToolParams = {
         pattern: 'content',
-        include: '*.{ts,tsx}',
+        glob: '*.{ts,tsx}',
       };
       const invocation = grepTool.build(params);
       const result = await invocation.execute(abortSignal);
@@ -1230,7 +1064,7 @@ describe('RipGrepTool', () => {
       expect(result.llmContent).not.toContain('test.txt');
     });

-    it('should handle directory patterns in include', async () => {
+    it('should handle directory patterns in glob', async () => {
       await fs.mkdir(path.join(tempRootDir, 'src'), { recursive: true });
       await fs.writeFile(
         path.join(tempRootDir, 'src', 'main.ts'),
@@ -1238,7 +1072,7 @@ describe('RipGrepTool', () => {
       );
       await fs.writeFile(path.join(tempRootDir, 'other.ts'), 'other code');

-      // Setup specific mock for this test - include pattern should filter to only src/** files
+      // Setup specific mock for this test - glob pattern should filter to only src/** files
      mockSpawn.mockImplementationOnce(() => {
         const mockProcess = {
           stdout: {
@@ -1275,7 +1109,7 @@ describe('RipGrepTool', () => {

       const params: RipGrepToolParams = {
         pattern: 'code',
-        include: 'src/**',
+        glob: 'src/**',
       };
       const invocation = grepTool.build(params);
       const result = await invocation.execute(abortSignal);
@@ -1292,10 +1126,10 @@ describe('RipGrepTool', () => {
       expect(invocation.getDescription()).toBe("'testPattern'");
     });

-    it('should generate correct description with pattern and include', () => {
+    it('should generate correct description with pattern and glob', () => {
       const params: RipGrepToolParams = {
         pattern: 'testPattern',
-        include: '*.ts',
+        glob: '*.ts',
       };
       const invocation = grepTool.build(params);
       expect(invocation.getDescription()).toBe("'testPattern' in *.ts");
@@ -1314,29 +1148,18 @@ describe('RipGrepTool', () => {
       expect(invocation.getDescription()).toContain(path.join('src', 'app'));
     });

-    it('should indicate searching across all workspace directories when no path specified', () => {
-      // Create a mock config with multiple directories
-      const multiDirConfig = {
-        getTargetDir: () => tempRootDir,
-        getWorkspaceContext: () =>
-          createMockWorkspaceContext(tempRootDir, ['/another/dir']),
-        getDebugMode: () => false,
-      } as unknown as Config;
-
-      const multiDirGrepTool = new RipGrepTool(multiDirConfig);
+    it('should generate correct description with default search path', () => {
       const params: RipGrepToolParams = { pattern: 'testPattern' };
-      const invocation = multiDirGrepTool.build(params);
-      expect(invocation.getDescription()).toBe(
-        "'testPattern' across all workspace directories",
-      );
+      const invocation = grepTool.build(params);
+      expect(invocation.getDescription()).toBe("'testPattern'");
     });

-    it('should generate correct description with pattern, include, and path', async () => {
+    it('should generate correct description with pattern, glob, and path', async () => {
       const dirPath = path.join(tempRootDir, 'src', 'app');
       await fs.mkdir(dirPath, { recursive: true });
       const params: RipGrepToolParams = {
         pattern: 'testPattern',
-        include: '*.ts',
+        glob: '*.ts',
       };
       const invocation = grepTool.build(params);
|
||||
import path from 'node:path';
|
||||
import { EOL } from 'node:os';
|
||||
import { spawn } from 'node:child_process';
|
||||
import { downloadRipGrep } from '@joshua.litt/get-ripgrep';
|
||||
import type { ToolInvocation, ToolResult } from './tools.js';
|
||||
import { BaseDeclarativeTool, BaseToolInvocation, Kind } from './tools.js';
|
||||
import { SchemaValidator } from '../utils/schemaValidator.js';
|
||||
import { ToolNames } from './tool-names.js';
|
||||
import { makeRelative, shortenPath } from '../utils/paths.js';
|
||||
import { getErrorMessage, isNodeError } from '../utils/errors.js';
|
||||
import type { Config } from '../config/config.js';
|
||||
import { fileExists } from '../utils/fileUtils.js';
|
||||
import { Storage } from '../config/storage.js';
|
||||
import { ensureRipgrepPath } from '../utils/ripgrepUtils.js';
|
||||
import { SchemaValidator } from '../utils/schemaValidator.js';
|
||||
import type { FileFilteringOptions } from '../config/constants.js';
|
||||
import { DEFAULT_FILE_FILTERING_OPTIONS } from '../config/constants.js';
|
||||
|
||||
const DEFAULT_TOTAL_MAX_MATCHES = 20000;
|
||||
|
||||
function getRgPath(): string {
|
||||
return path.join(Storage.getGlobalBinDir(), 'rg');
|
||||
}
|
||||
const MAX_LLM_CONTENT_LENGTH = 20_000;
|
||||
|
||||
/**
|
||||
* Checks if `rg` exists, if not then attempt to download it.
|
||||
*/
|
||||
export async function canUseRipgrep(): Promise<boolean> {
|
||||
if (await fileExists(getRgPath())) {
|
||||
return true;
|
||||
}
|
||||
|
||||
await downloadRipGrep(Storage.getGlobalBinDir());
|
||||
return await fileExists(getRgPath());
|
||||
}
|
||||
|
||||
/**
|
||||
* Ensures `rg` is downloaded, or throws.
|
||||
*/
|
||||
export async function ensureRgPath(): Promise<string> {
|
||||
if (await canUseRipgrep()) {
|
||||
return getRgPath();
|
||||
}
|
||||
throw new Error('Cannot use ripgrep.');
|
||||
}
|
||||
|
||||
/**
|
||||
* Parameters for the GrepTool
|
||||
* Parameters for the GrepTool (Simplified)
|
||||
*/
|
||||
export interface RipGrepToolParams {
|
||||
/**
|
||||
@@ -61,18 +36,14 @@ export interface RipGrepToolParams {
|
||||
path?: string;
|
||||
|
||||
/**
|
||||
* File pattern to include in the search (e.g. "*.js", "*.{ts,tsx}")
|
||||
* Glob pattern to filter files (e.g. "*.js", "*.{ts,tsx}")
|
||||
*/
|
||||
include?: string;
|
||||
}
|
||||
glob?: string;
|
||||
|
||||
/**
|
||||
* Result object for a single grep match
|
||||
*/
|
||||
interface GrepMatch {
|
||||
filePath: string;
|
||||
lineNumber: number;
|
||||
line: string;
|
||||
/**
|
||||
* Maximum number of matching lines to return (optional, shows all if not specified)
|
||||
*/
|
||||
limit?: number;
|
||||
}
|
||||
|
||||
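For reference, a hedged sketch of how the simplified parameters combine; only the field names come from the interface above, and the values are illustrative:

// Illustrative only: combining the simplified RipGrepToolParams fields.
const exampleParams: RipGrepToolParams = {
  pattern: 'hello.*world', // regular expression to search for
  path: 'src',             // optional directory, relative to the target dir
  glob: '*.{ts,tsx}',      // optional file filter (previously named `include`)
  limit: 100,              // optional cap on matching lines returned
};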
class GrepToolInvocation extends BaseToolInvocation<
|
||||
@@ -89,18 +60,15 @@ class GrepToolInvocation extends BaseToolInvocation<
|
||||
/**
|
||||
* Checks if a path is within the root directory and resolves it.
|
||||
* @param relativePath Path relative to the root directory (or undefined for root).
|
||||
* @returns The absolute path if valid and exists, or null if no path specified (to search all directories).
|
||||
* @returns The absolute path to search within.
|
||||
* @throws {Error} If path is outside root, doesn't exist, or isn't a directory.
|
||||
*/
|
||||
private resolveAndValidatePath(relativePath?: string): string | null {
|
||||
// If no path specified, return null to indicate searching all workspace directories
|
||||
if (!relativePath) {
|
||||
return null;
|
||||
}
|
||||
private resolveAndValidatePath(relativePath?: string): string {
|
||||
const targetDir = this.config.getTargetDir();
|
||||
const targetPath = relativePath
|
||||
? path.resolve(targetDir, relativePath)
|
||||
: targetDir;
|
||||
|
||||
const targetPath = path.resolve(this.config.getTargetDir(), relativePath);
|
||||
|
||||
// Security Check: Ensure the resolved path is within workspace boundaries
|
||||
const workspaceContext = this.config.getWorkspaceContext();
|
||||
if (!workspaceContext.isPathWithinWorkspace(targetPath)) {
|
||||
const directories = workspaceContext.getDirectories();
|
||||
@@ -109,7 +77,10 @@ class GrepToolInvocation extends BaseToolInvocation<
|
||||
);
|
||||
}
|
||||
|
||||
// Check existence and type after resolving
|
||||
return this.ensureDirectory(targetPath);
|
||||
}
|
||||
|
||||
private ensureDirectory(targetPath: string): string {
|
||||
try {
|
||||
const stats = fs.statSync(targetPath);
|
||||
if (!stats.isDirectory()) {
|
||||
@@ -129,104 +100,81 @@ class GrepToolInvocation extends BaseToolInvocation<
|
||||
|
||||
async execute(signal: AbortSignal): Promise<ToolResult> {
|
||||
try {
|
||||
const workspaceContext = this.config.getWorkspaceContext();
|
||||
const searchDirAbs = this.resolveAndValidatePath(this.params.path);
|
||||
const searchDirDisplay = this.params.path || '.';
|
||||
|
||||
// Determine which directories to search
|
||||
let searchDirectories: readonly string[];
|
||||
if (searchDirAbs === null) {
|
||||
// No path specified - search all workspace directories
|
||||
searchDirectories = workspaceContext.getDirectories();
|
||||
} else {
|
||||
// Specific path provided - search only that directory
|
||||
searchDirectories = [searchDirAbs];
|
||||
}
|
||||
// Get raw ripgrep output
|
||||
const rawOutput = await this.performRipgrepSearch({
|
||||
pattern: this.params.pattern,
|
||||
path: searchDirAbs,
|
||||
glob: this.params.glob,
|
||||
signal,
|
||||
});
|
||||
|
||||
let allMatches: GrepMatch[] = [];
|
||||
const totalMaxMatches = DEFAULT_TOTAL_MAX_MATCHES;
|
||||
// Build search description
|
||||
const searchLocationDescription = this.params.path
|
||||
? `in path "${searchDirDisplay}"`
|
||||
: `in the workspace directory`;
|
||||
|
||||
if (this.config.getDebugMode()) {
|
||||
console.log(`[GrepTool] Total result limit: ${totalMaxMatches}`);
|
||||
}
|
||||
const filterDescription = this.params.glob
|
||||
? ` (filter: "${this.params.glob}")`
|
||||
: '';
|
||||
|
||||
for (const searchDir of searchDirectories) {
|
||||
const searchResult = await this.performRipgrepSearch({
|
||||
pattern: this.params.pattern,
|
||||
path: searchDir,
|
||||
include: this.params.include,
|
||||
signal,
|
||||
});
|
||||
|
||||
if (searchDirectories.length > 1) {
|
||||
const dirName = path.basename(searchDir);
|
||||
searchResult.forEach((match) => {
|
||||
match.filePath = path.join(dirName, match.filePath);
|
||||
});
|
||||
}
|
||||
|
||||
allMatches = allMatches.concat(searchResult);
|
||||
|
||||
if (allMatches.length >= totalMaxMatches) {
|
||||
allMatches = allMatches.slice(0, totalMaxMatches);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
let searchLocationDescription: string;
|
||||
if (searchDirAbs === null) {
|
||||
const numDirs = workspaceContext.getDirectories().length;
|
||||
searchLocationDescription =
|
||||
numDirs > 1
|
||||
? `across ${numDirs} workspace directories`
|
||||
: `in the workspace directory`;
|
||||
} else {
|
||||
searchLocationDescription = `in path "${searchDirDisplay}"`;
|
||||
}
|
||||
|
||||
if (allMatches.length === 0) {
|
||||
const noMatchMsg = `No matches found for pattern "${this.params.pattern}" ${searchLocationDescription}${this.params.include ? ` (filter: "${this.params.include}")` : ''}.`;
|
||||
// Check if we have any matches
|
||||
if (!rawOutput.trim()) {
|
||||
const noMatchMsg = `No matches found for pattern "${this.params.pattern}" ${searchLocationDescription}${filterDescription}.`;
|
||||
return { llmContent: noMatchMsg, returnDisplay: `No matches found` };
|
||||
}
|
||||
|
||||
const wasTruncated = allMatches.length >= totalMaxMatches;
|
||||
// Split into lines and count total matches
|
||||
const allLines = rawOutput.split(EOL).filter((line) => line.trim());
|
||||
const totalMatches = allLines.length;
|
||||
const matchTerm = totalMatches === 1 ? 'match' : 'matches';
|
||||
|
||||
const matchesByFile = allMatches.reduce(
|
||||
(acc, match) => {
|
||||
const fileKey = match.filePath;
|
||||
if (!acc[fileKey]) {
|
||||
acc[fileKey] = [];
|
||||
}
|
||||
acc[fileKey].push(match);
|
||||
acc[fileKey].sort((a, b) => a.lineNumber - b.lineNumber);
|
||||
return acc;
|
||||
},
|
||||
{} as Record<string, GrepMatch[]>,
|
||||
);
|
||||
// Build header early to calculate available space
|
||||
const header = `Found ${totalMatches} ${matchTerm} for pattern "${this.params.pattern}" ${searchLocationDescription}${filterDescription}:\n---\n`;
|
||||
const maxTruncationNoticeLength = 100; // "[... N more matches truncated]"
|
||||
const maxGrepOutputLength =
|
||||
MAX_LLM_CONTENT_LENGTH - header.length - maxTruncationNoticeLength;
|
||||
|
||||
const matchCount = allMatches.length;
|
||||
const matchTerm = matchCount === 1 ? 'match' : 'matches';
|
||||
|
||||
let llmContent = `Found ${matchCount} ${matchTerm} for pattern "${this.params.pattern}" ${searchLocationDescription}${this.params.include ? ` (filter: "${this.params.include}")` : ''}`;
|
||||
|
||||
if (wasTruncated) {
|
||||
llmContent += ` (results limited to ${totalMaxMatches} matches for performance)`;
|
||||
// Apply line limit first (if specified)
|
||||
let truncatedByLineLimit = false;
|
||||
let linesToInclude = allLines;
|
||||
if (
|
||||
this.params.limit !== undefined &&
|
||||
allLines.length > this.params.limit
|
||||
) {
|
||||
linesToInclude = allLines.slice(0, this.params.limit);
|
||||
truncatedByLineLimit = true;
|
||||
}
|
||||
|
||||
llmContent += `:\n---\n`;
|
||||
// Join lines back into grep output
|
||||
let grepOutput = linesToInclude.join(EOL);
|
||||
|
||||
for (const filePath in matchesByFile) {
|
||||
llmContent += `File: ${filePath}\n`;
|
||||
matchesByFile[filePath].forEach((match) => {
|
||||
const trimmedLine = match.line.trim();
|
||||
llmContent += `L${match.lineNumber}: ${trimmedLine}\n`;
|
||||
});
|
||||
llmContent += '---\n';
|
||||
// Apply character limit as safety net
|
||||
let truncatedByCharLimit = false;
|
||||
if (grepOutput.length > maxGrepOutputLength) {
|
||||
grepOutput = grepOutput.slice(0, maxGrepOutputLength) + '...';
|
||||
truncatedByCharLimit = true;
|
||||
}
|
||||
|
||||
let displayMessage = `Found ${matchCount} ${matchTerm}`;
|
||||
if (wasTruncated) {
|
||||
displayMessage += ` (limited)`;
|
||||
// Count how many lines we actually included after character truncation
|
||||
const finalLines = grepOutput.split(EOL).filter((line) => line.trim());
|
||||
const includedLines = finalLines.length;
|
||||
|
||||
// Build result
|
||||
let llmContent = header + grepOutput;
|
||||
|
||||
// Add truncation notice if needed
|
||||
if (truncatedByLineLimit || truncatedByCharLimit) {
|
||||
const omittedMatches = totalMatches - includedLines;
|
||||
llmContent += ` [${omittedMatches} ${omittedMatches === 1 ? 'line' : 'lines'} truncated] ...`;
|
||||
}
|
||||
|
||||
// Build display message (show real count, not truncated)
|
||||
let displayMessage = `Found ${totalMatches} ${matchTerm}`;
|
||||
if (truncatedByLineLimit || truncatedByCharLimit) {
|
||||
displayMessage += ` (truncated)`;
|
||||
}
|
||||
|
||||
return {
|
||||
@@ -243,53 +191,15 @@ class GrepToolInvocation extends BaseToolInvocation<
|
||||
}
|
||||
}
|
||||
|
||||
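// For illustration only, not part of the diff: the truncation pipeline in
// execute() above, distilled into a standalone helper. It assumes the same
// EOL and MAX_LLM_CONTENT_LENGTH values imported by this file; the helper
// name itself is invented.
function truncateGrepOutput(
  rawOutput: string,
  limit: number | undefined,
  header: string,
): { body: string; truncated: boolean } {
  const lines = rawOutput.split(EOL).filter((l) => l.trim());
  const kept = limit !== undefined ? lines.slice(0, limit) : lines;
  // Reserve ~100 characters for the "[N lines truncated]" notice.
  const budget = MAX_LLM_CONTENT_LENGTH - header.length - 100;
  let body = kept.join(EOL);
  let truncated = kept.length < lines.length;
  if (body.length > budget) {
    body = body.slice(0, budget) + '...';
    truncated = true;
  }
  return { body, truncated };
}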
  private parseRipgrepOutput(output: string, basePath: string): GrepMatch[] {
    const results: GrepMatch[] = [];
    if (!output) return results;

    const lines = output.split(EOL);

    for (const line of lines) {
      if (!line.trim()) continue;

      const firstColonIndex = line.indexOf(':');
      if (firstColonIndex === -1) continue;

      const secondColonIndex = line.indexOf(':', firstColonIndex + 1);
      if (secondColonIndex === -1) continue;

      const filePathRaw = line.substring(0, firstColonIndex);
      const lineNumberStr = line.substring(
        firstColonIndex + 1,
        secondColonIndex,
      );
      const lineContent = line.substring(secondColonIndex + 1);

      const lineNumber = parseInt(lineNumberStr, 10);

      if (!isNaN(lineNumber)) {
        const absoluteFilePath = path.resolve(basePath, filePathRaw);
        const relativeFilePath = path.relative(basePath, absoluteFilePath);

        results.push({
          filePath: relativeFilePath || path.basename(absoluteFilePath),
          lineNumber,
          line: lineContent,
        });
      }
    }
    return results;
  }
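// For illustration, a round-trip through parseRipgrepOutput with invented
// values: given basePath '/repo', the raw line
//   src/index.ts:42:const x = 1;
// parses to { filePath: 'src/index.ts', lineNumber: 42, line: 'const x = 1;' },
// while lines without a file:line prefix (e.g. ripgrep warnings) are skipped.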
  private async performRipgrepSearch(options: {
    pattern: string;
    path: string;
    include?: string;
    glob?: string;
    signal: AbortSignal;
  }): Promise<GrepMatch[]> {
    const { pattern, path: absolutePath, include } = options;
  }): Promise<string> {
    const { pattern, path: absolutePath, glob } = options;

    const rgArgs = [
    const rgArgs: string[] = [
      '--line-number',
      '--no-heading',
      '--with-filename',
@@ -298,29 +208,34 @@ class GrepToolInvocation extends BaseToolInvocation<
      pattern,
    ];

    if (include) {
      rgArgs.push('--glob', include);
    // Add file exclusions from .gitignore and .qwenignore
    const filteringOptions = this.getFileFilteringOptions();
    if (!filteringOptions.respectGitIgnore) {
      rgArgs.push('--no-ignore-vcs');
    }

    const excludes = [
      '.git',
      'node_modules',
      'bower_components',
      '*.log',
      '*.tmp',
      'build',
      'dist',
      'coverage',
    ];
    excludes.forEach((exclude) => {
      rgArgs.push('--glob', `!${exclude}`);
    });
    if (filteringOptions.respectQwenIgnore) {
      const qwenIgnorePath = path.join(
        this.config.getTargetDir(),
        '.qwenignore',
      );
      if (fs.existsSync(qwenIgnorePath)) {
        rgArgs.push('--ignore-file', qwenIgnorePath);
      }
    }

    // Add glob pattern if provided
    if (glob) {
      rgArgs.push('--glob', glob);
    }

    rgArgs.push('--threads', '4');
    rgArgs.push(absolutePath);

    try {
      const rgPath = await ensureRgPath();
      const rgPath = this.config.getUseBuiltinRipgrep()
        ? await ensureRipgrepPath()
        : 'rg';
      const output = await new Promise<string>((resolve, reject) => {
        const child = spawn(rgPath, rgArgs, {
          windowsHide: true,
@@ -342,11 +257,7 @@ class GrepToolInvocation extends BaseToolInvocation<

        child.on('error', (err) => {
          options.signal.removeEventListener('abort', cleanup);
          reject(
            new Error(
              `Failed to start ripgrep: ${err.message}. Please ensure @lvce-editor/ripgrep is properly installed.`,
            ),
          );
          reject(new Error(`Failed to start ripgrep: ${err.message}.`));
        });

        child.on('close', (code) => {
@@ -366,22 +277,33 @@ class GrepToolInvocation extends BaseToolInvocation<
        });
      });

      return this.parseRipgrepOutput(output, absolutePath);
      return output;
    } catch (error: unknown) {
      console.error(`GrepLogic: ripgrep failed: ${getErrorMessage(error)}`);
      throw error;
    }
  }

  private getFileFilteringOptions(): FileFilteringOptions {
    const options = this.config.getFileFilteringOptions?.();
    return {
      respectGitIgnore:
        options?.respectGitIgnore ??
        DEFAULT_FILE_FILTERING_OPTIONS.respectGitIgnore,
      respectQwenIgnore:
        options?.respectQwenIgnore ??
        DEFAULT_FILE_FILTERING_OPTIONS.respectQwenIgnore,
    };
  }

  /**
   * Gets a description of the grep operation
   * @param params Parameters for the grep operation
   * @returns A string describing the grep
   */
  getDescription(): string {
    let description = `'${this.params.pattern}'`;
    if (this.params.include) {
      description += ` in ${this.params.include}`;
    if (this.params.glob) {
      description += ` in ${this.params.glob}`;
    }
    if (this.params.path) {
      const resolvedPath = path.resolve(
@@ -413,36 +335,41 @@ class GrepToolInvocation extends BaseToolInvocation<
  }

/**
 * Implementation of the Grep tool logic (moved from CLI)
 * Implementation of the Grep tool logic
 */
export class RipGrepTool extends BaseDeclarativeTool<
  RipGrepToolParams,
  ToolResult
> {
  static readonly Name = 'search_file_content';
  static readonly Name = ToolNames.GREP;

  constructor(private readonly config: Config) {
    super(
      RipGrepTool.Name,
      'SearchText',
      'Searches for a regular expression pattern within the content of files in a specified directory (or current working directory). Can filter files by a glob pattern. Returns the lines containing matches, along with their file paths and line numbers. Total results limited to 20,000 matches like VSCode.',
      'Grep',
      'A powerful search tool built on ripgrep\n\n Usage:\n - ALWAYS use Grep for search tasks. NEVER invoke `grep` or `rg` as a Bash command. The Grep tool has been optimized for correct permissions and access.\n - Supports full regex syntax (e.g., "log.*Error", "function\\s+\\w+")\n - Filter files with glob parameter (e.g., "*.js", "**/*.tsx")\n - Use Task tool for open-ended searches requiring multiple rounds\n - Pattern syntax: Uses ripgrep (not grep) - special regex characters need escaping (use `interface\\{\\}` to find `interface{}` in Go code)\n',
      Kind.Search,
      {
        properties: {
          pattern: {
            description:
              "The regular expression (regex) pattern to search for within file contents (e.g., 'function\\s+myFunction', 'import\\s+\\{.*\\}\\s+from\\s+.*').",
            type: 'string',
            description:
              'The regular expression pattern to search for in file contents',
          },
          glob: {
            type: 'string',
            description:
              'Glob pattern to filter files (e.g. "*.js", "*.{ts,tsx}") - maps to rg --glob',
          },
          path: {
            description:
              'Optional: The absolute path to the directory to search within. If omitted, searches the current working directory.',
            type: 'string',
            description:
              'File or directory to search in (rg PATH). Defaults to current working directory.',
          },
          include: {
          limit: {
            type: 'number',
            description:
              "Optional: A glob pattern to filter which files are searched (e.g., '*.js', '*.{ts,tsx}', 'src/**'). If omitted, searches all files (respecting potential global ignores).",
            type: 'string',
              'Limit output to first N lines/entries. Optional - shows all matches if not specified.',
          },
        },
        required: ['pattern'],
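// For illustration, an invocation payload that satisfies the new schema
// (all values invented). Only `pattern` is required; `include` has been
// replaced by `glob`, and `limit` caps the returned lines.
const exampleParams: RipGrepToolParams = {
  pattern: 'function\\s+\\w+',
  glob: '*.{ts,tsx}',
  path: '/repo/src',
  limit: 100,
};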
@@ -454,13 +381,13 @@ export class RipGrepTool extends BaseDeclarativeTool<
  /**
   * Checks if a path is within the root directory and resolves it.
   * @param relativePath Path relative to the root directory (or undefined for root).
   * @returns The absolute path if valid and exists, or null if no path specified (to search all directories).
   * @returns The absolute path to search within.
   * @throws {Error} If path is outside root, doesn't exist, or isn't a directory.
   */
  private resolveAndValidatePath(relativePath?: string): string | null {
    // If no path specified, return null to indicate searching all workspace directories
  private resolveAndValidatePath(relativePath?: string): string {
    // If no path specified, search within the workspace root directory
    if (!relativePath) {
      return null;
      return this.config.getTargetDir();
    }

    const targetPath = path.resolve(this.config.getTargetDir(), relativePath);
@@ -497,7 +424,9 @@ export class RipGrepTool extends BaseDeclarativeTool<
   * @param params Parameters to validate
   * @returns An error message string if invalid, null otherwise
   */
  override validateToolParams(params: RipGrepToolParams): string | null {
  protected override validateToolParamValues(
    params: RipGrepToolParams,
  ): string | null {
    const errors = SchemaValidator.validate(
      this.schema.parametersJsonSchema,
      params,
@@ -506,6 +435,13 @@ export class RipGrepTool extends BaseDeclarativeTool<
      return errors;
    }

    // Validate pattern is a valid regex
    try {
      new RegExp(params.pattern);
    } catch (error) {
      return `Invalid regular expression pattern: ${params.pattern}. Error: ${getErrorMessage(error)}`;
    }

    // Only validate path if one is provided
    if (params.path) {
      try {

@@ -141,7 +141,12 @@ describe('TodoWriteTool', () => {
      const invocation = tool.build(params);
      const result = await invocation.execute(mockAbortSignal);

      expect(result.llmContent).toContain('success');
      expect(result.llmContent).toContain(
        'Todos have been modified successfully',
      );
      expect(result.llmContent).toContain('<system-reminder>');
      expect(result.llmContent).toContain('Your todo list has changed');
      expect(result.llmContent).toContain(JSON.stringify(params.todos));
      expect(result.returnDisplay).toEqual({
        type: 'todo_list',
        todos: [
@@ -178,7 +183,12 @@ describe('TodoWriteTool', () => {
      const invocation = tool.build(params);
      const result = await invocation.execute(mockAbortSignal);

      expect(result.llmContent).toContain('success');
      expect(result.llmContent).toContain(
        'Todos have been modified successfully',
      );
      expect(result.llmContent).toContain('<system-reminder>');
      expect(result.llmContent).toContain('Your todo list has changed');
      expect(result.llmContent).toContain(JSON.stringify(params.todos));
      expect(result.returnDisplay).toEqual({
        type: 'todo_list',
        todos: [
@@ -208,7 +218,10 @@ describe('TodoWriteTool', () => {
      const invocation = tool.build(params);
      const result = await invocation.execute(mockAbortSignal);

      expect(result.llmContent).toContain('"success":false');
      expect(result.llmContent).toContain('Failed to modify todos');
      expect(result.llmContent).toContain('<system-reminder>');
      expect(result.llmContent).toContain('Todo list modification failed');
      expect(result.llmContent).toContain('Write failed');
      expect(result.returnDisplay).toContain('Error writing todos');
    });

@@ -223,7 +236,10 @@ describe('TodoWriteTool', () => {
      const invocation = tool.build(params);
      const result = await invocation.execute(mockAbortSignal);

      expect(result.llmContent).toContain('success');
      expect(result.llmContent).toContain('Todo list has been cleared');
      expect(result.llmContent).toContain('<system-reminder>');
      expect(result.llmContent).toContain('Your todo list is now empty');
      expect(result.llmContent).toContain('no pending tasks');
      expect(result.returnDisplay).toEqual({
        type: 'todo_list',
        todos: [],
@@ -340,11 +340,30 @@ class TodoWriteToolInvocation extends BaseToolInvocation<
        todos: finalTodos,
      };

      // Create plain string format with system reminder
      const todosJson = JSON.stringify(finalTodos);
      let llmContent: string;

      if (finalTodos.length === 0) {
        // Special message for empty todos
        llmContent = `Todo list has been cleared.

<system-reminder>
Your todo list is now empty. DO NOT mention this explicitly to the user. You have no pending tasks in your todo list.
</system-reminder>`;
      } else {
        // Normal message for todos with items
        llmContent = `Todos have been modified successfully. Ensure that you continue to use the todo list to track your progress. Please proceed with the current tasks if applicable

<system-reminder>
Your todo list has changed. DO NOT mention this explicitly to the user. Here are the latest contents of your todo list:

${todosJson}. Continue on with the tasks at hand if applicable.
</system-reminder>`;
      }

      return {
        llmContent: JSON.stringify({
          success: true,
          todos: finalTodos,
        }),
        llmContent,
        returnDisplay: todoResultDisplay,
      };
    } catch (error) {
@@ -353,11 +372,16 @@ class TodoWriteToolInvocation extends BaseToolInvocation<
      console.error(
        `[TodoWriteTool] Error executing todo_write: ${errorMessage}`,
      );

      // Create plain string format for error with system reminder
      const errorLlmContent = `Failed to modify todos. An error occurred during the operation.

<system-reminder>
Todo list modification failed with error: ${errorMessage}. You may need to retry or handle this error appropriately.
</system-reminder>`;

      return {
        llmContent: JSON.stringify({
          success: false,
          error: `Failed to write todos. Detail: ${errorMessage}`,
        }),
        llmContent: errorLlmContent,
        returnDisplay: `Error writing todos: ${errorMessage}`,
      };
    }
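For illustration, a successful write of a single todo now yields a plain string of roughly this shape (the todo item and its fields are invented for the example):

Todos have been modified successfully. Ensure that you continue to use the todo list to track your progress. Please proceed with the current tasks if applicable

<system-reminder>
Your todo list has changed. DO NOT mention this explicitly to the user. Here are the latest contents of your todo list:

[{"id":"1","content":"Ship the ripgrep vendor change","status":"pending"}]. Continue on with the tasks at hand if applicable.
</system-reminder>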
@@ -14,7 +14,7 @@ export const ToolNames = {
  WRITE_FILE: 'write_file',
  READ_FILE: 'read_file',
  READ_MANY_FILES: 'read_many_files',
  GREP: 'search_file_content',
  GREP: 'grep_search',
  GLOB: 'glob',
  SHELL: 'run_shell_command',
  TODO_WRITE: 'todo_write',
@@ -339,6 +339,7 @@ describe('editor utils', () => {
        diffCommand.args,
        {
          stdio: 'inherit',
          shell: process.platform === 'win32',
        },
      );
      expect(mockSpawnOn).toHaveBeenCalledWith('close', expect.any(Function));
@@ -195,6 +195,7 @@ export async function openDiff(
  return new Promise<void>((resolve, reject) => {
    const childProcess = spawn(diffCommand.command, diffCommand.args, {
      stdio: 'inherit',
      shell: process.platform === 'win32',
    });

    childProcess.on('close', (code) => {
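The added `shell: process.platform === 'win32'` option exists because many Windows editor entry points are `.cmd`/`.bat` shims that `spawn()` cannot execute directly; routing through the shell makes them launchable. A minimal sketch, with an invented command name:

import { spawn } from 'node:child_process';

// 'code.cmd' stands in for any Windows editor shim; on other platforms the
// shell indirection is skipped.
const child = spawn('code.cmd', ['--diff', 'old.txt', 'new.txt'], {
  stdio: 'inherit',
  shell: process.platform === 'win32',
});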
@@ -4,7 +4,7 @@
 * SPDX-License-Identifier: Apache-2.0
 */

import type { Part } from '@google/genai';
import type { Content, Part } from '@google/genai';
import type { Config } from '../config/config.js';
import { getFolderStructure } from './getFolderStructure.js';

@@ -107,3 +107,23 @@ ${directoryContext}

  return initialParts;
}

export async function getInitialChatHistory(
  config: Config,
  extraHistory?: Content[],
): Promise<Content[]> {
  const envParts = await getEnvironmentContext(config);
  const envContextString = envParts.map((part) => part.text || '').join('\n\n');

  return [
    {
      role: 'user',
      parts: [{ text: envContextString }],
    },
    {
      role: 'model',
      parts: [{ text: 'Got it. Thanks for the context!' }],
    },
    ...(extraHistory ?? []),
  ];
}
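A plausible call pattern for the new helper (the config instance and the extra turn are assumptions for the example):

import { getInitialChatHistory } from './environmentContext.js';

// Seed a fresh chat: environment summary, the canned model acknowledgement,
// then any caller-supplied turns.
const history = await getInitialChatHistory(config, [
  { role: 'user', parts: [{ text: 'Continue the previous task.' }] },
]);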
278
packages/core/src/utils/ripgrepUtils.test.ts
Normal file
@@ -0,0 +1,278 @@
/**
 * @license
 * Copyright 2025 Qwen
 * SPDX-License-Identifier: Apache-2.0
 */

import { describe, it, expect, beforeEach, vi, type Mock } from 'vitest';
import {
  canUseRipgrep,
  ensureRipgrepPath,
  getRipgrepPath,
} from './ripgrepUtils.js';
import { fileExists } from './fileUtils.js';
import path from 'node:path';

// Mock fileUtils
vi.mock('./fileUtils.js', async (importOriginal) => {
  const actual = await importOriginal<typeof import('./fileUtils.js')>();
  return {
    ...actual,
    fileExists: vi.fn(),
  };
});

describe('ripgrepUtils', () => {
  beforeEach(() => {
    vi.clearAllMocks();
  });

  describe('getRipgrepPath', () => {
    it('should return path with .exe extension on Windows', () => {
      const originalPlatform = process.platform;
      const originalArch = process.arch;

      // Mock Windows x64
      Object.defineProperty(process, 'platform', { value: 'win32' });
      Object.defineProperty(process, 'arch', { value: 'x64' });

      const rgPath = getRipgrepPath();

      expect(rgPath).toContain('x64-win32');
      expect(rgPath).toContain('rg.exe');
      expect(rgPath).toContain(path.join('vendor', 'ripgrep'));

      // Restore original values
      Object.defineProperty(process, 'platform', { value: originalPlatform });
      Object.defineProperty(process, 'arch', { value: originalArch });
    });

    it('should return path without .exe extension on macOS', () => {
      const originalPlatform = process.platform;
      const originalArch = process.arch;

      // Mock macOS arm64
      Object.defineProperty(process, 'platform', { value: 'darwin' });
      Object.defineProperty(process, 'arch', { value: 'arm64' });

      const rgPath = getRipgrepPath();

      expect(rgPath).toContain('arm64-darwin');
      expect(rgPath).toContain('rg');
      expect(rgPath).not.toContain('.exe');
      expect(rgPath).toContain(path.join('vendor', 'ripgrep'));

      // Restore original values
      Object.defineProperty(process, 'platform', { value: originalPlatform });
      Object.defineProperty(process, 'arch', { value: originalArch });
    });

    it('should return path without .exe extension on Linux', () => {
      const originalPlatform = process.platform;
      const originalArch = process.arch;

      // Mock Linux x64
      Object.defineProperty(process, 'platform', { value: 'linux' });
      Object.defineProperty(process, 'arch', { value: 'x64' });

      const rgPath = getRipgrepPath();

      expect(rgPath).toContain('x64-linux');
      expect(rgPath).toContain('rg');
      expect(rgPath).not.toContain('.exe');
      expect(rgPath).toContain(path.join('vendor', 'ripgrep'));

      // Restore original values
      Object.defineProperty(process, 'platform', { value: originalPlatform });
      Object.defineProperty(process, 'arch', { value: originalArch });
    });

    it('should throw error for unsupported platform', () => {
      const originalPlatform = process.platform;
      const originalArch = process.arch;

      // Mock unsupported platform
      Object.defineProperty(process, 'platform', { value: 'freebsd' });
      Object.defineProperty(process, 'arch', { value: 'x64' });

      expect(() => getRipgrepPath()).toThrow('Unsupported platform: freebsd');

      // Restore original values
      Object.defineProperty(process, 'platform', { value: originalPlatform });
      Object.defineProperty(process, 'arch', { value: originalArch });
    });

    it('should throw error for unsupported architecture', () => {
      const originalPlatform = process.platform;
      const originalArch = process.arch;

      // Mock unsupported architecture
      Object.defineProperty(process, 'platform', { value: 'darwin' });
      Object.defineProperty(process, 'arch', { value: 'ia32' });

      expect(() => getRipgrepPath()).toThrow('Unsupported architecture: ia32');

      // Restore original values
      Object.defineProperty(process, 'platform', { value: originalPlatform });
      Object.defineProperty(process, 'arch', { value: originalArch });
    });

    it('should handle all supported platform/arch combinations', () => {
      const originalPlatform = process.platform;
      const originalArch = process.arch;

      const combinations: Array<{
        platform: string;
        arch: string;
      }> = [
        { platform: 'darwin', arch: 'x64' },
        { platform: 'darwin', arch: 'arm64' },
        { platform: 'linux', arch: 'x64' },
        { platform: 'linux', arch: 'arm64' },
        { platform: 'win32', arch: 'x64' },
      ];

      combinations.forEach(({ platform, arch }) => {
        Object.defineProperty(process, 'platform', { value: platform });
        Object.defineProperty(process, 'arch', { value: arch });

        const rgPath = getRipgrepPath();
        const binaryName = platform === 'win32' ? 'rg.exe' : 'rg';
        const expectedPathSegment = path.join(
          `${arch}-${platform}`,
          binaryName,
        );
        expect(rgPath).toContain(expectedPathSegment);
      });

      // Restore original values
      Object.defineProperty(process, 'platform', { value: originalPlatform });
      Object.defineProperty(process, 'arch', { value: originalArch });
    });
  });

  describe('canUseRipgrep', () => {
    it('should return true if ripgrep binary exists (builtin)', async () => {
      (fileExists as Mock).mockResolvedValue(true);

      const result = await canUseRipgrep(true);

      expect(result).toBe(true);
      expect(fileExists).toHaveBeenCalledOnce();
    });

    it('should return true if ripgrep binary exists (default)', async () => {
      (fileExists as Mock).mockResolvedValue(true);

      const result = await canUseRipgrep();

      expect(result).toBe(true);
      expect(fileExists).toHaveBeenCalledOnce();
    });

    it('should fall back to system rg if bundled ripgrep binary does not exist', async () => {
      (fileExists as Mock).mockResolvedValue(false);
      // When useBuiltin is true but bundled binary doesn't exist,
      // it should fall back to checking system rg (which will spawn a process)
      // In this test environment, system rg is likely available, so result should be true
      // unless spawn fails

      const result = await canUseRipgrep();

      // The test may pass or fail depending on system rg availability
      // Just verify that fileExists was called to check bundled binary first
      expect(fileExists).toHaveBeenCalledOnce();
      // Result depends on whether system rg is installed
      expect(typeof result).toBe('boolean');
    });

    // Note: Tests for system ripgrep detection (useBuiltin=false) would require mocking
    // the child_process spawn function, which is complex in ESM. These cases are tested
    // indirectly through integration tests.

    it('should return false if platform is unsupported', async () => {
      const originalPlatform = process.platform;

      // Mock unsupported platform
      Object.defineProperty(process, 'platform', { value: 'aix' });

      const result = await canUseRipgrep();

      expect(result).toBe(false);
      expect(fileExists).not.toHaveBeenCalled();

      // Restore original value
      Object.defineProperty(process, 'platform', { value: originalPlatform });
    });

    it('should return false if architecture is unsupported', async () => {
      const originalArch = process.arch;

      // Mock unsupported architecture
      Object.defineProperty(process, 'arch', { value: 's390x' });

      const result = await canUseRipgrep();

      expect(result).toBe(false);
      expect(fileExists).not.toHaveBeenCalled();

      // Restore original value
      Object.defineProperty(process, 'arch', { value: originalArch });
    });
  });

  describe('ensureRipgrepBinary', () => {
    it('should return ripgrep path if binary exists', async () => {
      (fileExists as Mock).mockResolvedValue(true);

      const rgPath = await ensureRipgrepPath();

      expect(rgPath).toBeDefined();
      expect(rgPath).toContain('rg');
      expect(fileExists).toHaveBeenCalledOnce();
      expect(fileExists).toHaveBeenCalledWith(rgPath);
    });

    it('should throw error if binary does not exist', async () => {
      (fileExists as Mock).mockResolvedValue(false);

      await expect(ensureRipgrepPath()).rejects.toThrow(
        /Ripgrep binary not found/,
      );
      await expect(ensureRipgrepPath()).rejects.toThrow(/Platform:/);
      await expect(ensureRipgrepPath()).rejects.toThrow(/Architecture:/);

      expect(fileExists).toHaveBeenCalled();
    });

    it('should throw error with correct path information', async () => {
      (fileExists as Mock).mockResolvedValue(false);

      try {
        await ensureRipgrepPath();
        // Should not reach here
        expect(true).toBe(false);
      } catch (error) {
        expect(error).toBeInstanceOf(Error);
        const errorMessage = (error as Error).message;
        expect(errorMessage).toContain('Ripgrep binary not found at');
        expect(errorMessage).toContain(process.platform);
        expect(errorMessage).toContain(process.arch);
      }
    });

    it('should throw error if platform is unsupported', async () => {
      const originalPlatform = process.platform;

      // Mock unsupported platform
      Object.defineProperty(process, 'platform', { value: 'openbsd' });

      await expect(ensureRipgrepPath()).rejects.toThrow(
        'Unsupported platform: openbsd',
      );

      // Restore original value
      Object.defineProperty(process, 'platform', { value: originalPlatform });
    });
  });
});
132
packages/core/src/utils/ripgrepUtils.ts
Normal file
@@ -0,0 +1,132 @@
/**
 * @license
 * Copyright 2025 Qwen
 * SPDX-License-Identifier: Apache-2.0
 */

import path from 'node:path';
import { fileURLToPath } from 'node:url';
import { fileExists } from './fileUtils.js';

// Get the directory of the current module
const __filename = fileURLToPath(import.meta.url);
const __dirname = path.dirname(__filename);

type Platform = 'darwin' | 'linux' | 'win32';
type Architecture = 'x64' | 'arm64';

/**
 * Maps process.platform values to vendor directory names
 */
function getPlatformString(platform: string): Platform {
  switch (platform) {
    case 'darwin':
    case 'linux':
    case 'win32':
      return platform;
    default:
      throw new Error(`Unsupported platform: ${platform}`);
  }
}

/**
 * Maps process.arch values to vendor directory names
 */
function getArchitectureString(arch: string): Architecture {
  switch (arch) {
    case 'x64':
    case 'arm64':
      return arch;
    default:
      throw new Error(`Unsupported architecture: ${arch}`);
  }
}

/**
 * Returns the path to the bundled ripgrep binary for the current platform
 */
export function getRipgrepPath(): string {
  const platform = getPlatformString(process.platform);
  const arch = getArchitectureString(process.arch);

  // Binary name includes .exe on Windows
  const binaryName = platform === 'win32' ? 'rg.exe' : 'rg';

  // Path resolution:
  // When running from transpiled code: dist/src/utils/ripgrepUtils.js -> ../../../vendor/ripgrep/
  // When running from bundle: dist/index.js -> vendor/ripgrep/

  // Detect if we're running from a bundle (single file)
  // In bundle, __filename will be something like /path/to/dist/index.js
  // In transpiled code, __filename will be /path/to/dist/src/utils/ripgrepUtils.js
  const isBundled = !__filename.includes(path.join('src', 'utils'));

  const vendorPath = isBundled
    ? path.join(
        __dirname,
        'vendor',
        'ripgrep',
        `${arch}-${platform}`,
        binaryName,
      )
    : path.join(
        __dirname,
        '..',
        '..',
        '..',
        'vendor',
        'ripgrep',
        `${arch}-${platform}`,
        binaryName,
      );

  return vendorPath;
}

/**
 * Checks if ripgrep binary is available
 * @param useBuiltin If true, tries bundled ripgrep first, then falls back to system ripgrep.
 *                   If false, only checks for system ripgrep.
 */
export async function canUseRipgrep(
  useBuiltin: boolean = true,
): Promise<boolean> {
  try {
    if (useBuiltin) {
      // Try bundled ripgrep first
      const rgPath = getRipgrepPath();
      if (await fileExists(rgPath)) {
        return true;
      }
      // Fallback to system rg if bundled binary is not available
    }

    // Check for system ripgrep by trying to spawn 'rg --version'
    const { spawn } = await import('node:child_process');
    return await new Promise<boolean>((resolve) => {
      const proc = spawn('rg', ['--version']);
      proc.on('error', () => resolve(false));
      proc.on('exit', (code) => resolve(code === 0));
    });
  } catch (_error) {
    // Unsupported platform/arch or other error
    return false;
  }
}

/**
 * Ensures ripgrep binary exists and returns its path
 * @throws Error if ripgrep binary is not available
 */
export async function ensureRipgrepPath(): Promise<string> {
  const rgPath = getRipgrepPath();

  if (!(await fileExists(rgPath))) {
    throw new Error(
      `Ripgrep binary not found at ${rgPath}. ` +
        `Platform: ${process.platform}, Architecture: ${process.arch}`,
    );
  }

  return rgPath;
}
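One plausible consumer-side fallback built from these helpers, mirroring the `getUseBuiltinRipgrep()` branch in the grep tool (the wiring shown here is an assumption, not code from this commit):

import { canUseRipgrep, ensureRipgrepPath } from './ripgrepUtils.js';

let rgPath: string;
try {
  rgPath = await ensureRipgrepPath(); // bundled vendor binary
} catch {
  // Bundled binary missing: fall back to a system `rg` on PATH, if any.
  if (await canUseRipgrep(false)) {
    rgPath = 'rg';
  } else {
    throw new Error('ripgrep is not available');
  }
}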
3
packages/core/vendor/ripgrep/COPYING
vendored
Normal file
@@ -0,0 +1,3 @@
This project is dual-licensed under the Unlicense and MIT licenses.

You may use this code under the terms of either license.
BIN
packages/core/vendor/ripgrep/arm64-darwin/rg
vendored
Executable file
Binary file not shown.
BIN
packages/core/vendor/ripgrep/arm64-linux/rg
vendored
Executable file
Binary file not shown.
BIN
packages/core/vendor/ripgrep/x64-darwin/rg
vendored
Executable file
Binary file not shown.
BIN
packages/core/vendor/ripgrep/x64-linux/rg
vendored
Executable file
Binary file not shown.
BIN
packages/core/vendor/ripgrep/x64-win32/rg.exe
vendored
Normal file
Binary file not shown.
@@ -1,6 +1,6 @@
{
  "name": "@qwen-code/qwen-code-test-utils",
  "version": "0.0.14",
  "version": "0.1.2",
  "private": true,
  "main": "src/index.ts",
  "license": "Apache-2.0",
@@ -2,7 +2,7 @@
  "name": "qwen-code-vscode-ide-companion",
  "displayName": "Qwen Code Companion",
  "description": "Enable Qwen Code with direct access to your VS Code workspace.",
  "version": "0.0.14",
  "version": "0.1.2",
  "publisher": "qwenlm",
  "icon": "assets/icon.png",
  "repository": {
@@ -17,24 +17,74 @@
// See the License for the specific language governing permissions and
// limitations under the License.

import { copyFileSync, existsSync, mkdirSync } from 'node:fs';
import { copyFileSync, existsSync, mkdirSync, statSync } from 'node:fs';
import { dirname, join, basename } from 'node:path';
import { fileURLToPath } from 'node:url';
import { glob } from 'glob';
import fs from 'node:fs';

const __dirname = dirname(fileURLToPath(import.meta.url));
const root = join(__dirname, '..');
const bundleDir = join(root, 'bundle');
const distDir = join(root, 'dist');
const coreVendorDir = join(root, 'packages', 'core', 'vendor');

// Create the bundle directory if it doesn't exist
if (!existsSync(bundleDir)) {
  mkdirSync(bundleDir);
// Create the dist directory if it doesn't exist
if (!existsSync(distDir)) {
  mkdirSync(distDir);
}

// Find and copy all .sb files from packages to the root of the bundle directory
// Find and copy all .sb files from packages to the root of the dist directory
const sbFiles = glob.sync('packages/**/*.sb', { cwd: root });
for (const file of sbFiles) {
  copyFileSync(join(root, file), join(bundleDir, basename(file)));
  copyFileSync(join(root, file), join(distDir, basename(file)));
}

console.log('Assets copied to bundle/');
console.log('Copied sandbox profiles to dist/');

// Copy vendor directory (contains ripgrep binaries)
console.log('Copying vendor directory...');
if (existsSync(coreVendorDir)) {
  const destVendorDir = join(distDir, 'vendor');
  copyRecursiveSync(coreVendorDir, destVendorDir);
  console.log('Copied vendor directory to dist/');
} else {
  console.warn(`Warning: Vendor directory not found at ${coreVendorDir}`);
}

console.log('\n✅ All bundle assets copied to dist/');

/**
 * Recursively copy directory
 */
function copyRecursiveSync(src, dest) {
  if (!existsSync(src)) {
    return;
  }

  const stats = statSync(src);

  if (stats.isDirectory()) {
    if (!existsSync(dest)) {
      mkdirSync(dest, { recursive: true });
    }

    const entries = fs.readdirSync(src);
    for (const entry of entries) {
      // Skip .DS_Store files
      if (entry === '.DS_Store') {
        continue;
      }

      const srcPath = join(src, entry);
      const destPath = join(dest, entry);
      copyRecursiveSync(srcPath, destPath);
    }
  } else {
    copyFileSync(src, dest);
    // Preserve execute permissions for binaries
    const srcStats = statSync(src);
    if (srcStats.mode & 0o111) {
      fs.chmodSync(dest, srcStats.mode);
    }
  }
}
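The `mode & 0o111` test checks whether any execute bit (owner, group, or other) is set before re-applying the source mode, which keeps the vendored `rg` binaries executable after the copy:

// Plain bit arithmetic, shown for clarity:
(0o755 & 0o111) !== 0; // true: rwxr-xr-x has execute bits, mode is re-applied
(0o644 & 0o111) !== 0; // false: rw-r--r-- is plain data, no chmod needed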
29
scripts/esbuild-shims.js
Normal file
@@ -0,0 +1,29 @@
/**
 * @license
 * Copyright 2025 Qwen
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * Shims for esbuild ESM bundles to support require() calls
 * This file is injected into the bundle via esbuild's inject option
 */

import { createRequire } from 'node:module';
import { fileURLToPath } from 'node:url';
import { dirname } from 'node:path';

// Create require function for the current module and make it global
const _require = createRequire(import.meta.url);

// Make require available globally for dynamic requires
if (typeof globalThis.require === 'undefined') {
  globalThis.require = _require;
}

// Export for esbuild injection
export const require = _require;

// Setup __filename and __dirname for compatibility
export const __filename = fileURLToPath(import.meta.url);
export const __dirname = dirname(__filename);
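A sketch of how such a shim is typically wired into the bundle. `inject` is a real esbuild option; the entry point and output path below are assumptions, not necessarily this repo's actual build config:

import { build } from 'esbuild';

await build({
  entryPoints: ['packages/cli/index.ts'],
  bundle: true,
  platform: 'node',
  format: 'esm',
  outfile: 'dist/cli.js',
  inject: ['scripts/esbuild-shims.js'], // prepends the require/__dirname shims
});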
@@ -1,213 +0,0 @@
/**
 * @license
 * Copyright 2025 Google LLC
 * SPDX-License-Identifier: Apache-2.0
 */

import { execSync } from 'child_process';

/**
 * Determines the correct previous tag for release notes generation.
 * This function handles the complexity of mixed tag types (regular releases vs nightly releases).
 *
 * @param {string} currentTag - The current release tag (e.g., "v0.1.23")
 * @returns {string|null} - The previous tag to compare against, or null if no suitable tag found
 */
export function getPreviousTag(currentTag) {
  try {
    // Parse the current tag to understand its type
    const currentTagInfo = parseTag(currentTag);
    if (!currentTagInfo) {
      console.error(`Invalid current tag format: ${currentTag}`);
      return null;
    }

    // Find the appropriate previous tag based on the current tag type
    let previousTag = null;

    if (currentTagInfo.isNightly) {
      // For nightly releases, find the last stable release
      previousTag = findLastStableTag(currentTagInfo);
    } else {
      // For stable releases, find the previous stable release
      previousTag = findPreviousStableTag(currentTagInfo);
    }

    return previousTag;
  } catch (error) {
    console.error('Error getting previous tag:', error.message);
    return null;
  }
}

/**
 * Parses a tag string to extract version information and type
 */
function parseTag(tag) {
  // Remove 'v' prefix if present
  const cleanTag = tag.startsWith('v') ? tag.substring(1) : tag;

  // Match pattern: X.Y.Z or X.Y.Z-prerelease
  const match = cleanTag.match(/^(\d+)\.(\d+)\.(\d+)(?:-(.+))?$/);
  if (!match) {
    return null;
  }

  const [, major, minor, patch, prerelease] = match;

  return {
    original: tag,
    major: parseInt(major),
    minor: parseInt(minor),
    patch: parseInt(patch),
    prerelease: prerelease || null,
    isNightly: prerelease && prerelease.startsWith('nightly'),
    isPreview: prerelease && prerelease.startsWith('preview'),
    version: `${major}.${minor}.${patch}`,
  };
}

/**
 * Finds the last stable tag for a nightly release
 * Assumes version numbers are incremental and checks backwards from current version
 */
function findLastStableTag(currentTagInfo) {
  // For nightly releases, find the stable version of the same version number first
  const baseVersion = `v${currentTagInfo.version}`;

  // Check if the stable version of the current version exists
  if (tagExists(baseVersion)) {
    return baseVersion;
  }

  // If not, look for the previous stable versions by decrementing version numbers
  let { major, minor, patch } = currentTagInfo;

  // Try decrementing patch version first
  while (patch > 0) {
    patch--;
    const candidateTag = `v${major}.${minor}.${patch}`;
    if (tagExists(candidateTag)) {
      return candidateTag;
    }
  }

  // Try decrementing minor version
  while (minor > 0) {
    minor--;
    patch = 999; // Start from a high patch number and work backwards
    while (patch >= 0) {
      const candidateTag = `v${major}.${minor}.${patch}`;
      if (tagExists(candidateTag)) {
        return candidateTag;
      }
      patch--;
      // Don't check too many patch versions to avoid infinite loops
      if (patch < 0) break;
    }
  }

  // Try decrementing major version
  while (major > 0) {
    major--;
    minor = 999; // Start from a high minor number and work backwards
    while (minor >= 0) {
      patch = 999;
      while (patch >= 0) {
        const candidateTag = `v${major}.${minor}.${patch}`;
        if (tagExists(candidateTag)) {
          return candidateTag;
        }
        patch--;
        if (patch < 0) break;
      }
      minor--;
      if (minor < 0) break;
    }
  }

  return null;
}

/**
 * Finds the previous stable tag for a stable release
 * Assumes version numbers are incremental and checks backwards from current version
 */
function findPreviousStableTag(currentTagInfo) {
  let { major, minor, patch } = currentTagInfo;

  // Try decrementing patch version first
  while (patch > 0) {
    patch--;
    const candidateTag = `v${major}.${minor}.${patch}`;
    if (tagExists(candidateTag)) {
      return candidateTag;
    }
  }

  // Try decrementing minor version
  while (minor > 0) {
    minor--;
    patch = 999; // Start from a high patch number and work backwards
    while (patch >= 0) {
      const candidateTag = `v${major}.${minor}.${patch}`;
      if (tagExists(candidateTag)) {
        return candidateTag;
      }
      patch--;
      // Don't check too many patch versions to avoid infinite loops
      if (patch < 0) break;
    }
  }

  // Try decrementing major version
  while (major > 0) {
    major--;
    minor = 999; // Start from a high minor number and work backwards
    while (minor >= 0) {
      patch = 999;
      while (patch >= 0) {
        const candidateTag = `v${major}.${minor}.${patch}`;
        if (tagExists(candidateTag)) {
          return candidateTag;
        }
        patch--;
        if (patch < 0) break;
      }
      minor--;
      if (minor < 0) break;
    }
  }

  return null;
}

/**
 * Checks if a git tag exists
 */
function tagExists(tag) {
  try {
    execSync(`git rev-parse --verify ${tag}`, { stdio: 'ignore' });
    return true;
  } catch {
    return false;
  }
}

// CLI usage
if (process.argv[1] === new URL(import.meta.url).pathname) {
  const currentTag = process.argv[2];

  if (!currentTag) {
    console.error('Usage: node get-previous-tag.js <current-tag>');
    process.exit(1);
  }

  const previousTag = getPreviousTag(currentTag);
  if (previousTag) {
    console.log(previousTag);
  } else {
    console.error('No suitable previous tag found');
    process.exit(1);
  }
}
@@ -26,36 +26,8 @@ function getArgs() {
  return args;
}

function getLatestTag(pattern) {
  const command = `git tag -l '${pattern}'`;
  try {
    const tags = execSync(command)
      .toString()
      .trim()
      .split('\n')
      .filter(Boolean);
    if (tags.length === 0) return '';

    // Convert tags to versions (remove 'v' prefix) and sort by semver
    const versions = tags
      .map((tag) => tag.replace(/^v/, ''))
      .filter((version) => semver.valid(version))
      .sort((a, b) => semver.rcompare(a, b)); // rcompare for descending order

    if (versions.length === 0) return '';

    // Return the latest version with 'v' prefix restored
    return `v${versions[0]}`;
  } catch (error) {
    console.error(
      `Failed to get latest git tag for pattern "${pattern}": ${error.message}`,
    );
    return '';
  }
}

function getVersionFromNPM(distTag) {
  const command = `npm view @google/gemini-cli version --tag=${distTag}`;
  const command = `npm view @qwen-code/qwen-code version --tag=${distTag}`;
  try {
    return execSync(command).toString().trim();
  } catch (error) {
@@ -67,7 +39,7 @@ function getVersionFromNPM(distTag) {
}

function getAllVersionsFromNPM() {
  const command = `npm view @google/gemini-cli versions --json`;
  const command = `npm view @qwen-code/qwen-code versions --json`;
  try {
    const versionsJson = execSync(command).toString().trim();
    return JSON.parse(versionsJson);
@@ -78,7 +50,7 @@ function getAllVersionsFromNPM() {
}

function isVersionDeprecated(version) {
  const command = `npm view @google/gemini-cli@${version} deprecated`;
  const command = `npm view @qwen-code/qwen-code@${version} deprecated`;
  try {
    const output = execSync(command).toString().trim();
    return output.length > 0;
@@ -159,7 +131,7 @@ function detectRollbackAndGetBaseline(npmDistTag) {
function doesVersionExist(version) {
  // Check NPM
  try {
    const command = `npm view @google/gemini-cli@${version} version 2>/dev/null`;
    const command = `npm view @qwen-code/qwen-code@${version} version 2>/dev/null`;
    const output = execSync(command).toString().trim();
    if (output === version) {
      console.error(`Version ${version} already exists on NPM.`);
@@ -229,11 +201,20 @@ function getAndVerifyTags(npmDistTag, _gitTagPattern) {
  };
}

function getLatestStableReleaseTag() {
  try {
    const { latestTag } = getAndVerifyTags('latest', 'v[0-9].[0-9].[0-9]');
    return latestTag;
  } catch (error) {
    console.error(
      `Failed to determine latest stable release tag: ${error.message}`,
    );
    return '';
  }
}

function promoteNightlyVersion() {
  const { latestVersion, latestTag } = getAndVerifyTags(
    'nightly',
    'v*-nightly*',
  );
  const { latestVersion } = getAndVerifyTags('nightly', 'v*-nightly*');
  const baseVersion = latestVersion.split('-')[0];
  const versionParts = baseVersion.split('.');
  const major = versionParts[0];
@@ -244,7 +225,6 @@ function promoteNightlyVersion() {
  return {
    releaseVersion: `${major}.${nextMinor}.0-nightly.${date}.${gitShortHash}`,
    npmTag: 'nightly',
    previousReleaseTag: latestTag,
  };
}

@@ -254,12 +234,9 @@ function getNightlyVersion() {
  const date = new Date().toISOString().slice(0, 10).replace(/-/g, '');
  const gitShortHash = execSync('git rev-parse --short HEAD').toString().trim();
  const releaseVersion = `${baseVersion}-nightly.${date}.${gitShortHash}`;
  const previousReleaseTag = getLatestTag('v*-nightly*');

  return {
    releaseVersion,
    npmTag: 'nightly',
    previousReleaseTag,
  };
}

@@ -290,15 +267,9 @@ function getStableVersion(args) {
    releaseVersion = latestPreviewVersion.replace(/-preview.*/, '');
  }

  const { latestTag: previousStableTag } = getAndVerifyTags(
    'latest',
    'v[0-9].[0-9].[0-9]',
  );

  return {
    releaseVersion,
    npmTag: 'latest',
    previousReleaseTag: previousStableTag,
  };
}

@@ -321,15 +292,9 @@ function getPreviewVersion(args) {
    latestNightlyVersion.replace(/-nightly.*/, '') + '-preview.0';
  }

  const { latestTag: previousPreviewTag } = getAndVerifyTags(
    'preview',
    'v*-preview*',
  );

  return {
    releaseVersion,
    npmTag: 'preview',
    previousReleaseTag: previousPreviewTag,
  };
}

@@ -341,7 +306,7 @@ function getPatchVersion(patchFrom) {
  }
  const distTag = patchFrom === 'stable' ? 'latest' : 'preview';
  const pattern = distTag === 'latest' ? 'v[0-9].[0-9].[0-9]' : 'v*-preview*';
  const { latestVersion, latestTag } = getAndVerifyTags(distTag, pattern);
  const { latestVersion } = getAndVerifyTags(distTag, pattern);

  if (patchFrom === 'stable') {
    // For stable versions, increment the patch number: 0.5.4 -> 0.5.5
@@ -353,7 +318,6 @@ function getPatchVersion(patchFrom) {
    return {
      releaseVersion,
      npmTag: distTag,
      previousReleaseTag: latestTag,
    };
  } else {
    // For preview versions, increment the preview number: 0.6.0-preview.2 -> 0.6.0-preview.3
@@ -373,7 +337,6 @@ function getPatchVersion(patchFrom) {
    return {
      releaseVersion,
      npmTag: distTag,
      previousReleaseTag: latestTag,
    };
  }
}
@@ -438,6 +401,8 @@ export function getVersion(options = {}) {
    ...versionData,
  };

  result.previousReleaseTag = getLatestStableReleaseTag();

  return result;
}
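With this change the script's JSON output carries the latest stable tag as previousReleaseTag for every release type. An illustrative run (all values invented; the field names match the jq queries in the release workflow):

$ node scripts/get-release-version.js --type=preview
{
  "releaseVersion": "0.8.0-preview.0",
  "releaseTag": "v0.8.0-preview.0",
  "npmTag": "preview",
  "previousReleaseTag": "v0.6.1"
}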
@@ -1,51 +1,110 @@
|
||||
/**
|
||||
* @license
|
||||
* Copyright 2025 Google LLC
|
||||
* Copyright 2025 Qwen
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
/**
|
||||
* Prepares the bundled CLI package for npm publishing
|
||||
* This script adds publishing metadata (package.json, README, LICENSE) to dist/
|
||||
* All runtime assets (cli.js, vendor/, *.sb) are already in dist/ from the bundle step
|
||||
*/
|
||||
|
||||
import fs from 'node:fs';
|
||||
import path from 'node:path';
|
||||
import { fileURLToPath } from 'node:url';
|
||||
import { execSync } from 'node:child_process';
|
||||
|
||||
// ES module equivalent of __dirname
|
||||
const __filename = fileURLToPath(import.meta.url);
|
||||
const __dirname = path.dirname(__filename);
|
||||
|
||||
const rootDir = path.resolve(__dirname, '..');
|
||||
|
||||
function copyFiles(packageName, filesToCopy) {
|
||||
const packageDir = path.resolve(rootDir, 'packages', packageName);
|
||||
if (!fs.existsSync(packageDir)) {
|
||||
console.error(`Error: Package directory not found at ${packageDir}`);
|
||||
process.exit(1);
|
||||
}
|
||||
const distDir = path.join(rootDir, 'dist');
|
||||
const cliBundlePath = path.join(distDir, 'cli.js');
|
||||
const vendorDir = path.join(distDir, 'vendor');
|
||||
|
||||
console.log(`Preparing package: ${packageName}`);
|
||||
for (const [source, dest] of Object.entries(filesToCopy)) {
|
||||
const sourcePath = path.resolve(rootDir, source);
|
||||
const destPath = path.resolve(packageDir, dest);
|
||||
try {
|
||||
fs.copyFileSync(sourcePath, destPath);
|
||||
console.log(`Copied ${source} to packages/${packageName}/`);
|
||||
} catch (err) {
|
||||
console.error(`Error copying ${source}:`, err);
|
||||
process.exit(1);
|
||||
}
|
||||
// Verify dist directory and bundle exist
|
||||
if (!fs.existsSync(distDir)) {
|
||||
console.error('Error: dist/ directory not found');
|
||||
console.error('Please run "npm run bundle" first');
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
if (!fs.existsSync(cliBundlePath)) {
|
||||
console.error(`Error: Bundle not found at ${cliBundlePath}`);
|
||||
console.error('Please run "npm run bundle" first');
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
if (!fs.existsSync(vendorDir)) {
|
||||
console.error(`Error: Vendor directory not found at ${vendorDir}`);
|
||||
console.error('Please run "npm run bundle" first');
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
// Copy README and LICENSE
|
||||
console.log('Copying documentation files...');
|
||||
const filesToCopy = ['README.md', 'LICENSE'];
|
||||
for (const file of filesToCopy) {
|
||||
const sourcePath = path.join(rootDir, file);
|
||||
const destPath = path.join(distDir, file);
|
||||
if (fs.existsSync(sourcePath)) {
|
||||
fs.copyFileSync(sourcePath, destPath);
|
||||
console.log(`Copied ${file}`);
|
||||
} else {
|
||||
console.warn(`Warning: ${file} not found at ${sourcePath}`);
|
||||
}
|
||||
}
|
||||
|
||||
// Prepare 'core' package
|
||||
copyFiles('core', {
|
||||
'README.md': 'README.md',
|
||||
LICENSE: 'LICENSE',
|
||||
'.npmrc': '.npmrc',
|
||||
});
|
||||
// Copy package.json from root and modify it for publishing
|
||||
console.log('Creating package.json for distribution...');
|
||||
const rootPackageJson = JSON.parse(
|
||||
fs.readFileSync(path.join(rootDir, 'package.json'), 'utf-8'),
|
||||
);
|
||||
const corePackageJson = JSON.parse(
|
||||
fs.readFileSync(
|
||||
path.join(rootDir, 'packages', 'core', 'package.json'),
|
||||
'utf-8',
|
||||
),
|
||||
);
|
||||
|
||||
// Prepare 'cli' package
|
||||
copyFiles('cli', {
|
||||
'README.md': 'README.md',
|
||||
LICENSE: 'LICENSE',
|
||||
});
|
||||
const runtimeDependencies = {};
|
||||
if (corePackageJson.dependencies?.tiktoken) {
|
||||
runtimeDependencies.tiktoken = corePackageJson.dependencies.tiktoken;
|
||||
}
|
||||
|
||||
console.log('Successfully prepared all packages.');
// Create a clean package.json for the published package
const distPackageJson = {
  name: rootPackageJson.name,
  version: rootPackageJson.version,
  description:
    rootPackageJson.description || 'Qwen Code - AI-powered coding assistant',
  repository: rootPackageJson.repository,
  type: 'module',
  main: 'cli.js',
  bin: {
    qwen: 'cli.js',
  },
  files: ['cli.js', 'vendor', 'README.md', 'LICENSE'],
  config: rootPackageJson.config,
  dependencies: runtimeDependencies,
  // Optional so installs still succeed on platforms without a matching
  // prebuilt PTY binary.
  optionalDependencies: {
    '@lydell/node-pty': '1.1.0',
    '@lydell/node-pty-darwin-arm64': '1.1.0',
    '@lydell/node-pty-darwin-x64': '1.1.0',
    '@lydell/node-pty-linux-x64': '1.1.0',
    '@lydell/node-pty-win32-arm64': '1.1.0',
    '@lydell/node-pty-win32-x64': '1.1.0',
    'node-pty': '^1.0.0',
  },
  engines: rootPackageJson.engines,
};

fs.writeFileSync(
  path.join(distDir, 'package.json'),
  JSON.stringify(distPackageJson, null, 2) + '\n',
);

console.log('\n✅ Package prepared for publishing at dist/');
console.log('\nPackage structure:');
execSync('ls -lh dist/', { stdio: 'inherit', cwd: rootDir });
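With everything the tarball needs now under dist/, a cheap pre-publish smoke check is possible; the following sketch (hypothetical, not part of this commit) verifies that every entry in the generated `files` list actually exists before `npm publish` runs from dist/:

// Hypothetical smoke check, run from the repository root after
// prepare-package.js; assumes only node:fs and node:path.
import fs from 'node:fs';
import path from 'node:path';

const dist = path.resolve('dist');
const pkg = JSON.parse(
  fs.readFileSync(path.join(dist, 'package.json'), 'utf-8'),
);
for (const entry of pkg.files) {
  if (!fs.existsSync(path.join(dist, entry))) {
    throw new Error(`Listed in "files" but missing from dist/: ${entry}`);
  }
}
console.log(`${pkg.name}@${pkg.version} looks publishable from dist/`);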
@@ -57,7 +57,7 @@ describe('getVersion', () => {
       // For doesVersionExist checks - default to not found
       if (
         command.includes('npm view') &&
-        command.includes('@google/gemini-cli@')
+        command.includes('@qwen-code/qwen-code@')
       ) {
         throw new Error('NPM version not found');
       }
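This hunk edits a mocked execSync; as a rough sketch of the pattern (assumed shape — the real suite may wire it differently), a vitest module mock that fails every version probe by default looks like:

// Sketch of the assumed test scaffolding: mock node:child_process so every
// `npm view @qwen-code/qwen-code@<version>` probe reports "not found".
import { vi } from 'vitest';

vi.mock('node:child_process', () => ({
  execSync: vi.fn((command) => {
    if (
      command.includes('npm view') &&
      command.includes('@qwen-code/qwen-code@')
    ) {
      throw new Error('NPM version not found');
    }
    return '';
  }),
}));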
@@ -83,7 +83,7 @@ describe('getVersion', () => {
     const result = getVersion({ type: 'preview' });
     expect(result.releaseVersion).toBe('0.8.0-preview.0');
     expect(result.npmTag).toBe('preview');
-    expect(result.previousReleaseTag).toBe('v0.7.0-preview.1');
+    expect(result.previousReleaseTag).toBe('v0.6.1');
   });

   it('should calculate the next nightly version from package.json', () => {
@@ -92,7 +92,7 @@ describe('getVersion', () => {
     // Note: The base version now comes from package.json, not the previous nightly tag.
     expect(result.releaseVersion).toBe('0.8.0-nightly.20250917.d3bf8a3d');
     expect(result.npmTag).toBe('nightly');
-    expect(result.previousReleaseTag).toBe('v0.8.0-nightly.20250916.abcdef');
+    expect(result.previousReleaseTag).toBe('v0.6.1');
   });

   it('should calculate the next patch version for a stable release', () => {
@@ -108,7 +108,7 @@ describe('getVersion', () => {
     const result = getVersion({ type: 'patch', 'patch-from': 'preview' });
     expect(result.releaseVersion).toBe('0.7.0-preview.2');
     expect(result.npmTag).toBe('preview');
-    expect(result.previousReleaseTag).toBe('v0.7.0-preview.1');
+    expect(result.previousReleaseTag).toBe('v0.6.1');
   });
 });
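All three expectations above now resolve previousReleaseTag to v0.6.1, the last stable tag in the fixture, rather than the nearest prerelease tag. A sketch of that selection rule — an inference about intent, not the repository's implementation; it presumes the `semver` package and `git tag` output:

// Hypothetical: pick the latest stable (non-prerelease) tag as the
// baseline for release notes.
import { execSync } from 'node:child_process';
import semver from 'semver';

function previousStableTag() {
  const tags = execSync('git tag --list "v*"', { encoding: 'utf-8' })
    .split('\n')
    .filter(Boolean);
  const stable = tags
    .map((t) => semver.valid(t.replace(/^v/, '')))
    .filter((v) => v && !semver.prerelease(v))
    .sort(semver.rcompare);
  return stable.length ? `v${stable[0]}` : '';
}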
@@ -124,7 +124,7 @@ describe('getVersion', () => {
       // Mock the deprecation check
       if (
         command.includes(
-          'npm view @google/gemini-cli@0.9.0-nightly.20250917.deprecated deprecated',
+          'npm view @qwen-code/qwen-code@0.9.0-nightly.20250917.deprecated deprecated',
         )
       )
         return 'This version is deprecated';
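The matched string is the probe the version script issues: `npm view <pkg>@<version> deprecated` prints the deprecation message when one is set and nothing otherwise. A minimal check in that style (a sketch; the real script may structure this differently):

// Sketch: detect a deprecated npm version via `npm view ... deprecated`.
import { execSync } from 'node:child_process';

function isDeprecated(pkg, version) {
  try {
    const out = execSync(`npm view ${pkg}@${version} deprecated`, {
      encoding: 'utf-8',
    }).trim();
    return out.length > 0;
  } catch {
    return false; // version not published at all
  }
}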
@@ -162,14 +162,14 @@ describe('getVersion', () => {
       // The calculated preview 0.8.0-preview.0 already exists on NPM
       if (
         command.includes(
-          'npm view @google/gemini-cli@0.8.0-preview.0 version',
+          'npm view @qwen-code/qwen-code@0.8.0-preview.0 version',
         )
       )
         return '0.8.0-preview.0';
       // The next one is available
       if (
         command.includes(
-          'npm view @google/gemini-cli@0.8.0-preview.1 version',
+          'npm view @qwen-code/qwen-code@0.8.0-preview.1 version',
         )
       )
         throw new Error('Not found');
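Taken together, the two mocks encode a probe-and-bump loop: 0.8.0-preview.0 is taken, 0.8.0-preview.1 is free, so the latter should win. A sketch of that loop (hypothetical; the function names are invented and the real script may differ):

// Bump the prerelease counter until `npm view` stops finding the version.
// Relies on `npm view <pkg>@<v> version` throwing for unpublished versions,
// matching the mock above.
import { execSync } from 'node:child_process';

function doesVersionExist(pkg, version) {
  try {
    execSync(`npm view ${pkg}@${version} version`, { stdio: 'pipe' });
    return true;
  } catch {
    return false;
  }
}

function nextFreePreview(pkg, major, minor) {
  let n = 0;
  while (doesVersionExist(pkg, `${major}.${minor}.0-preview.${n}`)) {
    n += 1;
  }
  return `${major}.${minor}.0-preview.${n}`;
}

// Under the mock above:
// nextFreePreview('@qwen-code/qwen-code', 0, 8) -> '0.8.0-preview.1'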