Mirror of https://github.com/QwenLM/qwen-code.git (synced 2025-12-27 12:09:14 +00:00)

Compare commits: v0.0.11-ni...dev/yolo (13 commits)
| Author | SHA1 | Date |
|---|---|---|
| | 514f292770 | |
| | a0d77f5a44 | |
| | a9a84014e4 | |
| | 92af02c494 | |
| | 740768dc1b | |
| | 49d7947028 | |
| | b01ddf0aed | |
| | 67e2e270bd | |
| | adabd96a42 | |
| | 4a96646732 | |
| | 9ed3f887af | |
| | 19590766b9 | |
| | 969fc2aff9 | |
CHANGELOG.md (18)
@@ -1,5 +1,23 @@
# Changelog

+## 0.0.11
+
+- Added subagents feature with file-based configuration system for specialized AI assistants.
+- Added Welcome Back Dialog with project summary and enhanced quit options.
+- Fixed performance issues with SharedTokenManager causing 20-minute delays.
+- Fixed tool calls UI issues and improved user experience.
+- Fixed credential clearing when switching authentication types.
+- Enhanced subagent capabilities to use tools requiring user confirmation.
+- Improved ReadManyFiles tool with shared line limits across files.
+- Re-implemented tokenLimits class for better compatibility with Qwen and other model types.
+- Fixed chunk validation to avoid unnecessary retries.
+- Resolved EditTool naming inconsistency causing agent confusion loops.
+- Fixed unexpected re-authentication when auth-token is expired.
+- Added Terminal Bench integration tests.
+- Updated multilingual documentation links in README.
+- Fixed various Windows compatibility issues.
+- Miscellaneous improvements and bug fixes.
+
## 0.0.10

- Synced upstream `gemini-cli` to v0.2.1.
@@ -4,7 +4,7 @@ Your uninstall method depends on how you ran the CLI. Follow the instructions fo

## Method 1: Using npx

-npx runs packages from a temporary cache without a permanent installation. To "uninstall" the CLI, you must clear this cache, which will remove gemini-cli and any other packages previously executed with npx.
+npx runs packages from a temporary cache without a permanent installation. To "uninstall" the CLI, you must clear this cache, which will remove qwen-code and any other packages previously executed with npx.

The npx cache is a directory named `_npx` inside your main npm cache folder. You can find your npm cache path by running `npm config get cache`.
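As a rough illustration of the cache location described above, the sketch below resolves the `_npx` directory programmatically. It is not part of the CLI itself, and the helper name is my own.

```ts
import { execSync } from 'node:child_process';
import { join } from 'node:path';

// Ask npm for its cache root, then append the `_npx` subdirectory
// mentioned in the uninstall instructions above.
function getNpxCacheDir(): string {
  const npmCache = execSync('npm config get cache', { encoding: 'utf8' }).trim();
  return join(npmCache, '_npx');
}

console.log(`npx cache lives at: ${getNpxCacheDir()}`);
```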
@@ -18,8 +18,8 @@ Slash commands provide meta-level control over the CLI itself.

- **Description:** Saves the current conversation history. You must add a `<tag>` for identifying the conversation state.
- **Usage:** `/chat save <tag>`
- **Details on Checkpoint Location:** The default locations for saved chat checkpoints are:
-  - Linux/macOS: `~/.config/google-generative-ai/checkpoints/`
-  - Windows: `C:\Users\<YourUsername>\AppData\Roaming\google-generative-ai\checkpoints\`
+  - Linux/macOS: `~/.config/qwen-code/checkpoints/`
+  - Windows: `C:\Users\<YourUsername>\AppData\Roaming\qwen-code\checkpoints\`
  - When you run `/chat list`, the CLI only scans these specific directories to find available checkpoints.
- **Note:** These checkpoints are for manually saving and resuming conversation states. For automatic checkpoints created before file modifications, see the [Checkpointing documentation](../checkpointing.md).
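For orientation, here is a small sketch (not taken from the CLI source) that computes the default checkpoint directory listed above for the current platform.

```ts
import os from 'node:os';
import path from 'node:path';

// Mirrors the default checkpoint locations documented above:
// Linux/macOS -> ~/.config/qwen-code/checkpoints
// Windows     -> %APPDATA%\qwen-code\checkpoints
function defaultCheckpointDir(): string {
  if (process.platform === 'win32') {
    const appData = process.env.APPDATA ?? path.join(os.homedir(), 'AppData', 'Roaming');
    return path.join(appData, 'qwen-code', 'checkpoints');
  }
  return path.join(os.homedir(), '.config', 'qwen-code', 'checkpoints');
}

console.log(defaultCheckpointDir());
```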
- **`resume`**
@@ -23,8 +23,9 @@ Qwen Code uses `settings.json` files for persistent configuration. There are thr

- **Project settings file:**
  - **Location:** `.qwen/settings.json` within your project's root directory.
  - **Scope:** Applies only when running Qwen Code from that specific project. Project settings override user settings.

- **System settings file:**
-  - **Location:** `/etc/gemini-cli/settings.json` (Linux), `C:\ProgramData\gemini-cli\settings.json` (Windows) or `/Library/Application Support/GeminiCli/settings.json` (macOS). The path can be overridden using the `GEMINI_CLI_SYSTEM_SETTINGS_PATH` environment variable.
+  - **Location:** `/etc/qwen-code/settings.json` (Linux), `C:\ProgramData\qwen-code\settings.json` (Windows) or `/Library/Application Support/QwenCode/settings.json` (macOS). The path can be overridden using the `QWEN_CODE_SYSTEM_SETTINGS_PATH` environment variable.
  - **Scope:** Applies to all Qwen Code sessions on the system, for all users. System settings override user and project settings. May be useful for system administrators at enterprises to have controls over users' Qwen Code setups.

**Note on environment variables in settings:** String values within your `settings.json` files can reference environment variables using either `$VAR_NAME` or `${VAR_NAME}` syntax. These variables will be automatically resolved when the settings are loaded. For example, if you have an environment variable `MY_API_TOKEN`, you could use it in `settings.json` like this: `"apiKey": "$MY_API_TOKEN"`.
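The note above describes `$VAR_NAME` / `${VAR_NAME}` expansion inside `settings.json`. Below is a minimal, self-contained sketch of that kind of resolution; it illustrates the behavior, it is not the CLI's actual implementation.

```ts
// Resolve $VAR_NAME and ${VAR_NAME} references in settings strings
// against process.env, leaving unknown variables untouched.
function resolveEnvVars(value: string): string {
  return value.replace(/\$(?:\{(\w+)\}|(\w+))/g, (match, braced, bare) => {
    const name = braced ?? bare;
    return process.env[name] ?? match;
  });
}

// Example: "apiKey": "$MY_API_TOKEN" becomes the token's value at load time.
process.env.MY_API_TOKEN = 'secret-123';
console.log(resolveEnvVars('"apiKey": "$MY_API_TOKEN"')); // "apiKey": "secret-123"
```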
@@ -369,35 +370,16 @@ The CLI automatically loads environment variables from an `.env` file. The loadi

**Environment Variable Exclusion:** Some environment variables (like `DEBUG` and `DEBUG_MODE`) are automatically excluded from project `.env` files by default to prevent interference with the CLI behavior. Variables from `.qwen/.env` files are never excluded. You can customize this behavior using the `excludedProjectEnvVars` setting in your `settings.json` file.

-- **`GEMINI_API_KEY`**:
-  - Your API key for the Gemini API.
+- **`OPENAI_API_KEY`**:
+  - One of several available [authentication methods](./authentication.md).
  - Set this in your shell profile (e.g., `~/.bashrc`, `~/.zshrc`) or an `.env` file.
-- **`GEMINI_MODEL`**:
-  - Specifies the default Gemini model to use.
+- **`OPENAI_BASE_URL`**:
  - One of several available [authentication methods](./authentication.md).
  - Set this in your shell profile (e.g., `~/.bashrc`, `~/.zshrc`) or an `.env` file.
+- **`OPENAI_MODEL`**:
+  - Specifies the default OpenAI model to use.
  - Overrides the hardcoded default
-  - Example: `export GEMINI_MODEL="gemini-2.5-flash"`
-- **`GOOGLE_API_KEY`**:
-  - Your Google Cloud API key.
-  - Required for using Vertex AI in express mode.
-  - Ensure you have the necessary permissions.
-  - Example: `export GOOGLE_API_KEY="YOUR_GOOGLE_API_KEY"`.
-- **`GOOGLE_CLOUD_PROJECT`**:
-  - Your Google Cloud Project ID.
-  - Required for using Code Assist or Vertex AI.
-  - If using Vertex AI, ensure you have the necessary permissions in this project.
-  - **Cloud Shell Note:** When running in a Cloud Shell environment, this variable defaults to a special project allocated for Cloud Shell users. If you have `GOOGLE_CLOUD_PROJECT` set in your global environment in Cloud Shell, it will be overridden by this default. To use a different project in Cloud Shell, you must define `GOOGLE_CLOUD_PROJECT` in a `.env` file.
-  - Example: `export GOOGLE_CLOUD_PROJECT="YOUR_PROJECT_ID"`.
-- **`GOOGLE_APPLICATION_CREDENTIALS`** (string):
-  - **Description:** The path to your Google Application Credentials JSON file.
-  - **Example:** `export GOOGLE_APPLICATION_CREDENTIALS="/path/to/your/credentials.json"`
-- **`OTLP_GOOGLE_CLOUD_PROJECT`**:
-  - Your Google Cloud Project ID for Telemetry in Google Cloud
-  - Example: `export OTLP_GOOGLE_CLOUD_PROJECT="YOUR_PROJECT_ID"`.
-- **`GOOGLE_CLOUD_LOCATION`**:
-  - Your Google Cloud Project Location (e.g., us-central1).
-  - Required for using Vertex AI in non express mode.
-  - Example: `export GOOGLE_CLOUD_LOCATION="YOUR_PROJECT_LOCATION"`.
+  - Example: `export OPENAI_MODEL="qwen3-coder-plus"`
- **`GEMINI_SANDBOX`**:
  - Alternative to the `sandbox` setting in `settings.json`.
  - Accepts `true`, `false`, `docker`, `podman`, or a custom command string.
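As a toy illustration of the exclusion behavior described at the top of this section (not the CLI's real loader), the sketch below drops excluded keys when applying a parsed project-level `.env`; the `excluded` default mirrors `DEBUG` / `DEBUG_MODE` from the docs, and the function name is made up.

```ts
// Illustrative only: filter excluded variables out of a parsed project .env.
type EnvMap = Record<string, string>;

function applyProjectEnv(
  parsed: EnvMap,
  excluded: string[] = ['DEBUG', 'DEBUG_MODE'], // excludedProjectEnvVars default
): void {
  for (const [key, value] of Object.entries(parsed)) {
    if (excluded.includes(key)) continue;        // skip excluded project vars
    if (process.env[key] === undefined) process.env[key] = value;
  }
}

applyProjectEnv({ DEBUG: '1', OPENAI_MODEL: 'qwen3-coder-plus' });
console.log(process.env.OPENAI_MODEL); // qwen3-coder-plus; DEBUG was not applied
```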
@@ -427,8 +409,8 @@ The CLI automatically loads environment variables from an `.env` file. The loadi

Arguments passed directly when running the CLI can override other configurations for that specific session.

- **`--model <model_name>`** (**`-m <model_name>`**):
-  - Specifies the Gemini model to use for this session.
-  - Example: `npm start -- --model gemini-1.5-pro-latest`
+  - Specifies the Qwen model to use for this session.
+  - Example: `npm start -- --model qwen3-coder-plus`
- **`--prompt <your_prompt>`** (**`-p <your_prompt>`**):
  - Used to pass a prompt directly to the command. This invokes Qwen Code in a non-interactive mode.
- **`--prompt-interactive <your_prompt>`** (**`-i <your_prompt>`**):
@@ -495,7 +477,7 @@ Arguments passed directly when running the CLI can override other configurations

While not strictly configuration for the CLI's _behavior_, context files (defaulting to `QWEN.md` but configurable via the `contextFileName` setting) are crucial for configuring the _instructional context_ (also referred to as "memory"). This powerful feature allows you to give project-specific instructions, coding style guides, or any relevant background information to the AI, making its responses more tailored and accurate to your needs. The CLI includes UI elements, such as an indicator in the footer showing the number of loaded context files, to keep you informed about the active context.

-- **Purpose:** These Markdown files contain instructions, guidelines, or context that you want the Gemini model to be aware of during your interactions. The system is designed to manage this instructional context hierarchically.
+- **Purpose:** These Markdown files contain instructions, guidelines, or context that you want the Qwen model to be aware of during your interactions. The system is designed to manage this instructional context hierarchically.

### Example Context File Content (e.g., `QWEN.md`)
||||
@@ -4,7 +4,7 @@ Qwen Code automatically optimizes API costs through token caching when using API
|
||||
|
||||
**Token caching is available for:**
|
||||
|
||||
- API key users (Gemini API key)
|
||||
- API key users (Qwen API key)
|
||||
- Vertex AI users (with project and location setup)
|
||||
|
||||
**Token caching is not available for:**
|
||||
|
||||
@@ -41,7 +41,7 @@ For security and isolation, Qwen Code can be run inside a container. This is the

You can run the published sandbox image directly. This is useful for environments where you only have Docker and want to run the CLI.

```bash
# Run the published sandbox image
-docker run --rm -it ghcr.io/qwenlm/qwen-code:0.0.10
+docker run --rm -it ghcr.io/qwenlm/qwen-code:0.0.11
```

- **Using the `--sandbox` flag:**
  If you have Qwen Code installed locally (using the standard installation described above), you can instruct it to run inside the sandbox container.
@@ -1,6 +1,6 @@
# IDE Integration

-Gemini CLI can integrate with your IDE to provide a more seamless and context-aware experience. This integration allows the CLI to understand your workspace better and enables powerful features like native in-editor diffing.
+Qwen Code can integrate with your IDE to provide a more seamless and context-aware experience. This integration allows the CLI to understand your workspace better and enables powerful features like native in-editor diffing.

Currently, the only supported IDE is [Visual Studio Code](https://code.visualstudio.com/) and other editors that support VS Code extensions.
@@ -11,13 +11,13 @@ Currently, the only supported IDE is [Visual Studio Code](https://code.visualstu

- Your active cursor position.
- Any text you have selected (up to a 16KB limit; longer selections will be truncated).

-- **Native Diffing:** When Gemini suggests code modifications, you can view the changes directly within your IDE's native diff viewer. This allows you to review, edit, and accept or reject the suggested changes seamlessly.
+- **Native Diffing:** When Qwen suggests code modifications, you can view the changes directly within your IDE's native diff viewer. This allows you to review, edit, and accept or reject the suggested changes seamlessly.

-- **VS Code Commands:** You can access Gemini CLI features directly from the VS Code Command Palette (`Cmd+Shift+P` or `Ctrl+Shift+P`):
-  - `Gemini CLI: Run`: Starts a new Gemini CLI session in the integrated terminal.
-  - `Gemini CLI: Accept Diff`: Accepts the changes in the active diff editor.
-  - `Gemini CLI: Close Diff Editor`: Rejects the changes and closes the active diff editor.
-  - `Gemini CLI: View Third-Party Notices`: Displays the third-party notices for the extension.
+- **VS Code Commands:** You can access Qwen Code features directly from the VS Code Command Palette (`Cmd+Shift+P` or `Ctrl+Shift+P`):
+  - `Qwen Code: Run`: Starts a new Qwen Code session in the integrated terminal.
+  - `Qwen Code: Accept Diff`: Accepts the changes in the active diff editor.
+  - `Qwen Code: Close Diff Editor`: Rejects the changes and closes the active diff editor.
+  - `Qwen Code: View Third-Party Notices`: Displays the third-party notices for the extension.

## Installation and Setup
@@ -25,11 +25,11 @@ There are three ways to set up the IDE integration:

### 1. Automatic Nudge (Recommended)

-When you run Gemini CLI inside a supported editor, it will automatically detect your environment and prompt you to connect. Answering "Yes" will automatically run the necessary setup, which includes installing the companion extension and enabling the connection.
+When you run Qwen Code inside a supported editor, it will automatically detect your environment and prompt you to connect. Answering "Yes" will automatically run the necessary setup, which includes installing the companion extension and enabling the connection.

### 2. Manual Installation from CLI

-If you previously dismissed the prompt or want to install the extension manually, you can run the following command inside Gemini CLI:
+If you previously dismissed the prompt or want to install the extension manually, you can run the following command inside Qwen Code:

```
/ide install
```

@@ -41,8 +41,8 @@ This will find the correct extension for your IDE and install it.

You can also install the extension directly from a marketplace.

-- **For Visual Studio Code:** Install from the [VS Code Marketplace](https://marketplace.visualstudio.com/items?itemName=google.gemini-cli-vscode-ide-companion).
-- **For VS Code Forks:** To support forks of VS Code, the extension is also published on the [Open VSX Registry](https://open-vsx.org/extension/google/gemini-cli-vscode-ide-companion). Follow your editor's instructions for installing extensions from this registry.
+- **For Visual Studio Code:** Install from the [VS Code Marketplace](https://marketplace.visualstudio.com/items?itemName=qwenlm.qwen-code-vscode-ide-companion).
+- **For VS Code Forks:** To support forks of VS Code, the extension is also published on the [Open VSX Registry](https://open-vsx.org/extension/qwenlm/qwen-code-vscode-ide-companion). Follow your editor's instructions for installing extensions from this registry.

After any installation method, it's recommended to open a new terminal window to ensure the integration is activated correctly. Once installed, you can use `/ide enable` to connect.
@@ -61,7 +61,7 @@ You can control the IDE integration from within the CLI:

```
/ide disable
```

-When enabled, Gemini CLI will automatically attempt to connect to the IDE companion extension.
+When enabled, Qwen Code will automatically attempt to connect to the IDE companion extension.

### Checking the Status

@@ -83,14 +83,14 @@ When you ask Gemini to modify a file, it can open a diff view directly in your e

- Click the **checkmark icon** in the diff editor's title bar.
- Save the file (e.g., with `Cmd+S` or `Ctrl+S`).
-- Open the Command Palette and run **Gemini CLI: Accept Diff**.
+- Open the Command Palette and run **Qwen Code: Accept Diff**.
- Respond with `yes` in the CLI when prompted.

**To reject a diff**, you can:

- Click the **'x' icon** in the diff editor's title bar.
- Close the diff editor tab.
-- Open the Command Palette and run **Gemini CLI: Close Diff Editor**.
+- Open the Command Palette and run **Qwen Code: Close Diff Editor**.
- Respond with `no` in the CLI when prompted.

You can also **modify the suggested changes** directly in the diff view before accepting them.
@@ -99,10 +99,10 @@ If you select ‘Yes, allow always’ in the CLI, changes will no longer show up

## Using with Sandboxing

-If you are using Gemini CLI within a sandbox, please be aware of the following:
+If you are using Qwen Code within a sandbox, please be aware of the following:

- **On macOS:** The IDE integration requires network access to communicate with the IDE companion extension. You must use a Seatbelt profile that allows network access.
-- **In a Docker Container:** If you run Gemini CLI inside a Docker (or Podman) container, the IDE integration can still connect to the VS Code extension running on your host machine. The CLI is configured to automatically find the IDE server on `host.docker.internal`. No special configuration is usually required, but you may need to ensure your Docker networking setup allows connections from the container to the host.
+- **In a Docker Container:** If you run Qwen Code inside a Docker (or Podman) container, the IDE integration can still connect to the VS Code extension running on your host machine. The CLI is configured to automatically find the IDE server on `host.docker.internal`. No special configuration is usually required, but you may need to ensure your Docker networking setup allows connections from the container to the host.
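To make the `host.docker.internal` behavior above concrete, here is a rough sketch (my own, not the CLI's code) of how a containerized process could locate the IDE companion server. The environment variable name comes from the troubleshooting section below; the in-container detection is an assumption.

```ts
// Illustrative only: pick the IDE server host/port for the companion connection.
function ideServerUrl(): string | undefined {
  const port = process.env.QWEN_CODE_IDE_SERVER_PORT;
  if (!port) return undefined; // companion extension not running
  // Inside Docker/Podman the host's loopback is reachable via host.docker.internal.
  const runningInContainer = process.env.SANDBOX !== undefined; // assumption
  const host = runningInContainer ? 'host.docker.internal' : '127.0.0.1';
  return `http://${host}:${port}`;
}

console.log(ideServerUrl() ?? 'IDE companion not detected');
```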
## Troubleshooting
@@ -111,9 +111,9 @@ If you encounter issues with IDE integration, here are some common error message

### Connection Errors

- **Message:** `🔴 Disconnected: Failed to connect to IDE companion extension for [IDE Name]. Please ensure the extension is running and try restarting your terminal. To install the extension, run /ide install.`
-  - **Cause:** Gemini CLI could not find the necessary environment variables (`GEMINI_CLI_IDE_WORKSPACE_PATH` or `GEMINI_CLI_IDE_SERVER_PORT`) to connect to the IDE. This usually means the IDE companion extension is not running or did not initialize correctly.
+  - **Cause:** Qwen Code could not find the necessary environment variables (`QWEN_CODE_IDE_WORKSPACE_PATH` or `QWEN_CODE_IDE_SERVER_PORT`) to connect to the IDE. This usually means the IDE companion extension is not running or did not initialize correctly.
  - **Solution:**
-    1. Make sure you have installed the **Gemini CLI Companion** extension in your IDE and that it is enabled.
+    1. Make sure you have installed the **Qwen Code Companion** extension in your IDE and that it is enabled.
    2. Open a new terminal window in your IDE to ensure it picks up the correct environment.

- **Message:** `🔴 Disconnected: IDE connection error. The connection was lost unexpectedly. Please try reconnecting by running /ide enable`

@@ -122,7 +122,7 @@ If you encounter issues with IDE integration, here are some common error message

### Configuration Errors

-- **Message:** `🔴 Disconnected: Directory mismatch. Gemini CLI is running in a different location than the open workspace in [IDE Name]. Please run the CLI from the same directory as your project's root folder.`
+- **Message:** `🔴 Disconnected: Directory mismatch. Qwen Code is running in a different location than the open workspace in [IDE Name]. Please run the CLI from the same directory as your project's root folder.`
  - **Cause:** The CLI's current working directory is outside the folder or workspace you have open in your IDE.
  - **Solution:** `cd` into the same directory that is open in your IDE and restart the CLI.
|
||||
|
||||
### General Errors
|
||||
|
||||
- **Message:** `IDE integration is not supported in your current environment. To use this feature, run Gemini CLI in one of these supported IDEs: [List of IDEs]`
|
||||
- **Cause:** You are running Gemini CLI in a terminal or environment that is not a supported IDE.
|
||||
- **Solution:** Run Gemini CLI from the integrated terminal of a supported IDE, like VS Code.
|
||||
- **Message:** `IDE integration is not supported in your current environment. To use this feature, run Qwen Code in one of these supported IDEs: [List of IDEs]`
|
||||
- **Cause:** You are running Qwen Code in a terminal or environment that is not a supported IDE.
|
||||
- **Solution:** Run Qwen Code from the integrated terminal of a supported IDE, like VS Code.
|
||||
|
||||
- **Message:** `No installer is available for [IDE Name]. Please install the IDE companion manually from its marketplace.`
|
||||
- **Cause:** You ran `/ide install`, but the CLI does not have an automated installer for your specific IDE.
|
||||
- **Solution:** Open your IDE's extension marketplace, search for "Gemini CLI Companion", and install it manually.
|
||||
- **Solution:** Open your IDE's extension marketplace, search for "Qwen Code Companion", and install it manually.
|
||||
|
||||
@@ -148,7 +148,7 @@ This command will do the following:

3. Create the package tarballs that would be published to npm.
4. Print a summary of the packages that would be published.

-You can then inspect the generated tarballs to ensure that they contain the correct files and that the `package.json` files have been updated correctly. The tarballs will be created in the root of each package's directory (e.g., `packages/cli/google-gemini-cli-0.1.6.tgz`).
+You can then inspect the generated tarballs to ensure that they contain the correct files and that the `package.json` files have been updated correctly. The tarballs will be created in the root of each package's directory (e.g., `packages/cli/qwen-code-0.1.6.tgz`).

By performing a dry run, you can be confident that your changes to the packaging process are correct and that the packages will be published successfully.
@@ -187,7 +187,7 @@ This is the most critical stage where files are moved and transformed into their

- File movement: packages/cli/package.json -> (in-memory transformation) -> `bundle`/package.json
- Why: The final package.json must be different from the one used in development. Key changes include:
  - Removing devDependencies.
-  - Removing workspace-specific "dependencies": { "@gemini-cli/core": "workspace:\*" } and ensuring the core code is
+  - Removing workspace-specific "dependencies": { "@qwen-code/core": "workspace:\*" } and ensuring the core code is
    bundled directly into the final JavaScript file.
  - Ensuring the bin, main, and files fields point to the correct locations within the final package structure.
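A compact sketch of the kind of in-memory `package.json` transformation described above; this is illustrative, the field handling is simplified, and the function name and bundled entry-file name are assumptions of mine.

```ts
import fs from 'node:fs';

interface PackageJson {
  name: string;
  version: string;
  main?: string;
  files?: string[];
  dependencies?: Record<string, string>;
  devDependencies?: Record<string, string>;
}

// Drop dev/workspace-only fields and point entry points at the bundled output.
function toPublishablePackageJson(devPkg: PackageJson): PackageJson {
  const pkg: PackageJson = { ...devPkg };
  delete pkg.devDependencies; // dev-only tooling is not shipped
  // Workspace-linked packages are bundled into the final JS, so drop them.
  pkg.dependencies = Object.fromEntries(
    Object.entries(pkg.dependencies ?? {}).filter(([, spec]) => !spec.startsWith('workspace:')),
  );
  pkg.main = 'index.js';   // assumption: whatever single file the bundler emits
  pkg.files = ['index.js'];
  return pkg;
}

const pkg = JSON.parse(fs.readFileSync('packages/cli/package.json', 'utf8')) as PackageJson;
console.log(JSON.stringify(toPublishablePackageJson(pkg), null, 2));
```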
@@ -277,4 +277,4 @@ This tells NPM that any folder inside the `packages` directory is a separate pac

- **Simplified Dependency Management**: Running `npm install` from the root of the project will install all dependencies for all packages in the workspace and link them together. This means you don't need to run `npm install` in each package's directory.
- **Automatic Linking**: Packages within the workspace can depend on each other. When you run `npm install`, NPM will automatically create symlinks between the packages. This means that when you make changes to one package, the changes are immediately available to other packages that depend on it.
-- **Simplified Script Execution**: You can run scripts in any package from the root of the project using the `--workspace` flag. For example, to run the `build` script in the `cli` package, you can run `npm run build --workspace @google/gemini-cli`.
+- **Simplified Script Execution**: You can run scripts in any package from the root of the project using the `--workspace` flag. For example, to run the `build` script in the `cli` package, you can run `npm run build --workspace @qwen-code/qwen-code`.
@@ -192,7 +192,7 @@ Logs are timestamped records of specific events. The following events are logged

  - `error_type` (if applicable)
  - `metadata` (if applicable, dictionary of string -> any)

-- `qwen-code.api_request`: This event occurs when making a request to Gemini API.
+- `qwen-code.api_request`: This event occurs when making a request to Qwen API.
  - **Attributes**:
    - `model`
    - `request_text` (if applicable)

@@ -206,7 +206,7 @@ Logs are timestamped records of specific events. The following events are logged

    - `duration_ms`
    - `auth_type`

-- `qwen-code.api_response`: This event occurs upon receiving a response from Gemini API.
+- `qwen-code.api_response`: This event occurs upon receiving a response from Qwen API.
  - **Attributes**:
    - `model`
    - `status_code`

@@ -273,7 +273,7 @@ Metrics are numerical measurements of behavior over time. The following metrics

  - `user_added_lines` (Int, if applicable): Number of lines added/changed by user in AI proposed changes.
  - `user_removed_lines` (Int, if applicable): Number of lines removed/changed by user in AI proposed changes.

-- `gemini_cli.chat_compression` (Counter, Int): Counts chat compression operations
+- `qwen-code.chat_compression` (Counter, Int): Counts chat compression operations
  - **Attributes**:
    - `tokens_before`: (Int): Number of tokens in context prior to compression
    - `tokens_after`: (Int): Number of tokens in context after compression
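As an illustration of how a counter like `qwen-code.chat_compression` with `tokens_before` / `tokens_after` attributes could be recorded with the OpenTelemetry JS API (a generic sketch, not the project's telemetry module):

```ts
import { metrics } from '@opentelemetry/api';

// Obtain a meter and create the counter named in the docs above.
const meter = metrics.getMeter('qwen-code');
const chatCompressionCounter = meter.createCounter('qwen-code.chat_compression', {
  description: 'Counts chat compression operations',
});

// Record one compression event with its before/after token counts as attributes.
function recordChatCompression(tokensBefore: number, tokensAfter: number): void {
  chatCompressionCounter.add(1, {
    tokens_before: tokensBefore,
    tokens_after: tokensAfter,
  });
}

recordChatCompression(32_000, 8_000);
```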
@@ -157,7 +157,7 @@ search_file_content(pattern="function", include="*.js", maxResults=10)

- If `old_string` is provided, it reads the `file_path` and attempts to find exactly one occurrence of `old_string`.
- If one occurrence is found, it replaces it with `new_string`.
- **Enhanced Reliability (Multi-Stage Edit Correction):** To significantly improve the success rate of edits, especially when the model-provided `old_string` might not be perfectly precise, the tool incorporates a multi-stage edit correction mechanism.
-  - If the initial `old_string` isn't found or matches multiple locations, the tool can leverage the Gemini model to iteratively refine `old_string` (and potentially `new_string`).
+  - If the initial `old_string` isn't found or matches multiple locations, the tool can leverage the Qwen model to iteratively refine `old_string` (and potentially `new_string`).
  - This self-correction process attempts to identify the unique segment the model intended to modify, making the `edit` operation more robust even with slightly imperfect initial context.
- **Failure conditions:** Despite the correction mechanism, the tool will fail if:
  - `file_path` is not absolute or is outside the root directory.
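The exact-one-occurrence rule above is the crux of the `edit` tool. Below is a stripped-down sketch of that check (my own illustration, without the multi-stage correction):

```ts
import fs from 'node:fs';

// Replace old_string with new_string only if it occurs exactly once in the file.
// Returns an error message instead of editing when the match is missing or ambiguous.
function applySingleEdit(filePath: string, oldString: string, newString: string): string | null {
  const content = fs.readFileSync(filePath, 'utf8');
  const occurrences = content.split(oldString).length - 1;
  if (occurrences === 0) return 'old_string not found; a correction pass would refine it';
  if (occurrences > 1) return `old_string is ambiguous (${occurrences} matches); refusing to edit`;
  fs.writeFileSync(filePath, content.replace(oldString, newString));
  return null; // success
}
```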
@@ -25,7 +25,7 @@ The discovery process is orchestrated by `discoverMcpTools()`, which:

1. **Iterates through configured servers** from your `settings.json` `mcpServers` configuration
2. **Establishes connections** using appropriate transport mechanisms (Stdio, SSE, or Streamable HTTP)
3. **Fetches tool definitions** from each server using the MCP protocol
-4. **Sanitizes and validates** tool schemas for compatibility with the Gemini API
+4. **Sanitizes and validates** tool schemas for compatibility with the Qwen API
5. **Registers tools** in the global tool registry with conflict resolution
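A much-simplified sketch of a discovery loop like the one listed above, using the MCP TypeScript SDK with a stdio transport only. Treat the exact import paths as an assumption, and the server config shape, registry callback, and omitted sanitization as placeholders.

```ts
import { Client } from '@modelcontextprotocol/sdk/client/index.js';
import { StdioClientTransport } from '@modelcontextprotocol/sdk/client/stdio.js';

interface McpServerConfig { command: string; args?: string[] }

// Walk the configured servers, connect, list tools, and hand them to a registry.
async function discoverTools(
  servers: Record<string, McpServerConfig>,
  register: (serverName: string, toolName: string) => void,
): Promise<void> {
  for (const [name, cfg] of Object.entries(servers)) {
    const client = new Client({ name: 'qwen-code', version: '0.0.11' }, { capabilities: {} });
    const transport = new StdioClientTransport({ command: cfg.command, args: cfg.args ?? [] });
    await client.connect(transport);            // step 2: establish connection (stdio only here)
    const { tools } = await client.listTools(); // step 3: fetch tool definitions
    for (const tool of tools) register(name, tool.name); // step 5: register (sanitization omitted)
    await client.close();
  }
}
```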
### Execution Layer (`mcp-tool.ts`)

@@ -333,7 +333,7 @@ Upon successful connection:

1. **Tool listing:** The client calls the MCP server's tool listing endpoint
2. **Schema validation:** Each tool's function declaration is validated
3. **Tool filtering:** Tools are filtered based on `includeTools` and `excludeTools` configuration
-4. **Name sanitization:** Tool names are cleaned to meet Gemini API requirements:
+4. **Name sanitization:** Tool names are cleaned to meet Qwen API requirements:
   - Invalid characters (non-alphanumeric, underscore, dot, hyphen) are replaced with underscores
   - Names longer than 63 characters are truncated with middle replacement (`___`)
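A small sketch of a sanitizer that follows the two rules just listed; it is illustrative, and the project's real implementation may differ in details such as how the middle of a long name is trimmed.

```ts
// Replace disallowed characters and enforce the 63-character limit by
// keeping the ends of the name and joining them with "___".
function sanitizeToolName(name: string): string {
  const cleaned = name.replace(/[^A-Za-z0-9_.-]/g, '_');
  const MAX = 63;
  if (cleaned.length <= MAX) return cleaned;
  const keep = MAX - 3; // room for the "___" marker
  const head = cleaned.slice(0, Math.ceil(keep / 2));
  const tail = cleaned.slice(cleaned.length - Math.floor(keep / 2));
  return `${head}___${tail}`;
}

console.log(sanitizeToolName('my server/tool:with spaces'));
```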
@@ -468,7 +468,7 @@ Discovery State: COMPLETED

### Tool Usage

-Once discovered, MCP tools are available to the Gemini model like built-in tools. The model will automatically:
+Once discovered, MCP tools are available to the Qwen model like built-in tools. The model will automatically:

1. **Select appropriate tools** based on your requests
2. **Present confirmation dialogs** (unless the server is trusted)
@@ -566,7 +566,7 @@ The MCP integration tracks several states:

### Schema Compatibility

-- **Property stripping:** The system automatically removes certain schema properties (`$schema`, `additionalProperties`) for Gemini API compatibility
+- **Property stripping:** The system automatically removes certain schema properties (`$schema`, `additionalProperties`) for Qwen API compatibility
- **Name sanitization:** Tool names are automatically sanitized to meet API requirements
- **Conflict resolution:** Tool name conflicts between servers are resolved through automatic prefixing

@@ -620,7 +620,7 @@ When Qwen Code receives this response, it will:

2. Present the image data as a separate `inlineData` part.
3. Provide a clean, user-friendly summary in the CLI, indicating that both text and an image were received.

-This enables you to build sophisticated tools that can provide rich, multi-modal context to the Gemini model.
+This enables you to build sophisticated tools that can provide rich, multi-modal context to the Qwen model.

## MCP Prompts as Slash Commands
@@ -35,7 +35,7 @@ save_memory(fact="My preferred programming language is Python.")

Store a project-specific detail:

```
-save_memory(fact="The project I'm currently working on is called 'gemini-cli'.")
+save_memory(fact="The project I'm currently working on is called 'qwen-code'.")
```

## Important notes
@@ -42,7 +42,7 @@ web_fetch(url="https://arxiv.org/abs/2401.0001", prompt="What are the key findin

Analyze GitHub documentation:

```
-web_fetch(url="https://github.com/google/gemini-react/blob/main/README.md", prompt="What are the installation steps and main features?")
+web_fetch(url="https://github.com/QwenLM/Qwen/blob/main/README.md", prompt="What are the installation steps and main features?")
```

## Important notes
@@ -9,16 +9,6 @@ This guide provides solutions to common issues and debugging tips, including top

## Authentication or login errors

-- **Error: `Failed to login. Message: Request contains an invalid argument`**
-  - Users with Google Workspace accounts or Google Cloud accounts
-    associated with their Gmail accounts may not be able to activate the free
-    tier of the Google Code Assist plan.
-  - For Google Cloud accounts, you can work around this by setting
-    `GOOGLE_CLOUD_PROJECT` to your project ID.
-  - Alternatively, you can obtain the Gemini API key from
-    [Google AI Studio](http://aistudio.google.com/app/apikey), which also includes a
-    separate free tier.

- **Error: `UNABLE_TO_GET_ISSUER_CERT_LOCALLY` or `unable to get local issuer certificate`**
  - **Cause:** You may be on a corporate network with a firewall that intercepts and inspects SSL/TLS traffic. This often requires a custom root CA certificate to be trusted by Node.js.
  - **Solution:** Set the `NODE_EXTRA_CA_CERTS` environment variable to the absolute path of your corporate root CA certificate file.

@@ -37,7 +27,7 @@ This guide provides solutions to common issues and debugging tips, including top

Refer to [Qwen Code Configuration](./cli/configuration.md) for more details.

- **Q: Why don't I see cached token counts in my stats output?**
-  - A: Cached token information is only displayed when cached tokens are being used. This feature is available for API key users (Gemini API key or Google Cloud Vertex AI) but not for OAuth users (such as Google Personal/Enterprise accounts like Google Gmail or Google Workspace, respectively). This is because the Gemini Code Assist API does not support cached content creation. You can still view your total token usage using the `/stats` command.
+  - A: Cached token information is only displayed when cached tokens are being used. This feature is available for API key users (Qwen API key or Google Cloud Vertex AI) but not for OAuth users (such as Google Personal/Enterprise accounts like Google Gmail or Google Workspace, respectively). This is because the Qwen Code Assist API does not support cached content creation. You can still view your total token usage using the `/stats` command.

## Common error messages and solutions
package-lock.json (12, generated)
@@ -1,12 +1,12 @@
|
||||
{
|
||||
"name": "@qwen-code/qwen-code",
|
||||
"version": "0.0.10",
|
||||
"version": "0.0.11",
|
||||
"lockfileVersion": 3,
|
||||
"requires": true,
|
||||
"packages": {
|
||||
"": {
|
||||
"name": "@qwen-code/qwen-code",
|
||||
"version": "0.0.10",
|
||||
"version": "0.0.11",
|
||||
"workspaces": [
|
||||
"packages/*"
|
||||
],
|
||||
@@ -12512,7 +12512,7 @@
|
||||
},
|
||||
"packages/cli": {
|
||||
"name": "@qwen-code/qwen-code",
|
||||
"version": "0.0.10",
|
||||
"version": "0.0.11",
|
||||
"dependencies": {
|
||||
"@google/genai": "1.9.0",
|
||||
"@iarna/toml": "^2.2.5",
|
||||
@@ -12696,7 +12696,7 @@
|
||||
},
|
||||
"packages/core": {
|
||||
"name": "@qwen-code/qwen-code-core",
|
||||
"version": "0.0.10",
|
||||
"version": "0.0.11",
|
||||
"dependencies": {
|
||||
"@google/genai": "1.13.0",
|
||||
"@modelcontextprotocol/sdk": "^1.11.0",
|
||||
@@ -12861,7 +12861,7 @@
|
||||
},
|
||||
"packages/test-utils": {
|
||||
"name": "@qwen-code/qwen-code-test-utils",
|
||||
"version": "0.0.10",
|
||||
"version": "0.0.11",
|
||||
"license": "Apache-2.0",
|
||||
"devDependencies": {
|
||||
"typescript": "^5.3.3"
|
||||
@@ -12872,7 +12872,7 @@
|
||||
},
|
||||
"packages/vscode-ide-companion": {
|
||||
"name": "qwen-code-vscode-ide-companion",
|
||||
"version": "0.0.10",
|
||||
"version": "0.0.11",
|
||||
"license": "LICENSE",
|
||||
"dependencies": {
|
||||
"@modelcontextprotocol/sdk": "^1.15.1",
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "@qwen-code/qwen-code",
|
||||
"version": "0.0.10",
|
||||
"version": "0.0.11",
|
||||
"engines": {
|
||||
"node": ">=20.0.0"
|
||||
},
|
||||
@@ -13,7 +13,7 @@
|
||||
"url": "git+https://github.com/QwenLM/qwen-code.git"
|
||||
},
|
||||
"config": {
|
||||
"sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.0.10"
|
||||
"sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.0.11"
|
||||
},
|
||||
"scripts": {
|
||||
"start": "node scripts/start.js",
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "@qwen-code/qwen-code",
|
||||
"version": "0.0.10",
|
||||
"version": "0.0.11",
|
||||
"description": "Qwen Code",
|
||||
"repository": {
|
||||
"type": "git",
|
||||
@@ -25,7 +25,7 @@
|
||||
"dist"
|
||||
],
|
||||
"config": {
|
||||
"sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.0.10"
|
||||
"sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.0.11"
|
||||
},
|
||||
"dependencies": {
|
||||
"@google/genai": "1.9.0",
|
||||
|
||||
@@ -603,7 +603,9 @@ export async function loadCliConfig(
|
||||
interactive,
|
||||
trustedFolder,
|
||||
shouldUseNodePtyShell: settings.shouldUseNodePtyShell,
|
||||
skipStartupContext: settings.skipStartupContext,
|
||||
skipNextSpeakerCheck: settings.skipNextSpeakerCheck,
|
||||
toolOutputCharLimit: settings.toolOutputCharLimit,
|
||||
});
|
||||
}
|
||||
|
||||
|
||||
@@ -1198,16 +1198,16 @@ describe('Settings Loading and Merging', () => {
|
||||
delete process.env['TEST_PORT'];
|
||||
});
|
||||
|
||||
describe('when GEMINI_CLI_SYSTEM_SETTINGS_PATH is set', () => {
|
||||
describe('when QWEN_CODE_SYSTEM_SETTINGS_PATH is set', () => {
|
||||
const MOCK_ENV_SYSTEM_SETTINGS_PATH = '/mock/env/system/settings.json';
|
||||
|
||||
beforeEach(() => {
|
||||
process.env['GEMINI_CLI_SYSTEM_SETTINGS_PATH'] =
|
||||
process.env['QWEN_CODE_SYSTEM_SETTINGS_PATH'] =
|
||||
MOCK_ENV_SYSTEM_SETTINGS_PATH;
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
delete process.env['GEMINI_CLI_SYSTEM_SETTINGS_PATH'];
|
||||
delete process.env['QWEN_CODE_SYSTEM_SETTINGS_PATH'];
|
||||
});
|
||||
|
||||
it('should load system settings from the path specified in the environment variable', () => {
|
||||
|
||||
@@ -25,8 +25,8 @@ export const USER_SETTINGS_PATH = path.join(USER_SETTINGS_DIR, 'settings.json');
|
||||
export const DEFAULT_EXCLUDED_ENV_VARS = ['DEBUG', 'DEBUG_MODE'];
|
||||
|
||||
export function getSystemSettingsPath(): string {
|
||||
if (process.env['GEMINI_CLI_SYSTEM_SETTINGS_PATH']) {
|
||||
return process.env['GEMINI_CLI_SYSTEM_SETTINGS_PATH'];
|
||||
if (process.env['QWEN_CODE_SYSTEM_SETTINGS_PATH']) {
|
||||
return process.env['QWEN_CODE_SYSTEM_SETTINGS_PATH'];
|
||||
}
|
||||
if (platform() === 'darwin') {
|
||||
return '/Library/Application Support/QwenCode/settings.json';
|
||||
|
||||
@@ -577,6 +577,16 @@ export const SETTINGS_SCHEMA = {
|
||||
description: 'The maximum number of tokens allowed in a session.',
|
||||
showInDialog: false,
|
||||
},
|
||||
toolOutputCharLimit: {
|
||||
type: 'number',
|
||||
label: 'Tool Output Character Limit',
|
||||
category: 'General',
|
||||
requiresRestart: false,
|
||||
default: undefined as number | undefined,
|
||||
description:
|
||||
'Max characters for tool outputs (read_file, read_many_files, shell). If set, text content is truncated to this limit.',
|
||||
showInDialog: true,
|
||||
},
|
||||
systemPromptMappings: {
|
||||
type: 'object',
|
||||
label: 'System Prompt Mappings',
|
||||
@@ -595,12 +605,22 @@ export const SETTINGS_SCHEMA = {
|
||||
description: 'The API key for the Tavily API.',
|
||||
showInDialog: false,
|
||||
},
|
||||
skipStartupContext: {
|
||||
type: 'boolean',
|
||||
label: 'Skip Startup Context',
|
||||
category: 'General',
|
||||
requiresRestart: false,
|
||||
default: true,
|
||||
description:
|
||||
'Do not prepend environment/folder structure context or the initial acknowledgment message.',
|
||||
showInDialog: true,
|
||||
},
|
||||
skipNextSpeakerCheck: {
|
||||
type: 'boolean',
|
||||
label: 'Skip Next Speaker Check',
|
||||
category: 'General',
|
||||
requiresRestart: false,
|
||||
default: false,
|
||||
default: true,
|
||||
description: 'Skip the next speaker check.',
|
||||
showInDialog: true,
|
||||
},
|
||||
|
||||
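The `toolOutputCharLimit` setting added above caps text returned by tools such as `read_file`, `read_many_files`, and `shell`. A minimal sketch of how such a cap might be applied (my own helper, not the project's code):

```ts
// Truncate tool output to the configured character limit, appending a marker
// so the model knows content was cut. Returns the text unchanged when no limit is set.
function limitToolOutput(text: string, charLimit?: number): string {
  if (charLimit === undefined || text.length <= charLimit) return text;
  const marker = `\n[... output truncated at ${charLimit} characters ...]`;
  return text.slice(0, Math.max(0, charLimit - marker.length)) + marker;
}

console.log(limitToolOutput('x'.repeat(5000), 200).length); // 200
```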
@@ -12,6 +12,7 @@ import {
|
||||
isTelemetrySdkInitialized,
|
||||
GeminiEventType,
|
||||
parseAndFormatApiError,
|
||||
ApprovalMode,
|
||||
} from '@qwen-code/qwen-code-core';
|
||||
import { Content, Part, FunctionCall } from '@google/genai';
|
||||
|
||||
@@ -39,6 +40,12 @@ export async function runNonInteractive(
|
||||
|
||||
const geminiClient = config.getGeminiClient();
|
||||
|
||||
// In YOLO mode, disable next_speaker check to avoid auto-continue.
|
||||
if (config.getApprovalMode && config.getApprovalMode() === ApprovalMode.YOLO) {
|
||||
(config as unknown as { getSkipNextSpeakerCheck: () => boolean }).getSkipNextSpeakerCheck =
|
||||
() => true;
|
||||
}
|
||||
|
||||
const abortController = new AbortController();
|
||||
let currentMessages: Content[] = [
|
||||
{ role: 'user', parts: [{ text: input }] },
|
||||
|
||||
@@ -41,8 +41,8 @@ export function IdeIntegrationNudge({
|
||||
const { displayName: ideName } = getIdeInfo(ide);
|
||||
// Assume extension is already installed if the env variables are set.
|
||||
const isExtensionPreInstalled =
|
||||
!!process.env['GEMINI_CLI_IDE_SERVER_PORT'] &&
|
||||
!!process.env['GEMINI_CLI_IDE_WORKSPACE_PATH'];
|
||||
!!process.env['QWEN_CODE_IDE_SERVER_PORT'] &&
|
||||
!!process.env['QWEN_CODE_IDE_WORKSPACE_PATH'];
|
||||
|
||||
const OPTIONS: Array<RadioSelectItem<IdeIntegrationNudgeResult>> = [
|
||||
{
|
||||
|
||||
@@ -204,7 +204,7 @@ export const StatsDisplay: React.FC<StatsDisplayProps> = ({
|
||||
<StatRow title="Tool Calls:">
|
||||
<Text>
|
||||
{tools.totalCalls} ({' '}
|
||||
<Text color={theme.status.success}>✔ {tools.totalSuccess}</Text>{' '}
|
||||
<Text color={theme.status.success}>✓ {tools.totalSuccess}</Text>{' '}
|
||||
<Text color={theme.status.error}>✖ {tools.totalFail}</Text> )
|
||||
</Text>
|
||||
</StatRow>
|
||||
|
||||
@@ -7,7 +7,7 @@ exports[`<SessionSummaryDisplay /> > renders the summary display with a title 1`
|
||||
│ │
|
||||
│ Interaction Summary │
|
||||
│ Session ID: │
|
||||
│ Tool Calls: 0 ( ✔ 0 ✖ 0 ) │
|
||||
│ Tool Calls: 0 ( ✓ 0 ✖ 0 ) │
|
||||
│ Success Rate: 0.0% │
|
||||
│ Code Changes: +42 -15 │
|
||||
│ │
|
||||
|
||||
@@ -7,7 +7,7 @@ exports[`<StatsDisplay /> > Code Changes Display > displays Code Changes when li
|
||||
│ │
|
||||
│ Interaction Summary │
|
||||
│ Session ID: test-session-id │
|
||||
│ Tool Calls: 1 ( ✔ 1 ✖ 0 ) │
|
||||
│ Tool Calls: 1 ( ✓ 1 ✖ 0 ) │
|
||||
│ Success Rate: 100.0% │
|
||||
│ Code Changes: +42 -18 │
|
||||
│ │
|
||||
@@ -28,7 +28,7 @@ exports[`<StatsDisplay /> > Code Changes Display > hides Code Changes when no li
|
||||
│ │
|
||||
│ Interaction Summary │
|
||||
│ Session ID: test-session-id │
|
||||
│ Tool Calls: 1 ( ✔ 1 ✖ 0 ) │
|
||||
│ Tool Calls: 1 ( ✓ 1 ✖ 0 ) │
|
||||
│ Success Rate: 100.0% │
|
||||
│ │
|
||||
│ Performance │
|
||||
@@ -48,7 +48,7 @@ exports[`<StatsDisplay /> > Conditional Color Tests > renders success rate in gr
|
||||
│ │
|
||||
│ Interaction Summary │
|
||||
│ Session ID: test-session-id │
|
||||
│ Tool Calls: 10 ( ✔ 10 ✖ 0 ) │
|
||||
│ Tool Calls: 10 ( ✓ 10 ✖ 0 ) │
|
||||
│ Success Rate: 100.0% │
|
||||
│ │
|
||||
│ Performance │
|
||||
@@ -68,7 +68,7 @@ exports[`<StatsDisplay /> > Conditional Color Tests > renders success rate in re
|
||||
│ │
|
||||
│ Interaction Summary │
|
||||
│ Session ID: test-session-id │
|
||||
│ Tool Calls: 10 ( ✔ 5 ✖ 5 ) │
|
||||
│ Tool Calls: 10 ( ✓ 5 ✖ 5 ) │
|
||||
│ Success Rate: 50.0% │
|
||||
│ │
|
||||
│ Performance │
|
||||
@@ -88,7 +88,7 @@ exports[`<StatsDisplay /> > Conditional Color Tests > renders success rate in ye
|
||||
│ │
|
||||
│ Interaction Summary │
|
||||
│ Session ID: test-session-id │
|
||||
│ Tool Calls: 10 ( ✔ 9 ✖ 1 ) │
|
||||
│ Tool Calls: 10 ( ✓ 9 ✖ 1 ) │
|
||||
│ Success Rate: 90.0% │
|
||||
│ │
|
||||
│ Performance │
|
||||
@@ -108,7 +108,7 @@ exports[`<StatsDisplay /> > Conditional Rendering Tests > hides Efficiency secti
|
||||
│ │
|
||||
│ Interaction Summary │
|
||||
│ Session ID: test-session-id │
|
||||
│ Tool Calls: 0 ( ✔ 0 ✖ 0 ) │
|
||||
│ Tool Calls: 0 ( ✓ 0 ✖ 0 ) │
|
||||
│ Success Rate: 0.0% │
|
||||
│ │
|
||||
│ Performance │
|
||||
@@ -132,7 +132,7 @@ exports[`<StatsDisplay /> > Conditional Rendering Tests > hides User Agreement w
|
||||
│ │
|
||||
│ Interaction Summary │
|
||||
│ Session ID: test-session-id │
|
||||
│ Tool Calls: 2 ( ✔ 1 ✖ 1 ) │
|
||||
│ Tool Calls: 2 ( ✓ 1 ✖ 1 ) │
|
||||
│ Success Rate: 50.0% │
|
||||
│ │
|
||||
│ Performance │
|
||||
@@ -152,7 +152,7 @@ exports[`<StatsDisplay /> > Title Rendering > renders the custom title when a ti
|
||||
│ │
|
||||
│ Interaction Summary │
|
||||
│ Session ID: test-session-id │
|
||||
│ Tool Calls: 0 ( ✔ 0 ✖ 0 ) │
|
||||
│ Tool Calls: 0 ( ✓ 0 ✖ 0 ) │
|
||||
│ Success Rate: 0.0% │
|
||||
│ │
|
||||
│ Performance │
|
||||
@@ -172,7 +172,7 @@ exports[`<StatsDisplay /> > Title Rendering > renders the default title when no
|
||||
│ │
|
||||
│ Interaction Summary │
|
||||
│ Session ID: test-session-id │
|
||||
│ Tool Calls: 0 ( ✔ 0 ✖ 0 ) │
|
||||
│ Tool Calls: 0 ( ✓ 0 ✖ 0 ) │
|
||||
│ Success Rate: 0.0% │
|
||||
│ │
|
||||
│ Performance │
|
||||
@@ -192,7 +192,7 @@ exports[`<StatsDisplay /> > renders a table with two models correctly 1`] = `
|
||||
│ │
|
||||
│ Interaction Summary │
|
||||
│ Session ID: test-session-id │
|
||||
│ Tool Calls: 0 ( ✔ 0 ✖ 0 ) │
|
||||
│ Tool Calls: 0 ( ✓ 0 ✖ 0 ) │
|
||||
│ Success Rate: 0.0% │
|
||||
│ │
|
||||
│ Performance │
|
||||
@@ -221,7 +221,7 @@ exports[`<StatsDisplay /> > renders all sections when all data is present 1`] =
|
||||
│ │
|
||||
│ Interaction Summary │
|
||||
│ Session ID: test-session-id │
|
||||
│ Tool Calls: 2 ( ✔ 1 ✖ 1 ) │
|
||||
│ Tool Calls: 2 ( ✓ 1 ✖ 1 ) │
|
||||
│ Success Rate: 50.0% │
|
||||
│ User Agreement: 100.0% (1 reviewed) │
|
||||
│ │
|
||||
@@ -250,7 +250,7 @@ exports[`<StatsDisplay /> > renders only the Performance section in its zero sta
|
||||
│ │
|
||||
│ Interaction Summary │
|
||||
│ Session ID: test-session-id │
|
||||
│ Tool Calls: 0 ( ✔ 0 ✖ 0 ) │
|
||||
│ Tool Calls: 0 ( ✓ 0 ✖ 0 ) │
|
||||
│ Success Rate: 0.0% │
|
||||
│ │
|
||||
│ Performance │
|
||||
|
||||
@@ -80,6 +80,7 @@ export const ToolGroupMessage: React.FC<ToolGroupMessageProps> = ({
|
||||
marginLeft={1}
|
||||
borderDimColor={hasPending}
|
||||
borderColor={borderColor}
|
||||
gap={1}
|
||||
>
|
||||
{toolCalls.map((tool) => {
|
||||
const isConfirming = toolAwaitingApproval?.callId === tool.callId;
|
||||
|
||||
@@ -84,19 +84,19 @@ describe('<ToolMessage />', () => {
|
||||
StreamingState.Idle,
|
||||
);
|
||||
const output = lastFrame();
|
||||
expect(output).toContain('✔'); // Success indicator
|
||||
expect(output).toContain('✓'); // Success indicator
|
||||
expect(output).toContain('test-tool');
|
||||
expect(output).toContain('A tool for testing');
|
||||
expect(output).toContain('MockMarkdown:Test result');
|
||||
});
|
||||
|
||||
describe('ToolStatusIndicator rendering', () => {
|
||||
it('shows ✔ for Success status', () => {
|
||||
it('shows ✓ for Success status', () => {
|
||||
const { lastFrame } = renderWithContext(
|
||||
<ToolMessage {...baseProps} status={ToolCallStatus.Success} />,
|
||||
StreamingState.Idle,
|
||||
);
|
||||
expect(lastFrame()).toContain('✔');
|
||||
expect(lastFrame()).toContain('✓');
|
||||
});
|
||||
|
||||
it('shows o for Pending status', () => {
|
||||
@@ -138,7 +138,7 @@ describe('<ToolMessage />', () => {
|
||||
);
|
||||
expect(lastFrame()).toContain('⊷');
|
||||
expect(lastFrame()).not.toContain('MockRespondingSpinner');
|
||||
expect(lastFrame()).not.toContain('✔');
|
||||
expect(lastFrame()).not.toContain('✓');
|
||||
});
|
||||
|
||||
it('shows paused spinner for Executing status when streamingState is WaitingForConfirmation', () => {
|
||||
@@ -148,7 +148,7 @@ describe('<ToolMessage />', () => {
|
||||
);
|
||||
expect(lastFrame()).toContain('⊷');
|
||||
expect(lastFrame()).not.toContain('MockRespondingSpinner');
|
||||
expect(lastFrame()).not.toContain('✔');
|
||||
expect(lastFrame()).not.toContain('✓');
|
||||
});
|
||||
|
||||
it('shows MockRespondingSpinner for Executing status when streamingState is Responding', () => {
|
||||
@@ -157,7 +157,7 @@ describe('<ToolMessage />', () => {
|
||||
StreamingState.Responding, // Simulate app still responding
|
||||
);
|
||||
expect(lastFrame()).toContain('MockRespondingSpinner');
|
||||
expect(lastFrame()).not.toContain('✔');
|
||||
expect(lastFrame()).not.toContain('✓');
|
||||
});
|
||||
});
|
||||
|
||||
|
||||
@@ -269,7 +269,7 @@ const ToolStatusIndicator: React.FC<ToolStatusIndicatorProps> = ({
|
||||
/>
|
||||
)}
|
||||
{status === ToolCallStatus.Success && (
|
||||
<Text color={Colors.AccentGreen}>✔</Text>
|
||||
<Text color={Colors.AccentGreen}>✓</Text>
|
||||
)}
|
||||
{status === ToolCallStatus.Confirming && (
|
||||
<Text color={Colors.AccentYellow}>?</Text>
|
||||
@@ -321,7 +321,8 @@ const ToolInfo: React.FC<ToolInfo> = ({
|
||||
>
|
||||
<Text color={nameColor} bold>
|
||||
{name}
|
||||
</Text>{' '}
|
||||
</Text>
|
||||
<Text> </Text>
|
||||
<Text color={Colors.Gray}>{description}</Text>
|
||||
</Text>
|
||||
</Box>
|
||||
|
||||
@@ -288,7 +288,7 @@ const ToolCallItem: React.FC<{
|
||||
case 'awaiting_approval':
|
||||
return <Text color={theme.status.warning}>?</Text>;
|
||||
case 'success':
|
||||
return <Text color={color}>✔</Text>;
|
||||
return <Text color={color}>✓</Text>;
|
||||
case 'failed':
|
||||
return (
|
||||
<Text color={color} bold>
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "@qwen-code/qwen-code-core",
|
||||
"version": "0.0.10",
|
||||
"version": "0.0.11",
|
||||
"description": "Qwen Code Core",
|
||||
"repository": {
|
||||
"type": "git",
|
||||
|
||||
@@ -548,6 +548,17 @@ describe('oauth2', () => {
|
||||
expect(updatedAccountData.old).toContain('test@example.com');
|
||||
});
|
||||
|
||||
it('should handle Qwen module clearing gracefully', async () => {
|
||||
// This test verifies that clearCachedCredentialFile doesn't throw
|
||||
// when Qwen modules are available and can be cleared
|
||||
|
||||
// Since dynamic imports in tests are complex, we'll just verify
|
||||
// that the function completes without error and doesn't throw
|
||||
await expect(clearCachedCredentialFile()).resolves.not.toThrow();
|
||||
|
||||
// The actual Qwen clearing logic is tested separately in the Qwen module tests
|
||||
});
|
||||
|
||||
it('should clear the in-memory OAuth client cache', async () => {
|
||||
const mockSetCredentials = vi.fn();
|
||||
const mockGetAccessToken = vi
|
||||
|
||||
@@ -402,6 +402,25 @@ export async function clearCachedCredentialFile() {
|
||||
await clearCachedGoogleAccount();
|
||||
// Clear the in-memory OAuth client cache to force re-authentication
|
||||
clearOauthClientCache();
|
||||
|
||||
/**
|
||||
* Also clear Qwen SharedTokenManager cache and credentials file to prevent stale credentials
|
||||
* when switching between auth types
|
||||
* TODO: We do not depend on code_assist, we'll have to build an independent auth-cleaning procedure.
|
||||
*/
|
||||
try {
|
||||
const { SharedTokenManager } = await import(
|
||||
'../qwen/sharedTokenManager.js'
|
||||
);
|
||||
const { clearQwenCredentials } = await import('../qwen/qwenOAuth2.js');
|
||||
|
||||
const sharedManager = SharedTokenManager.getInstance();
|
||||
sharedManager.clearCache();
|
||||
|
||||
await clearQwenCredentials();
|
||||
} catch (qwenError) {
|
||||
console.debug('Could not clear Qwen credentials:', qwenError);
|
||||
}
|
||||
} catch (e) {
|
||||
console.error('Failed to clear cached credentials:', e);
|
||||
}
|
||||
|
||||
@@ -77,6 +77,11 @@ export interface BugCommandSettings {
|
||||
|
||||
export interface ChatCompressionSettings {
|
||||
contextPercentageThreshold?: number;
|
||||
/**
|
||||
* When true, disables automatic chat history compression while in YOLO approval mode.
|
||||
* Manual compression via commands remains available.
|
||||
*/
|
||||
disableInYolo?: boolean;
|
||||
}
|
||||
|
||||
export interface SummarizeToolOutputSettings {
|
||||
@@ -232,7 +237,10 @@ export interface ConfigParameters {
|
||||
interactive?: boolean;
|
||||
trustedFolder?: boolean;
|
||||
shouldUseNodePtyShell?: boolean;
|
||||
skipStartupContext?: boolean;
|
||||
skipNextSpeakerCheck?: boolean;
|
||||
// Character limit for tool text outputs (files and shell)
|
||||
toolOutputCharLimit?: number;
|
||||
}
|
||||
|
||||
export class Config {
|
||||
@@ -317,8 +325,10 @@ export class Config {
|
||||
private readonly interactive: boolean;
|
||||
private readonly trustedFolder: boolean | undefined;
|
||||
private readonly shouldUseNodePtyShell: boolean;
|
||||
private readonly skipStartupContext: boolean;
|
||||
private readonly skipNextSpeakerCheck: boolean;
|
||||
private initialized: boolean = false;
|
||||
private readonly toolOutputCharLimit?: number;
|
||||
|
||||
constructor(params: ConfigParameters) {
|
||||
this.sessionId = params.sessionId;
|
||||
@@ -398,7 +408,9 @@ export class Config {
|
||||
this.interactive = params.interactive ?? false;
|
||||
this.trustedFolder = params.trustedFolder;
|
||||
this.shouldUseNodePtyShell = params.shouldUseNodePtyShell ?? false;
|
||||
this.skipNextSpeakerCheck = params.skipNextSpeakerCheck ?? false;
|
||||
this.skipStartupContext = params.skipStartupContext ?? true;
|
||||
this.skipNextSpeakerCheck = params.skipNextSpeakerCheck ?? true;
|
||||
this.toolOutputCharLimit = params.toolOutputCharLimit;
|
||||
|
||||
// Web search
|
||||
this.tavilyApiKey = params.tavilyApiKey;
|
||||
@@ -857,10 +869,22 @@ export class Config {
|
||||
return this.shouldUseNodePtyShell;
|
||||
}
|
||||
|
||||
getSkipStartupContext(): boolean {
|
||||
return this.skipStartupContext;
|
||||
}
|
||||
|
||||
getSkipNextSpeakerCheck(): boolean {
|
||||
return this.skipNextSpeakerCheck;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the configured maximum number of characters for tool outputs.
|
||||
* If undefined, no character-based truncation is applied by tools.
|
||||
*/
|
||||
getToolOutputCharLimit(): number | undefined {
|
||||
return this.toolOutputCharLimit;
|
||||
}
|
||||
|
||||
async getGitService(): Promise<GitService> {
|
||||
if (!this.gitService) {
|
||||
this.gitService = new GitService(this.targetDir);
|
||||
|
||||
@@ -24,7 +24,7 @@ import {
  GeminiEventType,
  ChatCompressionInfo,
} from './turn.js';
import { Config } from '../config/config.js';
import { ApprovalMode, Config } from '../config/config.js';
import { UserTierId } from '../code_assist/types.js';
import {
  getCoreSystemPrompt,
@@ -228,19 +228,24 @@ export class GeminiClient {

  async startChat(extraHistory?: Content[]): Promise<GeminiChat> {
    this.forceFullIdeContext = true;
    const envParts = await getEnvironmentContext(this.config);
    const envParts = this.config.getSkipStartupContext()
      ? []
      : await getEnvironmentContext(this.config);
    const toolRegistry = this.config.getToolRegistry();
    const toolDeclarations = toolRegistry.getFunctionDeclarations();
    const tools: Tool[] = [{ functionDeclarations: toolDeclarations }];
    const history: Content[] = [
      {
        role: 'user',
        parts: envParts,
      },
      {
        role: 'model',
        parts: [{ text: 'Got it. Thanks for the context!' }],
      },
      ...(
        envParts.length
          ? [
              { role: 'user', parts: envParts },
              {
                role: 'model',
                parts: [{ text: 'Got it. Thanks for the context!' }],
              },
            ]
          : []
      ),
      ...(extraHistory ?? []),
    ];
    try {
@@ -473,10 +478,18 @@ export class GeminiClient {
    // Track the original model from the first call to detect model switching
    const initialModel = originalModel || this.config.getModel();

    const compressed = await this.tryCompressChat(prompt_id);
    const chatCompression = this.config.getChatCompression();
    const disableAutoCompressionInYolo =
      this.config.getApprovalMode() === ApprovalMode.YOLO &&
      // Default to disabling auto-compression in YOLO unless explicitly set to false
      (chatCompression?.disableInYolo ?? true);

    if (compressed) {
      yield { type: GeminiEventType.ChatCompressed, value: compressed };
    if (!disableAutoCompressionInYolo) {
      const compressed = await this.tryCompressChat(prompt_id);

      if (compressed) {
        yield { type: GeminiEventType.ChatCompressed, value: compressed };
      }
    }

    // Check session token limit after compression using accurate token counting
@@ -551,17 +564,25 @@ export class GeminiClient {

    const turn = new Turn(this.getChat(), prompt_id);

    const loopDetected = await this.loopDetector.turnStarted(signal);
    if (loopDetected) {
      yield { type: GeminiEventType.LoopDetected };
      return turn;
    // Disable loop detection entirely in YOLO mode
    const loopDetectionDisabled =
      this.config.getApprovalMode() === ApprovalMode.YOLO;

    if (!loopDetectionDisabled) {
      const loopDetected = await this.loopDetector.turnStarted(signal);
      if (loopDetected) {
        yield { type: GeminiEventType.LoopDetected };
        return turn;
      }
    }

    const resultStream = turn.run(request, signal);
    for await (const event of resultStream) {
      if (this.loopDetector.addAndCheck(event)) {
        yield { type: GeminiEventType.LoopDetected };
        return turn;
      if (!loopDetectionDisabled) {
        if (this.loopDetector.addAndCheck(event)) {
          yield { type: GeminiEventType.LoopDetected };
          return turn;
        }
      }
      yield event;
      if (event.type === GeminiEventType.Error) {
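The YOLO-mode handling above follows one pattern: read the approval mode once, then gate each optional safeguard (auto chat compression, loop detection) on it, with compression additionally honoring an explicit chatCompression.disableInYolo setting. A minimal standalone sketch of that gating, using illustrative names rather than the repository's actual types:

// Illustrative sketch only; names and signatures here are assumptions, not the project's API.
type ChatCompressionSettings = { disableInYolo?: boolean };

function shouldAutoCompress(isYolo: boolean, compression?: ChatCompressionSettings): boolean {
  // Default to disabling auto-compression in YOLO unless explicitly set to false.
  const disabledInYolo = isYolo && (compression?.disableInYolo ?? true);
  return !disabledInYolo;
}

function loopDetectionEnabled(isYolo: boolean): boolean {
  // Loop detection is skipped entirely in YOLO mode.
  return !isYolo;
}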
@@ -18,7 +18,7 @@ describe('IdeClient.validateWorkspacePath', () => {
    expect(result.isValid).toBe(true);
  });

  it('should return invalid if GEMINI_CLI_IDE_WORKSPACE_PATH is undefined', () => {
  it('should return invalid if QWEN_CODE_IDE_WORKSPACE_PATH is undefined', () => {
    const result = IdeClient.validateWorkspacePath(
      undefined,
      'VS Code',
@@ -28,7 +28,7 @@ describe('IdeClient.validateWorkspacePath', () => {
    expect(result.error).toContain('Failed to connect');
  });

  it('should return invalid if GEMINI_CLI_IDE_WORKSPACE_PATH is empty', () => {
  it('should return invalid if QWEN_CODE_IDE_WORKSPACE_PATH is empty', () => {
    const result = IdeClient.validateWorkspacePath(
      '',
      'VS Code',
@@ -26,17 +26,20 @@ const QWEN_LOCK_FILENAME = 'oauth_creds.lock';
// Token and Cache Configuration
const TOKEN_REFRESH_BUFFER_MS = 30 * 1000; // 30 seconds
const LOCK_TIMEOUT_MS = 10000; // 10 seconds lock timeout
const CACHE_CHECK_INTERVAL_MS = 1000; // 1 second cache check interval
const CACHE_CHECK_INTERVAL_MS = 5000; // 5 seconds cache check interval (increased from 1 second)

// Lock acquisition configuration (can be overridden for testing)
interface LockConfig {
  maxAttempts: number;
  attemptInterval: number;
  // Add exponential backoff parameters
  maxInterval: number;
}

const DEFAULT_LOCK_CONFIG: LockConfig = {
  maxAttempts: 50,
  attemptInterval: 200,
  maxAttempts: 20, // Reduced from 50 to prevent excessive waiting
  attemptInterval: 100, // Reduced from 200ms to check more frequently
  maxInterval: 2000, // Maximum interval for exponential backoff
};

/**
@@ -300,7 +303,25 @@ export class SharedTokenManager {

    try {
      const filePath = this.getCredentialFilePath();
      const stats = await fs.stat(filePath);
      // Add timeout to file stat operation
      const withTimeout = async <T>(
        promise: Promise<T>,
        timeoutMs: number,
      ): Promise<T> =>
        Promise.race([
          promise,
          new Promise<never>((_, reject) =>
            setTimeout(
              () =>
                reject(
                  new Error(`File operation timed out after ${timeoutMs}ms`),
                ),
              timeoutMs,
            ),
          ),
        ]);

      const stats = await withTimeout(fs.stat(filePath), 3000);
      const fileModTime = stats.mtimeMs;

      // Reload credentials if file has been modified since last cache
@@ -423,6 +444,7 @@ export class SharedTokenManager {
    qwenClient: IQwenOAuth2Client,
    forceRefresh = false,
  ): Promise<QwenCredentials> {
    const startTime = Date.now();
    const lockPath = this.getLockFilePath();

    try {
@@ -439,6 +461,15 @@ export class SharedTokenManager {
      // Acquire distributed file lock
      await this.acquireLock(lockPath);

      // Check if the operation is taking too long
      const lockAcquisitionTime = Date.now() - startTime;
      if (lockAcquisitionTime > 5000) {
        // 5 seconds warning threshold
        console.warn(
          `Token refresh lock acquisition took ${lockAcquisitionTime}ms`,
        );
      }

      // Double-check if another process already refreshed the token (unless force refresh is requested)
      // Skip the time-based throttling since we're already in a locked refresh operation
      await this.forceFileCheck(qwenClient);
@@ -456,6 +487,13 @@ export class SharedTokenManager {
      // Perform the actual token refresh
      const response = await qwenClient.refreshAccessToken();

      // Check if the token refresh is taking too long
      const totalOperationTime = Date.now() - startTime;
      if (totalOperationTime > 10000) {
        // 10 seconds warning threshold
        console.warn(`Token refresh operation took ${totalOperationTime}ms`);
      }

      if (!response || isErrorResponse(response)) {
        const errorData = response as ErrorData;
        throw new TokenManagerError(
@@ -551,9 +589,27 @@ export class SharedTokenManager {
    const dirPath = path.dirname(filePath);
    const tempPath = `${filePath}.tmp.${randomUUID()}`;

    // Add timeout wrapper for file operations
    const withTimeout = async <T>(
      promise: Promise<T>,
      timeoutMs: number,
    ): Promise<T> =>
      Promise.race([
        promise,
        new Promise<never>((_, reject) =>
          setTimeout(
            () => reject(new Error(`Operation timed out after ${timeoutMs}ms`)),
            timeoutMs,
          ),
        ),
      ]);

    // Create directory with restricted permissions
    try {
      await fs.mkdir(dirPath, { recursive: true, mode: 0o700 });
      await withTimeout(
        fs.mkdir(dirPath, { recursive: true, mode: 0o700 }),
        5000,
      );
    } catch (error) {
      throw new TokenManagerError(
        TokenError.FILE_ACCESS_ERROR,
@@ -566,18 +622,21 @@ export class SharedTokenManager {

    try {
      // Write to temporary file first with restricted permissions
      await fs.writeFile(tempPath, credString, { mode: 0o600 });
      await withTimeout(
        fs.writeFile(tempPath, credString, { mode: 0o600 }),
        5000,
      );

      // Atomic move to final location
      await fs.rename(tempPath, filePath);
      await withTimeout(fs.rename(tempPath, filePath), 5000);

      // Update cached file modification time atomically after successful write
      const stats = await fs.stat(filePath);
      const stats = await withTimeout(fs.stat(filePath), 5000);
      this.memoryCache.fileModTime = stats.mtimeMs;
    } catch (error) {
      // Clean up temp file if it exists
      try {
        await fs.unlink(tempPath);
        await withTimeout(fs.unlink(tempPath), 1000);
      } catch (_cleanupError) {
        // Ignore cleanup errors - temp file might not exist
      }
@@ -628,9 +687,11 @@ export class SharedTokenManager {
   * @throws TokenManagerError if lock cannot be acquired within timeout period
   */
  private async acquireLock(lockPath: string): Promise<void> {
    const { maxAttempts, attemptInterval } = this.lockConfig;
    const { maxAttempts, attemptInterval, maxInterval } = this.lockConfig;
    const lockId = randomUUID(); // Use random UUID instead of PID for security

    let currentInterval = attemptInterval;

    for (let attempt = 0; attempt < maxAttempts; attempt++) {
      try {
        // Attempt to create lock file atomically (exclusive mode)
@@ -671,8 +732,10 @@ export class SharedTokenManager {
          );
        }

        // Wait before retrying
        await new Promise((resolve) => setTimeout(resolve, attemptInterval));
        // Wait before retrying with exponential backoff
        await new Promise((resolve) => setTimeout(resolve, currentInterval));
        // Increase interval for next attempt (exponential backoff), but cap at maxInterval
        currentInterval = Math.min(currentInterval * 1.5, maxInterval);
      } else {
        throw new TokenManagerError(
          TokenError.FILE_ACCESS_ERROR,
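The token-manager changes above combine two patterns: slow filesystem calls are wrapped in a timeout via Promise.race, and lock acquisition retries back off exponentially up to a cap. A condensed, self-contained sketch of both (helper names and default values mirror the diff, but the retry-on-any-error behavior is a simplification and the functions are illustrative, not the repository's API):

async function withTimeout<T>(promise: Promise<T>, timeoutMs: number): Promise<T> {
  return Promise.race([
    promise,
    new Promise<never>((_, reject) =>
      setTimeout(
        () => reject(new Error(`Operation timed out after ${timeoutMs}ms`)),
        timeoutMs,
      ),
    ),
  ]);
}

async function retryWithBackoff<T>(
  op: () => Promise<T>,
  maxAttempts = 20,      // mirrors DEFAULT_LOCK_CONFIG.maxAttempts
  attemptInterval = 100, // ms, mirrors DEFAULT_LOCK_CONFIG.attemptInterval
  maxInterval = 2000,    // ms, cap for the exponential backoff
): Promise<T> {
  let currentInterval = attemptInterval;
  let lastError: unknown;
  for (let attempt = 0; attempt < maxAttempts; attempt++) {
    try {
      return await op();
    } catch (error) {
      lastError = error;
      // Wait, then grow the interval by 1.5x, capped at maxInterval.
      await new Promise((resolve) => setTimeout(resolve, currentInterval));
      currentInterval = Math.min(currentInterval * 1.5, maxInterval);
    }
  }
  throw lastError;
}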
@@ -781,11 +781,15 @@ export class SubAgentScope {
      );
    }

    const envParts = await getEnvironmentContext(this.runtimeContext);
    const envHistory: Content[] = [
      { role: 'user', parts: envParts },
      { role: 'model', parts: [{ text: 'Got it. Thanks for the context!' }] },
    ];
    const envParts = this.runtimeContext.getSkipStartupContext()
      ? []
      : await getEnvironmentContext(this.runtimeContext);
    const envHistory: Content[] = envParts.length
      ? [
          { role: 'user', parts: envParts },
          { role: 'model', parts: [{ text: 'Got it. Thanks for the context!' }] },
        ]
      : [];

    const start_history = [
      ...envHistory,
@@ -156,6 +156,8 @@ class EditToolInvocation implements ToolInvocation<EditToolParams, ToolResult> {
        params,
        this.config.getGeminiClient(),
        abortSignal,
        // Disable LLM-based corrections in YOLO mode
        this.config.getApprovalMode() !== ApprovalMode.YOLO,
      );
      finalOldString = correctedEdit.params.old_string;
      finalNewString = correctedEdit.params.new_string;
@@ -122,21 +122,31 @@ class ReadFileToolInvocation extends BaseToolInvocation<
    }

    let llmContent: PartUnion;
    const charLimit = this.config.getToolOutputCharLimit();
    if (result.isTruncated) {
      const [start, end] = result.linesShown!;
      const total = result.originalLineCount!;
      const nextOffset = this.params.offset
        ? this.params.offset + end - start + 1
        : end;
      llmContent = `
IMPORTANT: The file content has been truncated.
Status: Showing lines ${start}-${end} of ${total} total lines.
Action: To read more of the file, you can use the 'offset' and 'limit' parameters in a subsequent 'read_file' call. For example, to read the next section of the file, use offset: ${nextOffset}.

--- FILE CONTENT (truncated) ---
${result.llmContent}`;
      const header = `\nIMPORTANT: The file content has been truncated.\nStatus: Showing lines ${start}-${end} of ${total} total lines.\nAction: To read more of the file, you can use the 'offset' and 'limit' parameters in a subsequent 'read_file' call. For example, to read the next section of the file, use offset: ${nextOffset}.\n\n--- FILE CONTENT (truncated) ---\n`;
      const body = typeof result.llmContent === 'string' ? result.llmContent : '';
      let truncatedBody = body;
      if (typeof charLimit === 'number' && charLimit > 0 && body.length > charLimit) {
        truncatedBody = `${body.slice(0, charLimit)}\n[... File content truncated to ${charLimit} characters ...]`;
      }
      llmContent = header + truncatedBody;
    } else {
      llmContent = result.llmContent || '';
      let body = result.llmContent || '';
      if (
        typeof body === 'string' &&
        typeof charLimit === 'number' &&
        charLimit > 0 &&
        body.length > charLimit
      ) {
        body = `${body.slice(0, charLimit)}\n[... File content truncated to ${charLimit} characters ...]`;
      }
      llmContent = body;
    }

    const lines =
@@ -228,6 +228,7 @@ ${finalExclusionPatternsForDescription
    const skippedFiles: Array<{ path: string; reason: string }> = [];
    const processedFilesRelativePaths: string[] = [];
    const contentParts: PartListUnion = [];
    const charLimit = this.config.getToolOutputCharLimit();

    const effectiveExcludes = useDefaultExcludes
      ? [...DEFAULT_EXCLUDES, ...exclude]
@@ -436,6 +437,9 @@ ${finalExclusionPatternsForDescription
    );

    const results = await Promise.allSettled(fileProcessingPromises);
    let remainingContentChars =
      typeof charLimit === 'number' && charLimit > 0 ? charLimit : Number.POSITIVE_INFINITY;
    let globalTruncated = false;

    for (const result of results) {
      if (result.status === 'fulfilled') {
@@ -449,22 +453,47 @@ ${finalExclusionPatternsForDescription
          });
        } else {
          // Handle successfully processed files
          const { filePath, relativePathForDisplay, fileReadResult } =
            fileResult;
          const { filePath, relativePathForDisplay, fileReadResult } = fileResult;

          if (typeof fileReadResult.llmContent === 'string') {
            // Separator does not count toward char budget
            const separator = DEFAULT_OUTPUT_SEPARATOR_FORMAT.replace(
              '{filePath}',
              filePath,
            );
            let fileContentForLlm = '';

            let prefix = `${separator}\n\n`;
            // Warning header (if any) does not count toward char budget
            if (fileReadResult.isTruncated) {
              fileContentForLlm += `[WARNING: This file was truncated. To view the full content, use the 'read_file' tool on this specific file.]\n\n`;
              prefix += `[WARNING: This file was truncated. To view the full content, use the 'read_file' tool on this specific file.]\n\n`;
            }
            contentParts.push(prefix);

            // Apply global char budget to the actual file content only
            if (remainingContentChars > 0) {
              const body = fileReadResult.llmContent;
              if (body.length <= remainingContentChars) {
                contentParts.push(body + '\n\n');
                remainingContentChars -= body.length;
              } else {
                contentParts.push(
                  body.slice(0, Math.max(0, remainingContentChars)),
                );
                contentParts.push(
                  `\n[... Content truncated to ${charLimit} characters across files ...]\n`,
                );
                remainingContentChars = 0;
                globalTruncated = true;
              }
            } else if (!globalTruncated && typeof charLimit === 'number') {
              // No remaining budget, emit a single global truncation marker after first overflow
              contentParts.push(
                `\n[... Content truncated to ${charLimit} characters across files ...]\n`,
              );
              globalTruncated = true;
            }
            fileContentForLlm += fileReadResult.llmContent;
            contentParts.push(`${separator}\n\n${fileContentForLlm}\n\n`);
          } else {
            // This is a Part for image/pdf, which we don't add the separator to.
            // Non-text parts (image/pdf) do not count toward char budget
            contentParts.push(fileReadResult.llmContent);
          }
@@ -538,6 +567,10 @@ ${finalExclusionPatternsForDescription
        'No files matching the criteria were found or all were skipped.',
      );
    }
    if (globalTruncated && typeof charLimit === 'number') {
      displayMessage += `\n\nNote: Output truncated to ${charLimit} characters (text content only).`;
    }

    return {
      llmContent: contentParts,
      returnDisplay: displayMessage.trim(),
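In effect, the loop above draws every text file's content from one shared character budget: separators and truncation warnings are exempt, and once the budget is exhausted a single cross-file truncation marker is emitted. A simplified sketch of that accounting (function and field names here are illustrative, not the tool's real interface):

function packWithSharedBudget(
  files: Array<{ path: string; text: string }>,
  charLimit?: number,
): string[] {
  const parts: string[] = [];
  let remaining =
    typeof charLimit === 'number' && charLimit > 0 ? charLimit : Number.POSITIVE_INFINITY;
  let truncated = false;
  for (const file of files) {
    parts.push(`--- ${file.path} ---\n`); // separator is not charged to the budget
    if (remaining > 0) {
      if (file.text.length <= remaining) {
        parts.push(file.text + '\n');
        remaining -= file.text.length;
      } else {
        parts.push(file.text.slice(0, remaining));
        parts.push(`\n[... Content truncated to ${charLimit} characters across files ...]\n`);
        remaining = 0;
        truncated = true;
      }
    } else if (!truncated && typeof charLimit === 'number') {
      // Budget already spent: emit the marker once and skip further text.
      parts.push(`\n[... Content truncated to ${charLimit} characters across files ...]\n`);
      truncated = true;
    }
  }
  return parts;
}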
@@ -279,6 +279,24 @@ class ShellToolInvocation extends BaseToolInvocation<
      }
    }

    // Apply character truncation (middle) to both llmContent and returnDisplay if configured
    const charLimit = this.config.getToolOutputCharLimit();
    const middleTruncate = (s: string, limit: number): string => {
      if (!s || s.length <= limit) return s;
      const marker = '\n[... Output truncated due to length ...]\n';
      const keep = Math.max(0, Math.floor((limit - marker.length) / 2));
      if (keep <= 0) {
        return s.slice(0, limit);
      }
      return s.slice(0, keep) + marker + s.slice(s.length - keep);
    };
    if (typeof charLimit === 'number' && charLimit > 0) {
      llmContent = middleTruncate(llmContent, charLimit);
      if (returnDisplayMessage) {
        returnDisplayMessage = middleTruncate(returnDisplayMessage, charLimit);
      }
    }

    const summarizeConfig = this.config.getSummarizeToolOutputConfig();
    if (summarizeConfig && summarizeConfig[ShellTool.Name]) {
      const summary = await summarizeToolOutput(
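A quick worked example of the middle truncation above: the marker string is 42 characters long, so for a hypothetical charLimit of 100 the helper keeps Math.floor((100 - 42) / 2) = 29 characters from each end and the result is exactly 100 characters. A standalone copy for experimentation:

const middleTruncate = (s: string, limit: number): string => {
  if (!s || s.length <= limit) return s;
  const marker = '\n[... Output truncated due to length ...]\n'; // 42 characters
  const keep = Math.max(0, Math.floor((limit - marker.length) / 2));
  if (keep <= 0) {
    return s.slice(0, limit);
  }
  return s.slice(0, keep) + marker + s.slice(s.length - keep);
};

console.log(middleTruncate('x'.repeat(500), 100).length); // 100 (29 head + 42 marker + 29 tail)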
@@ -116,6 +116,8 @@ export async function getCorrectedFileContent(
      },
      config.getGeminiClient(),
      abortSignal,
      // Disable LLM-based corrections in YOLO mode
      config.getApprovalMode() !== ApprovalMode.YOLO,
    );
    correctedContent = correctedParams.new_string;
  } else {
@@ -124,6 +126,8 @@ export async function getCorrectedFileContent(
      proposedContent,
      config.getGeminiClient(),
      abortSignal,
      // Disable LLM-based corrections in YOLO mode
      config.getApprovalMode() !== ApprovalMode.YOLO,
    );
  }
  return { originalContent, correctedContent, fileExists };

@@ -160,6 +160,7 @@ export async function ensureCorrectEdit(
  originalParams: EditToolParams, // This is the EditToolParams from edit.ts, without \'corrected\'
  client: GeminiClient,
  abortSignal: AbortSignal,
  llmCorrectionsEnabled: boolean = true,
): Promise<CorrectedEditResult> {
  const cacheKey = `${currentContent}---${originalParams.old_string}---${originalParams.new_string}`;
  const cachedResult = editCorrectionCache.get(cacheKey);
@@ -178,7 +179,7 @@ export async function ensureCorrectEdit(
  let occurrences = countOccurrences(currentContent, finalOldString);

  if (occurrences === expectedReplacements) {
    if (newStringPotentiallyEscaped) {
    if (newStringPotentiallyEscaped && llmCorrectionsEnabled) {
      finalNewString = await correctNewStringEscaping(
        client,
        finalOldString,
@@ -225,7 +226,7 @@ export async function ensureCorrectEdit(

    if (occurrences === expectedReplacements) {
      finalOldString = unescapedOldStringAttempt;
      if (newStringPotentiallyEscaped) {
      if (newStringPotentiallyEscaped && llmCorrectionsEnabled) {
        finalNewString = await correctNewString(
          client,
          originalParams.old_string, // original old
@@ -263,38 +264,48 @@ export async function ensureCorrectEdit(
      }
    }

    const llmCorrectedOldString = await correctOldStringMismatch(
      client,
      currentContent,
      unescapedOldStringAttempt,
      abortSignal,
    );
    const llmOldOccurrences = countOccurrences(
      currentContent,
      llmCorrectedOldString,
    );
    if (llmCorrectionsEnabled) {
      const llmCorrectedOldString = await correctOldStringMismatch(
        client,
        currentContent,
        unescapedOldStringAttempt,
        abortSignal,
      );
      const llmOldOccurrences = countOccurrences(
        currentContent,
        llmCorrectedOldString,
      );

      if (llmOldOccurrences === expectedReplacements) {
        finalOldString = llmCorrectedOldString;
        occurrences = llmOldOccurrences;
      if (llmOldOccurrences === expectedReplacements) {
        finalOldString = llmCorrectedOldString;
        occurrences = llmOldOccurrences;

        if (newStringPotentiallyEscaped) {
          const baseNewStringForLLMCorrection = unescapeStringForGeminiBug(
            originalParams.new_string,
          );
          finalNewString = await correctNewString(
            client,
            originalParams.old_string, // original old
            llmCorrectedOldString, // corrected old
            baseNewStringForLLMCorrection, // base new for correction
            abortSignal,
          );
        if (newStringPotentiallyEscaped) {
          const baseNewStringForLLMCorrection = unescapeStringForGeminiBug(
            originalParams.new_string,
          );
          finalNewString = await correctNewString(
            client,
            originalParams.old_string, // original old
            llmCorrectedOldString, // corrected old
            baseNewStringForLLMCorrection, // base new for correction
            abortSignal,
          );
        }
      } else {
        // LLM correction also failed for old_string
        const result: CorrectedEditResult = {
          params: { ...originalParams },
          occurrences: 0, // Explicitly 0 as LLM failed
        };
        editCorrectionCache.set(cacheKey, result);
        return result;
      }
    } else {
      // LLM correction also failed for old_string
      // LLM corrections disabled -> return as-is to surface mismatch upstream
      const result: CorrectedEditResult = {
        params: { ...originalParams },
        occurrences: 0, // Explicitly 0 as LLM failed
        occurrences: 0,
      };
      editCorrectionCache.set(cacheKey, result);
      return result;
@@ -336,6 +347,7 @@ export async function ensureCorrectFileContent(
  content: string,
  client: GeminiClient,
  abortSignal: AbortSignal,
  llmCorrectionsEnabled: boolean = true,
): Promise<string> {
  const cachedResult = fileContentCorrectionCache.get(content);
  if (cachedResult) {
@@ -349,11 +361,9 @@ export async function ensureCorrectFileContent(
    return content;
  }

  const correctedContent = await correctStringEscaping(
    content,
    client,
    abortSignal,
  );
  const correctedContent = llmCorrectionsEnabled
    ? await correctStringEscaping(content, client, abortSignal)
    : content;
  fileContentCorrectionCache.set(content, correctedContent);
  return correctedContent;
}
@@ -11,7 +11,7 @@ import mime from 'mime-types';
import { FileSystemService } from '../services/fileSystemService.js';

// Constants for text file processing
export const DEFAULT_MAX_LINES_TEXT_FILE = 2000;
export const DEFAULT_MAX_LINES_TEXT_FILE = 500;
const MAX_LINE_LENGTH_TEXT_FILE = 2000;

// Default values for encoding and separator format
@@ -1,6 +1,6 @@
{
  "name": "@qwen-code/qwen-code-test-utils",
  "version": "0.0.10",
  "version": "0.0.11",
  "private": true,
  "main": "src/index.ts",
  "license": "Apache-2.0",

@@ -2,7 +2,7 @@
  "name": "qwen-code-vscode-ide-companion",
  "displayName": "Qwen Code Companion",
  "description": "Enable Qwen Code with direct access to your VS Code workspace.",
  "version": "0.0.10",
  "version": "0.0.11",
  "publisher": "qwenlm",
  "icon": "assets/icon.png",
  "repository": {