Compare commits

...

26 Commits

Author SHA1 Message Date
github-actions[bot]
c13149a3b5 chore(release): v0.0.2 2025-08-01 10:29:04 +00:00
奕桁
2786f10e8c update dev version 2025-08-01 18:28:16 +08:00
奕桁
964d211270 update dev version 2025-08-01 18:05:05 +08:00
Yiheng Xu
a09a9f2261 fix release.yml 2025-08-01 17:51:42 +08:00
奕桁
09c2a1871b release v0.0.1 2025-08-01 17:26:59 +08:00
Yiheng Xu
999f3af098 fix release workflow (#172) 2025-08-01 17:13:07 +08:00
Fan
bdf946a321 fix config (#163)
Co-authored-by: test <test@gmail.com>
2025-08-01 01:24:33 +08:00
奕桁
44de3f686c fix lint 2025-08-01 00:31:34 +08:00
Yiheng Xu
08415c9597 Update README.md 2025-08-01 00:28:32 +08:00
Junyang Lin
d360b86588 Update README.md 2025-07-31 21:38:38 +08:00
Ares Chen
66d43dbc5d Update README.md to clarify the requirement for using Modelscope inference API (#131) 2025-07-31 21:19:15 +08:00
Fan
dc6dcea93d Update: add telemetry service (#161)
* init: telemetry for qwen code

* fix

* update
2025-07-31 21:12:22 +08:00
Yiheng Xu
e27610789f Support openrouter (#162)
* support openrouter

* fix lint
2025-07-31 21:10:33 +08:00
奕桁
cff88350f4 update version 2025-07-31 20:37:17 +08:00
Yiheng Xu
1bfe5a796a Fix Sandbox docker mode (#160)
* Fix E2E

* Fix sandbox docker
2025-07-31 18:28:07 +08:00
koalazf.99
9f8ec8c0be fix type: sessionTokenLimit for token usage control 2025-07-31 15:23:31 +08:00
Yiheng Xu
bb6db7e492 Fix E2E (#156) 2025-07-31 15:16:23 +08:00
Allen Hutchison
718f68d247 feat: update /bug command to point to Qwen-Code repo (#154)
This commit updates the /bug command to point to the Qwen-Code
repository instead of the Gemini CLI repository.

The following files were modified:

- `packages/cli/src/ui/hooks/slashCommandProcessor.ts`: Updated the URL
  for the /bug command.
- `packages/cli/src/ui/hooks/slashCommandProcessor.test.ts`: Updated the
  test for the /bug command to reflect the new URL.
- `docs/cli/commands.md`: Updated the documentation for the /bug
  command and replaced all instances of "Gemini CLI" with "Qwen Code".
2025-07-31 14:04:43 +08:00
koalazf.99
b8e2852f96 update: installation instruction 2025-07-30 18:21:28 +08:00
Fan
df5c4e8079 Update: compress/hard constrained token usage (#136)
* setup truncation & folder structure

* fix: xml in prompt; qwen code in stats page

* fix: clear & continue logic

* preflight

* add maxSessionLimit in README
2025-07-30 18:14:24 +08:00
tanzhenxin
a08bcb2f41 Fix: Enhanced OpenAI Usage Logging and Response Metadata Handling (#141)
* feat: add support for cached tokens in OpenAI usage logging

* fix: enhance response metadata handling in OpenAI content generator

* fix: run format
2025-07-30 16:28:10 +08:00
tanzhenxin
8e3b413fdd Fix Default Model Configuration and Fallback Behavior (#142)
* rename the default model to 'qwen3-coder-plus'

* fix: use DEFAULT_GEMINI_MODEL fallback when OPENAI_MODEL is unset

* fix: npm run format
2025-07-30 16:27:28 +08:00
pomelo
bd0d3479c1 feat: Add systemPromptMappings Configuration Feature (#108)
* feat: update system prompt for qwen3-coder

* feat: add default systemPromptMappings for Qwen models

- Add default systemPromptMappings configuration for qwen3-coder-plus model
- Support DashScope compatible mode API endpoints
- Include Qwen coder system prompt template with git repository and sandbox placeholders
- Add comprehensive test coverage for default and custom systemPromptMappings
- Update documentation to reflect the new default configuration behavior
- Ensure backward compatibility with existing user configurations

* feat: remove default system prompt template

* fix: test ci

* feat: handle code indentation issues

* feat: update prompt.test.snapshots

* feat: add URL trailing slash compatibility for system prompt mappings

- Add normalizeUrl() function to standardize URLs by removing trailing slashes
- Add urlMatches() function to compare URLs ignoring trailing slash differences
- Replace direct includes() comparison with urlMatches() for baseUrl matching
- Add comprehensive tests to verify URL matching with/without trailing slashes
- Fixes issue where URLs like 'https://api.example.com' and 'https://api.example.com/' were treated as different

* feat: update code
2025-07-29 13:11:41 +08:00
Yiheng Xu
dc087deace Update README.md 2025-07-25 14:39:35 -07:00
Fan
d7890d6463 update: add info about modelscope-api (#116) 2025-07-25 13:15:25 +08:00
zeyu cui
778837507e add star history (#109)
* add star history

* preflight

---------

Co-authored-by: koalazf.99 <koala99.zf@gmail.com>
2025-07-25 00:11:47 +08:00
52 changed files with 2021 additions and 672 deletions


@@ -44,5 +44,7 @@ jobs:
- name: Run E2E tests
env:
GEMINI_API_KEY: ${{ secrets.GEMINI_API_KEY }}
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
OPENAI_BASE_URL: ${{ secrets.OPENAI_BASE_URL }}
OPENAI_MODEL: ${{ secrets.OPENAI_MODEL }}
run: npm run test:integration:${{ matrix.sandbox }} -- --verbose --keep-output


@@ -37,7 +37,7 @@ jobs:
environment:
name: production-release
url: ${{ github.server_url }}/${{ github.repository }}/releases/tag/${{ steps.version.outputs.RELEASE_TAG }}
if: github.repository == 'google-gemini/gemini-cli'
if: github.repository == 'QwenLM/qwen-code'
permissions:
contents: write
packages: write
@@ -95,7 +95,9 @@ jobs:
npm run test:integration:sandbox:none
npm run test:integration:sandbox:docker
env:
GEMINI_API_KEY: ${{ secrets.GEMINI_API_KEY }}
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
OPENAI_BASE_URL: ${{ secrets.OPENAI_BASE_URL }}
OPENAI_MODEL: ${{ secrets.OPENAI_MODEL }}
- name: Configure Git User
run: |
@@ -133,22 +135,22 @@ jobs:
uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # v4
with:
node-version: '20'
registry-url: 'https://wombat-dressing-room.appspot.com'
scope: '@google'
registry-url: 'https://registry.npmjs.org'
scope: '@qwen-code'
- name: Publish @google/gemini-cli-core
run: npm publish --workspace=@google/gemini-cli-core --tag=${{ steps.version.outputs.NPM_TAG }} ${{ steps.vars.outputs.is_dry_run == 'true' && '--dry-run' || '' }}
- name: Publish @qwen-code/qwen-code-core
run: npm publish --workspace=@qwen-code/qwen-code-core --access public --tag=${{ steps.version.outputs.NPM_TAG }} ${{ steps.vars.outputs.is_dry_run == 'true' && '--dry-run' || '' }}
env:
NODE_AUTH_TOKEN: ${{ secrets.WOMBAT_TOKEN_CORE }}
NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }}
- name: Install latest core package
if: steps.vars.outputs.is_dry_run == 'false'
run: npm install @google/gemini-cli-core@${{ steps.version.outputs.RELEASE_VERSION }} --workspace=@google/gemini-cli --save-exact
run: npm install @qwen-code/qwen-code-core@${{ steps.version.outputs.RELEASE_VERSION }} --workspace=@qwen-code/qwen-code --save-exact
- name: Publish @google/gemini-cli
run: npm publish --workspace=@google/gemini-cli --tag=${{ steps.version.outputs.NPM_TAG }} ${{ steps.vars.outputs.is_dry_run == 'true' && '--dry-run' || '' }}
- name: Publish @qwen-code/qwen-code
run: npm publish --workspace=@qwen-code/qwen-code --access public --tag=${{ steps.version.outputs.NPM_TAG }} ${{ steps.vars.outputs.is_dry_run == 'true' && '--dry-run' || '' }}
env:
NODE_AUTH_TOKEN: ${{ secrets.WOMBAT_TOKEN_CLI }}
NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }}
- name: Create GitHub Release and Tag
if: ${{ steps.vars.outputs.is_dry_run == 'false' }}


@@ -1,6 +1,6 @@
FROM docker.io/library/node:20-slim
ARG SANDBOX_NAME="gemini-cli-sandbox"
ARG SANDBOX_NAME="qwen-code-sandbox"
ARG CLI_VERSION_ARG
ENV SANDBOX="$SANDBOX_NAME"
ENV CLI_VERSION=$CLI_VERSION_ARG
@@ -39,12 +39,12 @@ ENV PATH=$PATH:/usr/local/share/npm-global/bin
# switch to non-root user node
USER node
# install gemini-cli and clean up
COPY packages/cli/dist/google-gemini-cli-*.tgz /usr/local/share/npm-global/gemini-cli.tgz
COPY packages/core/dist/google-gemini-cli-core-*.tgz /usr/local/share/npm-global/gemini-core.tgz
RUN npm install -g /usr/local/share/npm-global/gemini-cli.tgz /usr/local/share/npm-global/gemini-core.tgz \
# install qwen-code and clean up
COPY packages/cli/dist/qwen-code-*.tgz /usr/local/share/npm-global/qwen-code.tgz
COPY packages/core/dist/qwen-code-qwen-code-core-*.tgz /usr/local/share/npm-global/qwen-code-core.tgz
RUN npm install -g /usr/local/share/npm-global/qwen-code.tgz /usr/local/share/npm-global/qwen-code-core.tgz \
&& npm cache clean --force \
&& rm -f /usr/local/share/npm-global/gemini-{cli,core}.tgz
&& rm -f /usr/local/share/npm-global/qwen-{code,code-core}.tgz
# default entrypoint when none specified
CMD ["gemini"]
CMD ["qwen"]

README.md (264 changed lines)

@@ -1,11 +1,26 @@
# Qwen Code
<div align="center">
![Qwen Code Screenshot](./docs/assets/qwen-screenshot.png)
Qwen Code is a command-line AI workflow tool adapted from [**Gemini CLI**](https://github.com/google-gemini/gemini-cli) (Please refer to [this document](./README.gemini.md) for more details), optimized for [Qwen3-Coder](https://github.com/QwenLM/Qwen3-Coder) models with enhanced parser support & tool support.
[![npm version](https://img.shields.io/npm/v/@qwen-code/qwen-code.svg)](https://www.npmjs.com/package/@qwen-code/qwen-code)
[![License](https://img.shields.io/github/license/QwenLM/qwen-code.svg)](./LICENSE)
[![Node.js Version](https://img.shields.io/badge/node-%3E%3D20.0.0-brightgreen.svg)](https://nodejs.org/)
[![Downloads](https://img.shields.io/npm/dm/@qwen-code/qwen-code.svg)](https://www.npmjs.com/package/@qwen-code/qwen-code)
**AI-powered command-line workflow tool for developers**
[Installation](#installation) • [Quick Start](#quick-start) • [Features](#key-features) • [Documentation](./docs/) • [Contributing](./CONTRIBUTING.md)
</div>
Qwen Code is a powerful command-line AI workflow tool adapted from [**Gemini CLI**](https://github.com/google-gemini/gemini-cli) ([details](./README.gemini.md)), specifically optimized for [Qwen3-Coder](https://github.com/QwenLM/Qwen3-Coder) models. It enhances your development workflow with advanced code understanding, automated tasks, and intelligent assistance.
> [!WARNING]
> Qwen Code may issue multiple API calls per cycle, resulting in higher token usage, similar to Claude Code. We're actively working to enhance API efficiency and improve the overall developer experience.
> **Token Usage Notice**: Qwen Code may issue multiple API calls per cycle, resulting in higher token usage (similar to Claude Code). We're actively optimizing API efficiency.
>
> 💡 **Free Option**: ModelScope provides **2,000 free API calls per day** for users in mainland China. OpenRouter offers up to **1,000 free API calls per day** worldwide. For setup instructions, see [API Configuration](#api-configuration).
## Key Features
@@ -13,7 +28,7 @@ Qwen Code is a command-line AI workflow tool adapted from [**Gemini CLI**](https
- **Workflow Automation** - Automate operational tasks like handling pull requests and complex rebases
- **Enhanced Parser** - Adapted parser specifically optimized for Qwen-Coder models
## Quick Start
## Installation
### Prerequisites
@@ -23,20 +38,14 @@ Ensure you have [Node.js version 20](https://nodejs.org/en/download) or higher i
curl -qL https://www.npmjs.com/install.sh | sh
```
### Installation
### Install from npm
```bash
npm install -g @qwen-code/qwen-code
npm install -g @qwen-code/qwen-code@latest
qwen --version
```
Then run from anywhere:
```bash
qwen
```
Or you can install it from source:
### Install from source
```bash
git clone https://github.com/QwenLM/qwen-code.git
@@ -45,93 +54,234 @@ npm install
npm install -g .
```
### API Configuration
Set your Qwen API key (in a Qwen Code project, you can also set your API key in a `.env` file). The `.env` file should be placed in the root directory of your current project.
> ⚠️ **Notice:** <br>
> **If you are in mainland China, please go to https://bailian.console.aliyun.com/ to apply for your API key** <br>
> **If you are not in mainland China, please go to https://modelstudio.console.alibabacloud.com/ to apply for your API key**
## Quick Start
```bash
# If you are in mainland China, use the following URL:
# https://dashscope.aliyuncs.com/compatible-mode/v1
# If you are not in mainland China, use the following URL:
# https://dashscope-intl.aliyuncs.com/compatible-mode/v1
export OPENAI_API_KEY="your_api_key_here"
export OPENAI_BASE_URL="your_api_base_url_here"
export OPENAI_MODEL="your_api_model_here"
# Start Qwen Code
qwen
# Example commands
> Explain this codebase structure
> Help me refactor this function
> Generate unit tests for this module
```
### Session Management
Control your token usage with configurable session limits to optimize costs and performance.
#### Configure Session Token Limit
Create or edit `.qwen/settings.json` in your home directory:
```json
{
"sessionTokenLimit": 32000
}
```
#### Session Commands
- **`/compress`** - Compress conversation history to continue within token limits
- **`/clear`** - Clear all conversation history and start fresh
- **`/status`** - Check current token usage and limits
> 📝 **Note**: Session token limit applies to a single conversation, not cumulative API calls.
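How the limit is enforced: before each request, the client counts the tokens that would be sent (system prompt plus full conversation history) and blocks the call once the total exceeds the configured limit. Below is a minimal sketch of such a check; `TokenCounter` and `checkSessionLimit` are illustrative names, not the actual qwen-code API:

```ts
// Minimal sketch of a pre-request session token check.
// `TokenCounter` and `checkSessionLimit` are illustrative names,
// not the actual qwen-code API.
interface TokenCounter {
  countTokens(contents: string[]): Promise<number>;
}

async function checkSessionLimit(
  counter: TokenCounter,
  pendingRequest: string[], // system prompt + full conversation history
  sessionTokenLimit: number, // e.g. 32000 from settings.json
): Promise<boolean> {
  if (sessionTokenLimit <= 0) return true; // a non-positive limit disables the check
  const total = await counter.countTokens(pendingRequest);
  if (total > sessionTokenLimit) {
    console.error(
      `Session token limit exceeded: ${total} > ${sessionTokenLimit}. ` +
        'Use /compress or /clear, or raise sessionTokenLimit in settings.json.',
    );
    return false; // the caller should block the request
  }
  return true;
}
```

Counting the whole pending request, rather than summing per-call usage, is what makes the limit apply to the conversation as a whole instead of to cumulative API calls.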
### API Configuration
Qwen Code supports multiple API providers. You can configure your API key through environment variables or a `.env` file in your project root.
#### Configuration Methods
1. **Environment Variables**
```bash
export OPENAI_API_KEY="your_api_key_here"
export OPENAI_BASE_URL="your_api_endpoint"
export OPENAI_MODEL="your_model_choice"
```
2. **Project `.env` File**
Create a `.env` file in your project root:
```env
OPENAI_API_KEY=your_api_key_here
OPENAI_BASE_URL=your_api_endpoint
OPENAI_MODEL=your_model_choice
```
#### API Provider Options
> ⚠️ **Regional Notice:**
>
> - **Mainland China**: Use Alibaba Cloud Bailian or ModelScope
> - **International**: Use Alibaba Cloud ModelStudio or OpenRouter
<details>
<summary><b>🇨🇳 For Users in Mainland China</b></summary>
**Option 1: Alibaba Cloud Bailian** ([Apply for API Key](https://bailian.console.aliyun.com/))
```bash
export OPENAI_API_KEY="your_api_key_here"
export OPENAI_BASE_URL="https://dashscope.aliyuncs.com/compatible-mode/v1"
export OPENAI_MODEL="qwen3-coder-plus"
```
**Option 2: ModelScope (Free Tier)** ([Apply for API Key](https://modelscope.cn/docs/model-service/API-Inference/intro))
- ✅ **2,000 free API calls per day**
- ⚠️ Connect your Aliyun account to avoid authentication errors
```bash
export OPENAI_API_KEY="your_api_key_here"
export OPENAI_BASE_URL="https://api-inference.modelscope.cn/v1"
export OPENAI_MODEL="Qwen/Qwen3-Coder-480B-A35B-Instruct"
```
</details>
<details>
<summary><b>🌍 For International Users</b></summary>
**Option 1: Alibaba Cloud ModelStudio** ([Apply for API Key](https://modelstudio.console.alibabacloud.com/))
```bash
export OPENAI_API_KEY="your_api_key_here"
export OPENAI_BASE_URL="https://dashscope-intl.aliyuncs.com/compatible-mode/v1"
export OPENAI_MODEL="qwen3-coder-plus"
```
**Option 2: OpenRouter (Free Tier Available)** ([Apply for API Key](https://openrouter.ai/))
```bash
export OPENAI_API_KEY="your_api_key_here"
export OPENAI_BASE_URL="https://openrouter.ai/api/v1"
export OPENAI_MODEL="qwen/qwen3-coder:free"
```
</details>
## Usage Examples
### Explore Codebases
### 🔍 Explore Codebases
```sh
```bash
cd your-project/
qwen
# Architecture analysis
> Describe the main pieces of this system's architecture
> What are the key dependencies and how do they interact?
> Find all API endpoints and their authentication methods
```
### Code Development
### 💻 Code Development
```sh
```bash
# Refactoring
> Refactor this function to improve readability and performance
> Convert this class to use dependency injection
> Split this large module into smaller, focused components
# Code generation
> Create a REST API endpoint for user management
> Generate unit tests for the authentication module
> Add error handling to all database operations
```
### Automate Workflows
### 🔄 Automate Workflows
```sh
> Analyze git commits from the last 7 days, grouped by feature and team member
```
```bash
# Git automation
> Analyze git commits from the last 7 days, grouped by feature
> Create a changelog from recent commits
> Find all TODO comments and create GitHub issues
```sh
# File operations
> Convert all images in this directory to PNG format
> Rename all test files to follow the *.test.ts pattern
> Find and remove all console.log statements
```
### 🐛 Debugging & Analysis
```bash
# Performance analysis
> Identify performance bottlenecks in this React component
> Find all N+1 query problems in the codebase
# Security audit
> Check for potential SQL injection vulnerabilities
> Find all hardcoded credentials or API keys
```
## Popular Tasks
### Understand New Codebases
### 📚 Understand New Codebases
```text
> What are the core business logic components?
> What security mechanisms are in place?
> How does the data flow work?
> How does the data flow through the system?
> What are the main design patterns used?
> Generate a dependency graph for this module
```
### Code Refactoring & Optimization
### 🔨 Code Refactoring & Optimization
```text
> What parts of this module can be optimized?
> Help me refactor this class to follow better design patterns
> Help me refactor this class to follow SOLID principles
> Add proper error handling and logging
> Convert callbacks to async/await pattern
> Implement caching for expensive operations
```
### Documentation & Testing
### 📝 Documentation & Testing
```text
> Generate comprehensive JSDoc comments for this function
> Write unit tests for this component
> Create API documentation
> Generate comprehensive JSDoc comments for all public APIs
> Write unit tests with edge cases for this component
> Create API documentation in OpenAPI format
> Add inline comments explaining complex algorithms
> Generate a README for this module
```
### 🚀 Development Acceleration
```text
> Set up a new Express server with authentication
> Create a React component with TypeScript and tests
> Implement a rate limiter middleware
> Add database migrations for new schema
> Configure CI/CD pipeline for this project
```
## Commands & Shortcuts
### Session Commands
- `/help` - Display available commands
- `/clear` - Clear conversation history
- `/compress` - Compress history to save tokens
- `/status` - Show current session information
- `/exit` or `/quit` - Exit Qwen Code
### Keyboard Shortcuts
- `Ctrl+C` - Cancel current operation
- `Ctrl+D` - Exit (on empty line)
- `Up/Down` - Navigate command history
## Benchmark Results
### Terminal-Bench
### Terminal-Bench Performance
| Agent | Model | Accuracy |
| --------- | ------------------ | -------- |
| Qwen Code | Qwen3-Coder-480A35 | 37.5 |
## Project Structure
```
qwen-code/
├── packages/ # Core packages
├── docs/ # Documentation
├── examples/ # Example code
└── tests/ # Test files
```
| Qwen Code | Qwen3-Coder-480A35 | 37.5% |
| Qwen Code | Qwen3-Coder-30BA3B | 31.3% |
## Development & Contributing
@@ -148,3 +298,7 @@ This project is based on [Google Gemini CLI](https://github.com/google-gemini/ge
## License
[LICENSE](./LICENSE)
## Star History
[![Star History Chart](https://api.star-history.com/svg?repos=QwenLM/qwen-code&type=Date)](https://www.star-history.com/#QwenLM/qwen-code&Date)


@@ -1,13 +1,13 @@
# CLI Commands
Gemini CLI supports several built-in commands to help you manage your session, customize the interface, and control its behavior. These commands are prefixed with a forward slash (`/`), an at symbol (`@`), or an exclamation mark (`!`).
Qwen Code supports several built-in commands to help you manage your session, customize the interface, and control its behavior. These commands are prefixed with a forward slash (`/`), an at symbol (`@`), or an exclamation mark (`!`).
## Slash commands (`/`)
Slash commands provide meta-level control over the CLI itself.
- **`/bug`**
- **Description:** File an issue about Gemini CLI. By default, the issue is filed within the GitHub repository for Gemini CLI. The string you enter after `/bug` will become the headline for the bug being filed. The default `/bug` behavior can be modified using the `bugCommand` setting in your `.qwen/settings.json` files.
- **Description:** File an issue about Qwen Code. By default, the issue is filed within the GitHub repository for Qwen Code. The string you enter after `/bug` will become the headline for the bug being filed. The default `/bug` behavior can be modified using the `bugCommand` setting in your `.qwen/settings.json` files.
- **`/chat`**
- **Description:** Save and resume conversation history, allowing you to branch the conversation state interactively or resume a previous state in a later session.
@@ -32,10 +32,10 @@ Slash commands provide meta-level control over the CLI itself.
- **Description:** Open a dialog for selecting supported editors.
- **`/extensions`**
- **Description:** Lists all active extensions in the current Gemini CLI session. See [Gemini CLI Extensions](../extension.md).
- **Description:** Lists all active extensions in the current Qwen Code session. See [Qwen Code Extensions](../extension.md).
- **`/help`** (or **`/?`**)
- **Description:** Display help information about the Gemini CLI, including available commands and their usage.
- **Description:** Display help information about Qwen Code, including available commands and their usage.
- **`/mcp`**
- **Description:** List configured Model Context Protocol (MCP) servers, their connection status, server details, and available tools.
@@ -65,10 +65,10 @@ Slash commands provide meta-level control over the CLI itself.
- **Note:** Only available if the CLI is invoked with the `--checkpointing` option or configured via [settings](./configuration.md). See [Checkpointing documentation](../checkpointing.md) for more details.
- **`/stats`**
- **Description:** Display detailed statistics for the current Gemini CLI session, including token usage, cached token savings (when available), and session duration. Note: Cached token information is only displayed when cached tokens are being used, which occurs with API key authentication but not with OAuth authentication at this time.
- **Description:** Display detailed statistics for the current Qwen Code session, including token usage, cached token savings (when available), and session duration. Note: Cached token information is only displayed when cached tokens are being used, which occurs with API key authentication but not with OAuth authentication at this time.
- [**`/theme`**](./themes.md)
- **Description:** Open a dialog that lets you change the visual theme of Gemini CLI.
- **Description:** Open a dialog that lets you change the visual theme of Qwen Code.
- **`/auth`**
- **Description:** Open a dialog that lets you change the authentication method.
@@ -77,7 +77,7 @@ Slash commands provide meta-level control over the CLI itself.
- **Description:** Show version info. Please share this information when filing issues.
- [**`/tools`**](../tools/index.md)
- **Description:** Display a list of tools that are currently available within Gemini CLI.
- **Description:** Display a list of tools that are currently available within Qwen Code.
- **Sub-commands:**
- **`desc`** or **`descriptions`**:
- **Description:** Show detailed descriptions of each tool, including each tool's name with its full description as provided to the model.
@@ -88,7 +88,7 @@ Slash commands provide meta-level control over the CLI itself.
- **Description:** Display the Privacy Notice and allow users to select whether they consent to the collection of their data for service improvement purposes.
- **`/quit`** (or **`/exit`**)
- **Description:** Exit Gemini CLI.
- **Description:** Exit Qwen Code.
## At commands (`@`)
@@ -119,13 +119,13 @@ At commands are used to include the content of files or directories as part of y
## Shell mode & passthrough commands (`!`)
The `!` prefix lets you interact with your system's shell directly from within Gemini CLI.
The `!` prefix lets you interact with your system's shell directly from within Qwen Code.
- **`!<shell_command>`**
- **Description:** Execute the given `<shell_command>` in your system's default shell. Any output or errors from the command are displayed in the terminal.
- **Examples:**
- `!ls -la` (executes `ls -la` and returns to Gemini CLI)
- `!git status` (executes `git status` and returns to Gemini CLI)
- `!ls -la` (executes `ls -la` and returns to Qwen Code)
- `!git status` (executes `git status` and returns to Qwen Code)
- **`!` (Toggle shell mode)**
- **Description:** Typing `!` on its own toggles shell mode.
@@ -133,6 +133,6 @@ The `!` prefix lets you interact with your system's shell directly from within G
- When active, shell mode uses a different coloring and a "Shell Mode Indicator".
- While in shell mode, text you type is interpreted directly as a shell command.
- **Exiting shell mode:**
- When exited, the UI reverts to its standard appearance and normal Gemini CLI behavior resumes.
- When exited, the UI reverts to its standard appearance and normal Qwen Code behavior resumes.
- **Caution for all `!` usage:** Commands you execute in shell mode have the same permissions and impact as if you ran them directly in your terminal.


@@ -215,6 +215,38 @@ In addition to a project settings file, a project's `.gemini` directory can cont
"enableOpenAILogging": true
```
- **`systemPromptMappings`** (array):
- **Description:** Configures custom system prompt templates for specific model names and base URLs. This allows you to use different system prompts for different AI models or API endpoints.
- **Default:** `undefined` (uses default system prompt)
- **Properties:**
- **`baseUrls`** (array of strings, optional): Array of base URLs matched exactly against the `OPENAI_BASE_URL` environment variable. If not specified, any base URL matches.
- **`modelNames`** (array of strings, optional): Array of model names matched exactly against the `OPENAI_MODEL` environment variable. If not specified, any model matches.
- **`template`** (string): The system prompt template to use when both `baseUrls` and `modelNames` match. Supports placeholders:
- `{RUNTIME_VARS_IS_GIT_REPO}`: Replaced with `true` or `false` based on whether the current directory is a git repository
- `{RUNTIME_VARS_SANDBOX}`: Replaced with the sandbox type (e.g., `"sandbox-exec"`, `"docker"`, or empty string)
- **Example:**
```json
"systemPromptMappings": [
{
"baseUrls": [
"https://dashscope.aliyuncs.com/compatible-mode/v1",
"https://dashscope-intl.aliyuncs.com/compatible-mode/v1"
],
"modelNames": ["qwen3-coder-plus"],
"template": "SYSTEM_TEMPLATE:{\"name\":\"qwen3_coder\",\"params\":{\"is_git_repository\":{RUNTIME_VARS_IS_GIT_REPO},\"sandbox\":\"{RUNTIME_VARS_SANDBOX}\"}}"
},
{
"modelNames": ["gpt-4"],
"template": "You are a helpful AI assistant specialized in coding tasks. Current sandbox: {RUNTIME_VARS_SANDBOX}"
},
{
"baseUrls": ["api.openai.com"],
"template": "You are an AI coding assistant. Working in git repository: {RUNTIME_VARS_IS_GIT_REPO}"
}
]
```
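For intuition, the matching and placeholder substitution can be pictured with a short sketch. This is illustrative only (`resolveSystemPrompt` is not the actual internal API); the trailing-slash normalization mirrors the `normalizeUrl()` behavior described in #108:

```ts
// Illustrative sketch of systemPromptMappings resolution.
interface SystemPromptMapping {
  baseUrls?: string[];
  modelNames?: string[];
  template?: string;
}

// Drop trailing slashes so 'https://api.example.com' and
// 'https://api.example.com/' compare equal.
const normalizeUrl = (url: string): string => url.replace(/\/+$/, '');

function resolveSystemPrompt(
  mappings: SystemPromptMapping[],
  baseUrl: string, // value of OPENAI_BASE_URL
  model: string, // value of OPENAI_MODEL
  isGitRepo: boolean,
  sandbox: string,
): string | undefined {
  const match = mappings.find(
    (m) =>
      (!m.baseUrls ||
        m.baseUrls.some((u) => normalizeUrl(u) === normalizeUrl(baseUrl))) &&
      (!m.modelNames || m.modelNames.includes(model)),
  );
  // Substitute the documented runtime placeholders.
  return match?.template
    ?.replaceAll('{RUNTIME_VARS_IS_GIT_REPO}', String(isGitRepo))
    .replaceAll('{RUNTIME_VARS_SANDBOX}', sandbox);
}
```

In this sketch an entry that omits both `baseUrls` and `modelNames` acts as a catch-all, and the first matching entry wins.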
### Example `settings.json`:
```json
@@ -242,7 +274,22 @@ In addition to a project settings file, a project's `.gemini` directory can cont
"hideTips": false,
"hideBanner": false,
"maxSessionTurns": 10,
"enableOpenAILogging": true
"enableOpenAILogging": true,
"systemPromptMappings": [
{
"baseUrl": "dashscope",
"modelNames": ["qwen3"],
"template": "SYSTEM_TEMPLATE:{\"name\":\"qwen3_coder\",\"params\":{\"VARS_IS_GIT_REPO\":{VARS_IS_GIT_REPO},\"sandbox\":\"{sandbox}\"}}"
},
{
"modelNames": ["gpt-4"],
"template": "You are a helpful AI assistant specialized in coding tasks. Current sandbox: {sandbox}"
},
{
"baseUrl": "api.openai.com",
"template": "You are an AI coding assistant. Working in git repository: {VARS_IS_GIT_REPO}"
}
]
}
```

package-lock.json (generated, 20 changed lines)

@@ -1,18 +1,15 @@
{
"name": "@qwen-code/qwen-code",
"version": "0.0.1-alpha.8",
"version": "0.0.2",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "@qwen-code/qwen-code",
"version": "0.0.1-alpha.8",
"version": "0.0.2",
"workspaces": [
"packages/*"
],
"dependencies": {
"@qwen-code/qwen-code": "^0.0.1-alpha.8"
},
"bin": {
"qwen": "bundle/gemini.js"
},
@@ -10582,6 +10579,12 @@
"tslib": "^2"
}
},
"node_modules/tiktoken": {
"version": "1.0.21",
"resolved": "https://registry.npmjs.org/tiktoken/-/tiktoken-1.0.21.tgz",
"integrity": "sha512-/kqtlepLMptX0OgbYD9aMYbM7EFrMZCL7EoHM8Psmg2FuhXoo/bH64KqOiZGGwa6oS9TPdSEDKBnV2LuB8+5vQ==",
"license": "MIT"
},
"node_modules/tinybench": {
"version": "2.9.0",
"resolved": "https://registry.npmjs.org/tinybench/-/tinybench-2.9.0.tgz",
@@ -11939,7 +11942,7 @@
},
"packages/cli": {
"name": "@qwen-code/qwen-code",
"version": "0.0.1-alpha.8",
"version": "0.0.2",
"dependencies": {
"@qwen-code/qwen-code-core": "file:../core",
"@types/update-notifier": "^6.0.8",
@@ -12117,7 +12120,7 @@
},
"packages/core": {
"name": "@qwen-code/qwen-code-core",
"version": "0.0.1-alpha.8",
"version": "0.0.2",
"dependencies": {
"@google/genai": "1.8.0",
"@modelcontextprotocol/sdk": "^1.11.0",
@@ -12143,6 +12146,7 @@
"shell-quote": "^1.8.3",
"simple-git": "^3.28.0",
"strip-ansi": "^7.1.0",
"tiktoken": "^1.0.21",
"undici": "^7.10.0",
"ws": "^8.18.0"
},
@@ -12190,7 +12194,7 @@
},
"packages/vscode-ide-companion": {
"name": "@qwen-code/qwen-code-vscode-ide-companion",
"version": "0.0.1-alpha.8",
"version": "0.0.2",
"dependencies": {
"@modelcontextprotocol/sdk": "^1.15.1",
"cors": "^2.8.5",


@@ -1,6 +1,6 @@
{
"name": "@qwen-code/qwen-code",
"version": "0.0.1-alpha.8",
"version": "0.0.2",
"engines": {
"node": ">=20"
},
@@ -10,10 +10,10 @@
],
"repository": {
"type": "git",
"url": "git+http://gitlab.alibaba-inc.com/Qwen-Coder/qwen-code.git"
"url": "git+https://github.com/QwenLM/qwen-code.git"
},
"config": {
"sandboxImageUri": "us-docker.pkg.dev/gemini-code-dev/gemini-cli/sandbox:0.0.1-alpha.8"
"sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.0.2"
},
"scripts": {
"start": "node scripts/start.js",
@@ -81,8 +81,5 @@
"typescript-eslint": "^8.30.1",
"vitest": "^3.2.4",
"yargs": "^18.0.0"
},
"dependencies": {
"@qwen-code/qwen-code": "^0.0.1-alpha.8"
}
}


@@ -1,10 +1,10 @@
{
"name": "@qwen-code/qwen-code",
"version": "0.0.1-alpha.8",
"description": "Gemini CLI",
"version": "0.0.2",
"description": "Qwen Code",
"repository": {
"type": "git",
"url": "git+http://gitlab.alibaba-inc.com/Qwen-Coder/qwen-code.git"
"url": "git+https://github.com/QwenLM/qwen-code.git"
},
"type": "module",
"main": "dist/index.js",
@@ -25,7 +25,7 @@
"dist"
],
"config": {
"sandboxImageUri": "us-docker.pkg.dev/gemini-code-dev/gemini-cli/sandbox:0.0.1-alpha.8"
"sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.0.2"
},
"dependencies": {
"@qwen-code/qwen-code-core": "file:../core",


@@ -6,7 +6,7 @@
import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
import * as os from 'os';
import { loadCliConfig, parseArguments } from './config.js';
import { loadCliConfig, parseArguments, CliArgs } from './config.js';
import { Settings } from './settings.js';
import { Extension } from './extension.js';
import * as ServerConfig from '@qwen-code/qwen-code-core';
@@ -78,14 +78,15 @@ vi.mock('@qwen-code/qwen-code-core', async () => {
getTelemetryLogPromptsEnabled(): boolean {
return (
(this as unknown as { telemetrySettings?: { logPrompts?: boolean } })
.telemetrySettings?.logPrompts ?? true
.telemetrySettings?.logPrompts ?? false
);
}
getTelemetryOtlpEndpoint(): string {
return (
(this as unknown as { telemetrySettings?: { otlpEndpoint?: string } })
.telemetrySettings?.otlpEndpoint ?? 'http://localhost:4317'
.telemetrySettings?.otlpEndpoint ??
'http://tracing-analysis-dc-hz.aliyuncs.com:8090'
);
}
@@ -349,7 +350,9 @@ describe('loadCliConfig telemetry', () => {
const argv = await parseArguments();
const settings: Settings = { telemetry: { enabled: true } };
const config = await loadCliConfig(settings, [], 'test-session', argv);
expect(config.getTelemetryOtlpEndpoint()).toBe('http://localhost:4317');
expect(config.getTelemetryOtlpEndpoint()).toBe(
'http://tracing-analysis-dc-hz.aliyuncs.com:8090',
);
});
it('should use telemetry target from settings if CLI flag is not present', async () => {
@@ -408,12 +411,12 @@ describe('loadCliConfig telemetry', () => {
expect(config.getTelemetryLogPromptsEnabled()).toBe(false);
});
it('should use default log prompts (true) if no value is provided via CLI or settings', async () => {
it('should use default log prompts (false) if no value is provided via CLI or settings', async () => {
process.argv = ['node', 'script.js'];
const argv = await parseArguments();
const settings: Settings = { telemetry: { enabled: true } };
const config = await loadCliConfig(settings, [], 'test-session', argv);
expect(config.getTelemetryLogPromptsEnabled()).toBe(true);
expect(config.getTelemetryLogPromptsEnabled()).toBe(false);
});
it('should set enableOpenAILogging to true when --openai-logging flag is present', async () => {
@@ -1001,9 +1004,73 @@ describe('loadCliConfig ideMode', () => {
const config = await loadCliConfig(settings, [], 'test-session', argv);
expect(config.getIdeMode()).toBe(true);
const mcpServers = config.getMcpServers();
expect(mcpServers['_ide_server']).toBeDefined();
expect(mcpServers['_ide_server'].httpUrl).toBe('http://localhost:3000/mcp');
expect(mcpServers['_ide_server'].description).toBe('IDE connection');
expect(mcpServers['_ide_server'].trust).toBe(false);
expect(mcpServers?.['_ide_server']).toBeDefined();
expect(mcpServers?.['_ide_server']?.httpUrl).toBe(
'http://localhost:3000/mcp',
);
expect(mcpServers?.['_ide_server']?.description).toBe('IDE connection');
expect(mcpServers?.['_ide_server']?.trust).toBe(false);
});
});
describe('loadCliConfig systemPromptMappings', () => {
it('should use default systemPromptMappings when not provided in settings', async () => {
const mockSettings: Settings = {
theme: 'dark',
};
const mockExtensions: Extension[] = [];
const mockSessionId = 'test-session';
const mockArgv: CliArgs = {
model: 'test-model',
} as CliArgs;
const config = await loadCliConfig(
mockSettings,
mockExtensions,
mockSessionId,
mockArgv,
);
expect(config.getSystemPromptMappings()).toEqual([
{
baseUrls: [
'https://dashscope.aliyuncs.com/compatible-mode/v1/',
'https://dashscope-intl.aliyuncs.com/compatible-mode/v1/',
],
modelNames: ['qwen3-coder-plus'],
template:
'SYSTEM_TEMPLATE:{"name":"qwen3_coder","params":{"is_git_repository":{RUNTIME_VARS_IS_GIT_REPO},"sandbox":"{RUNTIME_VARS_SANDBOX}"}}',
},
]);
});
it('should use custom systemPromptMappings when provided in settings', async () => {
const customSystemPromptMappings = [
{
baseUrls: ['https://custom-api.com'],
modelNames: ['custom-model'],
template: 'Custom template',
},
];
const mockSettings: Settings = {
theme: 'dark',
systemPromptMappings: customSystemPromptMappings,
};
const mockExtensions: Extension[] = [];
const mockSessionId = 'test-session';
const mockArgv: CliArgs = {
model: 'test-model',
} as CliArgs;
const config = await loadCliConfig(
mockSettings,
mockExtensions,
mockSessionId,
mockArgv,
);
expect(config.getSystemPromptMappings()).toEqual(
customSystemPromptMappings,
);
});
});


@@ -382,6 +382,8 @@ export async function loadCliConfig(
model: argv.model!,
extensionContextFilePaths,
maxSessionTurns: settings.maxSessionTurns ?? -1,
sessionTokenLimit: settings.sessionTokenLimit ?? 32000,
maxFolderItems: settings.maxFolderItems ?? 20,
listExtensions: argv.listExtensions || false,
activeExtensions: activeExtensions.map((e) => ({
name: e.config.name,
@@ -394,6 +396,17 @@ export async function loadCliConfig(
? settings.enableOpenAILogging
: argv.openaiLogging) ?? false,
sampling_params: settings.sampling_params,
systemPromptMappings: settings.systemPromptMappings ?? [
{
baseUrls: [
'https://dashscope.aliyuncs.com/compatible-mode/v1/',
'https://dashscope-intl.aliyuncs.com/compatible-mode/v1/',
],
modelNames: ['qwen3-coder-plus'],
template:
'SYSTEM_TEMPLATE:{"name":"qwen3_coder","params":{"is_git_repository":{RUNTIME_VARS_IS_GIT_REPO},"sandbox":"{RUNTIME_VARS_SANDBOX}"}}',
},
],
});
}


@@ -85,6 +85,12 @@ export interface Settings {
// Setting for setting maximum number of user/model/tool turns in a session.
maxSessionTurns?: number;
// Setting for maximum token limit for conversation history before blocking requests
sessionTokenLimit?: number;
// Setting for maximum number of files and folders to show in folder structure
maxFolderItems?: number;
// Sampling parameters for content generation
sampling_params?: {
top_p?: number;
@@ -96,6 +102,13 @@ export interface Settings {
max_tokens?: number;
};
// System prompt mappings for different base URLs and model names
systemPromptMappings?: Array<{
baseUrls?: string[];
modelNames?: string[];
template?: string;
}>;
// Add other settings here.
ideMode?: boolean;
}


@@ -323,16 +323,34 @@ async function validateNonInterActiveAuth(
nonInteractiveConfig: Config,
) {
// making a special case for the cli. many headless environments might not have a settings.json set
// so if GEMINI_API_KEY is set, we'll use that. However since the oauth things are interactive anyway, we'll
// so if GEMINI_API_KEY or OPENAI_API_KEY is set, we'll use that. However since the oauth things are interactive anyway, we'll
// still expect that exists
if (!selectedAuthType && !process.env.GEMINI_API_KEY) {
if (
!selectedAuthType &&
!process.env.GEMINI_API_KEY &&
!process.env.OPENAI_API_KEY
) {
console.error(
`Please set an Auth method in your ${USER_SETTINGS_PATH} OR specify GEMINI_API_KEY env variable file before running`,
`Please set an Auth method in your ${USER_SETTINGS_PATH} OR specify GEMINI_API_KEY or OPENAI_API_KEY env variable before running`,
);
process.exit(1);
}
selectedAuthType = selectedAuthType || AuthType.USE_GEMINI;
// Determine auth type based on available environment variables
if (!selectedAuthType) {
if (process.env.OPENAI_API_KEY) {
selectedAuthType = AuthType.USE_OPENAI;
} else if (process.env.GEMINI_API_KEY) {
selectedAuthType = AuthType.USE_GEMINI;
}
}
// This should never happen due to the check above, but TypeScript needs assurance
if (!selectedAuthType) {
console.error('No valid authentication method found');
process.exit(1);
}
const err = validateAuthMethod(selectedAuthType);
if (err != null) {
console.error(err);


@@ -60,7 +60,9 @@ export const createMockCommandContext = (
byName: {},
},
},
promptCount: 0,
} as SessionStatsState,
resetSession: vi.fn(),
},
};


@@ -43,17 +43,22 @@ describe('clearCommand', () => {
expect(mockResetChat).toHaveBeenCalledTimes(1);
expect(mockContext.session.resetSession).toHaveBeenCalledTimes(1);
expect(mockContext.ui.clear).toHaveBeenCalledTimes(1);
// Check the order of operations.
const setDebugMessageOrder = (mockContext.ui.setDebugMessage as Mock).mock
.invocationCallOrder[0];
const resetChatOrder = mockResetChat.mock.invocationCallOrder[0];
const resetSessionOrder = (mockContext.session.resetSession as Mock).mock
.invocationCallOrder[0];
const clearOrder = (mockContext.ui.clear as Mock).mock
.invocationCallOrder[0];
expect(setDebugMessageOrder).toBeLessThan(resetChatOrder);
expect(resetChatOrder).toBeLessThan(clearOrder);
expect(resetChatOrder).toBeLessThan(resetSessionOrder);
expect(resetSessionOrder).toBeLessThan(clearOrder);
});
it('should not attempt to reset chat if config service is not available', async () => {
@@ -73,6 +78,7 @@ describe('clearCommand', () => {
'Clearing terminal and resetting chat.',
);
expect(mockResetChat).not.toHaveBeenCalled();
expect(nullConfigContext.session.resetSession).toHaveBeenCalledTimes(1);
expect(nullConfigContext.ui.clear).toHaveBeenCalledTimes(1);
});
});


@@ -12,6 +12,7 @@ export const clearCommand: SlashCommand = {
action: async (context, _args) => {
context.ui.setDebugMessage('Clearing terminal and resetting chat.');
await context.services.config?.getGeminiClient()?.resetChat();
context.session.resetSession();
context.ui.clear();
},
};


@@ -38,6 +38,7 @@ export interface CommandContext {
// Session-specific data
session: {
stats: SessionStatsState;
resetSession: () => void;
};
}


@@ -36,7 +36,7 @@ export const AboutBox: React.FC<AboutBoxProps> = ({
>
<Box marginBottom={1}>
<Text bold color={Colors.AccentPurple}>
About Gemini CLI
About Qwen Code
</Text>
</Box>
<Box flexDirection="row">


@@ -63,7 +63,7 @@ describe('<HistoryItemDisplay />', () => {
const { lastFrame } = render(
<HistoryItemDisplay {...baseItem} item={item} />,
);
expect(lastFrame()).toContain('About Gemini CLI');
expect(lastFrame()).toContain('About Qwen Code');
});
it('renders ModelStatsDisplay for "model_stats" type', () => {


@@ -50,6 +50,7 @@ interface SessionStatsContextValue {
stats: SessionStatsState;
startNewPrompt: () => void;
getPromptCount: () => number;
resetSession: () => void;
}
// --- Context Definition ---
@@ -109,13 +110,23 @@ export const SessionStatsProvider: React.FC<{ children: React.ReactNode }> = ({
[stats.promptCount],
);
const resetSession = useCallback(() => {
setStats({
sessionStartTime: new Date(),
metrics: uiTelemetryService.getMetrics(),
lastPromptTokenCount: uiTelemetryService.getLastPromptTokenCount(),
promptCount: 0,
});
}, []);
const value = useMemo(
() => ({
stats,
startNewPrompt,
getPromptCount,
resetSession,
}),
[stats, startNewPrompt, getPromptCount],
[stats, startNewPrompt, getPromptCount, resetSession],
);
return (


@@ -554,7 +554,7 @@ describe('useSlashCommandProcessor', () => {
* **Memory Usage:** ${memoryUsage}
`;
let url =
'https://github.com/google-gemini/gemini-cli/issues/new?template=bug_report.yml';
'https://github.com/QwenLM/Qwen-Code/issues/new?template=bug_report.yml';
if (description) {
url += `&title=${encodeURIComponent(description)}`;
}


@@ -172,6 +172,7 @@ export const useSlashCommandProcessor = (
},
session: {
stats: session.stats,
resetSession: session.resetSession,
},
}),
[
@@ -183,6 +184,7 @@ export const useSlashCommandProcessor = (
clearItems,
refreshStatic,
session.stats,
session.resetSession,
onDebugMessage,
],
);
@@ -538,7 +540,7 @@ export const useSlashCommandProcessor = (
// Filter out MCP tools by checking if they have a serverName property
const geminiTools = tools.filter((tool) => !('serverName' in tool));
let message = 'Available Gemini CLI tools:\n\n';
let message = 'Available Qwen Code tools:\n\n';
if (geminiTools.length > 0) {
geminiTools.forEach((tool) => {
@@ -618,7 +620,7 @@ export const useSlashCommandProcessor = (
`;
let bugReportUrl =
'https://github.com/google-gemini/gemini-cli/issues/new?template=bug_report.yml&title={title}&info={info}';
'https://github.com/QwenLM/Qwen-Code/issues/new?template=bug_report.yml&title={title}&info={info}';
const bugCommand = config?.getBugCommand();
if (bugCommand?.urlTemplate) {
bugReportUrl = bugCommand.urlTemplate;


@@ -452,6 +452,23 @@ export const useGeminiStream = (
[addItem, config],
);
const handleSessionTokenLimitExceededEvent = useCallback(
(value: { currentTokens: number; limit: number; message: string }) =>
addItem(
{
type: 'error',
text:
`🚫 Session token limit exceeded: ${value.currentTokens.toLocaleString()} tokens > ${value.limit.toLocaleString()} limit.\n\n` +
`💡 Solutions:\n` +
` • Start a new session: Use /clear command\n` +
` • Increase limit: Add "sessionTokenLimit": (e.g., 128000) to your settings.json\n` +
` • Compress history: Use /compress command to compress history`,
},
Date.now(),
),
[addItem],
);
const handleLoopDetectedEvent = useCallback(() => {
addItem(
{
@@ -501,6 +518,9 @@ export const useGeminiStream = (
case ServerGeminiEventType.MaxSessionTurns:
handleMaxSessionTurnsEvent();
break;
case ServerGeminiEventType.SessionTokenLimitExceeded:
handleSessionTokenLimitExceededEvent(event.value);
break;
case ServerGeminiEventType.LoopDetected:
// handle later because we want to move pending history to history
// before we add loop detected message to history
@@ -525,6 +545,7 @@ export const useGeminiStream = (
scheduleToolCalls,
handleChatCompressionEvent,
handleMaxSessionTurnsEvent,
handleSessionTokenLimitExceededEvent,
],
);


@@ -31,9 +31,9 @@ function getContainerPath(hostPath: string): string {
return hostPath;
}
const LOCAL_DEV_SANDBOX_IMAGE_NAME = 'gemini-cli-sandbox';
const SANDBOX_NETWORK_NAME = 'gemini-cli-sandbox';
const SANDBOX_PROXY_NAME = 'gemini-cli-sandbox-proxy';
const LOCAL_DEV_SANDBOX_IMAGE_NAME = 'qwen-code-sandbox';
const SANDBOX_NETWORK_NAME = 'qwen-code-sandbox';
const SANDBOX_PROXY_NAME = 'qwen-code-sandbox-proxy';
const BUILTIN_SEATBELT_PROFILES = [
'permissive-open',
'permissive-closed',
@@ -172,8 +172,8 @@ function entrypoint(workdir: string): string[] {
? 'npm run debug --'
: 'npm rebuild && npm run start --'
: process.env.DEBUG
? `node --inspect-brk=0.0.0.0:${process.env.DEBUG_PORT || '9229'} $(which gemini)`
: 'gemini';
? `node --inspect-brk=0.0.0.0:${process.env.DEBUG_PORT || '9229'} $(which qwen)`
: 'qwen';
const args = [...shellCmds, cliCmd, ...cliArgs];
@@ -517,6 +517,17 @@ export async function start_sandbox(
args.push('--env', `GOOGLE_API_KEY=${process.env.GOOGLE_API_KEY}`);
}
// copy OPENAI_API_KEY and related env vars for Qwen
if (process.env.OPENAI_API_KEY) {
args.push('--env', `OPENAI_API_KEY=${process.env.OPENAI_API_KEY}`);
}
if (process.env.OPENAI_BASE_URL) {
args.push('--env', `OPENAI_BASE_URL=${process.env.OPENAI_BASE_URL}`);
}
if (process.env.OPENAI_MODEL) {
args.push('--env', `OPENAI_MODEL=${process.env.OPENAI_MODEL}`);
}
// copy GOOGLE_GENAI_USE_VERTEXAI
if (process.env.GOOGLE_GENAI_USE_VERTEXAI) {
args.push(


@@ -1,12 +1,12 @@
{
"name": "@google/gemini-cli-core",
"version": "0.0.1-alpha.8",
"version": "0.0.3",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "@google/gemini-cli-core",
"version": "0.0.1-alpha.8",
"version": "0.0.3",
"dependencies": {
"@google/genai": "^1.4.0",
"@modelcontextprotocol/sdk": "^1.11.0",


@@ -1,10 +1,10 @@
{
"name": "@qwen-code/qwen-code-core",
"version": "0.0.1-alpha.8",
"version": "0.0.2",
"description": "Qwen Code Core",
"repository": {
"type": "git",
"url": "git+http://gitlab.alibaba-inc.com/Qwen-Coder/qwen-code.git"
"url": "git+https://github.com/QwenLM/qwen-code.git"
},
"type": "module",
"main": "dist/index.js",
@@ -44,6 +44,7 @@
"shell-quote": "^1.8.3",
"simple-git": "^3.28.0",
"strip-ansi": "^7.1.0",
"tiktoken": "^1.0.21",
"undici": "^7.10.0",
"ws": "^8.18.0"
},


@@ -56,7 +56,7 @@ export interface HttpOptions {
headers?: Record<string, string>;
}
export const CODE_ASSIST_ENDPOINT = 'https://cloudcode-pa.googleapis.com';
export const CODE_ASSIST_ENDPOINT = 'https://localhost:0'; // Disable Google Code Assist API Request
export const CODE_ASSIST_API_VERSION = 'v1internal';
export class CodeAssistServer implements ContentGenerator {


@@ -93,7 +93,7 @@ describe('Server Config (config.ts)', () => {
const QUESTION = 'test question';
const FULL_CONTEXT = false;
const USER_MEMORY = 'Test User Memory';
const TELEMETRY_SETTINGS = { enabled: false };
const TELEMETRY_SETTINGS = { enabled: true };
const EMBEDDING_MODEL = 'gemini-embedding';
const SESSION_ID = 'test-session-id';
const baseParams: ConfigParameters = {
@@ -234,11 +234,11 @@ describe('Server Config (config.ts)', () => {
expect(config.getTelemetryEnabled()).toBe(false);
});
it('Config constructor should default telemetry to default value if not provided', () => {
it('Config constructor should default telemetry to false if not provided', () => {
const paramsWithoutTelemetry: ConfigParameters = { ...baseParams };
delete paramsWithoutTelemetry.telemetry;
const config = new Config(paramsWithoutTelemetry);
expect(config.getTelemetryEnabled()).toBe(TELEMETRY_SETTINGS.enabled);
expect(config.getTelemetryEnabled()).toBe(false);
});
it('should have a getFileService method that returns FileDiscoveryService', () => {
@@ -285,20 +285,20 @@ describe('Server Config (config.ts)', () => {
expect(config.getTelemetryLogPromptsEnabled()).toBe(false);
});
it('should return default logPrompts setting (true) if not provided', () => {
it('should return default logPrompts setting (false) if not provided', () => {
const params: ConfigParameters = {
...baseParams,
telemetry: { enabled: true },
};
const config = new Config(params);
expect(config.getTelemetryLogPromptsEnabled()).toBe(true);
expect(config.getTelemetryLogPromptsEnabled()).toBe(false);
});
it('should return default logPrompts setting (true) if telemetry object is not provided', () => {
it('should return default logPrompts setting (false) if telemetry object is not provided', () => {
const paramsWithoutTelemetry: ConfigParameters = { ...baseParams };
delete paramsWithoutTelemetry.telemetry;
const config = new Config(paramsWithoutTelemetry);
expect(config.getTelemetryLogPromptsEnabled()).toBe(true);
expect(config.getTelemetryLogPromptsEnabled()).toBe(false);
});
it('should return default telemetry target if telemetry object is not provided', () => {


@@ -37,13 +37,11 @@ import {
DEFAULT_TELEMETRY_TARGET,
DEFAULT_OTLP_ENDPOINT,
TelemetryTarget,
StartSessionEvent,
} from '../telemetry/index.js';
import {
DEFAULT_GEMINI_EMBEDDING_MODEL,
DEFAULT_GEMINI_FLASH_MODEL,
} from './models.js';
import { ClearcutLogger } from '../telemetry/clearcut-logger/clearcut-logger.js';
export enum ApprovalMode {
DEFAULT = 'default',
@@ -140,6 +138,8 @@ export interface ConfigParameters {
model: string;
extensionContextFilePaths?: string[];
maxSessionTurns?: number;
sessionTokenLimit?: number;
maxFolderItems?: number;
listExtensions?: boolean;
activeExtensions?: ActiveExtension[];
noBrowser?: boolean;
@@ -154,6 +154,11 @@ export interface ConfigParameters {
temperature?: number;
max_tokens?: number;
};
systemPromptMappings?: Array<{
baseUrls?: string[];
modelNames?: string[];
template?: string;
}>;
}
export class Config {
@@ -204,8 +209,15 @@ export class Config {
temperature?: number;
max_tokens?: number;
};
private readonly systemPromptMappings?: Array<{
baseUrls?: string[];
modelNames?: string[];
template?: string;
}>;
private modelSwitchedDuringSession: boolean = false;
private readonly maxSessionTurns: number;
private readonly sessionTokenLimit: number;
private readonly maxFolderItems: number;
private readonly listExtensions: boolean;
private readonly _activeExtensions: ActiveExtension[];
flashFallbackHandler?: FlashFallbackHandler;
@@ -235,7 +247,7 @@ export class Config {
enabled: params.telemetry?.enabled ?? false,
target: params.telemetry?.target ?? DEFAULT_TELEMETRY_TARGET,
otlpEndpoint: params.telemetry?.otlpEndpoint ?? DEFAULT_OTLP_ENDPOINT,
logPrompts: params.telemetry?.logPrompts ?? true,
logPrompts: params.telemetry?.logPrompts ?? false,
};
this.usageStatisticsEnabled = params.usageStatisticsEnabled ?? true;
@@ -252,12 +264,15 @@ export class Config {
this.model = params.model;
this.extensionContextFilePaths = params.extensionContextFilePaths ?? [];
this.maxSessionTurns = params.maxSessionTurns ?? -1;
this.sessionTokenLimit = params.sessionTokenLimit ?? 32000;
this.maxFolderItems = params.maxFolderItems ?? 20;
this.listExtensions = params.listExtensions ?? false;
this._activeExtensions = params.activeExtensions ?? [];
this.noBrowser = params.noBrowser ?? false;
this.ideMode = params.ideMode ?? false;
this.enableOpenAILogging = params.enableOpenAILogging ?? false;
this.sampling_params = params.sampling_params;
this.systemPromptMappings = params.systemPromptMappings;
if (params.contextFileName) {
setGeminiMdFilename(params.contextFileName);
@@ -268,9 +283,10 @@ export class Config {
}
if (this.getUsageStatisticsEnabled()) {
ClearcutLogger.getInstance(this)?.logStartSessionEvent(
new StartSessionEvent(this),
);
// ClearcutLogger.getInstance(this)?.logStartSessionEvent(
// new StartSessionEvent(this),
// );
console.log('ClearcutLogger disabled - no data collection.');
} else {
console.log('Data collection is disabled.');
}
@@ -342,6 +358,14 @@ export class Config {
return this.maxSessionTurns;
}
getSessionTokenLimit(): number {
return this.sessionTokenLimit;
}
getMaxFolderItems(): number {
return this.maxFolderItems;
}
setQuotaErrorOccurred(value: boolean): void {
this.quotaErrorOccurred = value;
}
@@ -450,7 +474,7 @@ export class Config {
}
getTelemetryLogPromptsEnabled(): boolean {
return this.telemetrySettings.logPrompts ?? true;
return this.telemetrySettings.logPrompts ?? false;
}
getTelemetryOtlpEndpoint(): string {
@@ -540,6 +564,16 @@ export class Config {
return this.enableOpenAILogging;
}
getSystemPromptMappings():
| Array<{
baseUrls?: string[];
modelNames?: string[];
template?: string;
}>
| undefined {
return this.systemPromptMappings;
}
async refreshMemory(): Promise<{ memoryContent: string; fileCount: number }> {
const { memoryContent, fileCount } = await loadServerHierarchicalMemory(
this.getWorkingDir(),


@@ -4,6 +4,6 @@
* SPDX-License-Identifier: Apache-2.0
*/
export const DEFAULT_GEMINI_MODEL = 'qwen3-coder-max';
export const DEFAULT_GEMINI_MODEL = 'qwen3-coder-plus';
export const DEFAULT_GEMINI_FLASH_MODEL = 'gemini-2.5-flash';
export const DEFAULT_GEMINI_EMBEDDING_MODEL = 'gemini-embedding-001';

File diff suppressed because it is too large


@@ -33,6 +33,9 @@ describe('OpenAIContentGenerator Timeout Handling', () => {
// Reset mocks
vi.clearAllMocks();
// Mock environment variables
vi.stubEnv('OPENAI_BASE_URL', '');
// Mock config
mockConfig = {
getContentGeneratorConfig: vi.fn().mockReturnValue({
@@ -55,7 +58,7 @@ describe('OpenAIContentGenerator Timeout Handling', () => {
vi.mocked(OpenAI).mockImplementation(() => mockOpenAIClient);
// Create generator instance
generator = new OpenAIContentGenerator('test-api-key', 'gpt-4', mockConfig);
generator = new OpenAIContentGenerator('test-key', 'gpt-4', mockConfig);
});
afterEach(() => {


@@ -195,9 +195,12 @@ describe('Gemini Client (client.ts)', () => {
getWorkingDir: vi.fn().mockReturnValue('/test/dir'),
getFileService: vi.fn().mockReturnValue(fileService),
getMaxSessionTurns: vi.fn().mockReturnValue(0),
getSessionTokenLimit: vi.fn().mockReturnValue(32000),
getMaxFolderItems: vi.fn().mockReturnValue(20),
getQuotaErrorOccurred: vi.fn().mockReturnValue(false),
setQuotaErrorOccurred: vi.fn(),
getNoBrowser: vi.fn().mockReturnValue(false),
getSystemPromptMappings: vi.fn().mockReturnValue(undefined),
};
return mock as unknown as Config;
});


@@ -167,6 +167,7 @@ export class GeminiClient {
const platform = process.platform;
const folderStructure = await getFolderStructure(cwd, {
fileService: this.config.getFileService(),
maxItems: this.config.getMaxFolderItems(),
});
const context = `
This is the Qwen Code. We are setting up the context for our chat.
@@ -238,7 +239,10 @@ export class GeminiClient {
];
try {
const userMemory = this.config.getUserMemory();
const systemInstruction = getCoreSystemPrompt(userMemory);
const systemPromptMappings = this.config.getSystemPromptMappings();
const systemInstruction = getCoreSystemPrompt(userMemory, {
systemPromptMappings,
});
const generateContentConfigWithThinking = isThinkingSupported(
this.config.getModel(),
)
@@ -303,6 +307,49 @@ export class GeminiClient {
if (compressed) {
yield { type: GeminiEventType.ChatCompressed, value: compressed };
}
// Check session token limit after compression using accurate token counting
const sessionTokenLimit = this.config.getSessionTokenLimit();
if (sessionTokenLimit > 0) {
// Get all the content that would be sent in an API call
const currentHistory = this.getChat().getHistory(true);
const userMemory = this.config.getUserMemory();
const systemPrompt = getCoreSystemPrompt(userMemory);
const environment = await this.getEnvironment();
// Create a mock request content to count total tokens
const mockRequestContent = [
{
role: 'system' as const,
parts: [{ text: systemPrompt }, ...environment],
},
...currentHistory,
];
// Use the improved countTokens method for accurate counting
const { totalTokens: totalRequestTokens } =
await this.getContentGenerator().countTokens({
model: this.config.getModel(),
contents: mockRequestContent,
});
if (
totalRequestTokens !== undefined &&
totalRequestTokens > sessionTokenLimit
) {
yield {
type: GeminiEventType.SessionTokenLimitExceeded,
value: {
currentTokens: totalRequestTokens,
limit: sessionTokenLimit,
message:
`Session token limit exceeded: ${totalRequestTokens} tokens > ${sessionTokenLimit} limit. ` +
'Please start a new session or increase the sessionTokenLimit in your settings.json.',
},
};
return new Turn(this.getChat(), prompt_id);
}
}
const turn = new Turn(this.getChat(), prompt_id);
const resultStream = turn.run(request, signal);
for await (const event of resultStream) {
@@ -354,7 +401,10 @@ export class GeminiClient {
model || this.config.getModel() || DEFAULT_GEMINI_FLASH_MODEL;
try {
const userMemory = this.config.getUserMemory();
const systemInstruction = getCoreSystemPrompt(userMemory);
const systemPromptMappings = this.config.getSystemPromptMappings();
const systemInstruction = getCoreSystemPrompt(userMemory, {
systemPromptMappings,
});
const requestConfig = {
abortSignal,
...this.generateContentConfig,
@@ -470,7 +520,10 @@ export class GeminiClient {
try {
const userMemory = this.config.getUserMemory();
const systemInstruction = getCoreSystemPrompt(userMemory);
const systemPromptMappings = this.config.getSystemPromptMappings();
const systemInstruction = getCoreSystemPrompt(userMemory, {
systemPromptMappings,
});
const requestConfig = {
abortSignal,

View File
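The guard added above can be condensed into a small helper. A minimal sketch, assuming only the countTokens() shape visible in this diff; the helper name and parameter types are illustrative:

async function exceedsSessionTokenLimit(
  generator: {
    countTokens(req: { model: string; contents: unknown[] }): Promise<{ totalTokens?: number }>;
  },
  model: string,
  contents: unknown[],
  sessionTokenLimit: number,
): Promise<boolean> {
  if (sessionTokenLimit <= 0) return false; // a non-positive limit disables the check, as above
  const { totalTokens } = await generator.countTokens({ model, contents });
  return totalTokens !== undefined && totalTokens > sessionTokenLimit;
}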

@@ -116,7 +116,8 @@ export async function createContentGeneratorConfig(
if (authType === AuthType.USE_OPENAI && openaiApiKey) {
contentGeneratorConfig.apiKey = openaiApiKey;
contentGeneratorConfig.model = process.env.OPENAI_MODEL || '';
contentGeneratorConfig.model =
process.env.OPENAI_MODEL || DEFAULT_GEMINI_MODEL;
return contentGeneratorConfig;
}

View File

@@ -4,10 +4,7 @@
* SPDX-License-Identifier: Apache-2.0
*/
import {
DEFAULT_GEMINI_MODEL,
DEFAULT_GEMINI_FLASH_MODEL,
} from '../config/models.js';
// Removed unused imports
/**
* Checks if the default "pro" model is rate-limited and returns a fallback "flash"
@@ -18,51 +15,9 @@ import {
* and the original model if a switch happened.
*/
export async function getEffectiveModel(
apiKey: string,
_apiKey: string,
currentConfiguredModel: string,
): Promise<string> {
if (currentConfiguredModel !== DEFAULT_GEMINI_MODEL) {
// Only check if the user is trying to use the specific pro model we want to fallback from.
return currentConfiguredModel;
}
const modelToTest = DEFAULT_GEMINI_MODEL;
const fallbackModel = DEFAULT_GEMINI_FLASH_MODEL;
const endpoint = `https://generativelanguage.googleapis.com/v1beta/models/${modelToTest}:generateContent?key=${apiKey}`;
const body = JSON.stringify({
contents: [{ parts: [{ text: 'test' }] }],
generationConfig: {
maxOutputTokens: 1,
temperature: 0,
topK: 1,
thinkingConfig: { thinkingBudget: 128, includeThoughts: false },
},
});
const controller = new AbortController();
const timeoutId = setTimeout(() => controller.abort(), 2000); // 2s timeout for the request
try {
const response = await fetch(endpoint, {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body,
signal: controller.signal,
});
clearTimeout(timeoutId);
if (response.status === 429) {
console.log(
`[INFO] Your configured model (${modelToTest}) was temporarily unavailable. Switched to ${fallbackModel} for this session.`,
);
return fallbackModel;
}
// For any other case (success, other error codes), we stick to the original model.
return currentConfiguredModel;
} catch (_error) {
clearTimeout(timeoutId);
// On timeout or any other fetch error, stick to the original model.
return currentConfiguredModel;
}
// Disable Google API Model Check
return currentConfiguredModel;
}

View File

@@ -52,6 +52,9 @@ interface OpenAIUsage {
prompt_tokens: number;
completion_tokens: number;
total_tokens: number;
prompt_tokens_details?: {
cached_tokens?: number;
};
}
interface OpenAIChoice {
@@ -115,11 +118,21 @@ export class OpenAIContentGenerator implements ContentGenerator {
timeoutConfig.maxRetries = contentGeneratorConfig.maxRetries;
}
// Check if using OpenRouter and add required headers
const isOpenRouter = baseURL.includes('openrouter.ai');
const defaultHeaders = isOpenRouter
? {
'HTTP-Referer': 'https://github.com/QwenLM/qwen-code.git',
'X-Title': 'Qwen Code',
}
: undefined;
this.client = new OpenAI({
apiKey,
baseURL,
timeout: timeoutConfig.timeout,
maxRetries: timeoutConfig.maxRetries,
defaultHeaders,
});
}
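For reference, the OpenRouter branch in the constructor can be read in isolation. A minimal sketch, assuming only the openai SDK constructor options used above; makeClient is a hypothetical helper:

import OpenAI from 'openai';

function makeClient(apiKey: string, baseURL: string): OpenAI {
  // openrouter.ai expects attribution headers; other endpoints get none
  const isOpenRouter = baseURL.includes('openrouter.ai');
  return new OpenAI({
    apiKey,
    baseURL,
    defaultHeaders: isOpenRouter
      ? {
          'HTTP-Referer': 'https://github.com/QwenLM/qwen-code.git',
          'X-Title': 'Qwen Code',
        }
      : undefined,
  });
}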
@@ -515,6 +528,8 @@ export class OpenAIContentGenerator implements ContentGenerator {
return new GenerateContentResponse();
}
const lastResponse = responses[responses.length - 1];
// Find the last response with usage metadata
const finalUsageMetadata = responses
.slice()
@@ -561,6 +576,8 @@ export class OpenAIContentGenerator implements ContentGenerator {
safetyRatings: [],
},
];
combinedResponse.responseId = lastResponse?.responseId;
combinedResponse.createTime = lastResponse?.createTime;
combinedResponse.modelVersion = this.model;
combinedResponse.promptFeedback = { safetyRatings: [] };
combinedResponse.usageMetadata = finalUsageMetadata;
@@ -571,14 +588,26 @@ export class OpenAIContentGenerator implements ContentGenerator {
async countTokens(
request: CountTokensParameters,
): Promise<CountTokensResponse> {
// OpenAI doesn't have a direct token counting endpoint
// We'll estimate based on the tiktoken library or a rough calculation
// For now, return a rough estimate
// Use tiktoken for accurate token counting
const content = JSON.stringify(request.contents);
const estimatedTokens = Math.ceil(content.length / 4); // Rough estimate: 1 token ≈ 4 characters
let totalTokens = 0;
try {
const { get_encoding } = await import('tiktoken');
const encoding = get_encoding('cl100k_base'); // GPT-4 encoding; an approximation for Qwen models
totalTokens = encoding.encode(content).length;
encoding.free();
} catch (error) {
console.warn(
'Failed to load tiktoken, falling back to character approximation:',
error,
);
// Fallback: rough approximation using character count
totalTokens = Math.ceil(content.length / 4); // Rough estimate: 1 token ≈ 4 characters
}
return {
totalTokens: estimatedTokens,
totalTokens,
};
}
@@ -1128,6 +1157,9 @@ export class OpenAIContentGenerator implements ContentGenerator {
}
}
response.responseId = openaiResponse.id;
response.createTime = openaiResponse.created.toString();
response.candidates = [
{
content: {
@@ -1145,15 +1177,12 @@ export class OpenAIContentGenerator implements ContentGenerator {
// Add usage metadata if available
if (openaiResponse.usage) {
const usage = openaiResponse.usage as {
prompt_tokens?: number;
completion_tokens?: number;
total_tokens?: number;
};
const usage = openaiResponse.usage as OpenAIUsage;
const promptTokens = usage.prompt_tokens || 0;
const completionTokens = usage.completion_tokens || 0;
const totalTokens = usage.total_tokens || 0;
const cachedTokens = usage.prompt_tokens_details?.cached_tokens || 0;
// If we only have total tokens but no breakdown, estimate the split
// Typically input is ~70% and output is ~30% for most conversations
@@ -1170,6 +1199,7 @@ export class OpenAIContentGenerator implements ContentGenerator {
promptTokenCount: finalPromptTokens,
candidatesTokenCount: finalCompletionTokens,
totalTokenCount: totalTokens,
cachedContentTokenCount: cachedTokens,
};
}
@@ -1263,20 +1293,20 @@ export class OpenAIContentGenerator implements ContentGenerator {
response.candidates = [];
}
response.responseId = chunk.id;
response.createTime = chunk.created.toString();
response.modelVersion = this.model;
response.promptFeedback = { safetyRatings: [] };
// Add usage metadata if available in the chunk
if (chunk.usage) {
const usage = chunk.usage as {
prompt_tokens?: number;
completion_tokens?: number;
total_tokens?: number;
};
const usage = chunk.usage as OpenAIUsage;
const promptTokens = usage.prompt_tokens || 0;
const completionTokens = usage.completion_tokens || 0;
const totalTokens = usage.total_tokens || 0;
const cachedTokens = usage.prompt_tokens_details?.cached_tokens || 0;
// If we only have total tokens but no breakdown, estimate the split
// Typically input is ~70% and output is ~30% for most conversations
@@ -1293,6 +1323,7 @@ export class OpenAIContentGenerator implements ContentGenerator {
promptTokenCount: finalPromptTokens,
candidatesTokenCount: finalCompletionTokens,
totalTokenCount: totalTokens,
cachedContentTokenCount: cachedTokens,
};
}
@@ -1727,9 +1758,11 @@ export class OpenAIContentGenerator implements ContentGenerator {
}
const openaiResponse: OpenAIResponseFormat = {
id: `chatcmpl-${Date.now()}`,
id: response.responseId || `chatcmpl-${Date.now()}`,
object: 'chat.completion',
created: Math.floor(Date.now() / 1000),
created: response.createTime
? Number(response.createTime)
: Math.floor(Date.now() / 1000),
model: this.model,
choices: [choice],
};
@@ -1741,6 +1774,12 @@ export class OpenAIContentGenerator implements ContentGenerator {
completion_tokens: response.usageMetadata.candidatesTokenCount || 0,
total_tokens: response.usageMetadata.totalTokenCount || 0,
};
if (response.usageMetadata.cachedContentTokenCount) {
openaiResponse.usage.prompt_tokens_details = {
cached_tokens: response.usageMetadata.cachedContentTokenCount,
};
}
}
return openaiResponse;

View File
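The counting strategy above is self-contained enough to lift out. A runnable sketch, assuming the tiktoken npm package; estimateTokens is a hypothetical name:

async function estimateTokens(text: string): Promise<number> {
  try {
    const { get_encoding } = await import('tiktoken');
    const encoding = get_encoding('cl100k_base'); // GPT-4 encoding; an approximation for Qwen
    const count = encoding.encode(text).length;
    encoding.free(); // the WASM-backed encoder must be freed explicitly
    return count;
  } catch {
    return Math.ceil(text.length / 4); // fallback: roughly 4 characters per token
  }
}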

@@ -32,7 +32,7 @@ describe('Core System Prompt (prompts.ts)', () => {
vi.stubEnv('SANDBOX', undefined);
const prompt = getCoreSystemPrompt();
expect(prompt).not.toContain('---\n\n'); // Separator should not be present
expect(prompt).toContain('You are an interactive CLI agent'); // Check for core content
expect(prompt).toContain('You are Qwen Code, an interactive CLI agent'); // Check for core content
expect(prompt).toMatchSnapshot(); // Use snapshot for base prompt structure
});
@@ -40,7 +40,7 @@ describe('Core System Prompt (prompts.ts)', () => {
vi.stubEnv('SANDBOX', undefined);
const prompt = getCoreSystemPrompt('');
expect(prompt).not.toContain('---\n\n');
expect(prompt).toContain('You are an interactive CLI agent');
expect(prompt).toContain('You are Qwen Code, an interactive CLI agent');
expect(prompt).toMatchSnapshot();
});
@@ -48,7 +48,7 @@ describe('Core System Prompt (prompts.ts)', () => {
vi.stubEnv('SANDBOX', undefined);
const prompt = getCoreSystemPrompt(' \n \t ');
expect(prompt).not.toContain('---\n\n');
expect(prompt).toContain('You are an interactive CLI agent');
expect(prompt).toContain('You are Qwen Code, an interactive CLI agent');
expect(prompt).toMatchSnapshot();
});
@@ -59,7 +59,7 @@ describe('Core System Prompt (prompts.ts)', () => {
const prompt = getCoreSystemPrompt(memory);
expect(prompt.endsWith(expectedSuffix)).toBe(true);
expect(prompt).toContain('You are an interactive CLI agent'); // Ensure base prompt follows
expect(prompt).toContain('You are Qwen Code, an interactive CLI agent'); // Ensure base prompt follows
expect(prompt).toMatchSnapshot(); // Snapshot the combined prompt
});
@@ -106,3 +106,96 @@ describe('Core System Prompt (prompts.ts)', () => {
expect(prompt).toMatchSnapshot();
});
});
describe('URL matching with trailing slash compatibility', () => {
it('should match URLs with and without trailing slash', () => {
const config = {
systemPromptMappings: [
{
baseUrls: ['https://api.example.com'],
modelNames: ['gpt-4'],
template: 'Custom template for example.com',
},
{
baseUrls: ['https://api.openai.com/'],
modelNames: ['gpt-3.5-turbo'],
template: 'Custom template for openai.com',
},
],
};
// Simulate environment variables
const originalEnv = process.env;
// Test case 1: No trailing slash in config, actual URL has trailing slash
process.env = {
...originalEnv,
OPENAI_BASE_URL: 'https://api.example.com/',
OPENAI_MODEL: 'gpt-4',
};
const result1 = getCoreSystemPrompt(undefined, config);
expect(result1).toContain('Custom template for example.com');
// Test case 2: Config has trailing slash, actual URL has no trailing slash
process.env = {
...originalEnv,
OPENAI_BASE_URL: 'https://api.openai.com',
OPENAI_MODEL: 'gpt-3.5-turbo',
};
const result2 = getCoreSystemPrompt(undefined, config);
expect(result2).toContain('Custom template for openai.com');
// Test case 3: No trailing slash in config, actual URL has no trailing slash
process.env = {
...originalEnv,
OPENAI_BASE_URL: 'https://api.example.com',
OPENAI_MODEL: 'gpt-4',
};
const result3 = getCoreSystemPrompt(undefined, config);
expect(result3).toContain('Custom template for example.com');
// Test case 4: Config has trailing slash, actual URL has trailing slash
process.env = {
...originalEnv,
OPENAI_BASE_URL: 'https://api.openai.com/',
OPENAI_MODEL: 'gpt-3.5-turbo',
};
const result4 = getCoreSystemPrompt(undefined, config);
expect(result4).toContain('Custom template for openai.com');
// Restore original environment variables
process.env = originalEnv;
});
it('should not match when URLs are different', () => {
const config = {
systemPromptMappings: [
{
baseUrls: ['https://api.example.com'],
modelNames: ['gpt-4'],
template: 'Custom template for example.com',
},
],
};
const originalEnv = process.env;
// Test case: URLs do not match
process.env = {
...originalEnv,
OPENAI_BASE_URL: 'https://api.different.com',
OPENAI_MODEL: 'gpt-4',
};
const result = getCoreSystemPrompt(undefined, config);
// Should return default template, not contain custom template
expect(result).not.toContain('Custom template for example.com');
// Restore original environment variables
process.env = originalEnv;
});
});

View File

@@ -6,7 +6,6 @@
import path from 'node:path';
import fs from 'node:fs';
import { LSTool } from '../tools/ls.js';
import { EditTool } from '../tools/edit.js';
import { GlobTool } from '../tools/glob.js';
import { GrepTool } from '../tools/grep.js';
@@ -17,8 +16,37 @@ import { WriteFileTool } from '../tools/write-file.js';
import process from 'node:process';
import { isGitRepository } from '../utils/gitUtils.js';
import { MemoryTool, GEMINI_CONFIG_DIR } from '../tools/memoryTool.js';
import { DEFAULT_GEMINI_MODEL } from '../config/models.js';
export function getCoreSystemPrompt(userMemory?: string): string {
export interface ModelTemplateMapping {
baseUrls?: string[];
modelNames?: string[];
template?: string;
}
export interface SystemPromptConfig {
systemPromptMappings?: ModelTemplateMapping[];
}
/**
* Normalizes a URL by removing trailing slash for consistent comparison
*/
function normalizeUrl(url: string): string {
return url.endsWith('/') ? url.slice(0, -1) : url;
}
/**
* Checks if a URL matches any URL in the array, ignoring trailing slashes
*/
function urlMatches(urlArray: string[], targetUrl: string): boolean {
const normalizedTarget = normalizeUrl(targetUrl);
return urlArray.some((url) => normalizeUrl(url) === normalizedTarget);
}
export function getCoreSystemPrompt(
userMemory?: string,
config?: SystemPromptConfig,
): string {
// if GEMINI_SYSTEM_MD is set (and not 0|false), override system prompt from file
// default path is .qwen/system.md but can be modified via custom path in GEMINI_SYSTEM_MD
let systemMdEnabled = false;
@@ -34,10 +62,56 @@ export function getCoreSystemPrompt(userMemory?: string): string {
throw new Error(`missing system prompt file '${systemMdPath}'`);
}
}
// Check for system prompt mappings from global config
if (config?.systemPromptMappings) {
const currentModel = process.env.OPENAI_MODEL || DEFAULT_GEMINI_MODEL;
const currentBaseUrl = process.env.OPENAI_BASE_URL || '';
const matchedMapping = config.systemPromptMappings.find((mapping) => {
const { baseUrls, modelNames } = mapping;
// Check if baseUrl matches (when specified)
if (
baseUrls &&
modelNames &&
urlMatches(baseUrls, currentBaseUrl) &&
modelNames.includes(currentModel)
) {
return true;
}
if (baseUrls && urlMatches(baseUrls, currentBaseUrl) && !modelNames) {
return true;
}
if (modelNames && modelNames.includes(currentModel) && !baseUrls) {
return true;
}
return false;
});
if (matchedMapping?.template) {
const isGitRepo = isGitRepository(process.cwd());
// Replace placeholders in template
let template = matchedMapping.template;
template = template.replace(
'{RUNTIME_VARS_IS_GIT_REPO}',
String(isGitRepo),
);
template = template.replace(
'{RUNTIME_VARS_SANDBOX}',
process.env.SANDBOX || '',
);
return template;
}
}
const basePrompt = systemMdEnabled
? fs.readFileSync(systemMdPath, 'utf8')
: `
You are an interactive CLI agent specializing in software engineering tasks. Your primary goal is to help users safely and efficiently, adhering strictly to the following instructions and utilizing your available tools.
You are Qwen Code, an interactive CLI agent developed by Alibaba Group, specializing in software engineering tasks. Your primary goal is to help users safely and efficiently, adhering strictly to the following instructions and utilizing your available tools.
# Core Mandates
@@ -167,24 +241,56 @@ model: true
<example>
user: list files here.
model: [tool_call: ${LSTool.Name} for path '/path/to/project']
model:
<tool_call>
<function=list_directory>
<parameter=path>
.
</parameter>
</function>
</tool_call>
</example>
<example>
user: start the server implemented in server.js
model: [tool_call: ${ShellTool.Name} for 'node server.js &' because it must run in the background]
model:
<tool_call>
<function=run_shell_command>
<parameter=command>
node server.js &
</parameter>
</function>
</tool_call>
</example>
<example>
user: Refactor the auth logic in src/auth.py to use the requests library instead of urllib.
model: Okay, I can refactor 'src/auth.py'.
First, I'll analyze the code and check for a test safety net before planning any changes.
[tool_call: ${GlobTool.Name} for path 'tests/test_auth.py']
[tool_call: ${ReadFileTool.Name} for absolute_path '/path/to/tests/test_auth.py']
<tool_call>
<function=glob>
<parameter=path>
tests/test_auth.py
</parameter>
</function>
</tool_call>
<tool_call>
<function=read_file>
<parameter=path>
/path/to/tests/test_auth.py
</parameter>
</function>
</tool_call>
(After analysis)
Great, 'tests/test_auth.py' exists and covers the core authentication logic. With this safety net in place, I can safely plan the refactoring.
I'll also confirm 'requests' is a dependency.
[tool_call: ${ReadFileTool.Name} for absolute_path '/path/to/requirements.txt']
<tool_call>
<function=read_file>
<parameter=path>
/path/to/requirements.txt
</parameter>
</function>
</tool_call>
(After analysis)
Looks good, 'requests' is available.
@@ -194,20 +300,30 @@ Here's the plan:
3. Remove the old 'urllib' import.
4. Run the project's linter and tests to verify the changes.
Should I proceed?
user: Yes
model:
[tool_call: ${WriteFileTool.Name} or ${EditTool.Name} to apply the refactoring to 'src/auth.py']
<tool_call>
<function=replace>
<parameter=path>
src/auth.py
</parameter>
<parameter=old_content>
(old code content)
</parameter>
<parameter=new_content>
(new code content)
</parameter>
</function>
</tool_call>
Refactoring complete. Running verification...
[tool_call: ${ShellTool.Name} for 'ruff check src/auth.py && pytest']
<tool_call>
<function=run_shell_command>
<parameter=command>
ruff check src/auth.py && pytest
</parameter>
</function>
</tool_call>
(After verification passes)
All checks passed. This is a stable checkpoint.
${(function () {
if (isGitRepository(process.cwd())) {
return `Would you like me to write a commit message and commit these changes?`;
}
return '';
})()}
Would you like me to write a commit message and commit these changes?
</example>
<example>
@@ -218,32 +334,48 @@ model: I can run \`rm -rf /path/to/project/temp\`. This will permanently delete
<example>
user: Write tests for someFile.ts
model:
Okay, I can write those tests. First, I'll read \`someFile.ts\` to understand its functionality.
[tool_call: ${ReadFileTool.Name} for absolute_path '/path/to/someFile.ts' or use ${GlobTool.Name} to find \`someFile.ts\` if its location is unknown]
Okay, I can write those tests. First, I'll read someFile.ts to understand its functionality.
<tool_call>
<function=read_file>
<parameter=path>
/path/to/someFile.ts
</parameter>
</function>
</tool_call>
Now I'll look for existing or related test files to understand current testing conventions and dependencies.
[tool_call: ${ReadManyFilesTool.Name} for paths ['**/*.test.ts', 'src/**/*.spec.ts'] assuming someFile.ts is in the src directory]
<tool_call>
<function=read_many_files>
<parameter=paths>
['**/*.test.ts', 'src/**/*.spec.ts']
</parameter>
</function>
</tool_call>
(Assuming someFile.ts is in the src directory.)
(After reviewing existing tests and the file content)
[tool_call: ${WriteFileTool.Name} to create /path/to/someFile.test.ts with the test code]
<tool_call>
<function=write_file>
<parameter=path>
/path/to/someFile.test.ts
</parameter>
</function>
</tool_call>
I've written the tests. Now I'll run the project's test command to verify them.
[tool_call: ${ShellTool.Name} for 'npm run test']
</example>
<example>
user: How do I update the user's profile information in this system?
model:
I'm not immediately sure how user profile information is updated. I'll search the codebase for terms like 'UserProfile', 'updateProfile', or 'editUser' to find relevant files or API endpoints.
[tool_call: ${GrepTool.Name} for pattern 'UserProfile|updateProfile|editUser']
(After reviewing search results, assuming a relevant file like '/path/to/UserProfileService.java' was found)
Okay, \`/path/to/UserProfileService.java\` seems like the most relevant file. I'll read its content to understand how updates are handled.
[tool_call: ${ReadFileTool.Name} for absolute_path '/path/to/UserProfileService.java']
(After reading the file)
It appears the \`updateUserProfile\` method in \`UserProfileService.java\` is responsible for this. It expects a user ID and a \`UserProfileDTO\` object...
<tool_call>
<function=run_shell_command>
<parameter=command>
npm run test
</parameter>
</function>
</tool_call>
(After verification passes)
All checks passed. This is a stable checkpoint.
</example>
<example>
user: Where are all the 'app.config' files in this project? I need to check their settings.
model:
[tool_call: ${GlobTool.Name} for pattern '**/app.config']
<tool_call>
<function=glob>
<parameter=pattern>
./**/app.config
</parameter>
</function>
</tool_call>
(Assuming GlobTool returns a list of paths like ['/path/to/moduleA/app.config', '/path/to/moduleB/app.config'])
I found the following 'app.config' files:
- /path/to/moduleA/app.config
@@ -256,6 +388,7 @@ Your core function is efficient and safe assistance. Balance extreme conciseness
`.trim();
// if GEMINI_WRITE_SYSTEM_MD is set (and not 0|false), write base system prompt to file
const writeSystemMdVar = process.env.GEMINI_WRITE_SYSTEM_MD?.toLowerCase();
if (writeSystemMdVar && !['0', 'false'].includes(writeSystemMdVar)) {
if (['1', 'true'].includes(writeSystemMdVar)) {

View File
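Putting the new mapping support together, a hedged usage sketch; the base URL and template text are illustrative, not the shipped defaults:

import { getCoreSystemPrompt, type SystemPromptConfig } from './prompts.js'; // assumed import path

const promptConfig: SystemPromptConfig = {
  systemPromptMappings: [
    {
      // trailing slash is fine: urlMatches() normalizes both sides
      baseUrls: ['https://dashscope.aliyuncs.com/compatible-mode/v1/'],
      modelNames: ['qwen3-coder-plus'],
      template:
        'You are Qwen Code. Git repo: {RUNTIME_VARS_IS_GIT_REPO}. Sandbox: {RUNTIME_VARS_SANDBOX}.',
    },
  ],
};

// Returns the template (with placeholders substituted) when OPENAI_BASE_URL
// and OPENAI_MODEL match the mapping; otherwise falls through to the base prompt.
const prompt = getCoreSystemPrompt(undefined, promptConfig);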

@@ -49,6 +49,7 @@ export enum GeminiEventType {
ChatCompressed = 'chat_compressed',
Thought = 'thought',
MaxSessionTurns = 'max_session_turns',
SessionTokenLimitExceeded = 'session_token_limit_exceeded',
LoopDetected = 'loop_detected',
}
@@ -61,6 +62,12 @@ export interface GeminiErrorEventValue {
error: StructuredError;
}
export interface SessionTokenLimitExceededValue {
currentTokens: number;
limit: number;
message: string;
}
export interface ToolCallRequestInfo {
callId: string;
name: string;
@@ -134,6 +141,11 @@ export type ServerGeminiMaxSessionTurnsEvent = {
type: GeminiEventType.MaxSessionTurns;
};
export type ServerGeminiSessionTokenLimitExceededEvent = {
type: GeminiEventType.SessionTokenLimitExceeded;
value: SessionTokenLimitExceededValue;
};
export type ServerGeminiLoopDetectedEvent = {
type: GeminiEventType.LoopDetected;
};
@@ -149,6 +161,7 @@ export type ServerGeminiStreamEvent =
| ServerGeminiChatCompressedEvent
| ServerGeminiThoughtEvent
| ServerGeminiMaxSessionTurnsEvent
| ServerGeminiSessionTokenLimitExceededEvent
| ServerGeminiLoopDetectedEvent;
// A turn manages the agentic loop turn within the server context.

View File
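A consumer of the stream now has one more variant to branch on. A minimal sketch, assuming the import path; drainEvents is a hypothetical helper:

import { GeminiEventType, type ServerGeminiStreamEvent } from './turn.js'; // assumed import path

async function drainEvents(stream: AsyncIterable<ServerGeminiStreamEvent>): Promise<void> {
  for await (const event of stream) {
    if (event.type === GeminiEventType.SessionTokenLimitExceeded) {
      // value carries currentTokens, limit, and a ready-made message
      console.error(event.value.message);
      return; // start a fresh session or raise sessionTokenLimit in settings.json
    }
    // ...handle ChatCompressed, Thought, MaxSessionTurns, LoopDetected, etc...
  }
}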

@@ -4,20 +4,20 @@
* SPDX-License-Identifier: Apache-2.0
*/
export const SERVICE_NAME = 'gemini-cli';
export const SERVICE_NAME = 'qwen-code';
export const EVENT_USER_PROMPT = 'gemini_cli.user_prompt';
export const EVENT_TOOL_CALL = 'gemini_cli.tool_call';
export const EVENT_API_REQUEST = 'gemini_cli.api_request';
export const EVENT_API_ERROR = 'gemini_cli.api_error';
export const EVENT_API_RESPONSE = 'gemini_cli.api_response';
export const EVENT_CLI_CONFIG = 'gemini_cli.config';
export const EVENT_FLASH_FALLBACK = 'gemini_cli.flash_fallback';
export const EVENT_USER_PROMPT = 'qwen-code.user_prompt';
export const EVENT_TOOL_CALL = 'qwen-code.tool_call';
export const EVENT_API_REQUEST = 'qwen-code.api_request';
export const EVENT_API_ERROR = 'qwen-code.api_error';
export const EVENT_API_RESPONSE = 'qwen-code.api_response';
export const EVENT_CLI_CONFIG = 'qwen-code.config';
export const EVENT_FLASH_FALLBACK = 'qwen-code.flash_fallback';
export const METRIC_TOOL_CALL_COUNT = 'gemini_cli.tool.call.count';
export const METRIC_TOOL_CALL_LATENCY = 'gemini_cli.tool.call.latency';
export const METRIC_API_REQUEST_COUNT = 'gemini_cli.api.request.count';
export const METRIC_API_REQUEST_LATENCY = 'gemini_cli.api.request.latency';
export const METRIC_TOKEN_USAGE = 'gemini_cli.token.usage';
export const METRIC_SESSION_COUNT = 'gemini_cli.session.count';
export const METRIC_FILE_OPERATION_COUNT = 'gemini_cli.file.operation.count';
export const METRIC_TOOL_CALL_COUNT = 'qwen-code.tool.call.count';
export const METRIC_TOOL_CALL_LATENCY = 'qwen-code.tool.call.latency';
export const METRIC_API_REQUEST_COUNT = 'qwen-code.api.request.count';
export const METRIC_API_REQUEST_LATENCY = 'qwen-code.api.request.latency';
export const METRIC_TOKEN_USAGE = 'qwen-code.token.usage';
export const METRIC_SESSION_COUNT = 'qwen-code.session.count';
export const METRIC_FILE_OPERATION_COUNT = 'qwen-code.file.operation.count';

View File

@@ -6,11 +6,11 @@
export enum TelemetryTarget {
GCP = 'gcp',
LOCAL = 'local',
QW = 'qw',
}
const DEFAULT_TELEMETRY_TARGET = TelemetryTarget.LOCAL;
const DEFAULT_OTLP_ENDPOINT = 'http://localhost:4317';
const DEFAULT_TELEMETRY_TARGET = TelemetryTarget.QW;
const DEFAULT_OTLP_ENDPOINT = 'http://tracing-analysis-dc-hz.aliyuncs.com:8090';
export { DEFAULT_TELEMETRY_TARGET, DEFAULT_OTLP_ENDPOINT };
export {

View File

@@ -5,6 +5,7 @@
*/
import { logs, LogRecord, LogAttributes } from '@opentelemetry/api-logs';
import { trace, context } from '@opentelemetry/api';
import { SemanticAttributes } from '@opentelemetry/semantic-conventions';
import { Config } from '../config/config.js';
import {
@@ -35,7 +36,7 @@ import {
} from './metrics.js';
import { isTelemetrySdkInitialized } from './sdk.js';
import { uiTelemetryService, UiEvent } from './uiTelemetry.js';
import { ClearcutLogger } from './clearcut-logger/clearcut-logger.js';
// import { ClearcutLogger } from './clearcut-logger/clearcut-logger.js';
import { safeJsonStringify } from '../utils/safeJsonStringify.js';
const shouldLogUserPrompts = (config: Config): boolean =>
@@ -47,11 +48,32 @@ function getCommonAttributes(config: Config): LogAttributes {
};
}
// Helper function to create spans and emit logs within span context
function logWithSpan(
spanName: string,
logBody: string,
attributes: LogAttributes,
): void {
const tracer = trace.getTracer(SERVICE_NAME);
const span = tracer.startSpan(spanName);
context.with(trace.setSpan(context.active(), span), () => {
const logger = logs.getLogger(SERVICE_NAME);
const logRecord: LogRecord = {
body: logBody,
attributes,
};
logger.emit(logRecord);
});
span.end();
}
export function logCliConfiguration(
config: Config,
event: StartSessionEvent,
): void {
ClearcutLogger.getInstance(config)?.logStartSessionEvent(event);
// ClearcutLogger.getInstance(config)?.logStartSessionEvent(event);
if (!isTelemetrySdkInitialized()) return;
const attributes: LogAttributes = {
@@ -71,16 +93,11 @@ export function logCliConfiguration(
mcp_servers: event.mcp_servers,
};
const logger = logs.getLogger(SERVICE_NAME);
const logRecord: LogRecord = {
body: 'CLI configuration loaded.',
attributes,
};
logger.emit(logRecord);
logWithSpan('cli.configuration', 'CLI configuration loaded.', attributes);
}
export function logUserPrompt(config: Config, event: UserPromptEvent): void {
ClearcutLogger.getInstance(config)?.logNewPromptEvent(event);
// ClearcutLogger.getInstance(config)?.logNewPromptEvent(event);
if (!isTelemetrySdkInitialized()) return;
const attributes: LogAttributes = {
@@ -94,12 +111,11 @@ export function logUserPrompt(config: Config, event: UserPromptEvent): void {
attributes.prompt = event.prompt;
}
const logger = logs.getLogger(SERVICE_NAME);
const logRecord: LogRecord = {
body: `User prompt. Length: ${event.prompt_length}.`,
logWithSpan(
'user.prompt',
`User prompt. Length: ${event.prompt_length}.`,
attributes,
};
logger.emit(logRecord);
);
}
export function logToolCall(config: Config, event: ToolCallEvent): void {
@@ -109,7 +125,7 @@ export function logToolCall(config: Config, event: ToolCallEvent): void {
'event.timestamp': new Date().toISOString(),
} as UiEvent;
uiTelemetryService.addEvent(uiEvent);
ClearcutLogger.getInstance(config)?.logToolCallEvent(event);
// ClearcutLogger.getInstance(config)?.logToolCallEvent(event);
if (!isTelemetrySdkInitialized()) return;
const attributes: LogAttributes = {
@@ -126,12 +142,11 @@ export function logToolCall(config: Config, event: ToolCallEvent): void {
}
}
const logger = logs.getLogger(SERVICE_NAME);
const logRecord: LogRecord = {
body: `Tool call: ${event.function_name}${event.decision ? `. Decision: ${event.decision}` : ''}. Success: ${event.success}. Duration: ${event.duration_ms}ms.`,
logWithSpan(
`tool.${event.function_name}`,
`Tool call: ${event.function_name}${event.decision ? `. Decision: ${event.decision}` : ''}. Success: ${event.success}. Duration: ${event.duration_ms}ms.`,
attributes,
};
logger.emit(logRecord);
);
recordToolCallMetrics(
config,
event.function_name,
@@ -142,7 +157,7 @@ export function logToolCall(config: Config, event: ToolCallEvent): void {
}
export function logApiRequest(config: Config, event: ApiRequestEvent): void {
ClearcutLogger.getInstance(config)?.logApiRequestEvent(event);
// ClearcutLogger.getInstance(config)?.logApiRequestEvent(event);
if (!isTelemetrySdkInitialized()) return;
const attributes: LogAttributes = {
@@ -152,19 +167,18 @@ export function logApiRequest(config: Config, event: ApiRequestEvent): void {
'event.timestamp': new Date().toISOString(),
};
const logger = logs.getLogger(SERVICE_NAME);
const logRecord: LogRecord = {
body: `API request to ${event.model}.`,
logWithSpan(
`api.request.${event.model}`,
`API request to ${event.model}.`,
attributes,
};
logger.emit(logRecord);
);
}
export function logFlashFallback(
config: Config,
event: FlashFallbackEvent,
): void {
ClearcutLogger.getInstance(config)?.logFlashFallbackEvent(event);
// ClearcutLogger.getInstance(config)?.logFlashFallbackEvent(event);
if (!isTelemetrySdkInitialized()) return;
const attributes: LogAttributes = {
@@ -174,12 +188,11 @@ export function logFlashFallback(
'event.timestamp': new Date().toISOString(),
};
const logger = logs.getLogger(SERVICE_NAME);
const logRecord: LogRecord = {
body: `Switching to flash as Fallback.`,
logWithSpan(
'api.flash_fallback',
'Switching to flash as Fallback.',
attributes,
};
logger.emit(logRecord);
);
}
export function logApiError(config: Config, event: ApiErrorEvent): void {
@@ -189,7 +202,7 @@ export function logApiError(config: Config, event: ApiErrorEvent): void {
'event.timestamp': new Date().toISOString(),
} as UiEvent;
uiTelemetryService.addEvent(uiEvent);
ClearcutLogger.getInstance(config)?.logApiErrorEvent(event);
// ClearcutLogger.getInstance(config)?.logApiErrorEvent(event);
if (!isTelemetrySdkInitialized()) return;
const attributes: LogAttributes = {
@@ -209,12 +222,11 @@ export function logApiError(config: Config, event: ApiErrorEvent): void {
attributes[SemanticAttributes.HTTP_STATUS_CODE] = event.status_code;
}
const logger = logs.getLogger(SERVICE_NAME);
const logRecord: LogRecord = {
body: `API error for ${event.model}. Error: ${event.error}. Duration: ${event.duration_ms}ms.`,
logWithSpan(
`api.error.${event.model}`,
`API error for ${event.model}. Error: ${event.error}. Duration: ${event.duration_ms}ms.`,
attributes,
};
logger.emit(logRecord);
);
recordApiErrorMetrics(
config,
event.model,
@@ -231,7 +243,7 @@ export function logApiResponse(config: Config, event: ApiResponseEvent): void {
'event.timestamp': new Date().toISOString(),
} as UiEvent;
uiTelemetryService.addEvent(uiEvent);
ClearcutLogger.getInstance(config)?.logApiResponseEvent(event);
// ClearcutLogger.getInstance(config)?.logApiResponseEvent(event);
if (!isTelemetrySdkInitialized()) return;
const attributes: LogAttributes = {
...getCommonAttributes(config),
@@ -250,12 +262,11 @@ export function logApiResponse(config: Config, event: ApiResponseEvent): void {
}
}
const logger = logs.getLogger(SERVICE_NAME);
const logRecord: LogRecord = {
body: `API response from ${event.model}. Status: ${event.status_code || 'N/A'}. Duration: ${event.duration_ms}ms.`,
logWithSpan(
`api.response.${event.model}`,
`API response from ${event.model}. Status: ${event.status_code || 'N/A'}. Duration: ${event.duration_ms}ms.`,
attributes,
};
logger.emit(logRecord);
);
recordApiResponseMetrics(
config,
event.model,
@@ -294,7 +305,7 @@ export function logLoopDetected(
config: Config,
event: LoopDetectedEvent,
): void {
ClearcutLogger.getInstance(config)?.logLoopDetectedEvent(event);
// ClearcutLogger.getInstance(config)?.logLoopDetectedEvent(event);
if (!isTelemetrySdkInitialized()) return;
const attributes: LogAttributes = {
@@ -302,10 +313,9 @@ export function logLoopDetected(
...event,
};
const logger = logs.getLogger(SERVICE_NAME);
const logRecord: LogRecord = {
body: `Loop detected. Type: ${event.loop_type}.`,
logWithSpan(
'loop.detected',
`Loop detected. Type: ${event.loop_type}.`,
attributes,
};
logger.emit(logRecord);
);
}

View File
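Within the same module, any event can now be emitted through the helper. A short usage sketch; the span name and attributes are illustrative:

logWithSpan('session.start', 'Session started.', {
  'session.id': 'example-session', // hypothetical attribute
  'event.timestamp': new Date().toISOString(),
});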

@@ -6,29 +6,23 @@
import { DiagConsoleLogger, DiagLogLevel, diag } from '@opentelemetry/api';
import { OTLPTraceExporter } from '@opentelemetry/exporter-trace-otlp-grpc';
import { OTLPLogExporter } from '@opentelemetry/exporter-logs-otlp-grpc';
import { OTLPMetricExporter } from '@opentelemetry/exporter-metrics-otlp-grpc';
import { CompressionAlgorithm } from '@opentelemetry/otlp-exporter-base';
import { Metadata } from '@grpc/grpc-js';
import { NodeSDK } from '@opentelemetry/sdk-node';
import { SemanticResourceAttributes } from '@opentelemetry/semantic-conventions';
import { Resource } from '@opentelemetry/resources';
import {
BatchSpanProcessor,
ConsoleSpanExporter,
} from '@opentelemetry/sdk-trace-node';
import {
BatchLogRecordProcessor,
ConsoleLogRecordExporter,
} from '@opentelemetry/sdk-logs';
import {
ConsoleMetricExporter,
PeriodicExportingMetricReader,
} from '@opentelemetry/sdk-metrics';
import { BatchSpanProcessor } from '@opentelemetry/sdk-trace-node';
import { BatchLogRecordProcessor } from '@opentelemetry/sdk-logs';
import { PeriodicExportingMetricReader } from '@opentelemetry/sdk-metrics';
import type { ReadableSpan } from '@opentelemetry/sdk-trace-base';
import type { LogRecord } from '@opentelemetry/sdk-logs';
import type { ResourceMetrics } from '@opentelemetry/sdk-metrics';
import type { ExportResult } from '@opentelemetry/core';
import { HttpInstrumentation } from '@opentelemetry/instrumentation-http';
import { Config } from '../config/config.js';
import { SERVICE_NAME } from './constants.js';
import { initializeMetrics } from './metrics.js';
import { ClearcutLogger } from './clearcut-logger/clearcut-logger.js';
// For troubleshooting, set the log level to DiagLogLevel.DEBUG
diag.setLogger(new DiagConsoleLogger(), DiagLogLevel.INFO);
@@ -75,28 +69,60 @@ export function initializeTelemetry(config: Config): void {
const grpcParsedEndpoint = parseGrpcEndpoint(otlpEndpoint);
const useOtlp = !!grpcParsedEndpoint;
const metadata = new Metadata();
metadata.set(
'Authentication',
'gb7x9m2kzp@8f4e3b6c9d2a1e5_qw7x9m2kzp@19a8c5f2b4e7d93',
);
const spanExporter = useOtlp
? new OTLPTraceExporter({
url: grpcParsedEndpoint,
compression: CompressionAlgorithm.GZIP,
metadata,
})
: new ConsoleSpanExporter();
const logExporter = useOtlp
? new OTLPLogExporter({
url: grpcParsedEndpoint,
compression: CompressionAlgorithm.GZIP,
})
: new ConsoleLogRecordExporter();
: {
export: (
spans: ReadableSpan[],
callback: (result: ExportResult) => void,
) => callback({ code: 0 }),
forceFlush: () => Promise.resolve(),
shutdown: () => Promise.resolve(),
};
// FIXME: Temporarily disable OTLP log export due to gRPC endpoint not supporting LogsService
// const logExporter = useOtlp
// ? new OTLPLogExporter({
// url: grpcParsedEndpoint,
// compression: CompressionAlgorithm.GZIP,
// metadata: _metadata,
// })
// : new ConsoleLogRecordExporter();
// Create a no-op log exporter to avoid cluttering console output
const logExporter = {
export: (logs: LogRecord[], callback: (result: ExportResult) => void) =>
callback({ code: 0 }),
shutdown: () => Promise.resolve(),
};
const metricReader = useOtlp
? new PeriodicExportingMetricReader({
exporter: new OTLPMetricExporter({
url: grpcParsedEndpoint,
compression: CompressionAlgorithm.GZIP,
metadata,
}),
exportIntervalMillis: 10000,
})
: new PeriodicExportingMetricReader({
exporter: new ConsoleMetricExporter(),
exporter: {
export: (
metrics: ResourceMetrics,
callback: (result: ExportResult) => void,
) => callback({ code: 0 }),
forceFlush: () => Promise.resolve(),
shutdown: () => Promise.resolve(),
},
exportIntervalMillis: 10000,
});
@@ -126,7 +152,7 @@ export async function shutdownTelemetry(): Promise<void> {
return;
}
try {
ClearcutLogger.getInstance()?.shutdown();
// ClearcutLogger.getInstance()?.shutdown();
await sdk.shutdown();
console.log('OpenTelemetry SDK shut down successfully.');
} catch (error) {

View File
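The inline no-op exporters above can also be written against the named result code. A sketch assuming @opentelemetry/core's ExportResultCode, whose SUCCESS member is the literal 0 used inline:

import { ExportResultCode, type ExportResult } from '@opentelemetry/core';
import type { LogRecord } from '@opentelemetry/sdk-logs';

const noopLogExporter = {
  // report success without writing anything anywhere
  export: (_logs: LogRecord[], resultCallback: (result: ExportResult) => void) =>
    resultCallback({ code: ExportResultCode.SUCCESS }),
  shutdown: () => Promise.resolve(),
};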

@@ -115,7 +115,7 @@ describe('getFolderStructure', () => {
it('should return basic folder structure', async () => {
const structure = await getFolderStructure('/testroot/subfolderA');
const expected = `
Showing up to 200 items (files + folders).
Showing up to 20 items (files + folders).
/testroot/subfolderA/
├───fileA1.ts
@@ -129,7 +129,7 @@ Showing up to 200 items (files + folders).
it('should handle an empty folder', async () => {
const structure = await getFolderStructure('/testroot/emptyFolder');
const expected = `
Showing up to 200 items (files + folders).
Showing up to 20 items (files + folders).
/testroot/emptyFolder/
`.trim();
@@ -139,7 +139,7 @@ Showing up to 200 items (files + folders).
it('should ignore folders specified in ignoredFolders (default)', async () => {
const structure = await getFolderStructure('/testroot');
const expected = `
Showing up to 200 items (files + folders). Folders or files indicated with ... contain more items not shown, were ignored, or the display limit (200 items) was reached.
Showing up to 20 items (files + folders). Folders or files indicated with ... contain more items not shown, were ignored, or the display limit (20 items) was reached.
/testroot/
├───.hiddenfile
@@ -160,7 +160,7 @@ Showing up to 200 items (files + folders). Folders or files indicated with ... c
ignoredFolders: new Set(['subfolderA', 'node_modules']),
});
const expected = `
Showing up to 200 items (files + folders). Folders or files indicated with ... contain more items not shown, were ignored, or the display limit (200 items) was reached.
Showing up to 20 items (files + folders). Folders or files indicated with ... contain more items not shown, were ignored, or the display limit (20 items) was reached.
/testroot/
├───.hiddenfile
@@ -177,7 +177,7 @@ Showing up to 200 items (files + folders). Folders or files indicated with ... c
fileIncludePattern: /\.ts$/,
});
const expected = `
Showing up to 200 items (files + folders).
Showing up to 20 items (files + folders).
/testroot/subfolderA/
├───fileA1.ts

View File

@@ -10,7 +10,7 @@ import * as path from 'path';
import { getErrorMessage, isNodeError } from './errors.js';
import { FileDiscoveryService } from '../services/fileDiscoveryService.js';
const MAX_ITEMS = 200;
const MAX_ITEMS = 20;
const TRUNCATION_INDICATOR = '...';
const DEFAULT_IGNORED_FOLDERS = new Set(['node_modules', '.git', 'dist']);
@@ -18,7 +18,7 @@ const DEFAULT_IGNORED_FOLDERS = new Set(['node_modules', '.git', 'dist']);
/** Options for customizing folder structure retrieval. */
interface FolderStructureOptions {
/** Maximum number of files and folders combined to display. Defaults to 200. */
/** Maximum number of files and folders combined to display. Defaults to 20. */
maxItems?: number;
/** Set of folder names to ignore completely. Case-sensitive. */
ignoredFolders?: Set<string>;

View File
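Callers can still override the lowered default per call. A hedged usage sketch; the import path is assumed:

import { getFolderStructure } from './getFolderStructure.js'; // assumed import path

// Restore the old 200-item limit for a one-off deep listing.
const tree = await getFolderStructure(process.cwd(), { maxItems: 200 });
console.log(tree);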

@@ -1,12 +1,12 @@
{
"name": "qwen-code-vscode",
"version": "0.0.1-alpha.8",
"version": "0.0.3",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "qwen-code-vscode",
"version": "0.0.1-alpha.8",
"version": "0.0.3",
"dependencies": {
"@modelcontextprotocol/sdk": "^1.15.1",
"cors": "^2.8.5",

View File

@@ -2,7 +2,7 @@
"name": "@qwen-code/qwen-code-vscode-ide-companion",
"displayName": "Qwen Code VSCode IDE Companion",
"description": "",
"version": "0.0.1-alpha.8",
"version": "0.0.2",
"engines": {
"vscode": "^1.101.0"
},

View File

@@ -77,23 +77,23 @@ if (!argv.s) {
execSync('npm run build --workspaces', { stdio: 'inherit' });
}
console.log('packing @google/gemini-cli ...');
console.log('packing @qwen-code/qwen-code ...');
const cliPackageDir = join('packages', 'cli');
rmSync(join(cliPackageDir, 'dist', 'google-gemini-cli-*.tgz'), { force: true });
rmSync(join(cliPackageDir, 'dist', 'qwen-code-*.tgz'), { force: true });
execSync(
`npm pack -w @google/gemini-cli --pack-destination ./packages/cli/dist`,
`npm pack -w @qwen-code/qwen-code --pack-destination ./packages/cli/dist`,
{
stdio: 'ignore',
},
);
console.log('packing @google/gemini-cli-core ...');
console.log('packing @qwen-code/qwen-code-core ...');
const corePackageDir = join('packages', 'core');
rmSync(join(corePackageDir, 'dist', 'google-gemini-cli-core-*.tgz'), {
rmSync(join(corePackageDir, 'dist', 'qwen-code-core-*.tgz'), {
force: true,
});
execSync(
`npm pack -w @google/gemini-cli-core --pack-destination ./packages/core/dist`,
`npm pack -w @qwen-code/qwen-code-core --pack-destination ./packages/core/dist`,
{ stdio: 'ignore' },
);
@@ -102,11 +102,15 @@ const packageVersion = JSON.parse(
).version;
chmodSync(
join(cliPackageDir, 'dist', `google-gemini-cli-${packageVersion}.tgz`),
join(cliPackageDir, 'dist', `qwen-code-qwen-code-${packageVersion}.tgz`),
0o755,
);
chmodSync(
join(corePackageDir, 'dist', `google-gemini-cli-core-${packageVersion}.tgz`),
join(
corePackageDir,
'dist',
`qwen-code-qwen-code-core-${packageVersion}.tgz`,
),
0o755,
);
@@ -134,14 +138,21 @@ function buildImage(imageName, dockerfile) {
{ stdio: buildStdout, shell: '/bin/bash' },
);
console.log(`built ${finalImageName}`);
if (existsSync('/workspace/final_image_uri.txt')) {
// The publish step only supports one image. If we build multiple, only the last one
// will be published. Throw an error to make this failure explicit.
throw new Error(
'CI artifact file /workspace/final_image_uri.txt already exists. Refusing to overwrite.',
// If an output file path was provided via command-line, write the final image URI to it.
if (argv.outputFile) {
console.log(
`Writing final image URI for CI artifact to: ${argv.outputFile}`,
);
// The publish step only supports one image. If we build multiple, only the last one
// will be published. Throw an error to make this failure explicit if the file already exists.
if (existsSync(argv.outputFile)) {
throw new Error(
`CI artifact file ${argv.outputFile} already exists. Refusing to overwrite.`,
);
}
writeFileSync(argv.outputFile, finalImageName);
}
writeFileSync('/workspace/final_image_uri.txt', finalImageName);
}
if (baseImage && baseDockerfile) {

View File

@@ -14,20 +14,44 @@ function getPackageVersion() {
return packageJson.version;
}
function getShortSha() {
return execSync('git rev-parse --short HEAD').toString().trim();
function incrementPatchVersion(version) {
const parts = version.split('.');
const major = parseInt(parts[0]);
const minor = parseInt(parts[1]);
const patch = parseInt(parts[2].split('-')[0]); // Handle pre-release versions
return `${major}.${minor}.${patch + 1}`;
}
function getLatestNightlyCount() {
try {
// Try to get the latest nightly tag from git to determine the counter
const currentVersion = getPackageVersion();
const nextVersion = incrementPatchVersion(currentVersion);
const tags = execSync(`git tag -l "v${nextVersion}-nightly.*"`)
.toString()
.trim();
if (!tags) return 0;
const nightlyTags = tags.split('\n').filter(Boolean);
const counts = nightlyTags.map((tag) => {
const match = tag.match(/nightly\.(\d+)$/);
return match ? parseInt(match[1]) : 0;
});
return Math.max(...counts, -1) + 1;
} catch (_error) {
// If we can't get tags, start from 0
return 0;
}
}
export function getNightlyTagName() {
const version = getPackageVersion();
const now = new Date();
const year = now.getUTCFullYear().toString().slice(-2);
const month = (now.getUTCMonth() + 1).toString().padStart(2, '0');
const day = now.getUTCDate().toString().padStart(2, '0');
const date = `${year}${month}${day}`;
const nextVersion = incrementPatchVersion(version);
const nightlyCount = getLatestNightlyCount();
const sha = getShortSha();
return `v${version}-nightly.${date}.${sha}`;
return `v${nextVersion}-nightly.${nightlyCount}`;
}
export function getReleaseVersion() {
@@ -72,7 +96,13 @@ export function getReleaseVersion() {
const releaseVersion = releaseTag.substring(1);
let npmTag = 'latest';
if (releaseVersion.includes('-')) {
npmTag = releaseVersion.split('-')[1].split('.')[0];
const prereleasePart = releaseVersion.split('-')[1];
npmTag = prereleasePart.split('.')[0];
// The prerelease prefix extracted above already yields 'nightly' for nightly
// releases, so they are published under the 'nightly' npm tag rather than 'latest'.
}
return { releaseTag, releaseVersion, npmTag };

View File
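To make the new scheme concrete, a worked example assuming package.json holds 0.1.0 and the repo already has tags v0.1.1-nightly.0 and v0.1.1-nightly.1:

incrementPatchVersion('0.1.0'); // -> '0.1.1'
// getLatestNightlyCount() lists `git tag -l "v0.1.1-nightly.*"`, extracts the
// counters [0, 1], and returns Math.max(0, 1, -1) + 1 === 2
getNightlyTagName(); // -> 'v0.1.1-nightly.2'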

@@ -41,15 +41,14 @@ describe('getReleaseVersion', () => {
it('should calculate nightly version when IS_NIGHTLY is true', () => {
process.env.IS_NIGHTLY = 'true';
const knownDate = new Date('2025-07-20T10:00:00.000Z');
vi.setSystemTime(knownDate);
vi.mocked(fs.default.readFileSync).mockReturnValue(
JSON.stringify({ version: '0.1.0' }),
);
vi.mocked(execSync).mockReturnValue('abcdef');
// Mock git tag command to return empty (no existing nightly tags)
vi.mocked(execSync).mockReturnValue('');
const { releaseTag, releaseVersion, npmTag } = getReleaseVersion();
expect(releaseTag).toBe('v0.1.0-nightly.250720.abcdef');
expect(releaseVersion).toBe('0.1.0-nightly.250720.abcdef');
expect(releaseTag).toBe('v0.1.1-nightly.0');
expect(releaseVersion).toBe('0.1.1-nightly.0');
expect(npmTag).toBe('nightly');
});
@@ -99,8 +98,8 @@ describe('getReleaseVersion', () => {
describe('get-release-version script', () => {
it('should print version JSON to stdout when executed directly', () => {
const expectedJson = {
releaseTag: 'v0.1.0-nightly.20250705',
releaseVersion: '0.1.0-nightly.20250705',
releaseTag: 'v0.1.1-nightly.0',
releaseVersion: '0.1.1-nightly.0',
npmTag: 'nightly',
};
execSync.mockReturnValue(JSON.stringify(expectedJson));

View File

@@ -23,18 +23,24 @@ function writeJson(filePath, data) {
writeFileSync(filePath, JSON.stringify(data, null, 2) + '\n');
}
// 1. Get the version type from the command line arguments.
const versionType = process.argv[2];
if (!versionType) {
console.error('Error: No version type specified.');
console.error('Usage: npm run version <patch|minor|major|prerelease>');
// 1. Get the version from the command line arguments.
const versionArg = process.argv[2];
if (!versionArg) {
console.error('Error: No version specified.');
console.error(
'Usage: npm run version <version> (e.g., 1.2.3 or patch|minor|major|prerelease)',
);
process.exit(1);
}
// 2. Bump the version in the root and all workspace package.json files.
run(`npm version ${versionType} --no-git-tag-version --allow-same-version`);
// 2. Pass the argument through unchanged: `npm version` accepts either a
// specific version (e.g. 1.2.3) or a bump keyword (patch|minor|major|prerelease).
const npmVersionArg = versionArg;
// 3. Bump the version in the root and all workspace package.json files.
run(`npm version ${npmVersionArg} --no-git-tag-version --allow-same-version`);
run(
`npm version ${versionType} --workspaces --no-git-tag-version --allow-same-version`,
`npm version ${npmVersionArg} --workspaces --no-git-tag-version --allow-same-version`,
);
// 3. Get the new version number from the root package.json