Compare commits


2 Commits

Author SHA1 Message Date
mingholy.lmh b9a3a60418 fix: lint issues 2025-12-18 18:30:55 +08:00
mingholy.lmh 8928fc1534 feat: add modelProviders in settings to support custom model switching 2025-12-18 18:30:09 +08:00
54 changed files with 3083 additions and 928 deletions

View File

@@ -18,6 +18,8 @@ jobs:
- 'sandbox:docker'
node-version:
- '20.x'
- '22.x'
- '24.x'
steps:
- name: 'Checkout'
uses: 'actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8' # ratchet:actions/checkout@v5
@@ -65,13 +67,10 @@ jobs:
OPENAI_BASE_URL: '${{ secrets.OPENAI_BASE_URL }}'
OPENAI_MODEL: '${{ secrets.OPENAI_MODEL }}'
KEEP_OUTPUT: 'true'
SANDBOX: '${{ matrix.sandbox }}'
VERBOSE: 'true'
run: |-
if [[ "${{ matrix.sandbox }}" == "sandbox:docker" ]]; then
npm run test:integration:sandbox:docker
else
npm run test:integration:sandbox:none
fi
npm run "test:integration:${SANDBOX}"
e2e-test-macos:
name: 'E2E Test - macOS'

View File

@@ -121,11 +121,6 @@ jobs:
IS_PREVIEW: '${{ steps.vars.outputs.is_preview }}'
MANUAL_VERSION: '${{ inputs.version }}'
- name: 'Build CLI Bundle'
run: |
npm run build
npm run bundle
- name: 'Run Tests'
if: |-
${{ github.event.inputs.force_skip_tests != 'true' }}
@@ -137,6 +132,13 @@ jobs:
OPENAI_BASE_URL: '${{ secrets.OPENAI_BASE_URL }}'
OPENAI_MODEL: '${{ secrets.OPENAI_MODEL }}'
- name: 'Build CLI for Integration Tests'
if: |-
${{ github.event.inputs.force_skip_tests != 'true' }}
run: |
npm run build
npm run bundle
- name: 'Run SDK Integration Tests'
if: |-
${{ github.event.inputs.force_skip_tests != 'true' }}

View File

@@ -133,8 +133,8 @@ jobs:
${{ github.event.inputs.force_skip_tests != 'true' }}
run: |
npm run preflight
npm run test:integration:cli:sandbox:none
npm run test:integration:cli:sandbox:docker
npm run test:integration:sandbox:none
npm run test:integration:sandbox:docker
env:
OPENAI_API_KEY: '${{ secrets.OPENAI_API_KEY }}'
OPENAI_BASE_URL: '${{ secrets.OPENAI_BASE_URL }}'

View File

@@ -2,6 +2,27 @@
We would love to accept your patches and contributions to this project.
## Before you begin
### Sign our Contributor License Agreement
Contributions to this project must be accompanied by a
[Contributor License Agreement](https://cla.developers.google.com/about) (CLA).
You (or your employer) retain the copyright to your contribution; this simply
gives us permission to use and redistribute your contributions as part of the
project.
If you or your current employer have already signed the Google CLA (even if it
was for a different project), you probably don't need to do it again.
Visit <https://cla.developers.google.com/> to see your current agreements or to
sign a new one.
### Review our Community Guidelines
This project follows [Google's Open Source Community
Guidelines](https://opensource.google/conduct/).
## Contribution Process
### Code Reviews
@@ -53,6 +74,12 @@ Your PR should have a clear, descriptive title and a detailed description of the
In the PR description, explain the "why" behind your changes and link to the relevant issue (e.g., `Fixes #123`).
## Forking
If you fork the repository, you will be able to run the Build, Test, and Integration test workflows. However, for the integration tests to run, you'll need to add a [GitHub Repository Secret](https://docs.github.com/en/actions/security-for-github-actions/security-guides/using-secrets-in-github-actions#creating-secrets-for-a-repository) named `GEMINI_API_KEY` and set it to a valid API key that you have available. Your key and secret are private to your repo; no one without access can see your key, and you cannot see any secrets related to this repo.
Additionally, you will need to click on the `Actions` tab and enable workflows for your repository; it's the large blue button in the center of the screen.
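If you prefer the command line, the secret can also be added with the GitHub CLI; a minimal sketch, assuming `gh` is installed and authenticated, with `<your-user>` replaced by your account:
```bash
# Add the integration-test secret to your fork (value passed via --body)
gh secret set GEMINI_API_KEY --repo <your-user>/qwen-code --body "your-api-key-here"
# Enabling workflows on the fork still requires a one-time click on the Actions tab.
```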
## Development Setup and Workflow
This section guides contributors on how to build, modify, and understand the development setup of this project.
@@ -71,8 +98,8 @@ This section guides contributors on how to build, modify, and understand the dev
To clone the repository:
```bash
git clone https://github.com/QwenLM/qwen-code.git # Or your fork's URL
cd qwen-code
git clone https://github.com/google-gemini/gemini-cli.git # Or your fork's URL
cd gemini-cli
```
To install dependencies defined in `package.json` as well as root dependencies:
@@ -91,9 +118,9 @@ This command typically compiles TypeScript to JavaScript, bundles assets, and pr
### Enabling Sandboxing
[Sandboxing](#sandboxing) is highly recommended and requires, at a minimum, setting `QWEN_SANDBOX=true` in your `~/.env` and ensuring a sandboxing provider (e.g. `macOS Seatbelt`, `docker`, or `podman`) is available. See [Sandboxing](#sandboxing) for details.
[Sandboxing](#sandboxing) is highly recommended and requires, at a minimum, setting `GEMINI_SANDBOX=true` in your `~/.env` and ensuring a sandboxing provider (e.g. `macOS Seatbelt`, `docker`, or `podman`) is available. See [Sandboxing](#sandboxing) for details.
To build both the `qwen-code` CLI utility and the sandbox container, run `build:all` from the root directory:
To build both the `gemini` CLI utility and the sandbox container, run `build:all` from the root directory:
```bash
npm run build:all
@@ -103,13 +130,13 @@ To skip building the sandbox container, you can use `npm run build` instead.
### Running
To start the Qwen Code application from the source code (after building), run the following command from the root directory:
To start the Gemini CLI from the source code (after building), run the following command from the root directory:
```bash
npm start
```
If you'd like to run the source build outside of the qwen-code folder, you can utilize `npm link path/to/qwen-code/packages/cli` (see: [docs](https://docs.npmjs.com/cli/v9/commands/npm-link)) to run with `qwen-code`
If you'd like to run the source build outside of the gemini-cli folder, you can utilize `npm link path/to/gemini-cli/packages/cli` (see: [docs](https://docs.npmjs.com/cli/v9/commands/npm-link)) or `alias gemini="node path/to/gemini-cli/packages/cli"` to run with `gemini`
### Running Tests
@@ -127,7 +154,7 @@ This will run tests located in the `packages/core` and `packages/cli` directorie
#### Integration Tests
The integration tests are designed to validate the end-to-end functionality of Qwen Code. They are not run as part of the default `npm run test` command.
The integration tests are designed to validate the end-to-end functionality of the Gemini CLI. They are not run as part of the default `npm run test` command.
To run the integration tests, use the following command:
@@ -182,61 +209,19 @@ npm run lint
### Coding Conventions
- Please adhere to the coding style, patterns, and conventions used throughout the existing codebase.
- Consult [QWEN.md](https://github.com/QwenLM/qwen-code/blob/main/QWEN.md) (typically found in the project root) for specific instructions related to AI-assisted development, including conventions for React, comments, and Git usage.
- **Imports:** Pay special attention to import paths. The project uses ESLint to enforce restrictions on relative imports between packages.
### Project Structure
- `packages/`: Contains the individual sub-packages of the project.
- `cli/`: The command-line interface.
- `core/`: The core backend logic for Qwen Code.
- `core/`: The core backend logic for the Gemini CLI.
- `docs/`: Contains all project documentation.
- `scripts/`: Utility scripts for building, testing, and development tasks.
For more detailed architecture, see `docs/architecture.md`.
## Documentation Development
This section describes how to develop and preview the documentation locally.
### Prerequisites
1. Ensure you have Node.js (version 18+) installed
2. Have npm or yarn available
### Setup Documentation Site Locally
To work on the documentation and preview changes locally:
1. Navigate to the `docs-site` directory:
```bash
cd docs-site
```
2. Install dependencies:
```bash
npm install
```
3. Link the documentation content from the main `docs` directory:
```bash
npm run link
```
This creates a symbolic link from `../docs` to `content` in the docs-site project, allowing the documentation content to be served by the Next.js site.
4. Start the development server:
```bash
npm run dev
```
5. Open [http://localhost:3000](http://localhost:3000) in your browser to see the documentation site with live updates as you make changes.
Any changes made to the documentation files in the main `docs` directory will be reflected immediately in the documentation site.
## Debugging
### VS Code:
@@ -246,7 +231,7 @@ Any changes made to the documentation files in the main `docs` directory will be
```bash
npm run debug
```
This command runs `node --inspect-brk dist/index.js` within the `packages/cli` directory, pausing execution until a debugger attaches. You can then open `chrome://inspect` in your Chrome browser to connect to the debugger.
This command runs `node --inspect-brk dist/gemini.js` within the `packages/cli` directory, pausing execution until a debugger attaches. You can then open `chrome://inspect` in your Chrome browser to connect to the debugger.
2. In VS Code, use the "Attach" launch configuration (found in `.vscode/launch.json`).
Alternatively, you can use the "Launch Program" configuration in VS Code if you prefer to launch the currently open file directly, but 'F5' is generally recommended.
@@ -254,16 +239,16 @@ Alternatively, you can use the "Launch Program" configuration in VS Code if you
To hit a breakpoint inside the sandbox container run:
```bash
DEBUG=1 qwen-code
DEBUG=1 gemini
```
**Note:** If you have `DEBUG=true` in a project's `.env` file, it won't affect qwen-code due to automatic exclusion. Use `.qwen-code/.env` files for qwen-code specific debug settings.
**Note:** If you have `DEBUG=true` in a project's `.env` file, it won't affect gemini-cli due to automatic exclusion. Use `.gemini/.env` files for gemini-cli specific debug settings.
### React DevTools
To debug the CLI's React-based UI, you can use React DevTools. Ink, the library used for the CLI's interface, is compatible with React DevTools version 4.x.
1. **Start the Qwen Code application in development mode:**
1. **Start the Gemini CLI in development mode:**
```bash
DEV=true npm start
@@ -285,10 +270,23 @@ To debug the CLI's React-based UI, you can use React DevTools. Ink, the library
```
Your running CLI application should then connect to React DevTools.
![](/docs/assets/connected_devtools.png)
## Sandboxing
> TBD
### macOS Seatbelt
On macOS, `qwen` uses Seatbelt (`sandbox-exec`) under a `permissive-open` profile (see `packages/cli/src/utils/sandbox-macos-permissive-open.sb`) that restricts writes to the project folder but otherwise allows all other operations and outbound network traffic ("open") by default. You can switch to a `restrictive-closed` profile (see `packages/cli/src/utils/sandbox-macos-restrictive-closed.sb`) that declines all operations and outbound network traffic ("closed") by default by setting `SEATBELT_PROFILE=restrictive-closed` in your environment or `.env` file. Available built-in profiles are `{permissive,restrictive}-{open,closed,proxied}` (see below for proxied networking). You can also switch to a custom profile `SEATBELT_PROFILE=<profile>` if you also create a file `.qwen/sandbox-macos-<profile>.sb` under your project settings directory `.qwen`.
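For example, to try the stricter profile, set the variable for a single run or persist it in your `.env` file (a minimal sketch using the built-in profile names listed above):
```bash
# One-off run with the restrictive-closed Seatbelt profile
SEATBELT_PROFILE=restrictive-closed qwen

# Or persist the choice in the project's .env file
echo 'SEATBELT_PROFILE=restrictive-closed' >> .env
```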
### Container-based Sandboxing (All Platforms)
For stronger container-based sandboxing on macOS or other platforms, you can set `GEMINI_SANDBOX=true|docker|podman|<command>` in your environment or `.env` file. The specified command (or if `true` then either `docker` or `podman`) must be installed on the host machine. Once enabled, `npm run build:all` will build a minimal container ("sandbox") image and `npm start` will launch inside a fresh instance of that container. The first build can take 20-30s (mostly due to downloading the base image), but after that both build and start overhead should be minimal. Default builds (`npm run build`) will not rebuild the sandbox.
Container-based sandboxing mounts the project directory (and system temp directory) with read-write access and is started/stopped/removed automatically as you start/stop Gemini CLI. Files created within the sandbox should be automatically mapped to your user/group on the host machine. You can easily specify additional mounts, ports, or environment variables by setting `SANDBOX_{MOUNTS,PORTS,ENV}` as needed. You can also fully customize the sandbox for your projects by creating the files `.qwen/sandbox.Dockerfile` and/or `.qwen/sandbox.bashrc` under your project settings directory (`.qwen`) and running `qwen` with `BUILD_SANDBOX=1` to trigger a build of your custom sandbox.
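A minimal sketch of enabling container-based sandboxing from a project `.env` file; the exact value syntax for `SANDBOX_MOUNTS`/`SANDBOX_PORTS`/`SANDBOX_ENV` isn't specified here, so those variables are omitted:
```bash
# Enable docker-based sandboxing for this project
echo 'GEMINI_SANDBOX=docker' >> .env

# Build the CLI plus the sandbox image (the first build downloads the base image)
npm run build:all

# After adding .qwen/sandbox.Dockerfile or .qwen/sandbox.bashrc, rebuild the custom sandbox
BUILD_SANDBOX=1 qwen
```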
#### Proxied Networking
All sandboxing methods, including macOS Seatbelt using `*-proxied` profiles, support restricting outbound network traffic through a custom proxy server that can be specified as `GEMINI_SANDBOX_PROXY_COMMAND=<command>`, where `<command>` must start a proxy server that listens on `:::8877` for relevant requests. See `docs/examples/proxy-script.md` for a minimal proxy that only allows `HTTPS` connections to `example.com:443` (e.g. `curl https://example.com`) and declines all other requests. The proxy is started and stopped automatically alongside the sandbox.
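As a sketch, the proxy command is just an executable that the sandbox launches; the script path below is hypothetical, and `docs/examples/proxy-script.md` contains a working proxy:
```bash
# Route sandbox network traffic through a custom proxy listening on :::8877.
# scripts/example-proxy.js is a hypothetical path standing in for your own proxy script.
GEMINI_SANDBOX=docker \
GEMINI_SANDBOX_PROXY_COMMAND="node scripts/example-proxy.js" \
qwen
```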
## Manual Publish

View File

@@ -1,9 +1,9 @@
# Makefile for qwen-code
# Makefile for gemini-cli
.PHONY: help install build build-sandbox build-all test lint format preflight clean start debug release run-npx create-alias
help:
@echo "Makefile for qwen-code"
@echo "Makefile for gemini-cli"
@echo ""
@echo "Usage:"
@echo " make install - Install npm dependencies"
@@ -14,11 +14,11 @@ help:
@echo " make format - Format the code"
@echo " make preflight - Run formatting, linting, and tests"
@echo " make clean - Remove generated files"
@echo " make start - Start the Qwen Code CLI"
@echo " make debug - Start the Qwen Code CLI in debug mode"
@echo " make start - Start the Gemini CLI"
@echo " make debug - Start the Gemini CLI in debug mode"
@echo ""
@echo " make run-npx - Run the CLI using npx (for testing the published package)"
@echo " make create-alias - Create a 'qwen' alias for your shell"
@echo " make create-alias - Create a 'gemini' alias for your shell"
install:
npm install

README.md
View File

@@ -1,152 +1,382 @@
# Qwen Code
<div align="center">
![Qwen Code Screenshot](./docs/assets/qwen-screenshot.png)
[![npm version](https://img.shields.io/npm/v/@qwen-code/qwen-code.svg)](https://www.npmjs.com/package/@qwen-code/qwen-code)
[![License](https://img.shields.io/github/license/QwenLM/qwen-code.svg)](./LICENSE)
[![Node.js Version](https://img.shields.io/badge/node-%3E%3D20.0.0-brightgreen.svg)](https://nodejs.org/)
[![Downloads](https://img.shields.io/npm/dm/@qwen-code/qwen-code.svg)](https://www.npmjs.com/package/@qwen-code/qwen-code)
**An open-source AI agent that lives in your terminal.**
**AI-powered command-line workflow tool for developers**
<a href="https://qwenlm.github.io/qwen-code-docs/zh/users/overview">中文</a> |
<a href="https://qwenlm.github.io/qwen-code-docs/de/users/overview">Deutsch</a> |
<a href="https://qwenlm.github.io/qwen-code-docs/fr/users/overview">français</a> |
<a href="https://qwenlm.github.io/qwen-code-docs/ja/users/overview">日本語</a> |
<a href="https://qwenlm.github.io/qwen-code-docs/ru/users/overview">Русский</a> |
<a href="https://qwenlm.github.io/qwen-code-docs/pt-BR/users/overview">Português (Brasil)</a>
[Installation](#installation) • [Quick Start](#quick-start) • [Features](#key-features) • [Documentation](./docs/) • [Contributing](./CONTRIBUTING.md)
</div>
Qwen Code is an open-source AI agent for the terminal, optimized for [Qwen3-Coder](https://github.com/QwenLM/Qwen3-Coder). It helps you understand large codebases, automate tedious work, and ship faster.
<div align="center">
<a href="https://qwenlm.github.io/qwen-code-docs/de/">Deutsch</a> |
<a href="https://qwenlm.github.io/qwen-code-docs/fr">français</a> |
<a href="https://qwenlm.github.io/qwen-code-docs/ja/">日本語</a> |
<a href="https://qwenlm.github.io/qwen-code-docs/ru">Русский</a> |
<a href="https://qwenlm.github.io/qwen-code-docs/zh/">中文</a>
</div>
![](https://gw.alicdn.com/imgextra/i1/O1CN01D2DviS1wwtEtMwIzJ_!!6000000006373-2-tps-1600-900.png)
Qwen Code is a powerful command-line AI workflow tool adapted from [**Gemini CLI**](https://github.com/google-gemini/gemini-cli), specifically optimized for [Qwen3-Coder](https://github.com/QwenLM/Qwen3-Coder) models. It enhances your development workflow with advanced code understanding, automated tasks, and intelligent assistance.
## Why Qwen Code?
## 💡 Free Options Available
- **OpenAI-compatible, OAuth free tier**: use an OpenAI-compatible API, or sign in with Qwen OAuth to get 2,000 free requests/day.
- **Open-source, co-evolving**: both the framework and the Qwen3-Coder model are open-source—and they ship and evolve together.
- **Agentic workflow, feature-rich**: rich built-in tools (Skills, SubAgents, Plan Mode) for a full agentic workflow and a Claude Code-like experience.
- **Terminal-first, IDE-friendly**: built for developers who live in the command line, with optional integration for VS Code and Zed.
Get started with Qwen Code at no cost using any of these free options:
### 🔥 Qwen OAuth (Recommended)
- **2,000 requests per day** with no token limits
- **60 requests per minute** rate limit
- Simply run `qwen` and authenticate with your qwen.ai account
- Automatic credential management and refresh
- Use the `/auth` command to switch to Qwen OAuth if you initialized with OpenAI-compatible mode
### 🌏 Regional Free Tiers
- **Mainland China**: ModelScope offers **2,000 free API calls per day**
- **International**: OpenRouter provides **up to 1,000 free API calls per day** worldwide
For detailed setup instructions, see [Authorization](#authorization).
> [!WARNING]
> **Token Usage Notice**: Qwen Code may issue multiple API calls per cycle, resulting in higher token usage (similar to Claude Code). We're actively optimizing API efficiency.
## Key Features
- **Code Understanding & Editing** - Query and edit large codebases beyond traditional context window limits
- **Workflow Automation** - Automate operational tasks like handling pull requests and complex rebases
- **Enhanced Parser** - Adapted parser specifically optimized for Qwen-Coder models
- **Vision Model Support** - Automatically detect images in your input and seamlessly switch to vision-capable models for multimodal analysis
## Installation
#### Prerequisites
### Prerequisites
Ensure you have [Node.js version 20](https://nodejs.org/en/download) or higher installed.
```bash
# Node.js 20+
curl -qL https://www.npmjs.com/install.sh | sh
```
#### NPM (recommended)
### Install from npm
```bash
npm install -g @qwen-code/qwen-code@latest
qwen --version
```
#### Homebrew (macOS, Linux)
### Install from source
```bash
git clone https://github.com/QwenLM/qwen-code.git
cd qwen-code
npm install
npm install -g .
```
### Install globally with Homebrew (macOS/Linux)
```bash
brew install qwen-code
```
## VS Code Extension
In addition to the CLI tool, Qwen Code also provides a **VS Code extension** that brings AI-powered coding assistance directly into your editor with features like file system operations, native diffing, interactive chat, and more.
> 📦 The extension is currently in development. For installation, features, and development guide, see the [VS Code Extension README](./packages/vscode-ide-companion/README.md).
## Quick Start
```bash
# Start Qwen Code (interactive)
# Start Qwen Code
qwen
# Then, in the session:
/help
/auth
# Example commands
> Explain this codebase structure
> Help me refactor this function
> Generate unit tests for this module
```
On first use, you'll be prompted to sign in. You can run `/auth` anytime to switch authentication methods.
### Session Management
Example prompts:
Control your token usage with configurable session limits to optimize costs and performance.
```text
What does this project do?
Explain the codebase structure.
Help me refactor this function.
Generate unit tests for this module.
#### Configure Session Token Limit
Create or edit `.qwen/settings.json` in your home directory:
```json
{
"sessionTokenLimit": 32000
}
```
#### Session Commands
- **`/compress`** - Compress conversation history to continue within token limits
- **`/clear`** - Clear all conversation history and start fresh
- **`/stats`** - Check current token usage and limits
> 📝 **Note**: Session token limit applies to a single conversation, not cumulative API calls.
### Vision Model Configuration
Qwen Code includes intelligent vision model auto-switching that detects images in your input and can automatically switch to vision-capable models for multimodal analysis. **This feature is enabled by default** - when you include images in your queries, you'll see a dialog asking how you'd like to handle the vision model switch.
#### Skip the Switch Dialog (Optional)
If you don't want to see the interactive dialog each time, configure the default behavior in your `.qwen/settings.json`:
```json
{
"experimental": {
"vlmSwitchMode": "once"
}
}
```
**Available modes:**
- **`"once"`** - Switch to vision model for this query only, then revert
- **`"session"`** - Switch to vision model for the entire session
- **`"persist"`** - Continue with current model (no switching)
- **Not set** - Show interactive dialog each time (default)
#### Command Line Override
You can also set the behavior via command line:
```bash
# Switch once per query
qwen --vlm-switch-mode once
# Switch for entire session
qwen --vlm-switch-mode session
# Never switch automatically
qwen --vlm-switch-mode persist
```
#### Disable Vision Models (Optional)
To completely disable vision model support, add to your `.qwen/settings.json`:
```json
{
"experimental": {
"visionModelPreview": false
}
}
```
> 💡 **Tip**: In YOLO mode (`--yolo`), vision switching happens automatically without prompts when images are detected.
### Authorization
Choose your preferred authentication method based on your needs:
#### 1. Qwen OAuth (🚀 Recommended - Start in 30 seconds)
The easiest way to get started - completely free with generous quotas:
```bash
# Just run this command and follow the browser authentication
qwen
```
**What happens:**
1. **Instant Setup**: CLI opens your browser automatically
2. **One-Click Login**: Authenticate with your qwen.ai account
3. **Automatic Management**: Credentials cached locally for future use
4. **No Configuration**: Zero setup required - just start coding!
**Free Tier Benefits:**
- **2,000 requests/day** (no token counting needed)
- **60 requests/minute** rate limit
- **Automatic credential refresh**
- **Zero cost** for individual users
- **Note**: Model fallback may occur to maintain service quality
#### 2. OpenAI-Compatible API
Use API keys for OpenAI or other compatible providers:
**Configuration Methods:**
1. **Environment Variables**
```bash
export OPENAI_API_KEY="your_api_key_here"
export OPENAI_BASE_URL="your_api_endpoint"
export OPENAI_MODEL="your_model_choice"
```
2. **Project `.env` File**
Create a `.env` file in your project root:
```env
OPENAI_API_KEY=your_api_key_here
OPENAI_BASE_URL=your_api_endpoint
OPENAI_MODEL=your_model_choice
```
**API Provider Options**
> ⚠️ **Regional Notice:**
>
> - **Mainland China**: Use Alibaba Cloud Bailian or ModelScope
> - **International**: Use Alibaba Cloud ModelStudio or OpenRouter
<details>
<summary>Click to watch a demo video</summary>
<summary><b>🇨🇳 For Users in Mainland China</b></summary>
<video src="https://cloud.video.taobao.com/vod/HLfyppnCHplRV9Qhz2xSqeazHeRzYtG-EYJnHAqtzkQ.mp4" controls>
Your browser does not support the video tag.
</video>
**Option 1: Alibaba Cloud Bailian** ([Apply for API Key](https://bailian.console.aliyun.com/))
```bash
export OPENAI_API_KEY="your_api_key_here"
export OPENAI_BASE_URL="https://dashscope.aliyuncs.com/compatible-mode/v1"
export OPENAI_MODEL="qwen3-coder-plus"
```
**Option 2: ModelScope (Free Tier)** ([Apply for API Key](https://modelscope.cn/docs/model-service/API-Inference/intro))
- ✅ **2,000 free API calls per day**
- ⚠️ Connect your Aliyun account to avoid authentication errors
```bash
export OPENAI_API_KEY="your_api_key_here"
export OPENAI_BASE_URL="https://api-inference.modelscope.cn/v1"
export OPENAI_MODEL="Qwen/Qwen3-Coder-480B-A35B-Instruct"
```
</details>
## Authentication
<details>
<summary><b>🌍 For International Users</b></summary>
Qwen Code supports two authentication methods:
- **Qwen OAuth (recommended & free)**: sign in with your `qwen.ai` account in a browser.
- **OpenAI-compatible API**: use `OPENAI_API_KEY` (and optionally a custom base URL / model).
#### Qwen OAuth (recommended)
Start `qwen`, then run:
**Option 1: Alibaba Cloud ModelStudio** ([Apply for API Key](https://modelstudio.console.alibabacloud.com/))
```bash
/auth
export OPENAI_API_KEY="your_api_key_here"
export OPENAI_BASE_URL="https://dashscope-intl.aliyuncs.com/compatible-mode/v1"
export OPENAI_MODEL="qwen3-coder-plus"
```
Choose **Qwen OAuth** and complete the browser flow. Your credentials are cached locally so you usually won't need to log in again.
#### OpenAI-compatible API (API key)
Environment variables (recommended for CI / headless environments):
**Option 2: OpenRouter (Free Tier Available)** ([Apply for API Key](https://openrouter.ai/))
```bash
export OPENAI_API_KEY="your-api-key-here"
export OPENAI_BASE_URL="https://api.openai.com/v1" # optional
export OPENAI_MODEL="gpt-4o" # optional
export OPENAI_API_KEY="your_api_key_here"
export OPENAI_BASE_URL="https://openrouter.ai/api/v1"
export OPENAI_MODEL="qwen/qwen3-coder:free"
```
For details (including `.qwen/.env` loading and security notes), see the [authentication guide](https://qwenlm.github.io/qwen-code-docs/en/users/configuration/auth/).
</details>
## Usage
## Usage Examples
As an open-source terminal agent, you can use Qwen Code in four primary ways:
1. Interactive mode (terminal UI)
2. Headless mode (scripts, CI)
3. IDE integration (VS Code, Zed)
4. TypeScript SDK
#### Interactive mode
### 🔍 Explore Codebases
```bash
cd your-project/
qwen
# Architecture analysis
> Describe the main pieces of this system's architecture
> What are the key dependencies and how do they interact?
> Find all API endpoints and their authentication methods
```
Run `qwen` in your project folder to launch the interactive terminal UI. Use `@` to reference local files (for example `@src/main.ts`).
#### Headless mode
### 💻 Code Development
```bash
cd your-project/
qwen -p "your question"
# Refactoring
> Refactor this function to improve readability and performance
> Convert this class to use dependency injection
> Split this large module into smaller, focused components
# Code generation
> Create a REST API endpoint for user management
> Generate unit tests for the authentication module
> Add error handling to all database operations
```
Use `-p` to run Qwen Code without the interactive UI—ideal for scripts, automation, and CI/CD. Learn more: [Headless mode](https://qwenlm.github.io/qwen-code-docs/en/users/features/headless).
### 🔄 Automate Workflows
#### IDE integration
```bash
# Git automation
> Analyze git commits from the last 7 days, grouped by feature
> Create a changelog from recent commits
> Find all TODO comments and create GitHub issues
Use Qwen Code inside your editor (VS Code and Zed):
# File operations
> Convert all images in this directory to PNG format
> Rename all test files to follow the *.test.ts pattern
> Find and remove all console.log statements
```
- [Use in VS Code](https://qwenlm.github.io/qwen-code-docs/en/users/integration-vscode/)
- [Use in Zed](https://qwenlm.github.io/qwen-code-docs/en/users/integration-zed/)
### 🐛 Debugging & Analysis
#### TypeScript SDK
```bash
# Performance analysis
> Identify performance bottlenecks in this React component
> Find all N+1 query problems in the codebase
Build on top of Qwen Code with the TypeScript SDK:
# Security audit
> Check for potential SQL injection vulnerabilities
> Find all hardcoded credentials or API keys
```
- [Use the Qwen Code SDK](./packages/sdk-typescript/README.md)
## Popular Tasks
### 📚 Understand New Codebases
```text
> What are the core business logic components?
> What security mechanisms are in place?
> How does the data flow through the system?
> What are the main design patterns used?
> Generate a dependency graph for this module
```
### 🔨 Code Refactoring & Optimization
```text
> What parts of this module can be optimized?
> Help me refactor this class to follow SOLID principles
> Add proper error handling and logging
> Convert callbacks to async/await pattern
> Implement caching for expensive operations
```
### 📝 Documentation & Testing
```text
> Generate comprehensive JSDoc comments for all public APIs
> Write unit tests with edge cases for this component
> Create API documentation in OpenAPI format
> Add inline comments explaining complex algorithms
> Generate a README for this module
```
### 🚀 Development Acceleration
```text
> Set up a new Express server with authentication
> Create a React component with TypeScript and tests
> Implement a rate limiter middleware
> Add database migrations for new schema
> Configure CI/CD pipeline for this project
```
## Commands & Shortcuts
@@ -156,7 +386,6 @@ Build on top of Qwen Code with the TypeScript SDK:
- `/clear` - Clear conversation history
- `/compress` - Compress history to save tokens
- `/stats` - Show current session information
- `/bug` - Submit a bug report
- `/exit` or `/quit` - Exit Qwen Code
### Keyboard Shortcuts
@@ -165,19 +394,6 @@ Build on top of Qwen Code with the TypeScript SDK:
- `Ctrl+D` - Exit (on empty line)
- `Up/Down` - Navigate command history
> Learn more about [Commands](https://qwenlm.github.io/qwen-code-docs/en/users/features/commands/)
>
> **Tip**: In YOLO mode (`--yolo`), vision switching happens automatically without prompts when images are detected. Learn more about [Approval Mode](https://qwenlm.github.io/qwen-code-docs/en/users/features/approval-mode/)
## Configuration
Qwen Code can be configured via `settings.json`, environment variables, and CLI flags.
- **User settings**: `~/.qwen/settings.json`
- **Project settings**: `.qwen/settings.json`
See [settings](https://qwenlm.github.io/qwen-code-docs/en/users/configuration/settings/) for available options and precedence.
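As a sketch, a minimal user-level settings file using the `sessionTokenLimit` key shown earlier might look like this:
```bash
# Create a minimal user-level settings file (keys and paths as documented above)
mkdir -p ~/.qwen
cat > ~/.qwen/settings.json <<'EOF'
{
  "sessionTokenLimit": 32000
}
EOF
```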
## Benchmark Results
### Terminal-Bench Performance
@@ -187,18 +403,24 @@ See [settings](https://qwenlm.github.io/qwen-code-docs/en/users/configuration/se
| Qwen Code | Qwen3-Coder-480A35 | 37.5% |
| Qwen Code | Qwen3-Coder-30BA3B | 31.3% |
## Ecosystem
## Development & Contributing
Looking for a graphical interface?
See [CONTRIBUTING.md](./CONTRIBUTING.md) to learn how to contribute to the project.
- [**Gemini CLI Desktop**](https://github.com/Piebald-AI/gemini-cli-desktop): a cross-platform desktop/web/mobile UI for Qwen Code
For detailed authentication setup, see the [authentication guide](./docs/cli/authentication.md).
## Troubleshooting
If you encounter issues, check the [troubleshooting guide](https://qwenlm.github.io/qwen-code-docs/en/users/support/troubleshooting/).
To report a bug from within the CLI, run `/bug` and include a short title and repro steps.
If you encounter issues, check the [troubleshooting guide](docs/troubleshooting.md).
## Acknowledgments
This project is based on [Google Gemini CLI](https://github.com/google-gemini/gemini-cli). We acknowledge and appreciate the excellent work of the Gemini CLI team. Our main contribution focuses on parser-level adaptations to better support Qwen-Coder models.
## License
[LICENSE](./LICENSE)
## Star History
[![Star History Chart](https://api.star-history.com/svg?repos=QwenLM/qwen-code&type=Date)](https://www.star-history.com/#QwenLM/qwen-code&Date)

View File

@@ -135,6 +135,69 @@ Settings are organized into categories. All settings should be placed within the
- `"./custom-logs"` - Logs to `./custom-logs` relative to current directory
- `"/tmp/openai-logs"` - Logs to absolute path `/tmp/openai-logs`
#### `modelProviders`
The `modelProviders` configuration allows you to define multiple models for a specific authentication type. Currently, only the `openai` authentication type is supported.
| Field | Type | Required | Description | Default |
| -------------------------------------- | ------- | -------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------ |
| `id` | string | Yes | Unique identifier for the model within the authentication type. | - |
| `name` | string | No | Display name for the model. | Same as `id` |
| `description` | string | No | A brief description of the model. | `undefined` |
| `envKey` | string | No | The name of the environment variable containing the API key for this model. For example, if set to `"OPENAI_API_KEY"`, the system will read the API key from `process.env.OPENAI_API_KEY`. This keeps API keys secure in environment variables. | `undefined` |
| `baseUrl` | string | No | Custom API endpoint URL. If not specified, uses the default URL for the authentication type. | `undefined` |
| `capabilities.vision` | boolean | No | Whether the model supports vision/image inputs. | `false` |
| `generationConfig.temperature` | number | No | Sampling temperature. Refer to your provider's documentation. | `undefined` |
| `generationConfig.top_p` | number | No | Nucleus sampling parameter. Refer to your provider's documentation. | `undefined` |
| `generationConfig.top_k` | number | No | Top-k sampling parameter. Refer to your provider's documentation. | `undefined` |
| `generationConfig.max_tokens` | number | No | Maximum output tokens. | `undefined` |
| `generationConfig.timeout` | number | No | Request timeout in milliseconds. | `undefined` |
| `generationConfig.maxRetries` | number | No | Maximum retry attempts. | `undefined` |
| `generationConfig.disableCacheControl` | boolean | No | Disable cache control for DashScope providers. | `false` |
**Example Configuration:**
```json
{
"modelProviders": {
"openai": [
{
"id": "gpt-4-turbo",
"name": "GPT-4 Turbo",
"description": "Most capable GPT-4 model",
"envKey": "OPENAI_API_KEY",
"baseUrl": "https://api.openai.com/v1",
"capabilities": {
"vision": true
},
"generationConfig": {
"temperature": 0.7,
"max_tokens": 4096
}
},
{
"id": "deepseek-coder",
"name": "DeepSeek Coder",
"description": "DeepSeek coding model",
"envKey": "DEEPSEEK_API_KEY",
"baseUrl": "https://api.deepseek.com/v1",
"generationConfig": {
"temperature": 0.5,
"max_tokens": 8192
}
}
]
}
}
```
**Security Note:** API keys should never be stored directly in configuration files. Always use the `envKey` field to reference environment variables where your API keys are stored. Set these environment variables in your shell profile or `.env` files:
```bash
export OPENAI_API_KEY="your-api-key-here"
export DEEPSEEK_API_KEY="your-deepseek-key-here"
```
#### context
| Setting | Type | Description | Default |

View File

@@ -16,15 +16,16 @@ The plugin **MUST** run a local HTTP server that implements the **Model Context
- **Endpoint:** The server should expose a single endpoint (e.g., `/mcp`) for all MCP communication.
- **Port:** The server **MUST** listen on a dynamically assigned port (i.e., listen on port `0`).
### 2. Discovery Mechanism: The Lock File
### 2. Discovery Mechanism: The Port File
For Qwen Code to connect, it needs to discover what port your server is using. The plugin **MUST** facilitate this by creating a "lock file" and setting the port environment variable.
For Qwen Code to connect, it needs to discover which IDE instance it's running in and what port your server is using. The plugin **MUST** facilitate this by creating a "discovery file."
- **How the CLI Finds the File:** The CLI reads the port from `QWEN_CODE_IDE_SERVER_PORT`, then reads `~/.qwen/ide/<PORT>.lock`. (Legacy fallbacks exist for older extensions; see note below.)
- **File Location:** The file must be created in a specific directory: `~/.qwen/ide/`. Your plugin must create this directory if it doesn't exist.
- **How the CLI Finds the File:** The CLI determines the Process ID (PID) of the IDE it's running in by traversing the process tree. It then looks for a discovery file that contains this PID in its name.
- **File Location:** The file must be created in a specific directory: `os.tmpdir()/qwen/ide/`. Your plugin must create this directory if it doesn't exist.
- **File Naming Convention:** The filename is critical and **MUST** follow the pattern:
`<PORT>.lock`
- `<PORT>`: The port your MCP server is listening on.
`qwen-code-ide-server-${PID}-${PORT}.json`
- `${PID}`: The process ID of the parent IDE process. Your plugin must determine this PID and include it in the filename.
- `${PORT}`: The port your MCP server is listening on.
- **File Content & Workspace Validation:** The file **MUST** contain a JSON object with the following structure:
```json
@@ -32,20 +33,21 @@ For Qwen Code to connect, it needs to discover what port your server is using. T
"port": 12345,
"workspacePath": "/path/to/project1:/path/to/project2",
"authToken": "a-very-secret-token",
"ppid": 1234,
"ideName": "VS Code"
"ideInfo": {
"name": "vscode",
"displayName": "VS Code"
}
}
```
- `port` (number, required): The port of the MCP server.
- `workspacePath` (string, required): A list of all open workspace root paths, delimited by the OS-specific path separator (`:` for Linux/macOS, `;` for Windows). The CLI uses this path to ensure it's running in the same project folder that's open in the IDE. If the CLI's current working directory is not a sub-directory of `workspacePath`, the connection will be rejected. Your plugin **MUST** provide the correct, absolute path(s) to the root of the open workspace(s).
- `authToken` (string, required): A secret token for securing the connection. The CLI will include this token in an `Authorization: Bearer <token>` header on all requests.
- `ppid` (number, required): The parent process ID of the IDE process.
- `ideName` (string, required): A user-friendly name for the IDE (e.g., `VS Code`, `JetBrains IDE`).
- `ideInfo` (object, required): Information about the IDE.
- `name` (string, required): A short, lowercase identifier for the IDE (e.g., `vscode`, `jetbrains`).
- `displayName` (string, required): A user-friendly name for the IDE (e.g., `VS Code`, `JetBrains IDE`).
- **Authentication:** To secure the connection, the plugin **MUST** generate a unique, secret token and include it in the discovery file. The CLI will then include this token in the `Authorization` header for all requests to the MCP server (e.g., `Authorization: Bearer a-very-secret-token`). Your server **MUST** validate this token on every request and reject any that are unauthorized.
- **Environment Variables (Required):** Your plugin **MUST** set `QWEN_CODE_IDE_SERVER_PORT` in the integrated terminal so the CLI can locate the correct `<PORT>.lock` file.
**Legacy note:** For extensions older than v0.5.1, Qwen Code may fall back to reading JSON files in the system temp directory named `qwen-code-ide-server-<PID>.json` or `qwen-code-ide-server-<PORT>.json`. New integrations should not rely on these legacy files.
- **Tie-Breaking with Environment Variables (Recommended):** For the most reliable experience, your plugin **SHOULD** both create the discovery file and set the `QWEN_CODE_IDE_SERVER_PORT` environment variable in the integrated terminal. The file serves as the primary discovery mechanism, but the environment variable is crucial for tie-breaking. If a user has multiple IDE windows open for the same workspace, the CLI uses the `QWEN_CODE_IDE_SERVER_PORT` variable to identify and connect to the correct window's server.
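A quick way to sanity-check the wiring from the IDE's integrated terminal; this sketch assumes the `~/.qwen/ide/<PORT>.lock` variant described above, and the path differs for the temp-directory `qwen-code-ide-server-<PID>-<PORT>.json` variant:
```bash
# Confirm the port variable is set and the discovery file exists and is readable
echo "MCP server port: $QWEN_CODE_IDE_SERVER_PORT"
cat "$HOME/.qwen/ide/${QWEN_CODE_IDE_SERVER_PORT}.lock"
```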
## II. The Context Interface

package-lock.json (generated)
View File

@@ -1,12 +1,12 @@
{
"name": "@qwen-code/qwen-code",
"version": "0.6.0",
"version": "0.5.1",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "@qwen-code/qwen-code",
"version": "0.6.0",
"version": "0.5.1",
"workspaces": [
"packages/*"
],
@@ -568,6 +568,7 @@
}
],
"license": "MIT",
"peer": true,
"engines": {
"node": ">=18"
},
@@ -591,6 +592,7 @@
}
],
"license": "MIT",
"peer": true,
"engines": {
"node": ">=18"
}
@@ -2155,6 +2157,7 @@
"resolved": "https://registry.npmjs.org/@opentelemetry/api/-/api-1.9.0.tgz",
"integrity": "sha512-3giAOQvZiH5F9bMlMiv8+GSPMeqg0dbaeo58/0SlA9sxSqZhnUtxzX9/2FzyhS9sWQf5S0GJE0AKBrFqjpeYcg==",
"license": "Apache-2.0",
"peer": true,
"engines": {
"node": ">=8.0.0"
}
@@ -3668,6 +3671,7 @@
"resolved": "https://registry.npmjs.org/@testing-library/dom/-/dom-10.4.1.tgz",
"integrity": "sha512-o4PXJQidqJl82ckFaXUeoAW+XysPLauYI43Abki5hABd853iMhitooc6znOnczgbTYmEP6U6/y1ZyKAIsvMKGg==",
"license": "MIT",
"peer": true,
"dependencies": {
"@babel/code-frame": "^7.10.4",
"@babel/runtime": "^7.12.5",
@@ -4138,6 +4142,7 @@
"integrity": "sha512-AwAfQ2Wa5bCx9WP8nZL2uMZWod7J7/JSplxbTmBQ5ms6QpqNYm672H0Vu9ZVKVngQ+ii4R/byguVEUZQyeg44g==",
"devOptional": true,
"license": "MIT",
"peer": true,
"dependencies": {
"csstype": "^3.0.2"
}
@@ -4148,6 +4153,7 @@
"integrity": "sha512-4hOiT/dwO8Ko0gV1m/TJZYk3y0KBnY9vzDh7W+DH17b2HFSOGgdj33dhihPeuy3l0q23+4e+hoXHV6hCC4dCXw==",
"dev": true,
"license": "MIT",
"peer": true,
"peerDependencies": {
"@types/react": "^19.0.0"
}
@@ -4353,6 +4359,7 @@
"integrity": "sha512-6sMvZePQrnZH2/cJkwRpkT7DxoAWh+g6+GFRK6bV3YQo7ogi3SX5rgF6099r5Q53Ma5qeT7LGmOmuIutF4t3lA==",
"dev": true,
"license": "MIT",
"peer": true,
"dependencies": {
"@typescript-eslint/scope-manager": "8.35.0",
"@typescript-eslint/types": "8.35.0",
@@ -5128,6 +5135,7 @@
"resolved": "https://registry.npmjs.org/acorn/-/acorn-8.15.0.tgz",
"integrity": "sha512-NZyJarBfL7nWwIq+FDL6Zp/yHEhePMNnnJ0y3qfieCrmNvYct8uvtiV41UvlSe6apAfk0fY1FbWx+NwfmpvtTg==",
"license": "MIT",
"peer": true,
"bin": {
"acorn": "bin/acorn"
},
@@ -5522,8 +5530,7 @@
"version": "1.1.1",
"resolved": "https://registry.npmjs.org/array-flatten/-/array-flatten-1.1.1.tgz",
"integrity": "sha512-PCVAQswWemu6UdxsDFFX/+gVeYqKAod3D3UVm91jHwynguOwAvYPhx8nNlM++NqRcK6CxxpUafjmhIdKiHibqg==",
"license": "MIT",
"peer": true
"license": "MIT"
},
"node_modules/array-includes": {
"version": "3.1.9",
@@ -6858,7 +6865,6 @@
"resolved": "https://registry.npmjs.org/content-disposition/-/content-disposition-0.5.4.tgz",
"integrity": "sha512-FveZTNuGw04cxlAiWbzi6zTAL/lhehaWbTtgluJh4/E95DqMwTmha3KZN1aAWA8cFIhHzMZUvLevkw5Rqk+tSQ==",
"license": "MIT",
"peer": true,
"dependencies": {
"safe-buffer": "5.2.1"
},
@@ -7976,6 +7982,7 @@
"integrity": "sha512-GsGizj2Y1rCWDu6XoEekL3RLilp0voSePurjZIkxL3wlm5o5EC9VpgaP7lrCvjnkuLvzFBQWB3vWB3K5KQTveQ==",
"dev": true,
"license": "MIT",
"peer": true,
"dependencies": {
"@eslint-community/eslint-utils": "^4.2.0",
"@eslint-community/regexpp": "^4.12.1",
@@ -8511,7 +8518,6 @@
"resolved": "https://registry.npmjs.org/express/-/express-4.21.2.tgz",
"integrity": "sha512-28HqgMZAmih1Czt9ny7qr6ek2qddF4FclbMzwhCREB6OFfH+rXAnuNCwo1/wFvrtbgsQDb4kSbX9de9lFbrXnA==",
"license": "MIT",
"peer": true,
"dependencies": {
"accepts": "~1.3.8",
"array-flatten": "1.1.1",
@@ -8573,7 +8579,6 @@
"resolved": "https://registry.npmjs.org/cookie/-/cookie-0.7.1.tgz",
"integrity": "sha512-6DnInpx7SJ2AK3+CTUE/ZM0vWTUboZCegxhC2xiIydHR9jNuTAASBrfEpHhiGOZw/nX51bHt6YQl8jsGo4y/0w==",
"license": "MIT",
"peer": true,
"engines": {
"node": ">= 0.6"
}
@@ -8583,7 +8588,6 @@
"resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz",
"integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==",
"license": "MIT",
"peer": true,
"dependencies": {
"ms": "2.0.0"
}
@@ -8593,7 +8597,6 @@
"resolved": "https://registry.npmjs.org/statuses/-/statuses-2.0.1.tgz",
"integrity": "sha512-RwNA9Z/7PrK06rYLIzFMlaF+l73iwpzsqRIFgbMLbTcLD6cOao82TaWefPXQvB2fOC4AjuYSEndS7N/mTCbkdQ==",
"license": "MIT",
"peer": true,
"engines": {
"node": ">= 0.8"
}
@@ -8760,7 +8763,6 @@
"resolved": "https://registry.npmjs.org/finalhandler/-/finalhandler-1.3.1.tgz",
"integrity": "sha512-6BN9trH7bp3qvnrRyzsBz+g3lZxTNZTbVO2EV1CS0WIcDbawYVdYvGflME/9QP0h0pYlCDBCTjYa9nZzMDpyxQ==",
"license": "MIT",
"peer": true,
"dependencies": {
"debug": "2.6.9",
"encodeurl": "~2.0.0",
@@ -8779,7 +8781,6 @@
"resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz",
"integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==",
"license": "MIT",
"peer": true,
"dependencies": {
"ms": "2.0.0"
}
@@ -8788,15 +8789,13 @@
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz",
"integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==",
"license": "MIT",
"peer": true
"license": "MIT"
},
"node_modules/finalhandler/node_modules/statuses": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/statuses/-/statuses-2.0.1.tgz",
"integrity": "sha512-RwNA9Z/7PrK06rYLIzFMlaF+l73iwpzsqRIFgbMLbTcLD6cOao82TaWefPXQvB2fOC4AjuYSEndS7N/mTCbkdQ==",
"license": "MIT",
"peer": true,
"engines": {
"node": ">= 0.8"
}
@@ -9910,6 +9909,7 @@
"resolved": "https://registry.npmjs.org/ink/-/ink-6.2.3.tgz",
"integrity": "sha512-fQkfEJjKbLXIcVWEE3MvpYSnwtbbmRsmeNDNz1pIuOFlwE+UF2gsy228J36OXKZGWJWZJKUigphBSqCNMcARtg==",
"license": "MIT",
"peer": true,
"dependencies": {
"@alcalzone/ansi-tokenize": "^0.2.0",
"ansi-escapes": "^7.0.0",
@@ -11864,7 +11864,6 @@
"resolved": "https://registry.npmjs.org/methods/-/methods-1.1.2.tgz",
"integrity": "sha512-iclAHeNqNm68zFtnZ0e+1L2yUIdvzNoauKU4WBA3VvH/vPFieF7qfRlwUZU+DA9P9bPXIS90ulxoUoCH23sV2w==",
"license": "MIT",
"peer": true,
"engines": {
"node": ">= 0.6"
}
@@ -13163,8 +13162,7 @@
"version": "0.1.12",
"resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.12.tgz",
"integrity": "sha512-RA1GjUVMnvYFxuqovrEqZoxxW5NUZqbwKtYz/Tt7nXerk0LbLblQmrsgdeOxV5SFHf0UDggjS/bSeOZwt1pmEQ==",
"license": "MIT",
"peer": true
"license": "MIT"
},
"node_modules/path-type": {
"version": "3.0.0",
@@ -13823,6 +13821,7 @@
"resolved": "https://registry.npmjs.org/react/-/react-19.1.0.tgz",
"integrity": "sha512-FS+XFBNvn3GTAWq26joslQgWNoFu08F4kl0J4CgdNKADkdSGXQyTCnKteIAJy96Br6YbpEU1LSzV5dYtjMkMDg==",
"license": "MIT",
"peer": true,
"engines": {
"node": ">=0.10.0"
}
@@ -13833,6 +13832,7 @@
"integrity": "sha512-cq/o30z9W2Wb4rzBefjv5fBalHU0rJGZCHAkf/RHSBWSSYwh8PlQTqqOJmgIIbBtpj27T6FIPXeomIjZtCNVqA==",
"devOptional": true,
"license": "MIT",
"peer": true,
"dependencies": {
"shell-quote": "^1.6.1",
"ws": "^7"
@@ -13866,6 +13866,7 @@
"integrity": "sha512-Xs1hdnE+DyKgeHJeJznQmYMIBG3TKIHJJT95Q58nHLSrElKlGQqDTR2HQ9fx5CN/Gk6Vh/kupBTDLU11/nDk/g==",
"dev": true,
"license": "MIT",
"peer": true,
"dependencies": {
"scheduler": "^0.26.0"
},
@@ -15931,6 +15932,7 @@
"integrity": "sha512-M7BAV6Rlcy5u+m6oPhAPFgJTzAioX/6B0DxyvDlo9l8+T3nLKbrczg2WLUyzd45L8RqfUMyGPzekbMvX2Ldkwg==",
"dev": true,
"license": "MIT",
"peer": true,
"engines": {
"node": ">=12"
},
@@ -16110,7 +16112,8 @@
"version": "2.8.1",
"resolved": "https://registry.npmjs.org/tslib/-/tslib-2.8.1.tgz",
"integrity": "sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w==",
"license": "0BSD"
"license": "0BSD",
"peer": true
},
"node_modules/tsx": {
"version": "4.20.3",
@@ -16118,6 +16121,7 @@
"integrity": "sha512-qjbnuR9Tr+FJOMBqJCW5ehvIo/buZq7vH7qD7JziU98h6l3qGy0a/yPFjwO+y0/T7GFpNgNAvEcPPVfyT8rrPQ==",
"dev": true,
"license": "MIT",
"peer": true,
"dependencies": {
"esbuild": "~0.25.0",
"get-tsconfig": "^4.7.5"
@@ -16312,6 +16316,7 @@
"integrity": "sha512-p1diW6TqL9L07nNxvRMM7hMMw4c5XOo/1ibL4aAIGmSAt9slTE1Xgw5KWuof2uTOvCg9BY7ZRi+GaF+7sfgPeQ==",
"dev": true,
"license": "Apache-2.0",
"peer": true,
"bin": {
"tsc": "bin/tsc",
"tsserver": "bin/tsserver"
@@ -16386,7 +16391,6 @@
"version": "7.15.0",
"resolved": "https://registry.npmjs.org/undici/-/undici-7.15.0.tgz",
"integrity": "sha512-7oZJCPvvMvTd0OlqWsIxTuItTpJBpU1tcbVl24FMn3xt3+VSunwUasmfPJRE57oNO1KsZ4PgA1xTdAX4hq8NyQ==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=20.18.1"
@@ -16619,7 +16623,6 @@
"resolved": "https://registry.npmjs.org/utils-merge/-/utils-merge-1.0.1.tgz",
"integrity": "sha512-pMZTvIkT1d+TFGvDOqodOclx0QWkkgi6Tdoa8gC8ffGAAqz9pzPTZWAybbsHHoED/ztMtkv/VoYTYyShUn81hA==",
"license": "MIT",
"peer": true,
"engines": {
"node": ">= 0.4.0"
}
@@ -16675,6 +16678,7 @@
"integrity": "sha512-ixXJB1YRgDIw2OszKQS9WxGHKwLdCsbQNkpJN171udl6szi/rIySHL6/Os3s2+oE4P/FLD4dxg4mD7Wust+u5g==",
"dev": true,
"license": "MIT",
"peer": true,
"dependencies": {
"esbuild": "^0.25.0",
"fdir": "^6.4.6",
@@ -16788,6 +16792,7 @@
"integrity": "sha512-M7BAV6Rlcy5u+m6oPhAPFgJTzAioX/6B0DxyvDlo9l8+T3nLKbrczg2WLUyzd45L8RqfUMyGPzekbMvX2Ldkwg==",
"dev": true,
"license": "MIT",
"peer": true,
"engines": {
"node": ">=12"
},
@@ -16801,6 +16806,7 @@
"integrity": "sha512-LUCP5ev3GURDysTWiP47wRRUpLKMOfPh+yKTx3kVIEiu5KOMeqzpnYNsKyOoVrULivR8tLcks4+lga33Whn90A==",
"dev": true,
"license": "MIT",
"peer": true,
"dependencies": {
"@types/chai": "^5.2.2",
"@vitest/expect": "3.2.4",
@@ -17479,6 +17485,7 @@
"resolved": "https://registry.npmjs.org/zod/-/zod-3.25.76.tgz",
"integrity": "sha512-gzUt/qt81nXsFGKIFcC3YnfEAx5NkunCfnDlvuBSSFS02bcXu4Lmea0AFIUwbLWxWPx3d9p8S5QoaujKcNQxcQ==",
"license": "MIT",
"peer": true,
"funding": {
"url": "https://github.com/sponsors/colinhacks"
}
@@ -17494,7 +17501,7 @@
},
"packages/cli": {
"name": "@qwen-code/qwen-code",
"version": "0.6.0",
"version": "0.5.1",
"dependencies": {
"@google/genai": "1.16.0",
"@iarna/toml": "^2.2.5",
@@ -17525,7 +17532,7 @@
"strip-ansi": "^7.1.0",
"strip-json-comments": "^3.1.1",
"tar": "^7.5.2",
"undici": "^6.22.0",
"undici": "^7.10.0",
"update-notifier": "^7.3.1",
"wrap-ansi": "9.0.2",
"yargs": "^17.7.2",
@@ -17607,18 +17614,9 @@
"url": "https://github.com/sponsors/sindresorhus"
}
},
"packages/cli/node_modules/undici": {
"version": "6.22.0",
"resolved": "https://registry.npmjs.org/undici/-/undici-6.22.0.tgz",
"integrity": "sha512-hU/10obOIu62MGYjdskASR3CUAiYaFTtC9Pa6vHyf//mAipSvSQg6od2CnJswq7fvzNS3zJhxoRkgNVaHurWKw==",
"license": "MIT",
"engines": {
"node": ">=18.17"
}
},
"packages/core": {
"name": "@qwen-code/qwen-code-core",
"version": "0.6.0",
"version": "0.5.1",
"hasInstallScript": true,
"dependencies": {
"@google/genai": "1.16.0",
@@ -17661,7 +17659,7 @@
"simple-git": "^3.28.0",
"strip-ansi": "^7.1.0",
"tiktoken": "^1.0.21",
"undici": "^6.22.0",
"undici": "^7.10.0",
"uuid": "^9.0.1",
"ws": "^8.18.0"
},
@@ -17749,6 +17747,7 @@
"resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.3.tgz",
"integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==",
"license": "MIT",
"peer": true,
"engines": {
"node": ">=12"
},
@@ -17756,22 +17755,12 @@
"url": "https://github.com/sponsors/jonschlinkert"
}
},
"packages/core/node_modules/undici": {
"version": "6.22.0",
"resolved": "https://registry.npmjs.org/undici/-/undici-6.22.0.tgz",
"integrity": "sha512-hU/10obOIu62MGYjdskASR3CUAiYaFTtC9Pa6vHyf//mAipSvSQg6od2CnJswq7fvzNS3zJhxoRkgNVaHurWKw==",
"license": "MIT",
"engines": {
"node": ">=18.17"
}
},
"packages/sdk-typescript": {
"name": "@qwen-code/sdk",
"version": "0.6.0",
"version": "0.5.1",
"license": "Apache-2.0",
"dependencies": {
"@modelcontextprotocol/sdk": "^1.0.4",
"tiktoken": "^1.0.21"
"@modelcontextprotocol/sdk": "^1.0.4"
},
"devDependencies": {
"@types/node": "^20.14.0",
@@ -20197,7 +20186,7 @@
},
"packages/test-utils": {
"name": "@qwen-code/qwen-code-test-utils",
"version": "0.6.0",
"version": "0.5.1",
"dev": true,
"license": "Apache-2.0",
"devDependencies": {
@@ -20209,7 +20198,7 @@
},
"packages/vscode-ide-companion": {
"name": "qwen-code-vscode-ide-companion",
"version": "0.6.0",
"version": "0.5.1",
"license": "LICENSE",
"dependencies": {
"@modelcontextprotocol/sdk": "^1.15.1",
@@ -20229,7 +20218,7 @@
"@types/react": "^19.1.8",
"@types/react-dom": "^19.1.6",
"@types/semver": "^7.7.1",
"@types/vscode": "^1.85.0",
"@types/vscode": "^1.99.0",
"@typescript-eslint/eslint-plugin": "^8.31.1",
"@typescript-eslint/parser": "^8.31.1",
"@vscode/vsce": "^3.6.0",
@@ -20244,7 +20233,7 @@
"vitest": "^3.2.4"
},
"engines": {
"vscode": "^1.85.0"
"vscode": "^1.99.0"
}
},
"packages/vscode-ide-companion/node_modules/@types/react": {

View File

@@ -1,6 +1,6 @@
{
"name": "@qwen-code/qwen-code",
"version": "0.6.0",
"version": "0.5.1",
"engines": {
"node": ">=20.0.0"
},
@@ -13,7 +13,7 @@
"url": "git+https://github.com/QwenLM/qwen-code.git"
},
"config": {
"sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.6.0"
"sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.5.1"
},
"scripts": {
"start": "cross-env node scripts/start.js",

View File

@@ -1,6 +1,6 @@
{
"name": "@qwen-code/qwen-code",
"version": "0.6.0",
"version": "0.5.1",
"description": "Qwen Code",
"repository": {
"type": "git",
@@ -33,7 +33,7 @@
"dist"
],
"config": {
"sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.6.0"
"sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.5.1"
},
"dependencies": {
"@google/genai": "1.16.0",
@@ -64,7 +64,7 @@
"strip-ansi": "^7.1.0",
"strip-json-comments": "^3.1.1",
"tar": "^7.5.2",
"undici": "^6.22.0",
"undici": "^7.10.0",
"extract-zip": "^2.0.1",
"update-notifier": "^7.3.1",
"wrap-ansi": "9.0.2",

View File

@@ -206,18 +206,6 @@ describe('parseArguments', () => {
expect(argv.prompt).toBeUndefined();
});
it('should allow -r flag as alias for --resume', async () => {
process.argv = ['node', 'script.js', '-r', 'session-123'];
const argv = await parseArguments({} as Settings);
expect(argv.resume).toBe('session-123');
});
it('should allow -c flag as alias for --continue', async () => {
process.argv = ['node', 'script.js', '-c'];
const argv = await parseArguments({} as Settings);
expect(argv.continue).toBe(true);
});
it('should convert positional query argument to prompt by default', async () => {
process.argv = ['node', 'script.js', 'Hi Gemini'];
const argv = await parseArguments({} as Settings);

View File

@@ -29,6 +29,7 @@ import {
} from '@qwen-code/qwen-code-core';
import { extensionsCommand } from '../commands/extensions.js';
import type { Settings } from './settings.js';
import { getModelProvidersConfigFromSettings } from './settings.js';
import yargs, { type Argv } from 'yargs';
import { hideBin } from 'yargs/helpers';
import * as fs from 'node:fs';
@@ -299,6 +300,7 @@ export async function parseArguments(settings: Settings): Promise<CliArgs> {
'Set the approval mode: plan (plan only), default (prompt for approval), auto-edit (auto-approve edit tools), yolo (auto-approve all tools)',
})
.option('checkpointing', {
alias: 'c',
type: 'boolean',
description: 'Enables checkpointing of file edits',
default: false,
@@ -421,14 +423,12 @@ export async function parseArguments(settings: Settings): Promise<CliArgs> {
default: false,
})
.option('continue', {
alias: 'c',
type: 'boolean',
description:
'Resume the most recent session for the current project.',
default: false,
})
.option('resume', {
alias: 'r',
type: 'string',
description:
'Resume a specific session by its ID. Use without an ID to show session picker.',
@@ -865,11 +865,16 @@ export async function loadCliConfig(
);
}
const resolvedModel =
argv.model ||
process.env['OPENAI_MODEL'] ||
process.env['QWEN_MODEL'] ||
settings.model?.name;
let resolvedModel: string | undefined;
if (argv.model) {
resolvedModel = argv.model;
} else {
resolvedModel =
process.env['OPENAI_MODEL'] ||
process.env['QWEN_MODEL'] ||
settings.model?.name;
}
const sandboxConfig = await loadSandboxConfig(settings, argv);
const screenReader =
@@ -903,6 +908,8 @@ export async function loadCliConfig(
}
}
const modelProvidersConfig = getModelProvidersConfigFromSettings(settings);
return new Config({
sessionId,
sessionData,
@@ -961,6 +968,7 @@ export async function loadCliConfig(
inputFormat,
outputFormat,
includePartialMessages,
modelProvidersConfig,
generationConfig: {
...(settings.model?.generationConfig || {}),
model: resolvedModel,

View File

@@ -14,6 +14,11 @@ import {
QWEN_DIR,
getErrorMessage,
Storage,
type AuthType,
type ProviderModelConfig as ModelConfig,
type ModelProvidersConfig,
type ModelCapabilities,
type ModelGenerationConfig,
} from '@qwen-code/qwen-code-core';
import stripJsonComments from 'strip-json-comments';
import { DefaultLight } from '../ui/themes/default-light.js';
@@ -47,7 +52,14 @@ function getMergeStrategyForPath(path: string[]): MergeStrategy | undefined {
return current?.mergeStrategy;
}
export type { Settings, MemoryImportFormat };
export type {
Settings,
MemoryImportFormat,
ModelConfig,
ModelProvidersConfig,
ModelCapabilities,
ModelGenerationConfig,
};
export const SETTINGS_DIRECTORY_NAME = '.qwen';
export const USER_SETTINGS_PATH = Storage.getGlobalSettingsPath();
@@ -862,3 +874,31 @@ export function saveSettings(settingsFile: SettingsFile): void {
throw error;
}
}
/**
 * Get the model providers configuration from settings, grouped by authType.
* Returns the models config from the merged settings without mutating files.
*
* @param settings - The merged settings object
* @returns ModelProvidersConfig object (keyed by authType) or empty object if not configured
*/
export function getModelProvidersConfigFromSettings(
settings: Settings,
): ModelProvidersConfig {
return (settings.modelProviders as ModelProvidersConfig) || {};
}
/**
* Get models for a specific authType from settings.
*
* @param settings - The merged settings object
* @param authType - The authType to get models for
* @returns Array of ModelConfig for the authType, or empty array if not configured
*/
export function getModelsForAuthType(
settings: Settings,
authType: string,
): ModelConfig[] {
const modelProvidersConfig = getModelProvidersConfigFromSettings(settings);
return modelProvidersConfig[authType as AuthType] || [];
}
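
To make the new settings helpers concrete, here is a minimal usage sketch (not part of the diff). The field names mirror the test fixtures elsewhere in this change; the concrete model IDs, URLs, and the import path are illustrative only, and `envKey` is assumed to be an optional field that is resolved to an API key later.

```ts
import {
  getModelProvidersConfigFromSettings,
  getModelsForAuthType,
  type Settings,
} from './settings.js'; // illustrative path

// Hypothetical merged settings; only the modelProviders block is shown.
const settings = {
  modelProviders: {
    openai: [
      {
        id: 'gpt-4-turbo',
        name: 'GPT-4 Turbo',
        baseUrl: 'https://api.openai.com/v1',
        envKey: 'OPENAI_API_KEY', // assumed optional; read from process.env at switch time
        generationConfig: { temperature: 0.8, max_tokens: 4096 },
      },
    ],
  },
} as Settings;

// Whole map keyed by authType, or {} when nothing is configured.
const providers = getModelProvidersConfigFromSettings(settings);

// ModelConfig[] for a single authType, or [] when none is configured.
const openaiModels = getModelsForAuthType(settings, 'openai');

console.log(Object.keys(providers), openaiModels.map((m) => m.id));
```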

View File

@@ -10,6 +10,7 @@ import type {
TelemetrySettings,
AuthType,
ChatCompressionSettings,
ModelProvidersConfig,
} from '@qwen-code/qwen-code-core';
import {
ApprovalMode,
@@ -102,6 +103,19 @@ const SETTINGS_SCHEMA = {
mergeStrategy: MergeStrategy.SHALLOW_MERGE,
},
// Model providers configuration grouped by authType
modelProviders: {
type: 'object',
label: 'Model Providers',
category: 'Model',
requiresRestart: false,
default: {} as ModelProvidersConfig,
description:
'Model providers configuration grouped by authType. Each authType contains an array of model configurations.',
showInDialog: false,
mergeStrategy: MergeStrategy.SHALLOW_MERGE,
},
general: {
type: 'object',
label: 'General',

View File

@@ -52,7 +52,7 @@ export const modelCommand: SlashCommand = {
};
}
const availableModels = getAvailableModelsForAuthType(authType);
const availableModels = getAvailableModelsForAuthType(authType, config);
if (availableModels.length === 0) {
return {

View File

@@ -40,7 +40,8 @@ const renderComponent = (
? ({
// --- Functions used by ModelDialog ---
getModel: vi.fn(() => MAINLINE_CODER),
setModel: vi.fn(),
setModel: vi.fn().mockResolvedValue(undefined),
switchModel: vi.fn().mockResolvedValue(undefined),
getAuthType: vi.fn(() => 'qwen-oauth'),
// --- Functions used by ClearcutLogger ---
@@ -139,16 +140,19 @@ describe('<ModelDialog />', () => {
expect(mockedSelect).toHaveBeenCalledTimes(1);
});
it('calls config.setModel and onClose when DescriptiveRadioButtonSelect.onSelect is triggered', () => {
it('calls config.switchModel and onClose when DescriptiveRadioButtonSelect.onSelect is triggered', async () => {
const { props, mockConfig } = renderComponent({}, {}); // Pass empty object for contextValue
const childOnSelect = mockedSelect.mock.calls[0][0].onSelect;
expect(childOnSelect).toBeDefined();
childOnSelect(MAINLINE_CODER);
await childOnSelect(MAINLINE_CODER);
// Assert against the default mock provided by renderComponent
expect(mockConfig?.setModel).toHaveBeenCalledWith(MAINLINE_CODER);
// Assert that switchModel is called with the model and metadata
expect(mockConfig?.switchModel).toHaveBeenCalledWith(MAINLINE_CODER, {
reason: 'user_manual',
context: 'Model switched via /model dialog',
});
expect(props.onClose).toHaveBeenCalledTimes(1);
});

View File

@@ -29,13 +29,11 @@ interface ModelDialogProps {
export function ModelDialog({ onClose }: ModelDialogProps): React.JSX.Element {
const config = useContext(ConfigContext);
// Get the auth type from config, defaulting to QWEN_OAUTH if not available
const authType = config?.getAuthType() ?? AuthType.QWEN_OAUTH;
// Get available models based on auth type
const availableModels = useMemo(
() => getAvailableModelsForAuthType(authType),
[authType],
() => getAvailableModelsForAuthType(authType, config ?? undefined),
[authType, config],
);
const MODEL_OPTIONS = useMemo(
@@ -49,7 +47,6 @@ export function ModelDialog({ onClose }: ModelDialogProps): React.JSX.Element {
[availableModels],
);
// Determine the Preferred Model (read once when the dialog opens).
const preferredModel = config?.getModel() || MAINLINE_CODER;
useKeypress(
@@ -61,17 +58,18 @@ export function ModelDialog({ onClose }: ModelDialogProps): React.JSX.Element {
{ isActive: true },
);
// Calculate the initial index based on the preferred model.
const initialIndex = useMemo(
() => MODEL_OPTIONS.findIndex((option) => option.value === preferredModel),
[MODEL_OPTIONS, preferredModel],
);
// Handle selection internally (Autonomous Dialog).
const handleSelect = useCallback(
(model: string) => {
async (model: string) => {
if (config) {
config.setModel(model);
await config.switchModel(model, {
reason: 'user_manual',
context: 'Model switched via /model dialog',
});
const event = new ModelSlashCommandEvent(model);
logModelSlashCommand(config, event);
}

View File

@@ -0,0 +1,203 @@
/**
* @license
* Copyright 2025 Qwen Team
* SPDX-License-Identifier: Apache-2.0
*/
import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
import {
getAvailableModelsForAuthType,
getFilteredQwenModels,
getOpenAIAvailableModelFromEnv,
isVisionModel,
getDefaultVisionModel,
AVAILABLE_MODELS_QWEN,
MAINLINE_VLM,
MAINLINE_CODER,
} from './availableModels.js';
import { AuthType, type Config } from '@qwen-code/qwen-code-core';
describe('availableModels', () => {
describe('AVAILABLE_MODELS_QWEN', () => {
it('should include coder model', () => {
const coderModel = AVAILABLE_MODELS_QWEN.find(
(m) => m.id === MAINLINE_CODER,
);
expect(coderModel).toBeDefined();
expect(coderModel?.isVision).toBeFalsy();
});
it('should include vision model', () => {
const visionModel = AVAILABLE_MODELS_QWEN.find(
(m) => m.id === MAINLINE_VLM,
);
expect(visionModel).toBeDefined();
expect(visionModel?.isVision).toBe(true);
});
});
describe('getFilteredQwenModels', () => {
it('should return all models when vision preview is enabled', () => {
const models = getFilteredQwenModels(true);
expect(models.length).toBe(AVAILABLE_MODELS_QWEN.length);
});
it('should filter out vision models when preview is disabled', () => {
const models = getFilteredQwenModels(false);
expect(models.every((m) => !m.isVision)).toBe(true);
});
});
describe('getOpenAIAvailableModelFromEnv', () => {
const originalEnv = process.env;
beforeEach(() => {
process.env = { ...originalEnv };
});
afterEach(() => {
process.env = originalEnv;
});
it('should return null when OPENAI_MODEL is not set', () => {
delete process.env['OPENAI_MODEL'];
expect(getOpenAIAvailableModelFromEnv()).toBeNull();
});
it('should return model from OPENAI_MODEL env var', () => {
process.env['OPENAI_MODEL'] = 'gpt-4-turbo';
const model = getOpenAIAvailableModelFromEnv();
expect(model?.id).toBe('gpt-4-turbo');
expect(model?.label).toBe('gpt-4-turbo');
});
it('should trim whitespace from env var', () => {
process.env['OPENAI_MODEL'] = ' gpt-4 ';
const model = getOpenAIAvailableModelFromEnv();
expect(model?.id).toBe('gpt-4');
});
});
describe('getAvailableModelsForAuthType', () => {
const originalEnv = process.env;
beforeEach(() => {
process.env = { ...originalEnv };
});
afterEach(() => {
process.env = originalEnv;
});
it('should return hard-coded qwen models for qwen-oauth', () => {
const models = getAvailableModelsForAuthType(AuthType.QWEN_OAUTH);
expect(models).toEqual(AVAILABLE_MODELS_QWEN);
});
it('should return hard-coded qwen models even when config is provided', () => {
const mockConfig = {
getAvailableModels: vi
.fn()
.mockReturnValue([
{ id: 'custom', label: 'Custom', authType: AuthType.QWEN_OAUTH },
]),
} as unknown as Config;
const models = getAvailableModelsForAuthType(
AuthType.QWEN_OAUTH,
mockConfig,
);
expect(models).toEqual(AVAILABLE_MODELS_QWEN);
});
it('should use config.getAvailableModels for openai authType when available', () => {
const mockModels = [
{
id: 'gpt-4',
label: 'GPT-4',
description: 'Test',
authType: AuthType.USE_OPENAI,
isVision: false,
},
];
const mockConfig = {
getAvailableModels: vi.fn().mockReturnValue(mockModels),
} as unknown as Config;
const models = getAvailableModelsForAuthType(
AuthType.USE_OPENAI,
mockConfig,
);
expect(mockConfig.getAvailableModels).toHaveBeenCalled();
expect(models[0].id).toBe('gpt-4');
});
it('should fallback to env var for openai when config returns empty', () => {
process.env['OPENAI_MODEL'] = 'fallback-model';
const mockConfig = {
getAvailableModels: vi.fn().mockReturnValue([]),
} as unknown as Config;
const models = getAvailableModelsForAuthType(
AuthType.USE_OPENAI,
mockConfig,
);
expect(models[0].id).toBe('fallback-model');
});
it('should fallback to env var for openai when config throws', () => {
process.env['OPENAI_MODEL'] = 'fallback-model';
const mockConfig = {
getAvailableModels: vi.fn().mockImplementation(() => {
throw new Error('Registry not initialized');
}),
} as unknown as Config;
const models = getAvailableModelsForAuthType(
AuthType.USE_OPENAI,
mockConfig,
);
expect(models[0].id).toBe('fallback-model');
});
it('should return env model for openai without config', () => {
process.env['OPENAI_MODEL'] = 'gpt-4-turbo';
const models = getAvailableModelsForAuthType(AuthType.USE_OPENAI);
expect(models[0].id).toBe('gpt-4-turbo');
});
it('should return empty array for openai without config or env', () => {
delete process.env['OPENAI_MODEL'];
const models = getAvailableModelsForAuthType(AuthType.USE_OPENAI);
expect(models).toEqual([]);
});
it('should return empty array for other auth types', () => {
const models = getAvailableModelsForAuthType(AuthType.USE_GEMINI);
expect(models).toEqual([]);
});
});
describe('isVisionModel', () => {
it('should return true for vision model', () => {
expect(isVisionModel(MAINLINE_VLM)).toBe(true);
});
it('should return false for non-vision model', () => {
expect(isVisionModel(MAINLINE_CODER)).toBe(false);
});
it('should return false for unknown model', () => {
expect(isVisionModel('unknown-model')).toBe(false);
});
});
describe('getDefaultVisionModel', () => {
it('should return the vision model ID', () => {
expect(getDefaultVisionModel()).toBe(MAINLINE_VLM);
});
});
});

View File

@@ -4,7 +4,12 @@
* SPDX-License-Identifier: Apache-2.0
*/
import { AuthType, DEFAULT_QWEN_MODEL } from '@qwen-code/qwen-code-core';
import {
AuthType,
DEFAULT_QWEN_MODEL,
type Config,
type AvailableModel as CoreAvailableModel,
} from '@qwen-code/qwen-code-core';
import { t } from '../../i18n/index.js';
export type AvailableModel = {
@@ -60,24 +65,56 @@ export function getOpenAIAvailableModelFromEnv(): AvailableModel | null {
return id ? { id, label: id } : null;
}
export function getAvailableModelsForAuthType(
authType: AuthType,
): AvailableModel[] {
switch (authType) {
case AuthType.QWEN_OAUTH:
return AVAILABLE_MODELS_QWEN;
case AuthType.USE_OPENAI: {
const openAIModel = getOpenAIAvailableModelFromEnv();
return openAIModel ? [openAIModel] : [];
}
default:
// For other auth types, return empty array for now
// This can be expanded later according to the design doc
return [];
}
/**
* Convert core AvailableModel to CLI AvailableModel format
*/
function convertCoreModelToCliModel(
coreModel: CoreAvailableModel,
): AvailableModel {
return {
id: coreModel.id,
label: coreModel.label,
description: coreModel.description,
isVision: coreModel.isVision ?? coreModel.capabilities?.vision ?? false,
};
}
/**
* Get available models for the given authType.
*
* If a Config object is provided, uses the model registry to get models.
* For qwen-oauth, always returns the hard-coded models.
* For the openai authType, falls back to the OPENAI_MODEL environment variable if no config is provided.
*/
export function getAvailableModelsForAuthType(
authType: AuthType,
config?: Config,
): AvailableModel[] {
// For qwen-oauth, always use hard-coded models; this aligns with the API gateway.
if (authType === AuthType.QWEN_OAUTH) {
return AVAILABLE_MODELS_QWEN;
}
if (config) {
try {
const models = config.getAvailableModels();
if (models.length > 0) {
return models.map(convertCoreModelToCliModel);
}
} catch (error) {
console.error('Failed to get models from model registry', error);
}
}
if (authType === AuthType.USE_OPENAI) {
const openAIModel = getOpenAIAvailableModelFromEnv();
return openAIModel ? [openAIModel] : [];
}
// For other auth types, return empty array
return [];
}
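
Roughly, the resolution order above can be exercised as follows; this is a hedged sketch rather than code from the change, and it assumes an already-initialized `Config` instance and an illustrative import path.

```ts
import { AuthType, type Config } from '@qwen-code/qwen-code-core';
import { getAvailableModelsForAuthType } from './availableModels.js'; // illustrative path

declare const config: Config; // assumed to be fully initialized

// 1. qwen-oauth always resolves to the hard-coded AVAILABLE_MODELS_QWEN list.
const qwenModels = getAvailableModelsForAuthType(AuthType.QWEN_OAUTH, config);

// 2. Other auth types consult the model registry first when a Config is passed.
const registryModels = getAvailableModelsForAuthType(AuthType.USE_OPENAI, config);

// 3. For openai, if the registry is empty or throws (or no Config is given),
//    the OPENAI_MODEL environment variable provides the single fallback entry.
process.env['OPENAI_MODEL'] = 'gpt-4-turbo';
const envModels = getAvailableModelsForAuthType(AuthType.USE_OPENAI);

console.log(qwenModels.length, registryModels.length, envModels[0]?.id);
```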
/**
* Hard code the default vision model as a string literal,
* until our coding model supports multimodal.

View File

@@ -1,6 +1,6 @@
{
"name": "@qwen-code/qwen-code-core",
"version": "0.6.0",
"version": "0.5.1",
"description": "Qwen Code Core",
"repository": {
"type": "git",
@@ -63,7 +63,7 @@
"simple-git": "^3.28.0",
"strip-ansi": "^7.1.0",
"tiktoken": "^1.0.21",
"undici": "^6.22.0",
"undici": "^7.10.0",
"uuid": "^9.0.1",
"ws": "^8.18.0"
},

View File

@@ -102,6 +102,15 @@ import {
} from '../services/sessionService.js';
import { randomUUID } from 'node:crypto';
// Models
import {
ModelSelectionManager,
type ModelProvidersConfig,
type AvailableModel,
type ResolvedModelConfig,
SelectionSource,
} from '../models/index.js';
// Re-export types
export type { AnyToolInvocation, FileFilteringOptions, MCPOAuthConfig };
export {
@@ -351,6 +360,8 @@ export interface ConfigParameters {
sdkMode?: boolean;
sessionSubagents?: SubagentConfig[];
channel?: string;
/** Model providers configuration grouped by authType */
modelProvidersConfig?: ModelProvidersConfig;
}
function normalizeConfigOutputFormat(
@@ -490,6 +501,10 @@ export class Config {
private readonly useSmartEdit: boolean;
private readonly channel: string | undefined;
// Model selection manager (ModelRegistry is internal to it)
private modelSelectionManager?: ModelSelectionManager;
private readonly modelProvidersConfig?: ModelProvidersConfig;
constructor(params: ConfigParameters) {
this.sessionId = params.sessionId ?? randomUUID();
this.sessionData = params.sessionData;
@@ -609,6 +624,7 @@ export class Config {
this.vlmSwitchMode = params.vlmSwitchMode;
this.inputFormat = params.inputFormat ?? InputFormat.TEXT;
this.fileExclusions = new FileExclusions(this);
this.modelProvidersConfig = params.modelProvidersConfig;
this.eventEmitter = params.eventEmitter;
if (params.contextFileName) {
setGeminiMdFilename(params.contextFileName);
@@ -777,13 +793,111 @@ export class Config {
async setModel(
newModel: string,
_metadata?: { reason?: string; context?: string },
metadata?: { reason?: string; context?: string },
): Promise<void> {
if (this.contentGeneratorConfig) {
this.contentGeneratorConfig.model = newModel;
const manager = this.getModelSelectionManager();
await manager.switchModel(
newModel,
SelectionSource.PROGRAMMATIC_OVERRIDE,
metadata,
);
}
/**
* Get or lazily initialize the ModelSelectionManager.
* This is the single entry point for all model-related operations.
*/
getModelSelectionManager(): ModelSelectionManager {
if (!this.modelSelectionManager) {
const currentAuthType = this.contentGeneratorConfig?.authType;
const currentModelId = this.contentGeneratorConfig?.model;
this.modelSelectionManager = new ModelSelectionManager({
initialAuthType: currentAuthType,
initialModelId: currentModelId,
onModelChange: this.handleModelChange.bind(this),
modelProvidersConfig: this.modelProvidersConfig,
});
}
// TODO: Log _metadata for telemetry if needed
// This _metadata can be used for tracking model switches (reason, context)
return this.modelSelectionManager;
}
/**
* Handle model change from the selection manager.
* This updates the content generator config with the new model settings.
*/
private async handleModelChange(
authType: AuthType,
model: ResolvedModelConfig,
): Promise<void> {
if (!this.contentGeneratorConfig) {
return;
}
this._generationConfig.model = model.id;
// Read API key from environment variable if envKey is specified
if (model.envKey !== undefined) {
const apiKey = process.env[model.envKey];
if (apiKey) {
this._generationConfig.apiKey = apiKey;
} else {
console.warn(
`[Config] Environment variable '${model.envKey}' is not set for model '${model.id}'. ` +
`API key will not be available.`,
);
}
}
if (model.baseUrl !== undefined) {
this._generationConfig.baseUrl = model.baseUrl;
}
if (model.generationConfig) {
this._generationConfig.samplingParams = {
temperature: model.generationConfig.temperature,
top_p: model.generationConfig.top_p,
top_k: model.generationConfig.top_k,
max_tokens: model.generationConfig.max_tokens,
presence_penalty: model.generationConfig.presence_penalty,
frequency_penalty: model.generationConfig.frequency_penalty,
repetition_penalty: model.generationConfig.repetition_penalty,
};
if (model.generationConfig.timeout !== undefined) {
this._generationConfig.timeout = model.generationConfig.timeout;
}
if (model.generationConfig.maxRetries !== undefined) {
this._generationConfig.maxRetries = model.generationConfig.maxRetries;
}
if (model.generationConfig.disableCacheControl !== undefined) {
this._generationConfig.disableCacheControl =
model.generationConfig.disableCacheControl;
}
}
await this.refreshAuth(authType);
}
/**
* Get available models for the current authType.
* This is used by the /model command and ModelDialog.
*/
getAvailableModels(): AvailableModel[] {
return this.getModelSelectionManager().getAvailableModels();
}
/**
* Switch to a different model within the current authType.
* @param modelId - The model ID to switch to
* @param metadata - Optional metadata for telemetry
*/
async switchModel(
modelId: string,
metadata?: { reason?: string; context?: string },
): Promise<void> {
const manager = this.getModelSelectionManager();
await manager.switchModel(modelId, SelectionSource.USER_MANUAL, metadata);
}
isInFallbackMode(): boolean {
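
Putting the new `Config` surface together, a caller (for example the /model dialog) might enumerate and switch models roughly like this. The helper below is a hedged sketch under the assumption of a fully constructed `Config`, not code from the change.

```ts
import type { Config } from '@qwen-code/qwen-code-core';

// Hypothetical helper: pick the first model offered for the current authType.
async function pickFirstAvailableModel(config: Config): Promise<void> {
  const models = config.getAvailableModels(); // delegates to ModelSelectionManager
  if (models.length === 0) return;

  // switchModel routes through the manager (SelectionSource.USER_MANUAL) and,
  // via handleModelChange, updates the generation config and refreshes auth.
  await config.switchModel(models[0].id, {
    reason: 'user_manual',
    context: 'example: picking the first available model',
  });
}
```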

View File

@@ -11,7 +11,8 @@ import fs from 'node:fs';
vi.mock('node:fs');
describe('Flash Model Fallback Configuration', () => {
// Skip this test because we do not have a fallback mechanism.
describe.skip('Flash Model Fallback Configuration', () => {
let config: Config;
beforeEach(() => {

View File

@@ -15,7 +15,6 @@ export const OAUTH_FILE = 'oauth_creds.json';
const TMP_DIR_NAME = 'tmp';
const BIN_DIR_NAME = 'bin';
const PROJECT_DIR_NAME = 'projects';
const IDE_DIR_NAME = 'ide';
export class Storage {
private readonly targetDir: string;
@@ -60,10 +59,6 @@ export class Storage {
return path.join(Storage.getGlobalQwenDir(), TMP_DIR_NAME);
}
static getGlobalIdeDir(): string {
return path.join(Storage.getGlobalQwenDir(), IDE_DIR_NAME);
}
static getGlobalBinDir(): string {
return path.join(Storage.getGlobalQwenDir(), BIN_DIR_NAME);
}

View File

@@ -151,7 +151,8 @@ describe('BaseLlmClient', () => {
contents: defaultOptions.contents,
config: {
abortSignal: defaultOptions.abortSignal,
topP: 0.8,
temperature: 0,
topP: 1,
tools: [
{
functionDeclarations: [
@@ -188,7 +189,7 @@ describe('BaseLlmClient', () => {
expect.objectContaining({
config: expect.objectContaining({
temperature: 0.8,
topP: 0.8, // Default should remain if not overridden
topP: 1, // Default should remain if not overridden
topK: 10,
tools: expect.any(Array),
}),

View File

@@ -66,7 +66,8 @@ export interface GenerateJsonOptions {
export class BaseLlmClient {
// Default configuration for utility tasks
private readonly defaultUtilityConfig: GenerateContentConfig = {
topP: 0.8,
temperature: 0,
topP: 1,
};
constructor(

View File

@@ -2310,7 +2310,7 @@ ${JSON.stringify(
abortSignal,
systemInstruction: getCoreSystemPrompt(''),
temperature: 0.5,
topP: 0.8,
topP: 1,
},
contents,
},

View File

@@ -94,7 +94,8 @@ const MAX_TURNS = 100;
export class GeminiClient {
private chat?: GeminiChat;
private readonly generateContentConfig: GenerateContentConfig = {
topP: 0.8,
temperature: 0,
topP: 1,
};
private sessionTurnCount = 0;

View File

@@ -32,7 +32,6 @@ vi.mock('node:fs', async (importOriginal) => {
...actual.promises,
readFile: vi.fn(),
readdir: vi.fn(),
stat: vi.fn(),
},
realpathSync: (p: string) => p,
existsSync: () => false,
@@ -69,7 +68,6 @@ describe('IdeClient', () => {
command: 'test-ide',
});
vi.mocked(os.tmpdir).mockReturnValue('/tmp');
vi.mocked(os.homedir).mockReturnValue('/home/test');
// Mock MCP client and transports
mockClient = {
@@ -99,15 +97,19 @@ describe('IdeClient', () => {
describe('connect', () => {
it('should connect using HTTP when port is provided in config file', async () => {
process.env['QWEN_CODE_IDE_SERVER_PORT'] = '8080';
const config = { port: '8080' };
vi.mocked(fs.promises.readFile).mockResolvedValue(JSON.stringify(config));
(
vi.mocked(fs.promises.readdir) as Mock<
(path: fs.PathLike) => Promise<string[]>
>
).mockResolvedValue([]);
const ideClient = await IdeClient.getInstance();
await ideClient.connect();
expect(fs.promises.readFile).toHaveBeenCalledWith(
path.join('/home/test', '.qwen', 'ide', '8080.lock'),
path.join('/tmp', 'qwen-code-ide-server-12345.json'),
'utf8',
);
expect(StreamableHTTPClientTransport).toHaveBeenCalledWith(
@@ -118,13 +120,16 @@ describe('IdeClient', () => {
expect(ideClient.getConnectionStatus().status).toBe(
IDEConnectionStatus.Connected,
);
delete process.env['QWEN_CODE_IDE_SERVER_PORT'];
});
it('should connect using stdio when stdio config is provided in file', async () => {
process.env['QWEN_CODE_IDE_SERVER_PORT'] = '8080';
const config = { stdio: { command: 'test-cmd', args: ['--foo'] } };
vi.mocked(fs.promises.readFile).mockResolvedValue(JSON.stringify(config));
(
vi.mocked(fs.promises.readdir) as Mock<
(path: fs.PathLike) => Promise<string[]>
>
).mockResolvedValue([]);
const ideClient = await IdeClient.getInstance();
await ideClient.connect();
@@ -137,16 +142,19 @@ describe('IdeClient', () => {
expect(ideClient.getConnectionStatus().status).toBe(
IDEConnectionStatus.Connected,
);
delete process.env['QWEN_CODE_IDE_SERVER_PORT'];
});
it('should prioritize port over stdio when both are in config file', async () => {
process.env['QWEN_CODE_IDE_SERVER_PORT'] = '8080';
const config = {
port: '8080',
stdio: { command: 'test-cmd', args: ['--foo'] },
};
vi.mocked(fs.promises.readFile).mockResolvedValue(JSON.stringify(config));
(
vi.mocked(fs.promises.readdir) as Mock<
(path: fs.PathLike) => Promise<string[]>
>
).mockResolvedValue([]);
const ideClient = await IdeClient.getInstance();
await ideClient.connect();
@@ -156,7 +164,6 @@ describe('IdeClient', () => {
expect(ideClient.getConnectionStatus().status).toBe(
IDEConnectionStatus.Connected,
);
delete process.env['QWEN_CODE_IDE_SERVER_PORT'];
});
it('should connect using HTTP when port is provided in environment variables', async () => {
@@ -256,8 +263,7 @@ describe('IdeClient', () => {
});
describe('getConnectionConfigFromFile', () => {
it('should return config from the env port lock file if it exists', async () => {
process.env['QWEN_CODE_IDE_SERVER_PORT'] = '1234';
it('should return config from the specific pid file if it exists', async () => {
const config = { port: '1234', workspacePath: '/test/workspace' };
vi.mocked(fs.promises.readFile).mockResolvedValue(JSON.stringify(config));
@@ -271,14 +277,18 @@ describe('IdeClient', () => {
expect(result).toEqual(config);
expect(fs.promises.readFile).toHaveBeenCalledWith(
path.join('/home/test', '.qwen', 'ide', '1234.lock'),
path.join('/tmp', 'qwen-code-ide-server-12345.json'),
'utf8',
);
delete process.env['QWEN_CODE_IDE_SERVER_PORT'];
});
it('should return undefined if no config files are found', async () => {
vi.mocked(fs.promises.readFile).mockRejectedValue(new Error('not found'));
(
vi.mocked(fs.promises.readdir) as Mock<
(path: fs.PathLike) => Promise<string[]>
>
).mockResolvedValue([]);
const ideClient = await IdeClient.getInstance();
const result = await (
@@ -290,15 +300,20 @@ describe('IdeClient', () => {
expect(result).toBeUndefined();
});
it('should read legacy pid config when available', async () => {
const config = {
port: '5678',
workspacePath: '/test/workspace',
ppid: 12345,
};
vi.mocked(fs.promises.readFile).mockResolvedValueOnce(
JSON.stringify(config),
);
it('should find and parse a single config file with the new naming scheme', async () => {
const config = { port: '5678', workspacePath: '/test/workspace' };
vi.mocked(fs.promises.readFile).mockRejectedValueOnce(
new Error('not found'),
); // For old path
(
vi.mocked(fs.promises.readdir) as Mock<
(path: fs.PathLike) => Promise<string[]>
>
).mockResolvedValue(['qwen-code-ide-server-12345-123.json']);
vi.mocked(fs.promises.readFile).mockResolvedValue(JSON.stringify(config));
vi.spyOn(IdeClient, 'validateWorkspacePath').mockReturnValue({
isValid: true,
});
const ideClient = await IdeClient.getInstance();
const result = await (
@@ -309,18 +324,110 @@ describe('IdeClient', () => {
expect(result).toEqual(config);
expect(fs.promises.readFile).toHaveBeenCalledWith(
path.join('/tmp', 'qwen-code-ide-server-12345.json'),
path.join('/tmp/gemini/ide', 'qwen-code-ide-server-12345-123.json'),
'utf8',
);
});
it('should fall back to legacy port file when pid file is missing', async () => {
process.env['QWEN_CODE_IDE_SERVER_PORT'] = '2222';
const config2 = { port: '2222', workspacePath: '/test/workspace2' };
it('should filter out configs with invalid workspace paths', async () => {
const validConfig = {
port: '5678',
workspacePath: '/test/workspace',
};
const invalidConfig = {
port: '1111',
workspacePath: '/invalid/workspace',
};
vi.mocked(fs.promises.readFile).mockRejectedValueOnce(
new Error('not found'),
);
(
vi.mocked(fs.promises.readdir) as Mock<
(path: fs.PathLike) => Promise<string[]>
>
).mockResolvedValue([
'qwen-code-ide-server-12345-111.json',
'qwen-code-ide-server-12345-222.json',
]);
vi.mocked(fs.promises.readFile)
.mockRejectedValueOnce(new Error('not found')) // ~/.qwen/ide/<port>.lock
.mockRejectedValueOnce(new Error('not found')) // legacy pid file
.mockResolvedValueOnce(JSON.stringify(invalidConfig))
.mockResolvedValueOnce(JSON.stringify(validConfig));
const validateSpy = vi
.spyOn(IdeClient, 'validateWorkspacePath')
.mockReturnValueOnce({ isValid: false })
.mockReturnValueOnce({ isValid: true });
const ideClient = await IdeClient.getInstance();
const result = await (
ideClient as unknown as {
getConnectionConfigFromFile: () => Promise<unknown>;
}
).getConnectionConfigFromFile();
expect(result).toEqual(validConfig);
expect(validateSpy).toHaveBeenCalledWith(
'/invalid/workspace',
'/test/workspace/sub-dir',
);
expect(validateSpy).toHaveBeenCalledWith(
'/test/workspace',
'/test/workspace/sub-dir',
);
});
it('should return the first valid config when multiple workspaces are valid', async () => {
const config1 = { port: '1111', workspacePath: '/test/workspace' };
const config2 = { port: '2222', workspacePath: '/test/workspace2' };
vi.mocked(fs.promises.readFile).mockRejectedValueOnce(
new Error('not found'),
);
(
vi.mocked(fs.promises.readdir) as Mock<
(path: fs.PathLike) => Promise<string[]>
>
).mockResolvedValue([
'qwen-code-ide-server-12345-111.json',
'qwen-code-ide-server-12345-222.json',
]);
vi.mocked(fs.promises.readFile)
.mockResolvedValueOnce(JSON.stringify(config1))
.mockResolvedValueOnce(JSON.stringify(config2));
vi.spyOn(IdeClient, 'validateWorkspacePath').mockReturnValue({
isValid: true,
});
const ideClient = await IdeClient.getInstance();
const result = await (
ideClient as unknown as {
getConnectionConfigFromFile: () => Promise<unknown>;
}
).getConnectionConfigFromFile();
expect(result).toEqual(config1);
});
it('should prioritize the config matching the port from the environment variable', async () => {
process.env['QWEN_CODE_IDE_SERVER_PORT'] = '2222';
const config1 = { port: '1111', workspacePath: '/test/workspace' };
const config2 = { port: '2222', workspacePath: '/test/workspace2' };
vi.mocked(fs.promises.readFile).mockRejectedValueOnce(
new Error('not found'),
);
(
vi.mocked(fs.promises.readdir) as Mock<
(path: fs.PathLike) => Promise<string[]>
>
).mockResolvedValue([
'qwen-code-ide-server-12345-111.json',
'qwen-code-ide-server-12345-222.json',
]);
vi.mocked(fs.promises.readFile)
.mockResolvedValueOnce(JSON.stringify(config1))
.mockResolvedValueOnce(JSON.stringify(config2));
vi.spyOn(IdeClient, 'validateWorkspacePath').mockReturnValue({
isValid: true,
});
const ideClient = await IdeClient.getInstance();
const result = await (
@@ -330,23 +437,28 @@ describe('IdeClient', () => {
).getConnectionConfigFromFile();
expect(result).toEqual(config2);
expect(fs.promises.readFile).toHaveBeenCalledWith(
path.join('/tmp', 'qwen-code-ide-server-12345.json'),
'utf8',
);
expect(fs.promises.readFile).toHaveBeenCalledWith(
path.join('/tmp', 'qwen-code-ide-server-2222.json'),
'utf8',
);
delete process.env['QWEN_CODE_IDE_SERVER_PORT'];
});
it('should fall back to legacy config when env lock file has invalid JSON', async () => {
process.env['QWEN_CODE_IDE_SERVER_PORT'] = '3333';
const config = { port: '1111', workspacePath: '/test/workspace' };
it('should handle invalid JSON in one of the config files', async () => {
const validConfig = { port: '2222', workspacePath: '/test/workspace' };
vi.mocked(fs.promises.readFile).mockRejectedValueOnce(
new Error('not found'),
);
(
vi.mocked(fs.promises.readdir) as Mock<
(path: fs.PathLike) => Promise<string[]>
>
).mockResolvedValue([
'qwen-code-ide-server-12345-111.json',
'qwen-code-ide-server-12345-222.json',
]);
vi.mocked(fs.promises.readFile)
.mockResolvedValueOnce('invalid json')
.mockResolvedValueOnce(JSON.stringify(config));
.mockResolvedValueOnce(JSON.stringify(validConfig));
vi.spyOn(IdeClient, 'validateWorkspacePath').mockReturnValue({
isValid: true,
});
const ideClient = await IdeClient.getInstance();
const result = await (
@@ -355,7 +467,96 @@ describe('IdeClient', () => {
}
).getConnectionConfigFromFile();
expect(result).toEqual(config);
expect(result).toEqual(validConfig);
});
it('should return undefined if readdir throws an error', async () => {
vi.mocked(fs.promises.readFile).mockRejectedValueOnce(
new Error('not found'),
);
vi.mocked(fs.promises.readdir).mockRejectedValue(
new Error('readdir failed'),
);
const ideClient = await IdeClient.getInstance();
const result = await (
ideClient as unknown as {
getConnectionConfigFromFile: () => Promise<unknown>;
}
).getConnectionConfigFromFile();
expect(result).toBeUndefined();
});
it('should ignore files with invalid names', async () => {
const validConfig = { port: '3333', workspacePath: '/test/workspace' };
vi.mocked(fs.promises.readFile).mockRejectedValueOnce(
new Error('not found'),
);
(
vi.mocked(fs.promises.readdir) as Mock<
(path: fs.PathLike) => Promise<string[]>
>
).mockResolvedValue([
'qwen-code-ide-server-12345-111.json', // valid
'not-a-config-file.txt', // invalid
'qwen-code-ide-server-asdf.json', // invalid
]);
vi.mocked(fs.promises.readFile).mockResolvedValueOnce(
JSON.stringify(validConfig),
);
vi.spyOn(IdeClient, 'validateWorkspacePath').mockReturnValue({
isValid: true,
});
const ideClient = await IdeClient.getInstance();
const result = await (
ideClient as unknown as {
getConnectionConfigFromFile: () => Promise<unknown>;
}
).getConnectionConfigFromFile();
expect(result).toEqual(validConfig);
expect(fs.promises.readFile).toHaveBeenCalledWith(
path.join('/tmp/gemini/ide', 'qwen-code-ide-server-12345-111.json'),
'utf8',
);
expect(fs.promises.readFile).not.toHaveBeenCalledWith(
path.join('/tmp/gemini/ide', 'not-a-config-file.txt'),
'utf8',
);
});
it('should match env port string to a number port in the config', async () => {
process.env['QWEN_CODE_IDE_SERVER_PORT'] = '3333';
const config1 = { port: 1111, workspacePath: '/test/workspace' };
const config2 = { port: 3333, workspacePath: '/test/workspace2' };
vi.mocked(fs.promises.readFile).mockRejectedValueOnce(
new Error('not found'),
);
(
vi.mocked(fs.promises.readdir) as Mock<
(path: fs.PathLike) => Promise<string[]>
>
).mockResolvedValue([
'qwen-code-ide-server-12345-111.json',
'qwen-code-ide-server-12345-222.json',
]);
vi.mocked(fs.promises.readFile)
.mockResolvedValueOnce(JSON.stringify(config1))
.mockResolvedValueOnce(JSON.stringify(config2));
vi.spyOn(IdeClient, 'validateWorkspacePath').mockReturnValue({
isValid: true,
});
const ideClient = await IdeClient.getInstance();
const result = await (
ideClient as unknown as {
getConnectionConfigFromFile: () => Promise<unknown>;
}
).getConnectionConfigFromFile();
expect(result).toEqual(config2);
delete process.env['QWEN_CODE_IDE_SERVER_PORT'];
});
});

View File

@@ -8,7 +8,6 @@ import * as fs from 'node:fs';
import { isSubpath } from '../utils/paths.js';
import { detectIde, type IdeInfo } from '../ide/detect-ide.js';
import { ideContextStore } from './ideContext.js';
import { Storage } from '../config/storage.js';
import {
IdeContextNotificationSchema,
IdeDiffAcceptedNotificationSchema,
@@ -573,103 +572,98 @@ export class IdeClient {
| (ConnectionConfig & { workspacePath?: string; ideInfo?: IdeInfo })
| undefined
> {
const portFromEnv = this.getPortFromEnv();
if (portFromEnv) {
try {
const ideDir = Storage.getGlobalIdeDir();
const lockFile = path.join(ideDir, `${portFromEnv}.lock`);
const lockFileContents = await fs.promises.readFile(lockFile, 'utf8');
return JSON.parse(lockFileContents);
} catch (_) {
// Fall through to legacy discovery.
}
if (!this.ideProcessInfo) {
return undefined;
}
// Legacy discovery for VSCode extension < v0.5.1.
return this.getLegacyConnectionConfig(portFromEnv);
}
// Legacy connection files were written in the global temp directory.
private async getLegacyConnectionConfig(
portFromEnv?: string,
): Promise<
| (ConnectionConfig & { workspacePath?: string; ideInfo?: IdeInfo })
| undefined
> {
if (this.ideProcessInfo) {
try {
const portFile = path.join(
os.tmpdir(),
`qwen-code-ide-server-${this.ideProcessInfo.pid}.json`,
);
const portFileContents = await fs.promises.readFile(portFile, 'utf8');
return JSON.parse(portFileContents);
} catch (_) {
// For older/newer extension versions, the file name matches the pattern
// /^qwen-code-ide-server-${pid}-\d+\.json$/. If multiple IDE
// windows are open, multiple files matching the pattern are expected to
// exist.
}
}
if (portFromEnv) {
try {
const portFile = path.join(
os.tmpdir(),
`qwen-code-ide-server-${portFromEnv}.json`,
);
const portFileContents = await fs.promises.readFile(portFile, 'utf8');
return JSON.parse(portFileContents);
} catch (_) {
// Ignore and fall through.
}
}
return undefined;
}
protected async getAllConnectionConfigs(
ideDir: string,
): Promise<
ConnectionConfig & Array<{ workspacePath?: string; ideInfo?: IdeInfo }>
> {
const fileRegex = new RegExp('^\\d+\\.lock$');
let lockFiles: string[];
// For backwards compatibility
try {
lockFiles = (await fs.promises.readdir(ideDir)).filter((file) =>
fileRegex.test(file),
const portFile = path.join(
os.tmpdir(),
`qwen-code-ide-server-${this.ideProcessInfo.pid}.json`,
);
const portFileContents = await fs.promises.readFile(portFile, 'utf8');
return JSON.parse(portFileContents);
} catch (_) {
// For newer extension versions, the file name matches the pattern
// /^qwen-code-ide-server-${pid}-\d+\.json$/. If multiple IDE
// windows are open, multiple files matching the pattern are expected to
// exist.
}
const portFileDir = path.join(os.tmpdir(), 'gemini', 'ide');
let portFiles;
try {
portFiles = await fs.promises.readdir(portFileDir);
} catch (e) {
logger.debug('Failed to read IDE connection directory:', e);
return [];
return undefined;
}
const fileContents = await Promise.all(
lockFiles.map(async (file) => {
const fullPath = path.join(ideDir, file);
try {
const stat = await fs.promises.stat(fullPath);
const content = await fs.promises.readFile(fullPath, 'utf8');
try {
const parsed = JSON.parse(content);
return { file, mtimeMs: stat.mtimeMs, parsed };
} catch (e) {
logger.debug('Failed to parse JSON from lock file: ', e);
return { file, mtimeMs: stat.mtimeMs, parsed: undefined };
}
} catch (e) {
// If we can't stat/read the file, treat it as very old so it doesn't
// win ties, and skip parsing by returning undefined content.
logger.debug('Failed to read/stat IDE lock file:', e);
return { file, mtimeMs: -Infinity, parsed: undefined };
}
}),
);
if (!portFiles) {
return undefined;
}
return fileContents
.filter(({ parsed }) => parsed !== undefined)
.sort((a, b) => b.mtimeMs - a.mtimeMs)
.map(({ parsed }) => parsed);
const fileRegex = new RegExp(
`^qwen-code-ide-server-${this.ideProcessInfo.pid}-\\d+\\.json$`,
);
const matchingFiles = portFiles
.filter((file) => fileRegex.test(file))
.sort();
if (matchingFiles.length === 0) {
return undefined;
}
let fileContents: string[];
try {
fileContents = await Promise.all(
matchingFiles.map((file) =>
fs.promises.readFile(path.join(portFileDir, file), 'utf8'),
),
);
} catch (e) {
logger.debug('Failed to read IDE connection config file(s):', e);
return undefined;
}
const parsedContents = fileContents.map((content) => {
try {
return JSON.parse(content);
} catch (e) {
logger.debug('Failed to parse JSON from config file: ', e);
return undefined;
}
});
const validWorkspaces = parsedContents.filter((content) => {
if (!content) {
return false;
}
const { isValid } = IdeClient.validateWorkspacePath(
content.workspacePath,
process.cwd(),
);
return isValid;
});
if (validWorkspaces.length === 0) {
return undefined;
}
if (validWorkspaces.length === 1) {
return validWorkspaces[0];
}
const portFromEnv = this.getPortFromEnv();
if (portFromEnv) {
const matchingPort = validWorkspaces.find(
(content) => String(content.port) === portFromEnv,
);
if (matchingPort) {
return matchingPort;
}
}
return validWorkspaces[0];
}
private createProxyAwareFetch() {

View File

@@ -50,7 +50,7 @@ describe('getIdeProcessInfo', () => {
expect(result).toEqual({ pid: 700, command: '/usr/lib/vscode/code' });
});
it('should return shell process info if grandparent lookup fails', async () => {
it('should return parent process info if grandparent lookup fails', async () => {
(os.platform as Mock).mockReturnValue('linux');
mockedExec
.mockResolvedValueOnce({ stdout: '800 /bin/bash' }) // pid 1000 -> ppid 800 (shell)
@@ -63,96 +63,134 @@ describe('getIdeProcessInfo', () => {
});
describe('on Windows', () => {
it('should return great-grandparent process using heuristic', async () => {
it('should traverse up and find the great-grandchild of the root process', async () => {
(os.platform as Mock).mockReturnValue('win32');
const processes = [
{
ProcessId: 1000,
ParentProcessId: 900,
Name: 'node.exe',
CommandLine: 'node.exe',
},
{
ProcessId: 900,
ParentProcessId: 800,
Name: 'powershell.exe',
CommandLine: 'powershell.exe',
},
{
ProcessId: 800,
ParentProcessId: 700,
Name: 'code.exe',
CommandLine: 'code.exe',
},
{
ProcessId: 700,
ParentProcessId: 0,
Name: 'wininit.exe',
CommandLine: 'wininit.exe',
},
];
mockedExec.mockImplementation((file: string, _args: string[]) => {
if (file === 'powershell') {
return Promise.resolve({ stdout: JSON.stringify(processes) });
const processInfoMap = new Map([
[
1000,
{
stdout:
'{"Name":"node.exe","ParentProcessId":900,"CommandLine":"node.exe"}',
},
],
[
900,
{
stdout:
'{"Name":"powershell.exe","ParentProcessId":800,"CommandLine":"powershell.exe"}',
},
],
[
800,
{
stdout:
'{"Name":"code.exe","ParentProcessId":700,"CommandLine":"code.exe"}',
},
],
[
700,
{
stdout:
'{"Name":"wininit.exe","ParentProcessId":0,"CommandLine":"wininit.exe"}',
},
],
]);
mockedExec.mockImplementation((command: string) => {
const pidMatch = command.match(/ProcessId=(\d+)/);
if (pidMatch) {
const pid = parseInt(pidMatch[1], 10);
return Promise.resolve(processInfoMap.get(pid));
}
return Promise.resolve({ stdout: '' });
return Promise.reject(new Error('Invalid command for mock'));
});
const result = await getIdeProcessInfo();
// Process chain: 1000 (node.exe) -> 900 (powershell.exe) -> 800 (code.exe) -> 700 (wininit.exe)
// ancestors = [1000, 900, 800, 700], length = 4
// Heuristic: return ancestors[length-3] = ancestors[1] = 900 (powershell.exe)
expect(result).toEqual({ pid: 900, command: 'powershell.exe' });
});
it('should handle empty process list gracefully', async () => {
it('should handle non-existent process gracefully', async () => {
(os.platform as Mock).mockReturnValue('win32');
mockedExec.mockResolvedValue({ stdout: '[]' });
mockedExec
.mockResolvedValueOnce({ stdout: '' }) // Non-existent PID returns empty due to -ErrorAction SilentlyContinue
.mockResolvedValueOnce({
stdout:
'{"Name":"fallback.exe","ParentProcessId":0,"CommandLine":"fallback.exe"}',
}); // Fallback call
const result = await getIdeProcessInfo();
// Should return current pid and empty command because process not found in map
expect(result).toEqual({ pid: 1000, command: '' });
expect(result).toEqual({ pid: 1000, command: 'fallback.exe' });
});
it('should handle malformed JSON output gracefully', async () => {
(os.platform as Mock).mockReturnValue('win32');
mockedExec.mockResolvedValue({ stdout: '{"invalid":json}' });
mockedExec
.mockResolvedValueOnce({ stdout: '{"invalid":json}' }) // Malformed JSON
.mockResolvedValueOnce({
stdout:
'{"Name":"fallback.exe","ParentProcessId":0,"CommandLine":"fallback.exe"}',
}); // Fallback call
const result = await getIdeProcessInfo();
expect(result).toEqual({ pid: 1000, command: '' });
expect(result).toEqual({ pid: 1000, command: 'fallback.exe' });
});
it('should return last ancestor if chain is too short', async () => {
it('should handle PowerShell errors without crashing the process chain', async () => {
(os.platform as Mock).mockReturnValue('win32');
const processInfoMap = new Map([
[1000, { stdout: '' }], // First process doesn't exist (empty due to -ErrorAction)
[
1001,
{
stdout:
'{"Name":"parent.exe","ParentProcessId":800,"CommandLine":"parent.exe"}',
},
],
[
800,
{
stdout:
'{"Name":"ide.exe","ParentProcessId":0,"CommandLine":"ide.exe"}',
},
],
]);
const processes = [
{
ProcessId: 1000,
ParentProcessId: 900,
Name: 'node.exe',
CommandLine: 'node.exe',
},
{
ProcessId: 900,
ParentProcessId: 0,
Name: 'explorer.exe',
CommandLine: 'explorer.exe',
},
];
// Mock the process.pid to test traversal with missing processes
Object.defineProperty(process, 'pid', {
value: 1001,
configurable: true,
});
mockedExec.mockImplementation((file: string, _args: string[]) => {
if (file === 'powershell') {
return Promise.resolve({ stdout: JSON.stringify(processes) });
mockedExec.mockImplementation((command: string) => {
const pidMatch = command.match(/ProcessId=(\d+)/);
if (pidMatch) {
const pid = parseInt(pidMatch[1], 10);
return Promise.resolve(processInfoMap.get(pid) || { stdout: '' });
}
return Promise.resolve({ stdout: '' });
return Promise.reject(new Error('Invalid command for mock'));
});
const result = await getIdeProcessInfo();
// ancestors = [1000, 900], length = 2 (< 3)
// Heuristic: return ancestors[length-1] = ancestors[1] = 900 (explorer.exe)
expect(result).toEqual({ pid: 900, command: 'explorer.exe' });
// Should return the current process command since traversal continues despite missing processes
expect(result).toEqual({ pid: 1001, command: 'parent.exe' });
// Reset process.pid
Object.defineProperty(process, 'pid', {
value: 1000,
configurable: true,
});
});
it('should handle partial JSON data with defaults', async () => {
(os.platform as Mock).mockReturnValue('win32');
mockedExec
.mockResolvedValueOnce({ stdout: '{"Name":"partial.exe"}' }) // Missing ParentProcessId, defaults to 0
.mockResolvedValueOnce({
stdout:
'{"Name":"root.exe","ParentProcessId":0,"CommandLine":"root.exe"}',
}); // Get grandparent info
const result = await getIdeProcessInfo();
expect(result).toEqual({ pid: 1000, command: 'root.exe' });
});
});
});

View File

@@ -4,43 +4,74 @@
* SPDX-License-Identifier: Apache-2.0
*/
import { exec, execFile } from 'node:child_process';
import { exec } from 'node:child_process';
import { promisify } from 'node:util';
import os from 'node:os';
import path from 'node:path';
const execAsync = promisify(exec);
const execFileAsync = promisify(execFile);
const MAX_TRAVERSAL_DEPTH = 32;
/**
* Fetches the parent process ID, name, and command for a given process ID.
*
* @param pid The process ID to inspect.
* @returns A promise that resolves to the parent's PID, name, and command.
*/
async function getProcessInfo(pid: number): Promise<{
parentPid: number;
name: string;
command: string;
}> {
// Only used for Unix systems (macOS and Linux)
try {
const command = `ps -o ppid=,command= -p ${pid}`;
const { stdout } = await execAsync(command);
const trimmedStdout = stdout.trim();
if (!trimmedStdout) {
return { parentPid: 0, name: '', command: '' };
const platform = os.platform();
if (platform === 'win32') {
const powershellCommand = [
'$p = Get-CimInstance Win32_Process',
`-Filter 'ProcessId=${pid}'`,
'-ErrorAction SilentlyContinue;',
'if ($p) {',
'@{Name=$p.Name;ParentProcessId=$p.ParentProcessId;CommandLine=$p.CommandLine}',
'| ConvertTo-Json',
'}',
].join(' ');
const { stdout } = await execAsync(`powershell "${powershellCommand}"`);
const output = stdout.trim();
if (!output) return { parentPid: 0, name: '', command: '' };
const {
Name = '',
ParentProcessId = 0,
CommandLine = '',
} = JSON.parse(output);
return {
parentPid: ParentProcessId,
name: Name,
command: CommandLine ?? '',
};
} else {
const command = `ps -o ppid=,command= -p ${pid}`;
const { stdout } = await execAsync(command);
const trimmedStdout = stdout.trim();
if (!trimmedStdout) {
return { parentPid: 0, name: '', command: '' };
}
const ppidString = trimmedStdout.split(/\s+/)[0];
const parentPid = parseInt(ppidString, 10);
const fullCommand = trimmedStdout.substring(ppidString.length).trim();
const processName = path.basename(fullCommand.split(' ')[0]);
return {
parentPid: isNaN(parentPid) ? 1 : parentPid,
name: processName,
command: fullCommand,
};
}
const parts = trimmedStdout.split(/\s+/);
const ppidString = parts[0];
const parentPid = parseInt(ppidString, 10);
const fullCommand = trimmedStdout.substring(ppidString.length).trim();
const processName = path.basename(fullCommand.split(' ')[0]);
return {
parentPid: isNaN(parentPid) ? 1 : parentPid,
name: processName,
command: fullCommand,
};
} catch (_e) {
console.debug(`Failed to get process info for pid ${pid}:`, _e);
return { parentPid: 0, name: '', command: '' };
}
}
/**
* Finds the IDE process info on Unix-like systems.
*
@@ -75,15 +106,15 @@ async function getIdeProcessInfoForUnix(): Promise<{
} catch {
// Ignore if getting grandparent fails, we'll just use the parent pid.
}
const { command: ideCommand } = await getProcessInfo(idePid);
return { pid: idePid, command: ideCommand };
const { command } = await getProcessInfo(idePid);
return { pid: idePid, command };
}
if (parentPid <= 1) {
break; // Reached the root
}
currentPid = parentPid;
} catch (_e) {
} catch {
// Process in chain died
break;
}
@@ -93,104 +124,50 @@ async function getIdeProcessInfoForUnix(): Promise<{
return { pid: currentPid, command };
}
interface ProcessInfo {
pid: number;
parentPid: number;
name: string;
command: string;
}
interface RawProcessInfo {
ProcessId?: number;
ParentProcessId?: number;
Name?: string;
CommandLine?: string;
}
/**
* Fetches the entire process table on Windows.
* Finds the IDE process info on Windows.
*
* The strategy is to find the great-grandchild of the root process.
*
* @returns A promise that resolves to the PID and command of the IDE process.
*/
async function getProcessTableWindows(): Promise<Map<number, ProcessInfo>> {
const processMap = new Map<number, ProcessInfo>();
try {
const powershellCommand =
'Get-CimInstance Win32_Process | Select-Object ProcessId,ParentProcessId,Name,CommandLine | ConvertTo-Json -Compress';
const { stdout } = await execFileAsync(
'powershell',
['-NoProfile', '-NonInteractive', '-Command', powershellCommand],
{ maxBuffer: 10 * 1024 * 1024 },
);
if (!stdout.trim()) {
return processMap;
}
let processes: RawProcessInfo | RawProcessInfo[];
try {
processes = JSON.parse(stdout);
} catch (_e) {
return processMap;
}
if (!Array.isArray(processes)) {
processes = [processes];
}
for (const p of processes) {
if (p && typeof p.ProcessId === 'number') {
processMap.set(p.ProcessId, {
pid: p.ProcessId,
parentPid: p.ParentProcessId || 0,
name: p.Name || '',
command: p.CommandLine || '',
});
}
}
} catch (_e) {
// Fallback or error handling if PowerShell fails
}
return processMap;
}
async function getIdeProcessInfoForWindows(): Promise<{
pid: number;
command: string;
}> {
// Fetch the entire process table in one go.
const processMap = await getProcessTableWindows();
let currentPid = process.pid;
let previousPid = process.pid;
const myPid = process.pid;
const myProc = processMap.get(myPid);
for (let i = 0; i < MAX_TRAVERSAL_DEPTH; i++) {
try {
const { parentPid } = await getProcessInfo(currentPid);
if (!myProc) {
// Fallback: return current process info if snapshot fails
return { pid: myPid, command: '' };
}
if (parentPid > 0) {
try {
const { parentPid: grandParentPid } = await getProcessInfo(parentPid);
if (grandParentPid === 0) {
// We've found the grandchild of the root (`currentPid`). The IDE
// process is its child, which we've stored in `previousPid`.
const { command } = await getProcessInfo(previousPid);
return { pid: previousPid, command };
}
} catch {
// getting grandparent failed, proceed
}
}
// Perform tree traversal in memory
const ancestors: ProcessInfo[] = [];
let curr: ProcessInfo | undefined = myProc;
for (let i = 0; i < MAX_TRAVERSAL_DEPTH && curr; i++) {
ancestors.push(curr);
if (curr.parentPid === 0 || !processMap.has(curr.parentPid)) {
// Parent process not in map, stop traversal
if (parentPid <= 0) {
break; // Reached the root
}
previousPid = currentPid;
currentPid = parentPid;
} catch {
// Process in chain died
break;
}
curr = processMap.get(curr.parentPid);
}
// Use heuristic: return the great-grandparent (ancestors[length-3])
if (ancestors.length >= 3) {
const target = ancestors[ancestors.length - 3];
return { pid: target.pid, command: target.command };
} else if (ancestors.length > 0) {
const target = ancestors[ancestors.length - 1];
return { pid: target.pid, command: target.command };
}
return { pid: myPid, command: myProc.command };
const { command } = await getProcessInfo(currentPid);
return { pid: currentPid, command };
}
/**

View File

@@ -9,6 +9,25 @@ export * from './config/config.js';
export * from './output/types.js';
export * from './output/json-formatter.js';
// Export models
export {
type ModelCapabilities,
type ModelGenerationConfig,
type ModelConfig as ProviderModelConfig,
type ModelProvidersConfig,
type ResolvedModelConfig,
type AvailableModel,
type ModelSwitchMetadata,
type CurrentModelInfo,
SelectionSource,
DEFAULT_GENERATION_CONFIG,
DEFAULT_BASE_URLS,
QWEN_OAUTH_MODELS,
ModelSelectionManager,
type ModelChangeCallback,
type ModelSelectionManagerOptions,
} from './models/index.js';
// Export Core Logic
export * from './core/client.js';
export * from './core/contentGenerator.js';

View File

@@ -0,0 +1,27 @@
/**
* @license
* Copyright 2025 Qwen Team
* SPDX-License-Identifier: Apache-2.0
*/
export {
type ModelCapabilities,
type ModelGenerationConfig,
type ModelConfig,
type ModelProvidersConfig,
type ResolvedModelConfig,
type AvailableModel,
type ModelSwitchMetadata,
type CurrentModelInfo,
SelectionSource,
DEFAULT_GENERATION_CONFIG,
DEFAULT_BASE_URLS,
} from './types.js';
export { QWEN_OAUTH_MODELS } from './modelRegistry.js';
export {
ModelSelectionManager,
type ModelChangeCallback,
type ModelSelectionManagerOptions,
} from './modelSelectionManager.js';

View File

@@ -0,0 +1,336 @@
/**
* @license
* Copyright 2025 Qwen Team
* SPDX-License-Identifier: Apache-2.0
*/
import { describe, it, expect, beforeEach } from 'vitest';
import { ModelRegistry, QWEN_OAUTH_MODELS } from './modelRegistry.js';
import { AuthType } from '../core/contentGenerator.js';
import type { ModelProvidersConfig } from './types.js';
describe('ModelRegistry', () => {
describe('initialization', () => {
it('should always include hard-coded qwen-oauth models', () => {
const registry = new ModelRegistry();
const qwenModels = registry.getModelsForAuthType(AuthType.QWEN_OAUTH);
expect(qwenModels.length).toBe(QWEN_OAUTH_MODELS.length);
expect(qwenModels[0].id).toBe('coder-model');
expect(qwenModels[1].id).toBe('vision-model');
});
it('should initialize with empty config', () => {
const registry = new ModelRegistry();
expect(registry.hasAuthType(AuthType.QWEN_OAUTH)).toBe(true);
expect(registry.hasAuthType(AuthType.USE_OPENAI)).toBe(false);
});
it('should initialize with custom models config', () => {
const modelProvidersConfig: ModelProvidersConfig = {
openai: [
{
id: 'gpt-4-turbo',
name: 'GPT-4 Turbo',
baseUrl: 'https://api.openai.com/v1',
},
],
};
const registry = new ModelRegistry(modelProvidersConfig);
expect(registry.hasAuthType(AuthType.USE_OPENAI)).toBe(true);
const openaiModels = registry.getModelsForAuthType(AuthType.USE_OPENAI);
expect(openaiModels.length).toBe(1);
expect(openaiModels[0].id).toBe('gpt-4-turbo');
});
it('should ignore qwen-oauth models in config (hard-coded)', () => {
const modelProvidersConfig: ModelProvidersConfig = {
'qwen-oauth': [
{
id: 'custom-qwen',
name: 'Custom Qwen',
},
],
};
const registry = new ModelRegistry(modelProvidersConfig);
// Should still use hard-coded qwen-oauth models
const qwenModels = registry.getModelsForAuthType(AuthType.QWEN_OAUTH);
expect(qwenModels.length).toBe(QWEN_OAUTH_MODELS.length);
expect(qwenModels.find((m) => m.id === 'custom-qwen')).toBeUndefined();
});
});
describe('getModelsForAuthType', () => {
let registry: ModelRegistry;
beforeEach(() => {
const modelProvidersConfig: ModelProvidersConfig = {
openai: [
{
id: 'gpt-4-turbo',
name: 'GPT-4 Turbo',
description: 'Most capable GPT-4',
baseUrl: 'https://api.openai.com/v1',
capabilities: { vision: true },
},
{
id: 'gpt-3.5-turbo',
name: 'GPT-3.5 Turbo',
capabilities: { vision: false },
},
],
};
registry = new ModelRegistry(modelProvidersConfig);
});
it('should return models for existing authType', () => {
const models = registry.getModelsForAuthType(AuthType.USE_OPENAI);
expect(models.length).toBe(2);
});
it('should return empty array for non-existent authType', () => {
const models = registry.getModelsForAuthType(AuthType.USE_VERTEX_AI);
expect(models.length).toBe(0);
});
it('should return AvailableModel format with correct fields', () => {
const models = registry.getModelsForAuthType(AuthType.USE_OPENAI);
const gpt4 = models.find((m) => m.id === 'gpt-4-turbo');
expect(gpt4).toBeDefined();
expect(gpt4?.label).toBe('GPT-4 Turbo');
expect(gpt4?.description).toBe('Most capable GPT-4');
expect(gpt4?.isVision).toBe(true);
expect(gpt4?.authType).toBe(AuthType.USE_OPENAI);
});
});
describe('getModel', () => {
let registry: ModelRegistry;
beforeEach(() => {
const modelProvidersConfig: ModelProvidersConfig = {
openai: [
{
id: 'gpt-4-turbo',
name: 'GPT-4 Turbo',
baseUrl: 'https://api.openai.com/v1',
generationConfig: {
temperature: 0.8,
max_tokens: 4096,
},
},
],
};
registry = new ModelRegistry(modelProvidersConfig);
});
it('should return resolved model config', () => {
const model = registry.getModel(AuthType.USE_OPENAI, 'gpt-4-turbo');
expect(model).toBeDefined();
expect(model?.id).toBe('gpt-4-turbo');
expect(model?.name).toBe('GPT-4 Turbo');
expect(model?.authType).toBe(AuthType.USE_OPENAI);
expect(model?.baseUrl).toBe('https://api.openai.com/v1');
});
it('should merge generationConfig with defaults', () => {
const model = registry.getModel(AuthType.USE_OPENAI, 'gpt-4-turbo');
expect(model?.generationConfig.temperature).toBe(0.8);
expect(model?.generationConfig.max_tokens).toBe(4096);
// Default values should be applied
expect(model?.generationConfig.top_p).toBe(0.9);
expect(model?.generationConfig.timeout).toBe(60000);
});
it('should return undefined for non-existent model', () => {
const model = registry.getModel(AuthType.USE_OPENAI, 'non-existent');
expect(model).toBeUndefined();
});
it('should return undefined for non-existent authType', () => {
const model = registry.getModel(AuthType.USE_VERTEX_AI, 'some-model');
expect(model).toBeUndefined();
});
});
describe('hasModel', () => {
let registry: ModelRegistry;
beforeEach(() => {
registry = new ModelRegistry({
openai: [{ id: 'gpt-4', name: 'GPT-4' }],
});
});
it('should return true for existing model', () => {
expect(registry.hasModel(AuthType.USE_OPENAI, 'gpt-4')).toBe(true);
});
it('should return false for non-existent model', () => {
expect(registry.hasModel(AuthType.USE_OPENAI, 'non-existent')).toBe(
false,
);
});
it('should return false for non-existent authType', () => {
expect(registry.hasModel(AuthType.USE_VERTEX_AI, 'gpt-4')).toBe(false);
});
});
describe('getFirstModelForAuthType', () => {
it('should return first model for authType', () => {
const registry = new ModelRegistry({
openai: [
{ id: 'first', name: 'First' },
{ id: 'second', name: 'Second' },
],
});
const firstModel = registry.getFirstModelForAuthType(AuthType.USE_OPENAI);
expect(firstModel?.id).toBe('first');
});
it('should return undefined for empty authType', () => {
const registry = new ModelRegistry();
const firstModel = registry.getFirstModelForAuthType(AuthType.USE_OPENAI);
expect(firstModel).toBeUndefined();
});
});
describe('getDefaultModelForAuthType', () => {
it('should return coder-model for qwen-oauth', () => {
const registry = new ModelRegistry();
const defaultModel = registry.getDefaultModelForAuthType(
AuthType.QWEN_OAUTH,
);
expect(defaultModel?.id).toBe('coder-model');
});
it('should return first model for other authTypes', () => {
const registry = new ModelRegistry({
openai: [
{ id: 'gpt-4', name: 'GPT-4' },
{ id: 'gpt-3.5', name: 'GPT-3.5' },
],
});
const defaultModel = registry.getDefaultModelForAuthType(
AuthType.USE_OPENAI,
);
expect(defaultModel?.id).toBe('gpt-4');
});
});
describe('getAvailableAuthTypes', () => {
it('should return all configured authTypes', () => {
const registry = new ModelRegistry({
openai: [{ id: 'gpt-4', name: 'GPT-4' }],
});
const authTypes = registry.getAvailableAuthTypes();
expect(authTypes).toContain(AuthType.QWEN_OAUTH);
expect(authTypes).toContain(AuthType.USE_OPENAI);
});
});
describe('validation', () => {
it('should throw error for model without id', () => {
expect(
() =>
new ModelRegistry({
openai: [{ id: '', name: 'No ID' }],
}),
).toThrow('missing required field: id');
});
});
describe('default base URLs', () => {
it('should apply default dashscope URL for qwen-oauth', () => {
const registry = new ModelRegistry();
const model = registry.getModel(AuthType.QWEN_OAUTH, 'coder-model');
expect(model?.baseUrl).toBe(
'https://dashscope.aliyuncs.com/compatible-mode/v1',
);
});
it('should apply default openai URL when not specified', () => {
const registry = new ModelRegistry({
openai: [{ id: 'gpt-4', name: 'GPT-4' }],
});
const model = registry.getModel(AuthType.USE_OPENAI, 'gpt-4');
expect(model?.baseUrl).toBe('https://api.openai.com/v1');
});
it('should use custom baseUrl when specified', () => {
const registry = new ModelRegistry({
openai: [
{
id: 'deepseek',
name: 'DeepSeek',
baseUrl: 'https://api.deepseek.com/v1',
},
],
});
const model = registry.getModel(AuthType.USE_OPENAI, 'deepseek');
expect(model?.baseUrl).toBe('https://api.deepseek.com/v1');
});
});
describe('findAuthTypesForModel', () => {
it('should return empty array for non-existent model', () => {
const registry = new ModelRegistry();
const authTypes = registry.findAuthTypesForModel('non-existent');
expect(authTypes).toEqual([]);
});
it('should return authTypes that have the model', () => {
const registry = new ModelRegistry({
openai: [{ id: 'gpt-4', name: 'GPT-4' }],
});
const authTypes = registry.findAuthTypesForModel('gpt-4');
expect(authTypes).toContain(AuthType.USE_OPENAI);
expect(authTypes.length).toBe(1);
});
it('should return multiple authTypes if model exists in multiple', () => {
const registry = new ModelRegistry({
openai: [{ id: 'shared-model', name: 'Shared' }],
'gemini-api-key': [{ id: 'shared-model', name: 'Shared Gemini' }],
});
const authTypes = registry.findAuthTypesForModel('shared-model');
expect(authTypes.length).toBe(2);
expect(authTypes).toContain(AuthType.USE_OPENAI);
expect(authTypes).toContain(AuthType.USE_GEMINI);
});
it('should prioritize preferred authType in results', () => {
const registry = new ModelRegistry({
openai: [{ id: 'shared-model', name: 'Shared' }],
'gemini-api-key': [{ id: 'shared-model', name: 'Shared Gemini' }],
});
const authTypes = registry.findAuthTypesForModel(
'shared-model',
AuthType.USE_GEMINI,
);
expect(authTypes[0]).toBe(AuthType.USE_GEMINI);
});
it('should handle qwen-oauth models', () => {
const registry = new ModelRegistry();
const authTypes = registry.findAuthTypesForModel('coder-model');
expect(authTypes).toContain(AuthType.QWEN_OAUTH);
});
});
});

View File

@@ -0,0 +1,268 @@
/**
* @license
* Copyright 2025 Qwen Team
* SPDX-License-Identifier: Apache-2.0
*/
import { AuthType } from '../core/contentGenerator.js';
import {
type ModelConfig,
type ModelProvidersConfig,
type ResolvedModelConfig,
type AvailableModel,
type ModelGenerationConfig,
DEFAULT_GENERATION_CONFIG,
DEFAULT_BASE_URLS,
} from './types.js';
import { DEFAULT_QWEN_MODEL } from '../config/models.js';
/**
* Hard-coded Qwen OAuth models that are always available.
* These cannot be overridden by user configuration.
*/
export const QWEN_OAUTH_MODELS: ModelConfig[] = [
{
id: 'coder-model',
name: 'Qwen Coder',
description:
'The latest Qwen Coder model from Alibaba Cloud ModelStudio (version: qwen3-coder-plus-2025-09-23)',
capabilities: { vision: false },
generationConfig: {
temperature: 0.7,
top_p: 0.9,
max_tokens: 8192,
timeout: 60000,
maxRetries: 3,
},
},
{
id: 'vision-model',
name: 'Qwen Vision',
description:
'The latest Qwen Vision model from Alibaba Cloud ModelStudio (version: qwen3-vl-plus-2025-09-23)',
capabilities: { vision: true },
generationConfig: {
temperature: 0.7,
top_p: 0.9,
max_tokens: 8192,
timeout: 60000,
maxRetries: 3,
},
},
];
/**
* Central registry for managing model configurations.
* Models are organized by authType.
*/
export class ModelRegistry {
private modelsByAuthType: Map<AuthType, Map<string, ResolvedModelConfig>>;
// Reverse index for O(1) model lookups: modelId -> authTypes[]
private modelIdToAuthTypes: Map<string, AuthType[]>;
constructor(modelProvidersConfig?: ModelProvidersConfig) {
this.modelsByAuthType = new Map();
this.modelIdToAuthTypes = new Map();
// Always register qwen-oauth models (hard-coded, cannot be overridden)
this.registerAuthTypeModels(AuthType.QWEN_OAUTH, QWEN_OAUTH_MODELS);
// Register user-configured models for other authTypes
if (modelProvidersConfig) {
for (const [authType, models] of Object.entries(modelProvidersConfig)) {
// Skip qwen-oauth as it uses hard-coded models
if (authType === AuthType.QWEN_OAUTH) {
continue;
}
const authTypeEnum = authType as AuthType;
this.registerAuthTypeModels(authTypeEnum, models);
}
}
}
/**
* Register models for an authType
*/
private registerAuthTypeModels(
authType: AuthType,
models: ModelConfig[],
): void {
const modelMap = new Map<string, ResolvedModelConfig>();
for (const config of models) {
const resolved = this.resolveModelConfig(config, authType);
modelMap.set(config.id, resolved);
// Update reverse index
const existingAuthTypes = this.modelIdToAuthTypes.get(config.id) || [];
existingAuthTypes.push(authType);
this.modelIdToAuthTypes.set(config.id, existingAuthTypes);
}
this.modelsByAuthType.set(authType, modelMap);
}
/**
* Get all models for a specific authType.
* This is used by /model command to show only relevant models.
*/
getModelsForAuthType(authType: AuthType): AvailableModel[] {
const models = this.modelsByAuthType.get(authType);
if (!models) return [];
return Array.from(models.values()).map((model) => ({
id: model.id,
label: model.name,
description: model.description,
capabilities: model.capabilities,
authType: model.authType,
isVision: model.capabilities?.vision ?? false,
}));
}
/**
* Get all available authTypes that have models configured
*/
getAvailableAuthTypes(): AuthType[] {
return Array.from(this.modelsByAuthType.keys());
}
/**
* Get model configuration by authType and modelId
*/
getModel(
authType: AuthType,
modelId: string,
): ResolvedModelConfig | undefined {
const models = this.modelsByAuthType.get(authType);
return models?.get(modelId);
}
/**
* Check if model exists for given authType
*/
hasModel(authType: AuthType, modelId: string): boolean {
const models = this.modelsByAuthType.get(authType);
return models?.has(modelId) ?? false;
}
/**
* Get first model for an authType (used as default)
*/
getFirstModelForAuthType(
authType: AuthType,
): ResolvedModelConfig | undefined {
const models = this.modelsByAuthType.get(authType);
if (!models || models.size === 0) return undefined;
return Array.from(models.values())[0];
}
/**
* Get default model for an authType.
* For qwen-oauth, returns the coder model.
* For others, returns the first configured model.
*/
getDefaultModelForAuthType(
authType: AuthType,
): ResolvedModelConfig | undefined {
if (authType === AuthType.QWEN_OAUTH) {
return this.getModel(authType, DEFAULT_QWEN_MODEL);
}
return this.getFirstModelForAuthType(authType);
}
/**
* Resolve model config by applying defaults
*/
private resolveModelConfig(
config: ModelConfig,
authType: AuthType,
): ResolvedModelConfig {
this.validateModelConfig(config, authType);
const defaultBaseUrl = DEFAULT_BASE_URLS[authType] || '';
return {
...config,
authType,
name: config.name || config.id,
baseUrl: config.baseUrl || defaultBaseUrl,
generationConfig: this.mergeGenerationConfig(config.generationConfig),
capabilities: config.capabilities || {},
};
}
/**
* Merge generation config with defaults
*/
private mergeGenerationConfig(
config?: ModelGenerationConfig,
): ModelGenerationConfig {
if (!config) {
return { ...DEFAULT_GENERATION_CONFIG };
}
return {
...DEFAULT_GENERATION_CONFIG,
...config,
};
}
/**
* Validate model configuration
*/
private validateModelConfig(config: ModelConfig, authType: AuthType): void {
if (!config.id) {
throw new Error(
`Model config in authType '${authType}' missing required field: id`,
);
}
}
/**
* Check if the registry has any models for a given authType
*/
hasAuthType(authType: AuthType): boolean {
const models = this.modelsByAuthType.get(authType);
return models !== undefined && models.size > 0;
}
/**
* Get total number of models across all authTypes
*/
getTotalModelCount(): number {
let count = 0;
for (const models of this.modelsByAuthType.values()) {
count += models.size;
}
return count;
}
/**
* Find all authTypes that have a model with the given modelId.
* Uses reverse index for O(1) lookup.
* Returns empty array if model doesn't exist.
*
* @param modelId - The model ID to search for
* @param preferredAuthType - Optional authType to prioritize in results
* @returns Array of authTypes that have this model (preferred authType first if found)
*/
findAuthTypesForModel(
modelId: string,
preferredAuthType?: AuthType,
): AuthType[] {
const authTypes = this.modelIdToAuthTypes.get(modelId) || [];
// If no preferred authType or it's not in the list, return as-is
if (!preferredAuthType || !authTypes.includes(preferredAuthType)) {
return authTypes;
}
// Move preferred authType to front
return [
preferredAuthType,
...authTypes.filter((at) => at !== preferredAuthType),
];
}
}

View File

@@ -0,0 +1,235 @@
/**
* @license
* Copyright 2025 Qwen Team
* SPDX-License-Identifier: Apache-2.0
*/
import { describe, it, expect, beforeEach, vi } from 'vitest';
import { ModelSelectionManager } from './modelSelectionManager.js';
import { AuthType } from '../core/contentGenerator.js';
import { SelectionSource } from './types.js';
import type { ModelProvidersConfig } from './types.js';
describe('ModelSelectionManager', () => {
let manager: ModelSelectionManager;
const defaultConfig: ModelProvidersConfig = {
openai: [
{
id: 'gpt-4-turbo',
name: 'GPT-4 Turbo',
baseUrl: 'https://api.openai.com/v1',
},
{
id: 'gpt-3.5-turbo',
name: 'GPT-3.5 Turbo',
baseUrl: 'https://api.openai.com/v1',
},
{
id: 'deepseek-coder',
name: 'DeepSeek Coder',
baseUrl: 'https://api.deepseek.com/v1',
},
],
};
describe('initialization', () => {
it('should initialize with default qwen-oauth authType and coder-model', () => {
manager = new ModelSelectionManager({
modelProvidersConfig: defaultConfig,
});
expect(manager.getCurrentAuthType()).toBe(AuthType.QWEN_OAUTH);
expect(manager.getCurrentModelId()).toBe('coder-model');
expect(manager.getSelectionSource()).toBe(SelectionSource.DEFAULT);
});
it('should initialize with specified authType and model', () => {
manager = new ModelSelectionManager({
modelProvidersConfig: defaultConfig,
initialAuthType: AuthType.USE_OPENAI,
initialModelId: 'gpt-4-turbo',
});
expect(manager.getCurrentAuthType()).toBe(AuthType.USE_OPENAI);
expect(manager.getCurrentModelId()).toBe('gpt-4-turbo');
expect(manager.getSelectionSource()).toBe(SelectionSource.SETTINGS);
});
it('should fall back to the default model if the specified model is not found', () => {
manager = new ModelSelectionManager({
modelProvidersConfig: defaultConfig,
initialAuthType: AuthType.USE_OPENAI,
initialModelId: 'non-existent',
});
expect(manager.getCurrentAuthType()).toBe(AuthType.USE_OPENAI);
// Should fall back to the first model
expect(manager.getCurrentModelId()).toBe('gpt-4-turbo');
});
});
describe('switchModel', () => {
beforeEach(() => {
manager = new ModelSelectionManager({
modelProvidersConfig: defaultConfig,
initialAuthType: AuthType.USE_OPENAI,
initialModelId: 'gpt-4-turbo',
});
});
it('should switch model within same authType', async () => {
await manager.switchModel('gpt-3.5-turbo', SelectionSource.USER_MANUAL);
expect(manager.getCurrentModelId()).toBe('gpt-3.5-turbo');
expect(manager.getCurrentAuthType()).toBe(AuthType.USE_OPENAI);
});
it('should update selection source on switch', async () => {
await manager.switchModel('gpt-3.5-turbo', SelectionSource.USER_MANUAL);
expect(manager.getSelectionSource()).toBe(SelectionSource.USER_MANUAL);
});
it('should call onModelChange callback', async () => {
const onModelChange = vi.fn();
manager.setOnModelChange(onModelChange);
await manager.switchModel('gpt-3.5-turbo', SelectionSource.USER_MANUAL);
expect(onModelChange).toHaveBeenCalledTimes(1);
expect(onModelChange).toHaveBeenCalledWith(
AuthType.USE_OPENAI,
expect.objectContaining({ id: 'gpt-3.5-turbo' }),
);
});
it('should throw error for non-existent model', async () => {
await expect(
manager.switchModel('non-existent', SelectionSource.USER_MANUAL),
).rejects.toThrow('not found for authType');
});
it('should allow any source to override previous selection', async () => {
// First set to USER_MANUAL
await manager.switchModel('gpt-3.5-turbo', SelectionSource.USER_MANUAL);
expect(manager.getCurrentModelId()).toBe('gpt-3.5-turbo');
// Should allow PROGRAMMATIC_OVERRIDE to override USER_MANUAL
await manager.switchModel(
'gpt-4-turbo',
SelectionSource.PROGRAMMATIC_OVERRIDE,
);
expect(manager.getCurrentModelId()).toBe('gpt-4-turbo');
// Should allow SETTINGS to override PROGRAMMATIC_OVERRIDE
await manager.switchModel('gpt-3.5-turbo', SelectionSource.SETTINGS);
expect(manager.getCurrentModelId()).toBe('gpt-3.5-turbo');
});
});
describe('getAvailableModels', () => {
it('should return models for current authType', () => {
manager = new ModelSelectionManager({
modelProvidersConfig: defaultConfig,
initialAuthType: AuthType.USE_OPENAI,
});
const models = manager.getAvailableModels();
expect(models.length).toBe(3);
expect(models.map((m) => m.id)).toContain('gpt-4-turbo');
});
it('should return qwen-oauth models by default', () => {
manager = new ModelSelectionManager({
modelProvidersConfig: defaultConfig,
});
const models = manager.getAvailableModels();
expect(models.some((m) => m.id === 'coder-model')).toBe(true);
expect(models.some((m) => m.id === 'vision-model')).toBe(true);
});
});
describe('getAvailableAuthTypes', () => {
it('should return all available authTypes', () => {
manager = new ModelSelectionManager({
modelProvidersConfig: defaultConfig,
});
const authTypes = manager.getAvailableAuthTypes();
expect(authTypes).toContain(AuthType.QWEN_OAUTH);
expect(authTypes).toContain(AuthType.USE_OPENAI);
});
});
describe('getCurrentModel', () => {
beforeEach(() => {
manager = new ModelSelectionManager({
modelProvidersConfig: defaultConfig,
initialAuthType: AuthType.USE_OPENAI,
initialModelId: 'gpt-4-turbo',
});
});
it('should return current model info', () => {
const modelInfo = manager.getCurrentModel();
expect(modelInfo.authType).toBe(AuthType.USE_OPENAI);
expect(modelInfo.modelId).toBe('gpt-4-turbo');
expect(modelInfo.model.id).toBe('gpt-4-turbo');
expect(modelInfo.selectionSource).toBe(SelectionSource.SETTINGS);
});
it('should throw error if no model selected', () => {
// Create manager with invalid initial state
const mgr = new ModelSelectionManager({
modelProvidersConfig: { openai: [] },
initialAuthType: AuthType.USE_OPENAI,
});
expect(() => mgr.getCurrentModel()).toThrow('No model selected');
});
});
describe('selection timestamp', () => {
it('should update timestamp on model switch', async () => {
manager = new ModelSelectionManager({
modelProvidersConfig: defaultConfig,
initialAuthType: AuthType.USE_OPENAI,
initialModelId: 'gpt-4-turbo',
});
const initialTimestamp = manager.getSelectionTimestamp();
// Wait a small amount to ensure timestamp changes
await new Promise((resolve) => setTimeout(resolve, 10));
await manager.switchModel('gpt-3.5-turbo', SelectionSource.USER_MANUAL);
expect(manager.getSelectionTimestamp()).toBeGreaterThan(initialTimestamp);
});
});
describe('delegation methods', () => {
beforeEach(() => {
manager = new ModelSelectionManager({
modelProvidersConfig: defaultConfig,
});
});
it('should delegate hasModel to registry', () => {
expect(manager.hasModel(AuthType.QWEN_OAUTH, 'coder-model')).toBe(true);
expect(manager.hasModel(AuthType.QWEN_OAUTH, 'non-existent')).toBe(false);
});
it('should delegate getModel to registry', () => {
const model = manager.getModel(AuthType.QWEN_OAUTH, 'coder-model');
expect(model).toBeDefined();
expect(model?.id).toBe('coder-model');
const nonExistent = manager.getModel(AuthType.QWEN_OAUTH, 'non-existent');
expect(nonExistent).toBeUndefined();
});
});
});

View File

@@ -0,0 +1,251 @@
/**
* @license
* Copyright 2025 Qwen Team
* SPDX-License-Identifier: Apache-2.0
*/
import { AuthType } from '../core/contentGenerator.js';
import { ModelRegistry } from './modelRegistry.js';
import {
type ResolvedModelConfig,
type AvailableModel,
type ModelSwitchMetadata,
type CurrentModelInfo,
type ModelProvidersConfig,
SelectionSource,
} from './types.js';
/**
* Callback type for when the model changes.
* This is used to notify Config to update the ContentGenerator.
*/
export type ModelChangeCallback = (
authType: AuthType,
model: ResolvedModelConfig,
) => Promise<void>;
/**
* Options for initializing the ModelSelectionManager
*/
export interface ModelSelectionManagerOptions {
/** Initial authType from persisted settings */
initialAuthType?: AuthType;
/** Initial model ID from persisted settings */
initialModelId?: string;
/** Callback when model changes */
onModelChange?: ModelChangeCallback;
/** Model providers configuration for creating ModelRegistry */
modelProvidersConfig?: ModelProvidersConfig;
}
/**
* Manages model and auth selection with persistence.
* Two-level selection: authType → model
*/
export class ModelSelectionManager {
private modelRegistry: ModelRegistry;
// Current selection state
private currentAuthType: AuthType;
private currentModelId: string;
// Selection metadata for tracking and observability
private selectionSource: SelectionSource = SelectionSource.DEFAULT;
private selectionTimestamp: number = Date.now();
// Callback for model changes
private onModelChange?: ModelChangeCallback;
constructor(options: ModelSelectionManagerOptions = {}) {
// Create ModelRegistry internally - it's an implementation detail
this.modelRegistry = new ModelRegistry(options.modelProvidersConfig);
this.onModelChange = options.onModelChange;
// Initialize from options or use defaults
this.currentAuthType = options.initialAuthType || AuthType.QWEN_OAUTH;
this.currentModelId = options.initialModelId || '';
// Validate and initialize selection
this.initializeDefaultSelection(options);
}
/**
* Initialize default selection
*/
private initializeDefaultSelection(
_options: ModelSelectionManagerOptions,
): void {
// Check if persisted model selection is valid
if (
this.currentModelId &&
this.modelRegistry.hasModel(this.currentAuthType, this.currentModelId)
) {
this.selectionSource = SelectionSource.SETTINGS;
return;
}
// Check environment variables (backward compatibility)
const envModel = this.getModelFromEnvironment();
if (
envModel &&
this.modelRegistry.hasModel(this.currentAuthType, envModel)
) {
this.currentModelId = envModel;
this.selectionSource = SelectionSource.ENVIRONMENT;
return;
}
// Use registry default (first model for current authType)
const defaultModel = this.modelRegistry.getDefaultModelForAuthType(
this.currentAuthType,
);
if (defaultModel) {
this.currentModelId = defaultModel.id;
this.selectionSource = SelectionSource.DEFAULT;
}
}
/**
* Get model from environment variables (backward compatibility)
*/
private getModelFromEnvironment(): string | undefined {
// Support legacy OPENAI_MODEL env var for openai authType
if (this.currentAuthType === AuthType.USE_OPENAI) {
return process.env['OPENAI_MODEL'];
}
return undefined;
}
/**
* Switch model within current authType.
* This updates model name and generation config.
*/
async switchModel(
modelId: string,
source: SelectionSource,
_metadata?: ModelSwitchMetadata,
): Promise<void> {
// Validate model exists for current authType
const model = this.modelRegistry.getModel(this.currentAuthType, modelId);
if (!model) {
throw new Error(
`Model '${modelId}' not found for authType '${this.currentAuthType}'`,
);
}
// Store previous model for rollback if needed
const previousModelId = this.currentModelId;
try {
// Update selection state
this.currentModelId = modelId;
this.selectionSource = source;
this.selectionTimestamp = Date.now();
// Notify about the change
if (this.onModelChange) {
await this.onModelChange(this.currentAuthType, model);
}
} catch (error) {
// Rollback on error
this.currentModelId = previousModelId;
throw error;
}
}
/**
* Get available models for current authType.
* Used by /model command to show only relevant models.
*/
getAvailableModels(): AvailableModel[] {
return this.modelRegistry.getModelsForAuthType(this.currentAuthType);
}
/**
* Get available authTypes.
* Used by /auth command.
*/
getAvailableAuthTypes(): AuthType[] {
return this.modelRegistry.getAvailableAuthTypes();
}
/**
* Get current authType
*/
getCurrentAuthType(): AuthType {
return this.currentAuthType;
}
/**
* Get current model ID
*/
getCurrentModelId(): string {
return this.currentModelId;
}
/**
* Get current model information
*/
getCurrentModel(): CurrentModelInfo {
if (!this.currentModelId) {
throw new Error('No model selected');
}
const model = this.modelRegistry.getModel(
this.currentAuthType,
this.currentModelId,
);
if (!model) {
throw new Error(
`Current model '${this.currentModelId}' not found for authType '${this.currentAuthType}'`,
);
}
return {
authType: this.currentAuthType,
modelId: this.currentModelId,
model,
selectionSource: this.selectionSource,
};
}
/**
* Check if a model exists for the given authType.
* Delegates to ModelRegistry.
*/
hasModel(authType: AuthType, modelId: string): boolean {
return this.modelRegistry.hasModel(authType, modelId);
}
/**
* Get model configuration by authType and modelId.
* Delegates to ModelRegistry.
*/
getModel(
authType: AuthType,
modelId: string,
): ResolvedModelConfig | undefined {
return this.modelRegistry.getModel(authType, modelId);
}
/**
* Get the current selection source
*/
getSelectionSource(): SelectionSource {
return this.selectionSource;
}
/**
* Get the timestamp of when the current selection was made
*/
getSelectionTimestamp(): number {
return this.selectionTimestamp;
}
/**
* Update the onModelChange callback
*/
setOnModelChange(callback: ModelChangeCallback): void {
this.onModelChange = callback;
}
}

View File

@@ -0,0 +1,154 @@
/**
* @license
* Copyright 2025 Qwen Team
* SPDX-License-Identifier: Apache-2.0
*/
import type { AuthType } from '../core/contentGenerator.js';
/**
* Model capabilities configuration
*/
export interface ModelCapabilities {
/** Supports image/vision inputs */
vision?: boolean;
}
/**
* Generation configuration for model sampling parameters
*/
export interface ModelGenerationConfig {
/** Temperature for sampling (0.0 - 2.0) */
temperature?: number;
/** Top-p for nucleus sampling (0.0 - 1.0) */
top_p?: number;
/** Top-k for sampling */
top_k?: number;
/** Maximum output tokens */
max_tokens?: number;
/** Presence penalty (-2.0 - 2.0) */
presence_penalty?: number;
/** Frequency penalty (-2.0 - 2.0) */
frequency_penalty?: number;
/** Repetition penalty (provider-specific) */
repetition_penalty?: number;
/** Request timeout in milliseconds */
timeout?: number;
/** Maximum retry attempts */
maxRetries?: number;
/** Disable cache control for DashScope providers */
disableCacheControl?: boolean;
}
/**
* Model configuration for a single model within an authType
*/
export interface ModelConfig {
/** Unique model ID within authType (e.g., "qwen-coder", "gpt-4-turbo") */
id: string;
/** Display name (defaults to id) */
name?: string;
/** Model description */
description?: string;
/** Environment variable name to read API key from (e.g., "OPENAI_API_KEY") */
envKey?: string;
/** API endpoint override */
baseUrl?: string;
/** Model capabilities */
capabilities?: ModelCapabilities;
/** Generation configuration (sampling parameters) */
generationConfig?: ModelGenerationConfig;
}
/**
* Model providers configuration grouped by authType
*/
export type ModelProvidersConfig = {
[authType: string]: ModelConfig[];
};
/**
* Resolved model config with all defaults applied
*/
export interface ResolvedModelConfig extends ModelConfig {
/** AuthType this model belongs to (always present from map key) */
authType: AuthType;
/** Display name (always present, defaults to id) */
name: string;
/** Environment variable name to read API key from (optional, provider-specific) */
envKey?: string;
/** API base URL (always present, has default per authType) */
baseUrl: string;
/** Generation config (always present, merged with defaults) */
generationConfig: ModelGenerationConfig;
/** Capabilities (always present, defaults to {}) */
capabilities: ModelCapabilities;
}
/**
* Model info for UI display
*/
export interface AvailableModel {
id: string;
label: string;
description?: string;
capabilities?: ModelCapabilities;
authType: AuthType;
isVision?: boolean;
}
/**
* Selection source for tracking and observability.
* This tracks how a model was selected but does not enforce any priority rules.
*/
export enum SelectionSource {
/** Default selection (first model in registry) */
DEFAULT = 'default',
/** From environment variables */
ENVIRONMENT = 'environment',
/** From settings.json */
SETTINGS = 'settings',
/** Programmatic override (e.g., VLM auto-switch, control requests) */
PROGRAMMATIC_OVERRIDE = 'programmatic_override',
/** User explicitly switched via /model command */
USER_MANUAL = 'user_manual',
}
/**
* Metadata for model switch operations
*/
export interface ModelSwitchMetadata {
/** Reason for the switch */
reason?: string;
/** Additional context */
context?: string;
}
/**
* Current model information
*/
export interface CurrentModelInfo {
authType: AuthType;
modelId: string;
model: ResolvedModelConfig;
selectionSource: SelectionSource;
}
/**
* Default generation configuration values
*/
export const DEFAULT_GENERATION_CONFIG: ModelGenerationConfig = {
temperature: 0.7,
top_p: 0.9,
max_tokens: 4096,
timeout: 60000,
maxRetries: 3,
};
/**
* Default base URLs per authType
*/
export const DEFAULT_BASE_URLS: Partial<Record<AuthType, string>> = {
'qwen-oauth': 'https://dashscope.aliyuncs.com/compatible-mode/v1',
openai: 'https://api.openai.com/v1',
};

View File

@@ -13,8 +13,9 @@ npm install @qwen-code/sdk
## Requirements
- Node.js >= 20.0.0
- [Qwen Code](https://github.com/QwenLM/qwen-code) >= 0.4.0 (stable) installed and accessible in PATH
> From v0.1.1, the CLI is bundled with the SDK, so no standalone CLI installation is needed.
> **Note for nvm users**: If you use nvm to manage Node.js versions, the SDK may not be able to auto-detect the Qwen Code executable. You should explicitly set the `pathToQwenExecutable` option to the full path of the `qwen` binary.
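For example, a minimal sketch (the install path below is illustrative — substitute the output of `which qwen` from your nvm-managed shell, and pass the remaining `query` options as shown in the Quick Start below):
```typescript
import { query } from '@qwen-code/sdk';

// Point the SDK at the qwen binary explicitly instead of relying on auto-detection.
const response = query({
  pathToQwenExecutable: '/home/me/.nvm/versions/node/v20.11.0/bin/qwen',
  // ...other query options go here
});
```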
## Quick Start
@@ -371,23 +372,6 @@ try {
}
```
## FAQ / Troubleshooting
### Version 0.1.0 Requirements
If you're using SDK version **0.1.0**, please note the following requirements:
#### Qwen Code Installation Required
Version 0.1.0 requires [Qwen Code](https://github.com/QwenLM/qwen-code) **>= 0.4.0** to be installed separately and accessible in your PATH.
```bash
# Install Qwen Code globally
npm install -g qwen-code@^0.4.0
```
**Note**: From version **0.1.1** onwards, the CLI is bundled with the SDK, so no separate Qwen Code installation is needed.
## License
Apache-2.0 - see [LICENSE](./LICENSE) for details.

View File

@@ -1,6 +1,6 @@
{
"name": "@qwen-code/sdk",
"version": "0.6.0",
"version": "0.5.1",
"description": "TypeScript SDK for programmatic access to qwen-code CLI",
"main": "./dist/index.cjs",
"module": "./dist/index.mjs",
@@ -45,8 +45,7 @@
"node": ">=18.0.0"
},
"dependencies": {
"@modelcontextprotocol/sdk": "^1.0.4",
"tiktoken": "^1.0.21"
"@modelcontextprotocol/sdk": "^1.0.4"
},
"devDependencies": {
"@types/node": "^20.14.0",

View File

@@ -91,35 +91,3 @@ if (existsSync(licenseSource)) {
console.warn('Could not copy LICENSE:', error.message);
}
}
console.log('Bundling CLI into SDK package...');
const repoRoot = join(rootDir, '..', '..');
const rootDistDir = join(repoRoot, 'dist');
if (!existsSync(rootDistDir) || !existsSync(join(rootDistDir, 'cli.js'))) {
console.log('Building CLI bundle...');
try {
execSync('npm run bundle', { stdio: 'inherit', cwd: repoRoot });
} catch (error) {
console.error('Failed to build CLI bundle:', error.message);
throw error;
}
}
const cliDistDir = join(rootDir, 'dist', 'cli');
mkdirSync(cliDistDir, { recursive: true });
console.log('Copying CLI bundle...');
cpSync(join(rootDistDir, 'cli.js'), join(cliDistDir, 'cli.js'));
const vendorSource = join(rootDistDir, 'vendor');
if (existsSync(vendorSource)) {
cpSync(vendorSource, join(cliDistDir, 'vendor'), { recursive: true });
}
const localesSource = join(rootDistDir, 'locales');
if (existsSync(localesSource)) {
cpSync(localesSource, join(cliDistDir, 'locales'), { recursive: true });
}
console.log('CLI bundle copied successfully to SDK package');

View File

@@ -2,16 +2,24 @@
* CLI path auto-detection and subprocess spawning utilities
*
* Supports multiple execution modes:
* 1. Bundled CLI: Node.js bundle included in the SDK package (default)
* 2. Node.js bundle: 'node /path/to/cli.js' (custom path)
* 1. Native binary: 'qwen' (production)
* 2. Node.js bundle: 'node /path/to/cli.js' (production validation)
* 3. Bun bundle: 'bun /path/to/cli.js' (alternative runtime)
* 4. TypeScript source: 'tsx /path/to/index.ts' (development)
*
* Auto-detection locations for native binary:
* 1. QWEN_CODE_CLI_PATH environment variable
* 2. ~/.volta/bin/qwen
* 3. ~/.npm-global/bin/qwen
* 4. /usr/local/bin/qwen
* 5. ~/.local/bin/qwen
* 6. ~/node_modules/.bin/qwen
* 7. ~/.yarn/bin/qwen
*/
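// Example specs (illustrative) for the modes above — any of these forms may be
// passed as the `pathToQwenExecutable` option:
//   '/usr/local/bin/qwen'      // 1. native binary
//   '/path/to/cli.js'          // 2. Node.js bundle (run with the current node)
//   'bun:/path/to/cli.js'      // 3. force the Bun runtime
//   'tsx:/path/to/index.ts'    // 4. TypeScript source (development)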
import * as fs from 'node:fs';
import * as path from 'node:path';
import { execSync } from 'node:child_process';
import { fileURLToPath } from 'node:url';
/**
* Executable types supported by the SDK
@@ -32,38 +40,49 @@ export type SpawnInfo = {
originalInput: string;
};
function getBundledCliPath(): string | null {
try {
const currentFile =
typeof __filename !== 'undefined'
? __filename
: fileURLToPath(import.meta.url);
const currentDir = path.dirname(currentFile);
const bundledCliPath = path.join(currentDir, 'cli', 'cli.js');
if (fs.existsSync(bundledCliPath)) {
return bundledCliPath;
}
return null;
} catch {
return null;
}
}
export function findNativeCliPath(): string {
const bundledCli = getBundledCliPath();
if (bundledCli) {
return bundledCli;
const homeDir = process.env['HOME'] || process.env['USERPROFILE'] || '';
const candidates: Array<string | undefined> = [
// 1. Environment variable (highest priority)
process.env['QWEN_CODE_CLI_PATH'],
// 2. Volta bin
path.join(homeDir, '.volta', 'bin', 'qwen'),
// 3. Global npm installations
path.join(homeDir, '.npm-global', 'bin', 'qwen'),
// 4. Common Unix binary locations
'/usr/local/bin/qwen',
// 5. User local bin
path.join(homeDir, '.local', 'bin', 'qwen'),
// 6. Node modules bin in home directory
path.join(homeDir, 'node_modules', '.bin', 'qwen'),
// 7. Yarn global bin
path.join(homeDir, '.yarn', 'bin', 'qwen'),
];
// Find first existing candidate
for (const candidate of candidates) {
if (candidate && fs.existsSync(candidate)) {
return path.resolve(candidate);
}
}
// Not found - throw helpful error
throw new Error(
'Bundled qwen CLI not found. The CLI should be included in the SDK package.\n' +
'If you need to use a custom CLI, provide explicit executable:\n' +
' query({ pathToQwenExecutable: "/path/to/cli.js" })\n' +
'qwen CLI not found. Please:\n' +
' 1. Install qwen globally: npm install -g qwen\n' +
' 2. Or provide explicit executable: query({ pathToQwenExecutable: "/path/to/qwen" })\n' +
' 3. Or set environment variable: QWEN_CODE_CLI_PATH="/path/to/qwen"\n' +
'\n' +
'For development/testing, you can also use:\n' +
' • TypeScript source: query({ pathToQwenExecutable: "/path/to/index.ts" })\n' +
' • Node.js bundle: query({ pathToQwenExecutable: "/path/to/cli.js" })\n' +
' • Force specific runtime: query({ pathToQwenExecutable: "bun:/path/to/cli.js" })',
);
}

View File

@@ -38,8 +38,6 @@ describe('CLI Path Utilities', () => {
mockFs.statSync.mockReturnValue({
isFile: () => true,
} as ReturnType<typeof import('fs').statSync>);
// Default: return true for existsSync (can be overridden in specific tests)
mockFs.existsSync.mockReturnValue(true);
});
afterEach(() => {
@@ -52,26 +50,28 @@ describe('CLI Path Utilities', () => {
describe('parseExecutableSpec', () => {
describe('auto-detection (no spec provided)', () => {
it('should auto-detect bundled CLI when no spec provided', () => {
// Mock existsSync to return true for bundled CLI
mockFs.existsSync.mockImplementation((p) => {
const pathStr = p.toString();
return (
pathStr.includes('cli/cli.js') || pathStr.includes('cli\\cli.js')
);
});
it('should auto-detect native CLI when no spec provided', () => {
// Mock environment variable
const originalEnv = process.env['QWEN_CODE_CLI_PATH'];
process.env['QWEN_CODE_CLI_PATH'] = '/usr/local/bin/qwen';
mockFs.existsSync.mockReturnValue(true);
const result = parseExecutableSpec();
expect(result.executablePath).toContain('cli.js');
expect(result.isExplicitRuntime).toBe(false);
expect(result).toEqual({
executablePath: path.resolve('/usr/local/bin/qwen'),
isExplicitRuntime: false,
});
// Restore env
process.env['QWEN_CODE_CLI_PATH'] = originalEnv;
});
it('should throw when bundled CLI not found', () => {
it('should throw when auto-detection fails', () => {
mockFs.existsSync.mockReturnValue(false);
expect(() => parseExecutableSpec()).toThrow(
'Bundled qwen CLI not found',
'qwen CLI not found. Please:',
);
});
});
@@ -361,44 +361,65 @@ describe('CLI Path Utilities', () => {
});
describe('auto-detection fallback', () => {
it('should auto-detect bundled CLI when no spec provided', () => {
// Mock existsSync to return true for bundled CLI
mockFs.existsSync.mockImplementation((p) => {
const pathStr = p.toString();
return (
pathStr.includes('cli/cli.js') || pathStr.includes('cli\\cli.js')
);
});
it('should auto-detect when no spec provided', () => {
// Mock environment variable
const originalEnv = process.env['QWEN_CODE_CLI_PATH'];
process.env['QWEN_CODE_CLI_PATH'] = '/usr/local/bin/qwen';
const result = prepareSpawnInfo();
expect(result.command).toBe(process.execPath);
expect(result.args[0]).toContain('cli.js');
expect(result.type).toBe('node');
expect(result.originalInput).toBe('');
expect(result).toEqual({
command: path.resolve('/usr/local/bin/qwen'),
args: [],
type: 'native',
originalInput: '',
});
// Restore env
process.env['QWEN_CODE_CLI_PATH'] = originalEnv;
});
});
});
describe('findNativeCliPath', () => {
it('should find bundled CLI', () => {
// Mock existsSync to return true for bundled CLI
it('should find CLI from environment variable', () => {
const originalEnv = process.env['QWEN_CODE_CLI_PATH'];
process.env['QWEN_CODE_CLI_PATH'] = '/custom/path/to/qwen';
mockFs.existsSync.mockReturnValue(true);
const result = findNativeCliPath();
expect(result).toBe(path.resolve('/custom/path/to/qwen'));
process.env['QWEN_CODE_CLI_PATH'] = originalEnv;
});
it('should search common installation locations', () => {
const originalEnv = process.env['QWEN_CODE_CLI_PATH'];
delete process.env['QWEN_CODE_CLI_PATH'];
// Mock fs.existsSync to return true for volta bin
// Use path.join to match platform-specific path separators
const voltaBinPath = path.join('.volta', 'bin', 'qwen');
mockFs.existsSync.mockImplementation((p) => {
const pathStr = p.toString();
return (
pathStr.includes('cli/cli.js') || pathStr.includes('cli\\cli.js')
);
return p.toString().includes(voltaBinPath);
});
const result = findNativeCliPath();
expect(result).toContain('cli.js');
expect(result).toContain(voltaBinPath);
process.env['QWEN_CODE_CLI_PATH'] = originalEnv;
});
it('should throw descriptive error when bundled CLI not found', () => {
it('should throw descriptive error when CLI not found', () => {
const originalEnv = process.env['QWEN_CODE_CLI_PATH'];
delete process.env['QWEN_CODE_CLI_PATH'];
mockFs.existsSync.mockReturnValue(false);
expect(() => findNativeCliPath()).toThrow('Bundled qwen CLI not found');
expect(() => findNativeCliPath()).toThrow('qwen CLI not found. Please:');
process.env['QWEN_CODE_CLI_PATH'] = originalEnv;
});
});
@@ -613,10 +634,13 @@ describe('CLI Path Utilities', () => {
mockFs.existsSync.mockReturnValue(false);
expect(() => parseExecutableSpec('/missing/file')).toThrow(
'Executable file not found at',
'Set QWEN_CODE_CLI_PATH environment variable',
);
expect(() => parseExecutableSpec('/missing/file')).toThrow(
'Please check the file path and ensure the file exists',
'Install qwen globally: npm install -g qwen',
);
expect(() => parseExecutableSpec('/missing/file')).toThrow(
'Force specific runtime: bun:/path/to/cli.js or tsx:/path/to/index.ts',
);
});
});

View File

@@ -1,6 +1,6 @@
{
"name": "@qwen-code/qwen-code-test-utils",
"version": "0.6.0",
"version": "0.5.1",
"private": true,
"main": "src/index.ts",
"license": "Apache-2.0",

View File

@@ -2,7 +2,7 @@
"name": "qwen-code-vscode-ide-companion",
"displayName": "Qwen Code Companion",
"description": "Enable Qwen Code with direct access to your VS Code workspace.",
"version": "0.6.0",
"version": "0.5.1",
"publisher": "qwenlm",
"icon": "assets/icon.png",
"repository": {
@@ -11,7 +11,7 @@
"directory": "packages/vscode-ide-companion"
},
"engines": {
"vscode": "^1.85.0"
"vscode": "^1.99.0"
},
"license": "LICENSE",
"preview": true,
@@ -137,7 +137,7 @@
"@types/react": "^19.1.8",
"@types/react-dom": "^19.1.6",
"@types/semver": "^7.7.1",
"@types/vscode": "^1.85.0",
"@types/vscode": "^1.99.0",
"@typescript-eslint/eslint-plugin": "^8.31.1",
"@typescript-eslint/parser": "^8.31.1",
"@vscode/vsce": "^3.6.0",

View File

@@ -35,7 +35,7 @@ function npmBin() {
function run(cmd, args, opts = {}) {
const res = spawnSync(cmd, args, {
stdio: 'inherit',
shell: process.platform === 'win32',
shell: process.platform === 'win32' ? true : false,
...opts,
});
if (res.error) {

View File

@@ -27,14 +27,13 @@ vi.mock('node:fs/promises', () => ({
writeFile: vi.fn(() => Promise.resolve(undefined)),
unlink: vi.fn(() => Promise.resolve(undefined)),
chmod: vi.fn(() => Promise.resolve(undefined)),
mkdir: vi.fn(() => Promise.resolve(undefined)),
}));
vi.mock('node:os', async (importOriginal) => {
const actual = await importOriginal<typeof os>();
return {
...actual,
homedir: vi.fn(() => '/home/test'),
tmpdir: vi.fn(() => '/tmp'),
};
});
@@ -129,24 +128,30 @@ describe('IDEServer', () => {
);
const port = getPortFromMock(replaceMock);
const expectedLockFile = path.join(
'/home/test',
'.qwen',
'ide',
`${port}.lock`,
const expectedPortFile = path.join(
'/tmp',
`qwen-code-ide-server-${port}.json`,
);
const expectedPpidPortFile = path.join(
'/tmp',
`qwen-code-ide-server-${process.ppid}.json`,
);
const expectedContent = JSON.stringify({
port: parseInt(port, 10),
workspacePath: expectedWorkspacePaths,
ppid: process.ppid,
authToken: 'test-auth-token',
ideName: 'VS Code',
});
expect(fs.writeFile).toHaveBeenCalledWith(
expectedLockFile,
expectedPortFile,
expectedContent,
);
expect(fs.chmod).toHaveBeenCalledWith(expectedLockFile, 0o600);
expect(fs.writeFile).toHaveBeenCalledWith(
expectedPpidPortFile,
expectedContent,
);
expect(fs.chmod).toHaveBeenCalledWith(expectedPortFile, 0o600);
expect(fs.chmod).toHaveBeenCalledWith(expectedPpidPortFile, 0o600);
});
it('should set a single folder path', async () => {
@@ -161,24 +166,30 @@ describe('IDEServer', () => {
);
const port = getPortFromMock(replaceMock);
const expectedLockFile = path.join(
'/home/test',
'.qwen',
'ide',
`${port}.lock`,
const expectedPortFile = path.join(
'/tmp',
`qwen-code-ide-server-${port}.json`,
);
const expectedPpidPortFile = path.join(
'/tmp',
`qwen-code-ide-server-${process.ppid}.json`,
);
const expectedContent = JSON.stringify({
port: parseInt(port, 10),
workspacePath: '/foo/bar',
ppid: process.ppid,
authToken: 'test-auth-token',
ideName: 'VS Code',
});
expect(fs.writeFile).toHaveBeenCalledWith(
expectedLockFile,
expectedPortFile,
expectedContent,
);
expect(fs.chmod).toHaveBeenCalledWith(expectedLockFile, 0o600);
expect(fs.writeFile).toHaveBeenCalledWith(
expectedPpidPortFile,
expectedContent,
);
expect(fs.chmod).toHaveBeenCalledWith(expectedPortFile, 0o600);
expect(fs.chmod).toHaveBeenCalledWith(expectedPpidPortFile, 0o600);
});
it('should set an empty string if no folders are open', async () => {
@@ -193,24 +204,30 @@ describe('IDEServer', () => {
);
const port = getPortFromMock(replaceMock);
const expectedLockFile = path.join(
'/home/test',
'.qwen',
'ide',
`${port}.lock`,
const expectedPortFile = path.join(
'/tmp',
`qwen-code-ide-server-${port}.json`,
);
const expectedPpidPortFile = path.join(
'/tmp',
`qwen-code-ide-server-${process.ppid}.json`,
);
const expectedContent = JSON.stringify({
port: parseInt(port, 10),
workspacePath: '',
ppid: process.ppid,
authToken: 'test-auth-token',
ideName: 'VS Code',
});
expect(fs.writeFile).toHaveBeenCalledWith(
expectedLockFile,
expectedPortFile,
expectedContent,
);
expect(fs.chmod).toHaveBeenCalledWith(expectedLockFile, 0o600);
expect(fs.writeFile).toHaveBeenCalledWith(
expectedPpidPortFile,
expectedContent,
);
expect(fs.chmod).toHaveBeenCalledWith(expectedPortFile, 0o600);
expect(fs.chmod).toHaveBeenCalledWith(expectedPpidPortFile, 0o600);
});
it('should update the path when workspace folders change', async () => {
@@ -239,24 +256,30 @@ describe('IDEServer', () => {
);
const port = getPortFromMock(replaceMock);
const expectedLockFile = path.join(
'/home/test',
'.qwen',
'ide',
`${port}.lock`,
const expectedPortFile = path.join(
'/tmp',
`qwen-code-ide-server-${port}.json`,
);
const expectedPpidPortFile = path.join(
'/tmp',
`qwen-code-ide-server-${process.ppid}.json`,
);
const expectedContent = JSON.stringify({
port: parseInt(port, 10),
workspacePath: expectedWorkspacePaths,
ppid: process.ppid,
authToken: 'test-auth-token',
ideName: 'VS Code',
});
expect(fs.writeFile).toHaveBeenCalledWith(
expectedLockFile,
expectedPortFile,
expectedContent,
);
expect(fs.chmod).toHaveBeenCalledWith(expectedLockFile, 0o600);
expect(fs.writeFile).toHaveBeenCalledWith(
expectedPpidPortFile,
expectedContent,
);
expect(fs.chmod).toHaveBeenCalledWith(expectedPortFile, 0o600);
expect(fs.chmod).toHaveBeenCalledWith(expectedPpidPortFile, 0o600);
// Simulate removing a folder
vscodeMock.workspace.workspaceFolders = [{ uri: { fsPath: '/baz/qux' } }];
@@ -271,26 +294,36 @@ describe('IDEServer', () => {
workspacePath: '/baz/qux',
ppid: process.ppid,
authToken: 'test-auth-token',
ideName: 'VS Code',
});
expect(fs.writeFile).toHaveBeenCalledWith(
expectedLockFile,
expectedPortFile,
expectedContent2,
);
expect(fs.chmod).toHaveBeenCalledWith(expectedLockFile, 0o600);
expect(fs.writeFile).toHaveBeenCalledWith(
expectedPpidPortFile,
expectedContent2,
);
expect(fs.chmod).toHaveBeenCalledWith(expectedPortFile, 0o600);
expect(fs.chmod).toHaveBeenCalledWith(expectedPpidPortFile, 0o600);
});
it('should clear env vars and delete lock file on stop', async () => {
it('should clear env vars and delete port file on stop', async () => {
await ideServer.start(mockContext);
const replaceMock = mockContext.environmentVariableCollection.replace;
const port = getPortFromMock(replaceMock);
const lockFile = path.join('/home/test', '.qwen', 'ide', `${port}.lock`);
expect(fs.writeFile).toHaveBeenCalledWith(lockFile, expect.any(String));
const portFile = path.join('/tmp', `qwen-code-ide-server-${port}.json`);
const ppidPortFile = path.join(
'/tmp',
`qwen-code-ide-server-${process.ppid}.json`,
);
expect(fs.writeFile).toHaveBeenCalledWith(portFile, expect.any(String));
expect(fs.writeFile).toHaveBeenCalledWith(ppidPortFile, expect.any(String));
await ideServer.stop();
expect(mockContext.environmentVariableCollection.clear).toHaveBeenCalled();
expect(fs.unlink).toHaveBeenCalledWith(lockFile);
expect(fs.unlink).toHaveBeenCalledWith(portFile);
expect(fs.unlink).toHaveBeenCalledWith(ppidPortFile);
});
it.skipIf(process.platform !== 'win32')(
@@ -311,24 +344,30 @@ describe('IDEServer', () => {
);
const port = getPortFromMock(replaceMock);
const expectedLockFile = path.join(
'/home/test',
'.qwen',
'ide',
`${port}.lock`,
const expectedPortFile = path.join(
'/tmp',
`qwen-code-ide-server-${port}.json`,
);
const expectedPpidPortFile = path.join(
'/tmp',
`qwen-code-ide-server-${process.ppid}.json`,
);
const expectedContent = JSON.stringify({
port: parseInt(port, 10),
workspacePath: expectedWorkspacePaths,
ppid: process.ppid,
authToken: 'test-auth-token',
ideName: 'VS Code',
});
expect(fs.writeFile).toHaveBeenCalledWith(
expectedLockFile,
expectedPortFile,
expectedContent,
);
expect(fs.chmod).toHaveBeenCalledWith(expectedLockFile, 0o600);
expect(fs.writeFile).toHaveBeenCalledWith(
expectedPpidPortFile,
expectedContent,
);
expect(fs.chmod).toHaveBeenCalledWith(expectedPortFile, 0o600);
expect(fs.chmod).toHaveBeenCalledWith(expectedPpidPortFile, 0o600);
},
);
@@ -340,7 +379,7 @@ describe('IDEServer', () => {
port = (ideServer as unknown as { port: number }).port;
});
it('should reject request without auth token', async () => {
it('should allow request without auth token for backwards compatibility', async () => {
const response = await fetch(`http://localhost:${port}/mcp`, {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
@@ -351,9 +390,7 @@ describe('IDEServer', () => {
id: 1,
}),
});
expect(response.status).toBe(401);
const body = await response.text();
expect(body).toBe('Unauthorized');
expect(response.status).not.toBe(401);
});
it('should allow request with valid auth token', async () => {
@@ -513,7 +550,6 @@ describe('IDEServer HTTP endpoints', () => {
headers: {
Host: `localhost:${port}`,
'Content-Type': 'application/json',
Authorization: 'Bearer test-auth-token',
},
},
JSON.stringify({ jsonrpc: '2.0', method: 'initialize' }),

View File

@@ -10,7 +10,6 @@ import {
IdeContextNotificationSchema,
OpenDiffRequestSchema,
} from '@qwen-code/qwen-code-core/src/ide/types.js';
import { detectIdeFromEnv } from '@qwen-code/qwen-code-core/src/ide/detect-ide.js';
import { isInitializeRequest } from '@modelcontextprotocol/sdk/types.js';
import { McpServer } from '@modelcontextprotocol/sdk/server/mcp.js';
import { StreamableHTTPServerTransport } from '@modelcontextprotocol/sdk/server/streamableHttp.js';
@@ -39,24 +38,12 @@ class CORSError extends Error {
const MCP_SESSION_ID_HEADER = 'mcp-session-id';
const IDE_SERVER_PORT_ENV_VAR = 'QWEN_CODE_IDE_SERVER_PORT';
const IDE_WORKSPACE_PATH_ENV_VAR = 'QWEN_CODE_IDE_WORKSPACE_PATH';
const QWEN_DIR = '.qwen';
const IDE_DIR = 'ide';
async function getGlobalIdeDir(): Promise<string> {
const homeDir = os.homedir();
// Prefer home dir, but fall back to tmpdir if unavailable (matches core Storage behavior).
const baseDir = homeDir
? path.join(homeDir, QWEN_DIR)
: path.join(os.tmpdir(), QWEN_DIR);
const ideDir = path.join(baseDir, IDE_DIR);
await fs.mkdir(ideDir, { recursive: true });
return ideDir;
}
interface WritePortAndWorkspaceArgs {
context: vscode.ExtensionContext;
port: number;
lockFile: string;
portFile: string;
ppidPortFile: string;
authToken: string;
log: (message: string) => void;
}
@@ -64,7 +51,8 @@ interface WritePortAndWorkspaceArgs {
async function writePortAndWorkspace({
context,
port,
lockFile,
portFile,
ppidPortFile,
authToken,
log,
}: WritePortAndWorkspaceArgs): Promise<void> {
@@ -83,24 +71,26 @@ async function writePortAndWorkspace({
workspacePath,
);
const ideInfo = detectIdeFromEnv();
const content = JSON.stringify({
port,
workspacePath,
ppid: process.ppid,
authToken,
ideName: ideInfo.displayName,
});
log(`Writing IDE lock file to: ${lockFile}`);
log(`Writing port file to: ${portFile}`);
log(`Writing ppid port file to: ${ppidPortFile}`);
try {
await fs.mkdir(path.dirname(lockFile), { recursive: true });
await fs.writeFile(lockFile, content);
await fs.chmod(lockFile, 0o600);
await Promise.all([
fs.writeFile(portFile, content).then(() => fs.chmod(portFile, 0o600)),
fs
.writeFile(ppidPortFile, content)
.then(() => fs.chmod(ppidPortFile, 0o600)),
]);
} catch (err) {
const message = err instanceof Error ? err.message : String(err);
log(`Failed to write IDE lock file: ${message}`);
log(`Failed to write port to file: ${message}`);
}
}
@@ -131,7 +121,8 @@ export class IDEServer {
private server: HTTPServer | undefined;
private context: vscode.ExtensionContext | undefined;
private log: (message: string) => void;
private lockFile: string | undefined;
private portFile: string | undefined;
private ppidPortFile: string | undefined;
private port: number | undefined;
private authToken: string | undefined;
private transports: { [sessionId: string]: StreamableHTTPServerTransport } =
@@ -183,24 +174,19 @@ export class IDEServer {
app.use((req, res, next) => {
const authHeader = req.headers.authorization;
if (!authHeader) {
this.log('Missing Authorization header. Rejecting request.');
res.status(401).send('Unauthorized');
return;
}
const parts = authHeader.split(' ');
if (parts.length !== 2 || parts[0] !== 'Bearer') {
this.log('Malformed Authorization header. Rejecting request.');
res.status(401).send('Unauthorized');
return;
}
const token = parts[1];
if (token !== this.authToken) {
this.log('Invalid auth token provided. Rejecting request.');
res.status(401).send('Unauthorized');
return;
if (authHeader) {
const parts = authHeader.split(' ');
if (parts.length !== 2 || parts[0] !== 'Bearer') {
this.log('Malformed Authorization header. Rejecting request.');
res.status(401).send('Unauthorized');
return;
}
const token = parts[1];
if (token !== this.authToken) {
this.log('Invalid auth token provided. Rejecting request.');
res.status(401).send('Unauthorized');
return;
}
}
next();
});
@@ -341,21 +327,22 @@ export class IDEServer {
const address = (this.server as HTTPServer).address();
if (address && typeof address !== 'string') {
this.port = address.port;
try {
const ideDir = await getGlobalIdeDir();
// Name the lock file by port to support multiple server instances.
this.lockFile = path.join(ideDir, `${this.port}.lock`);
} catch (err) {
const message = err instanceof Error ? err.message : String(err);
this.log(`Failed to determine IDE lock directory: ${message}`);
}
this.portFile = path.join(
os.tmpdir(),
`qwen-code-ide-server-${this.port}.json`,
);
this.ppidPortFile = path.join(
os.tmpdir(),
`qwen-code-ide-server-${process.ppid}.json`,
);
this.log(`IDE server listening on http://127.0.0.1:${this.port}`);
if (this.authToken && this.lockFile) {
if (this.authToken) {
await writePortAndWorkspace({
context,
port: this.port,
lockFile: this.lockFile,
portFile: this.portFile,
ppidPortFile: this.ppidPortFile,
authToken: this.authToken,
log: this.log,
});
@@ -384,13 +371,15 @@ export class IDEServer {
this.context &&
this.server &&
this.port &&
this.lockFile &&
this.portFile &&
this.ppidPortFile &&
this.authToken
) {
await writePortAndWorkspace({
context: this.context,
port: this.port,
lockFile: this.lockFile,
portFile: this.portFile,
ppidPortFile: this.ppidPortFile,
authToken: this.authToken,
log: this.log,
});
@@ -416,9 +405,16 @@ export class IDEServer {
if (this.context) {
this.context.environmentVariableCollection.clear();
}
if (this.lockFile) {
if (this.portFile) {
try {
await fs.unlink(this.lockFile);
await fs.unlink(this.portFile);
} catch (_err) {
// Ignore errors if the file doesn't exist.
}
}
if (this.ppidPortFile) {
try {
await fs.unlink(this.ppidPortFile);
} catch (_err) {
// Ignore errors if the file doesn't exist.
}

View File

@@ -54,31 +54,27 @@ export class AcpSessionManager {
};
return new Promise((resolve, reject) => {
// No timeout for session_prompt as LLM tasks can take 5-10 minutes or longer
// The request should always terminate with a stop_reason
let timeoutId: NodeJS.Timeout | undefined;
let timeoutDuration: number | undefined;
if (method !== AGENT_METHODS.session_prompt) {
// Set timeout for other methods
timeoutDuration = method === AGENT_METHODS.initialize ? 120000 : 60000;
timeoutId = setTimeout(() => {
pendingRequests.delete(id);
reject(new Error(`Request ${method} timed out`));
}, timeoutDuration);
// Use different timeout durations depending on the method
let timeoutDuration = 60000; // default 60 seconds
if (
method === AGENT_METHODS.session_prompt ||
method === AGENT_METHODS.initialize
) {
timeoutDuration = 120000; // 2min for session_prompt and initialize
}
const timeoutId = setTimeout(() => {
pendingRequests.delete(id);
reject(new Error(`Request ${method} timed out`));
}, timeoutDuration);
const pendingRequest: PendingRequest<T> = {
resolve: (value: T) => {
if (timeoutId) {
clearTimeout(timeoutId);
}
clearTimeout(timeoutId);
resolve(value);
},
reject: (error: Error) => {
if (timeoutId) {
clearTimeout(timeoutId);
}
clearTimeout(timeoutId);
reject(error);
},
timeoutId,

View File

@@ -144,7 +144,10 @@ export const InputForm: React.FC<InputFormProps> = ({
: '';
return (
<div className="p-1 px-4 pb-4 absolute bottom-0 left-0 right-0 bg-gradient-to-b from-transparent to-[var(--app-primary-background)]">
<div
className="p-1 px-4 pb-4 absolute bottom-0 left-0 right-0"
style={{ backgroundColor: 'var(--app-primary-background)' }}
>
<div className="block">
<form className="composer-form" onSubmit={onSubmit}>
{/* Inner background layer */}

View File

@@ -152,24 +152,6 @@ export class SessionMessageHandler extends BaseMessageHandler {
this.currentStreamContent = '';
}
/**
* Notify the webview that streaming has finished.
*/
private sendStreamEnd(reason?: string): void {
const data: { timestamp: number; reason?: string } = {
timestamp: Date.now(),
};
if (reason) {
data.reason = reason;
}
this.sendToWebView({
type: 'streamEnd',
data,
});
}
/**
* Prompt user to login and invoke the registered login handler/command.
* Returns true if a login was initiated.
@@ -391,7 +373,10 @@ export class SessionMessageHandler extends BaseMessageHandler {
);
}
this.sendStreamEnd();
this.sendToWebView({
type: 'streamEnd',
data: { timestamp: Date.now() },
});
} catch (error) {
console.error('[SessionMessageHandler] Error sending message:', error);
@@ -413,7 +398,10 @@ export class SessionMessageHandler extends BaseMessageHandler {
if (isAbortLike) {
// Do not show VS Code error popup for intentional cancellations.
// Ensure the webview knows the stream ended due to user action.
this.sendStreamEnd('user_cancelled');
this.sendToWebView({
type: 'streamEnd',
data: { timestamp: Date.now(), reason: 'user_cancelled' },
});
return;
}
// Check for session not found error and handle it appropriately
@@ -435,39 +423,12 @@ export class SessionMessageHandler extends BaseMessageHandler {
type: 'sessionExpired',
data: { message: 'Session expired. Please login again.' },
});
this.sendStreamEnd('session_expired');
} else {
const isTimeoutError =
lower.includes('timeout') || lower.includes('timed out');
if (isTimeoutError) {
// Note: session_prompt no longer has a timeout, so this should rarely occur
// This path may still be hit for other methods (initialize, etc.) or network-level timeouts
console.warn(
'[SessionMessageHandler] Request timed out; suppressing popup',
);
const timeoutMessage: ChatMessage = {
role: 'assistant',
content:
'Request timed out. This may be due to a network issue. Please try again.',
timestamp: Date.now(),
};
// Send a timeout message to the WebView
this.sendToWebView({
type: 'message',
data: timeoutMessage,
});
this.sendStreamEnd('timeout');
} else {
// Handling of Non-Timeout Errors
vscode.window.showErrorMessage(`Error sending message: ${error}`);
this.sendToWebView({
type: 'error',
data: { message: errorMsg },
});
this.sendStreamEnd('error');
}
vscode.window.showErrorMessage(`Error sending message: ${error}`);
this.sendToWebView({
type: 'error',
data: { message: errorMsg },
});
}
}
}

View File

@@ -15,14 +15,6 @@ import type { ToolCallUpdate } from '../../types/chatTypes.js';
import type { ApprovalModeValue } from '../../types/approvalModeValueTypes.js';
import type { PlanEntry } from '../../types/chatTypes.js';
const FORCE_CLEAR_STREAM_END_REASONS = new Set([
'user_cancelled',
'cancelled',
'timeout',
'error',
'session_expired',
]);
interface UseWebViewMessagesProps {
// Session management
sessionManagement: {
@@ -372,12 +364,12 @@ export const useWebViewMessages = ({
).toLowerCase();
/**
* Handle different types of stream end reasons that require a full reset:
* - 'user_cancelled' / 'cancelled': user explicitly cancelled
* - 'timeout' / 'error' / 'session_expired': request failed unexpectedly
* For these cases, immediately clear all active states.
* Handle different types of stream end reasons:
* - 'user_cancelled': User explicitly cancelled operation
* - 'cancelled': General cancellation
* For these cases, immediately clear all active states
*/
if (FORCE_CLEAR_STREAM_END_REASONS.has(reason)) {
if (reason === 'user_cancelled' || reason === 'cancelled') {
// Clear active execution tool call tracking, reset state
activeExecToolCallsRef.current.clear();
// Clear waiting response state to ensure UI returns to normal
@@ -401,9 +393,6 @@ export const useWebViewMessages = ({
}
case 'error':
handlers.messageHandling.endStreaming();
handlers.messageHandling.clearThinking();
activeExecToolCallsRef.current.clear();
handlers.messageHandling.clearWaitingForResponse();
break;

View File

@@ -43,7 +43,7 @@
/* Composer: form wrapper */
.composer-form {
@apply relative flex flex-col max-w-[680px] mx-auto rounded-large border shadow-sm transition-colors duration-200 z-[1];
@apply relative flex flex-col max-w-[680px] mx-auto rounded-large border shadow-sm transition-colors duration-200;
background: var(--app-input-secondary-background);
border-color: var(--app-input-border);
color: var(--app-input-foreground);