Compare commits


1 Commit

Author SHA1 Message Date
github-actions[bot] 54cf454dc7 chore(release): v0.6.0-preview.3 2026-01-07 00:16:55 +00:00
150 changed files with 2381 additions and 9244 deletions

.github/CODEOWNERS vendored (3 lines changed)

@@ -1,3 +0,0 @@
* @tanzhenxin @DennisYu07 @gwinthis @LaZzyMan @pomelo-nwu @Mingholy
# SDK TypeScript package changes require review from Mingholy
packages/sdk-typescript/** @Mingholy


@@ -241,7 +241,7 @@ jobs:
${{ steps.vars.outputs.is_dry_run == 'false' && steps.vars.outputs.is_nightly == 'false' && steps.vars.outputs.is_preview == 'false' }}
id: 'pr'
env:
GITHUB_TOKEN: '${{ secrets.CI_BOT_PAT }}'
GITHUB_TOKEN: '${{ secrets.GITHUB_TOKEN }}'
RELEASE_BRANCH: '${{ steps.release_branch.outputs.BRANCH_NAME }}'
RELEASE_TAG: '${{ steps.version.outputs.RELEASE_TAG }}'
run: |-
@@ -258,15 +258,26 @@ jobs:
echo "PR_URL=${pr_url}" >> "${GITHUB_OUTPUT}"
- name: 'Wait for CI checks to complete'
if: |-
${{ steps.vars.outputs.is_dry_run == 'false' && steps.vars.outputs.is_nightly == 'false' && steps.vars.outputs.is_preview == 'false' }}
env:
GITHUB_TOKEN: '${{ secrets.GITHUB_TOKEN }}'
PR_URL: '${{ steps.pr.outputs.PR_URL }}'
run: |-
set -euo pipefail
echo "Waiting for CI checks to complete..."
gh pr checks "${PR_URL}" --watch --interval 30
- name: 'Enable auto-merge for release PR'
if: |-
${{ steps.vars.outputs.is_dry_run == 'false' && steps.vars.outputs.is_nightly == 'false' && steps.vars.outputs.is_preview == 'false' }}
env:
GITHUB_TOKEN: '${{ secrets.CI_BOT_PAT }}'
GITHUB_TOKEN: '${{ secrets.GITHUB_TOKEN }}'
PR_URL: '${{ steps.pr.outputs.PR_URL }}'
run: |-
set -euo pipefail
gh pr merge "${PR_URL}" --merge --auto --delete-branch
gh pr merge "${PR_URL}" --merge --auto
- name: 'Create Issue on Failure'
if: |-


@@ -25,7 +25,7 @@ Qwen Code is an open-source AI agent for the terminal, optimized for [Qwen3-Code
- **OpenAI-compatible, OAuth free tier**: use an OpenAI-compatible API, or sign in with Qwen OAuth to get 2,000 free requests/day.
- **Open-source, co-evolving**: both the framework and the Qwen3-Coder model are open-source—and they ship and evolve together.
- **Agentic workflow, feature-rich**: rich built-in tools (Skills, SubAgents, Plan Mode) for a full agentic workflow and a Claude Code-like experience.
- **Terminal-first, IDE-friendly**: built for developers who live in the command line, with optional integration for VS Code, Zed, and JetBrains IDEs.
- **Terminal-first, IDE-friendly**: built for developers who live in the command line, with optional integration for VS Code and Zed.
## Installation
@@ -137,11 +137,10 @@ Use `-p` to run Qwen Code without the interactive UI—ideal for scripts, automa
#### IDE integration
Use Qwen Code inside your editor (VS Code, Zed, and JetBrains IDEs):
Use Qwen Code inside your editor (VS Code and Zed):
- [Use in VS Code](https://qwenlm.github.io/qwen-code-docs/en/users/integration-vscode/)
- [Use in Zed](https://qwenlm.github.io/qwen-code-docs/en/users/integration-zed/)
- [Use in JetBrains IDEs](https://qwenlm.github.io/qwen-code-docs/en/users/integration-jetbrains/)
#### TypeScript SDK


@@ -12,7 +12,6 @@ export default {
},
'integration-vscode': 'Visual Studio Code',
'integration-zed': 'Zed IDE',
'integration-jetbrains': 'JetBrains IDEs',
'integration-github-action': 'Github Actions',
'Code with Qwen Code': {
type: 'separator',


@@ -136,95 +136,6 @@ Settings are organized into categories. All settings should be placed within the
- `"./custom-logs"` - Logs to `./custom-logs` relative to current directory
- `"/tmp/openai-logs"` - Logs to absolute path `/tmp/openai-logs`
#### modelProviders
Use `modelProviders` to declare curated model lists per auth type that the `/model` picker can switch between. Keys must be valid auth types (`openai`, `anthropic`, `gemini`, `vertex-ai`, etc.). Each entry requires an `id` and **must include `envKey`**, with optional `name`, `description`, `baseUrl`, and `generationConfig`. Credentials are never persisted in settings; the runtime reads them from `process.env[envKey]`. Qwen OAuth models remain hard-coded and cannot be overridden.
##### Example
```json
{
"modelProviders": {
"openai": [
{
"id": "gpt-4o",
"name": "GPT-4o",
"envKey": "OPENAI_API_KEY",
"baseUrl": "https://api.openai.com/v1",
"generationConfig": {
"timeout": 60000,
"maxRetries": 3,
"samplingParams": { "temperature": 0.2 }
}
}
],
"anthropic": [
{
"id": "claude-3-5-sonnet",
"envKey": "ANTHROPIC_API_KEY",
"baseUrl": "https://api.anthropic.com/v1"
}
],
"gemini": [
{
"id": "gemini-2.0-flash",
"name": "Gemini 2.0 Flash",
"envKey": "GEMINI_API_KEY",
"baseUrl": "https://generativelanguage.googleapis.com"
}
],
"vertex-ai": [
{
"id": "gemini-1.5-pro-vertex",
"envKey": "GOOGLE_API_KEY",
"baseUrl": "https://generativelanguage.googleapis.com"
}
]
}
}
```
> [!note]
> Only the `/model` command exposes non-default auth types. Anthropic, Gemini, Vertex AI, etc., must be defined via `modelProviders`. The `/auth` command intentionally lists only the built-in Qwen OAuth and OpenAI flows.
##### Resolution layers and atomicity
The effective auth/model/credential values are chosen per field using the following precedence (first present wins). You can combine `--auth-type` with `--model` to point directly at a provider entry; these CLI flags are processed before the other layers, so they determine which provider entry becomes active.
| Layer (highest → lowest) | authType | model | apiKey | baseUrl | apiKeyEnvKey | proxy |
| -------------------------- | ----------------------------------- | ----------------------------------------------- | --------------------------------------------------- | ---------------------------------------------------- | ---------------------- | --------------------------------- |
| Programmatic overrides | `/auth ` | `/auth` input | `/auth` input | `/auth` input | — | — |
| Model provider selection | — | `modelProvider.id` | `env[modelProvider.envKey]` | `modelProvider.baseUrl` | `modelProvider.envKey` | — |
| CLI arguments | `--auth-type` | `--model` | `--openaiApiKey` (or provider-specific equivalents) | `--openaiBaseUrl` (or provider-specific equivalents) | — | — |
| Environment variables | — | Provider-specific mapping (e.g. `OPENAI_MODEL`) | Provider-specific mapping (e.g. `OPENAI_API_KEY`) | Provider-specific mapping (e.g. `OPENAI_BASE_URL`) | — | — |
| Settings (`settings.json`) | `security.auth.selectedType` | `model.name` | `security.auth.apiKey` | `security.auth.baseUrl` | — | — |
| Default / computed | Falls back to `AuthType.QWEN_OAUTH` | Built-in default (OpenAI ⇒ `qwen3-coder-plus`) | — | — | — | `Config.getProxy()` if configured |
\*When present, CLI auth flags override settings. Otherwise, `security.auth.selectedType` or the implicit default determines the auth type. Qwen OAuth and OpenAI are the only auth types surfaced without extra configuration.
Model-provider sourced values are applied atomically: once a provider model is active, every field it defines is protected from lower layers until you manually clear credentials via `/auth`. The final `generationConfig` is the projection across all layers: lower layers only fill gaps left by higher ones, and values set by the provider layer cannot be overridden from below.
The merge strategy for `modelProviders` is REPLACE: the entire `modelProviders` from project settings will override the corresponding section in user settings, rather than merging the two.
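As an illustration of REPLACE (a hypothetical pair of files, reusing the `gpt-4o` entry from the example above): suppose the user scope defines catalogs for both `openai` and `anthropic`, while the project-scope settings file contains only:
```json
{
  "modelProviders": {
    "openai": [{ "id": "gpt-4o", "envKey": "OPENAI_API_KEY" }]
  }
}
```
While this project file is in effect, it becomes the entire effective `modelProviders`: the user-scope `anthropic` entries are not merged in and will not appear in the `/model` picker.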
##### Generation config layering
Per-field precedence for `generationConfig`:
1. Programmatic overrides (e.g. runtime `/model`, `/auth` changes)
2. `modelProviders[authType][].generationConfig`
3. `settings.model.generationConfig`
4. Content-generator defaults (`getDefaultGenerationConfig` for OpenAI, `getParameterValue` for Gemini, etc.)
`samplingParams` is treated atomically; provider values replace the entire object. Defaults from the content generator apply last so each provider retains its tuned baseline.
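A minimal sketch of that atomicity (hypothetical values): if `settings.model.generationConfig.samplingParams` is `{ "temperature": 0.7, "top_p": 0.9 }` and the active provider entry defines
```json
{
  "generationConfig": {
    "samplingParams": { "temperature": 0.2 }
  }
}
```
then the effective `samplingParams` is exactly `{ "temperature": 0.2 }`; the provider object replaces the settings object rather than merging with it, so `top_p` is not inherited. Fields the provider leaves unset (such as `timeout`) still fall through per field to settings and then to the content-generator defaults.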
##### Selection persistence and recommendations
> [!important]
> Define `modelProviders` in the user-scope `~/.qwen/settings.json` whenever possible and avoid persisting credential overrides in any scope. Keeping the provider catalog in user settings prevents merge/override conflicts between project and user scopes and ensures `/auth` and `/model` updates always write back to a consistent scope.
- `/model` and `/auth` persist `model.name` (where applicable) and `security.auth.selectedType` to the closest writable scope that already defines `modelProviders`; otherwise they fall back to the user scope. This keeps workspace/user files in sync with the active provider catalog.
- Without `modelProviders`, the resolver mixes CLI/env/settings layers, which is fine for single-provider setups but cumbersome when frequently switching. Define provider catalogs whenever multi-model workflows are common so that switches stay atomic, source-attributed, and debuggable.
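As a rough sketch of the recommended setup (hypothetical values), a user-scope `~/.qwen/settings.json` after a `/model` switch might look like:
```json
{
  "modelProviders": {
    "openai": [{ "id": "gpt-4o", "name": "GPT-4o", "envKey": "OPENAI_API_KEY" }]
  },
  "model": { "name": "gpt-4o" },
  "security": { "auth": { "selectedType": "openai" } }
}
```
Both the selected model and the auth type are written back to the same scope that owns `modelProviders`, keeping the file internally consistent.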
#### context
| Setting | Type | Description | Default |


@@ -59,7 +59,6 @@ Commands for managing AI tools and models.
| ---------------- | --------------------------------------------- | --------------------------------------------- |
| `/mcp` | List configured MCP servers and tools | `/mcp`, `/mcp desc` |
| `/tools` | Display currently available tool list | `/tools`, `/tools desc` |
| `/skills` | List and run available skills (experimental) | `/skills`, `/skills <name>` |
| `/approval-mode` | Change approval mode for tool usage | `/approval-mode <mode (auto-edit)> --project` |
| →`plan` | Analysis only, no execution | Secure review |
| →`default` | Require approval for edits | Daily use |


@@ -49,8 +49,6 @@ Cross-platform sandboxing with complete process isolation.
By default, Qwen Code uses a published sandbox image (configured in the CLI package) and will pull it as needed.
The container sandbox mounts your workspace and your `~/.qwen` directory into the container so auth and settings persist between runs.
**Best for**: Strong isolation on any OS, consistent tooling inside a known image.
### Choosing a method
@@ -159,7 +157,7 @@ For a working allowlist-style proxy example, see: [Example Proxy Script](/develo
## Linux UID/GID handling
On Linux, Qwen Code defaults to enabling UID/GID mapping so the sandbox runs as your user (and reuses the mounted `~/.qwen`). Override with:
The sandbox automatically handles user permissions on Linux. Override these permissions with:
```bash
export SANDBOX_SET_UID_GID=true # Force host UID/GID


@@ -27,14 +27,6 @@ Agent Skills package expertise into discoverable capabilities. Each Skill consis
Skills are **model-invoked** — the model autonomously decides when to use them based on your request and each Skill's description. This is different from slash commands, which are **user-invoked** (you explicitly type `/command`).
If you want to invoke a Skill explicitly, use the `/skills` slash command:
```bash
/skills <skill-name>
```
The `/skills` command is only available when you run with `--experimental-skills`. Use autocomplete to browse available Skills and descriptions.
### Benefits
- Extend Qwen Code for your workflows

Binary file not shown (image, 36 KiB)


@@ -1,57 +0,0 @@
# JetBrains IDEs
> JetBrains IDEs provide native support for AI coding assistants through the Agent Client Protocol (ACP). This integration allows you to use Qwen Code directly within your JetBrains IDE with real-time code suggestions.
### Features
- **Native agent experience**: Integrated AI assistant panel within your JetBrains IDE
- **Agent Client Protocol**: Full support for ACP, enabling advanced IDE interactions
- **Symbol management**: #-mention files to add them to the conversation context
- **Conversation history**: Access to past conversations within the IDE
### Requirements
- JetBrains IDE with ACP support (IntelliJ IDEA, WebStorm, PyCharm, etc.)
- Qwen Code CLI installed
### Installation
1. Install Qwen Code CLI:
```bash
npm install -g @qwen-code/qwen-code
```
2. Open your JetBrains IDE and navigate to the AI Chat tool window.
3. Click the 3-dot menu in the upper-right corner, select **Configure ACP Agent**, and configure Qwen Code with the following settings:
```json
{
"agent_servers": {
"qwen": {
"command": "/path/to/qwen",
"args": ["--acp"],
"env": {}
}
}
}
```
4. The Qwen Code agent should now be available in the AI Assistant panel
![Qwen Code in JetBrains AI Chat](./images/jetbrains-acp.png)
## Troubleshooting
### Agent not appearing
- Run `qwen --version` in terminal to verify installation
- Ensure your JetBrains IDE version supports ACP
- Restart your JetBrains IDE
### Qwen Code not responding
- Check your internet connection
- Verify CLI works by running `qwen` in terminal
- [File an issue on GitHub](https://github.com/qwenlm/qwen-code/issues) if the problem persists


@@ -18,7 +18,7 @@
### Requirements
- VS Code 1.85.0 or higher
- VS Code 1.98.0 or higher
### Installation
@@ -34,7 +34,7 @@
### Extension not installing
- Ensure you have VS Code 1.85.0 or higher
- Ensure you have VS Code 1.98.0 or higher
- Check that VS Code has permission to install extensions
- Try installing directly from the Marketplace website


@@ -1,6 +1,5 @@
# Qwen Code overview
[![@qwen-code/qwen-code downloads](https://img.shields.io/npm/dw/@qwen-code/qwen-code.svg)](https://npm-compare.com/@qwen-code/qwen-code)
[![@qwen-code/qwen-code downloads](https://img.shields.io/npm/dw/@qwen-code/qwen-code.svg)](https://npm-compare.com/@qwen-code/qwen-code)
[![@qwen-code/qwen-code version](https://img.shields.io/npm/v/@qwen-code/qwen-code.svg)](https://www.npmjs.com/package/@qwen-code/qwen-code)
> Learn about Qwen Code, Qwen's agentic coding tool that lives in your terminal and helps you turn ideas into code faster than ever before.


@@ -9,18 +9,11 @@ This guide provides solutions to common issues and debugging tips, including top
## Authentication or login errors
- **Error: `UNABLE_TO_GET_ISSUER_CERT_LOCALLY`, `UNABLE_TO_VERIFY_LEAF_SIGNATURE`, or `unable to get local issuer certificate`**
- **Error: `UNABLE_TO_GET_ISSUER_CERT_LOCALLY` or `unable to get local issuer certificate`**
- **Cause:** You may be on a corporate network with a firewall that intercepts and inspects SSL/TLS traffic. This often requires a custom root CA certificate to be trusted by Node.js.
- **Solution:** Set the `NODE_EXTRA_CA_CERTS` environment variable to the absolute path of your corporate root CA certificate file.
- Example: `export NODE_EXTRA_CA_CERTS=/path/to/your/corporate-ca.crt`
- **Error: `Device authorization flow failed: fetch failed`**
- **Cause:** Node.js could not reach Qwen OAuth endpoints (often a proxy or SSL/TLS trust issue). When available, Qwen Code will also print the underlying error cause (for example: `UNABLE_TO_VERIFY_LEAF_SIGNATURE`).
- **Solution:**
- Confirm you can access `https://chat.qwen.ai` from the same machine/network.
- If you are behind a proxy, set it via `qwen --proxy <url>` (or the `proxy` setting in `settings.json`).
- If your network uses a corporate TLS inspection CA, set `NODE_EXTRA_CA_CERTS` as described above.
- **Issue: Unable to display UI after authentication failure**
- **Cause:** If authentication fails after selecting an authentication type, the `security.auth.selectedType` setting may be persisted in `settings.json`. On restart, the CLI may get stuck trying to authenticate with the failed auth type and fail to display the UI.
- **Solution:** Clear the `security.auth.selectedType` configuration item in your `settings.json` file:
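A minimal sketch of the entry to remove (illustrative value; your file will contain other settings as well):
```json
{
  "security": {
    "auth": {
      "selectedType": "qwen-oauth"
    }
  }
}
```
Delete the `selectedType` key, save the file, and restart the CLI so it can prompt for an authentication method again.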

package-lock.json generated (21 lines changed)

@@ -1,12 +1,12 @@
{
"name": "@qwen-code/qwen-code",
"version": "0.7.0",
"version": "0.6.0-preview.3",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "@qwen-code/qwen-code",
"version": "0.7.0",
"version": "0.6.0-preview.3",
"workspaces": [
"packages/*"
],
@@ -6216,7 +6216,10 @@
"version": "4.0.3",
"resolved": "https://registry.npmjs.org/chokidar/-/chokidar-4.0.3.tgz",
"integrity": "sha512-Qgzu8kfBvo+cA4962jnP1KkS6Dop5NS6g7R5LFYJr4b8Ub94PPQXUksCw9PvXoeXPRRddRNC5C1JQUR2SMGtnA==",
"dev": true,
"license": "MIT",
"optional": true,
"peer": true,
"dependencies": {
"readdirp": "^4.0.1"
},
@@ -13879,7 +13882,10 @@
"version": "4.1.2",
"resolved": "https://registry.npmjs.org/readdirp/-/readdirp-4.1.2.tgz",
"integrity": "sha512-GDhwkLfywWL2s6vEjyhri+eXmfH6j1L7JE27WhqLeYzoh/A3DBaYGEj2H/HFZCn/kMfim73FXxEJTw06WtxQwg==",
"dev": true,
"license": "MIT",
"optional": true,
"peer": true,
"engines": {
"node": ">= 14.18.0"
},
@@ -17310,7 +17316,7 @@
},
"packages/cli": {
"name": "@qwen-code/qwen-code",
"version": "0.7.0",
"version": "0.6.0-preview.3",
"dependencies": {
"@google/genai": "1.30.0",
"@iarna/toml": "^2.2.5",
@@ -17947,7 +17953,7 @@
},
"packages/core": {
"name": "@qwen-code/qwen-code-core",
"version": "0.7.0",
"version": "0.6.0-preview.3",
"hasInstallScript": true,
"dependencies": {
"@anthropic-ai/sdk": "^0.36.1",
@@ -17968,7 +17974,6 @@
"ajv-formats": "^3.0.0",
"async-mutex": "^0.5.0",
"chardet": "^2.1.0",
"chokidar": "^4.0.3",
"diff": "^7.0.0",
"dotenv": "^17.1.0",
"fast-levenshtein": "^2.0.6",
@@ -18588,7 +18593,7 @@
},
"packages/sdk-typescript": {
"name": "@qwen-code/sdk",
"version": "0.1.2",
"version": "0.1.0",
"license": "Apache-2.0",
"dependencies": {
"@modelcontextprotocol/sdk": "^1.25.1",
@@ -21408,7 +21413,7 @@
},
"packages/test-utils": {
"name": "@qwen-code/qwen-code-test-utils",
"version": "0.7.0",
"version": "0.6.0-preview.3",
"dev": true,
"license": "Apache-2.0",
"devDependencies": {
@@ -21420,7 +21425,7 @@
},
"packages/vscode-ide-companion": {
"name": "qwen-code-vscode-ide-companion",
"version": "0.7.0",
"version": "0.6.0-preview.3",
"license": "LICENSE",
"dependencies": {
"@modelcontextprotocol/sdk": "^1.25.1",


@@ -1,6 +1,6 @@
{
"name": "@qwen-code/qwen-code",
"version": "0.7.0",
"version": "0.6.0-preview.3",
"engines": {
"node": ">=20.0.0"
},
@@ -13,7 +13,7 @@
"url": "git+https://github.com/QwenLM/qwen-code.git"
},
"config": {
"sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.7.0"
"sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.6.0-preview.3"
},
"scripts": {
"start": "cross-env node scripts/start.js",


@@ -1,6 +1,6 @@
{
"name": "@qwen-code/qwen-code",
"version": "0.7.0",
"version": "0.6.0-preview.3",
"description": "Qwen Code",
"repository": {
"type": "git",
@@ -33,7 +33,7 @@
"dist"
],
"config": {
"sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.7.0"
"sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.6.0-preview.3"
},
"dependencies": {
"@google/genai": "1.30.0",


@@ -1,112 +1,41 @@
/**
* @license
* Copyright 2025 Qwen Team
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { AuthType } from '@qwen-code/qwen-code-core';
import { vi } from 'vitest';
import { validateAuthMethod } from './auth.js';
import * as settings from './settings.js';
vi.mock('./settings.js', () => ({
loadEnvironment: vi.fn(),
loadSettings: vi.fn().mockReturnValue({
merged: {},
merged: vi.fn().mockReturnValue({}),
}),
}));
describe('validateAuthMethod', () => {
beforeEach(() => {
vi.resetModules();
// Reset mock to default
vi.mocked(settings.loadSettings).mockReturnValue({
merged: {},
} as ReturnType<typeof settings.loadSettings>);
});
afterEach(() => {
vi.unstubAllEnvs();
delete process.env['OPENAI_API_KEY'];
delete process.env['CUSTOM_API_KEY'];
delete process.env['GEMINI_API_KEY'];
delete process.env['GEMINI_API_KEY_ALTERED'];
delete process.env['ANTHROPIC_API_KEY'];
delete process.env['ANTHROPIC_BASE_URL'];
delete process.env['GOOGLE_API_KEY'];
});
it('should return null for USE_OPENAI with default env key', () => {
it('should return null for USE_OPENAI', () => {
process.env['OPENAI_API_KEY'] = 'fake-key';
expect(validateAuthMethod(AuthType.USE_OPENAI)).toBeNull();
});
it('should return an error message for USE_OPENAI if no API key is available', () => {
it('should return an error message for USE_OPENAI if OPENAI_API_KEY is not set', () => {
delete process.env['OPENAI_API_KEY'];
expect(validateAuthMethod(AuthType.USE_OPENAI)).toBe(
"Missing API key for OpenAI-compatible auth. Set settings.security.auth.apiKey, or set the 'OPENAI_API_KEY' environment variable.",
'OPENAI_API_KEY environment variable not found. You can enter it interactively or add it to your .env file.',
);
});
it('should return null for USE_OPENAI with custom envKey from modelProviders', () => {
vi.mocked(settings.loadSettings).mockReturnValue({
merged: {
model: { name: 'custom-model' },
modelProviders: {
openai: [{ id: 'custom-model', envKey: 'CUSTOM_API_KEY' }],
},
},
} as unknown as ReturnType<typeof settings.loadSettings>);
process.env['CUSTOM_API_KEY'] = 'custom-key';
expect(validateAuthMethod(AuthType.USE_OPENAI)).toBeNull();
});
it('should return error with custom envKey hint when modelProviders envKey is set but env var is missing', () => {
vi.mocked(settings.loadSettings).mockReturnValue({
merged: {
model: { name: 'custom-model' },
modelProviders: {
openai: [{ id: 'custom-model', envKey: 'CUSTOM_API_KEY' }],
},
},
} as unknown as ReturnType<typeof settings.loadSettings>);
const result = validateAuthMethod(AuthType.USE_OPENAI);
expect(result).toContain('CUSTOM_API_KEY');
});
it('should return null for USE_GEMINI with custom envKey', () => {
vi.mocked(settings.loadSettings).mockReturnValue({
merged: {
model: { name: 'gemini-1.5-flash' },
modelProviders: {
gemini: [
{ id: 'gemini-1.5-flash', envKey: 'GEMINI_API_KEY_ALTERED' },
],
},
},
} as unknown as ReturnType<typeof settings.loadSettings>);
process.env['GEMINI_API_KEY_ALTERED'] = 'altered-key';
expect(validateAuthMethod(AuthType.USE_GEMINI)).toBeNull();
});
it('should return error with custom envKey for USE_GEMINI when env var is missing', () => {
vi.mocked(settings.loadSettings).mockReturnValue({
merged: {
model: { name: 'gemini-1.5-flash' },
modelProviders: {
gemini: [
{ id: 'gemini-1.5-flash', envKey: 'GEMINI_API_KEY_ALTERED' },
],
},
},
} as unknown as ReturnType<typeof settings.loadSettings>);
const result = validateAuthMethod(AuthType.USE_GEMINI);
expect(result).toContain('GEMINI_API_KEY_ALTERED');
});
it('should return null for QWEN_OAUTH', () => {
expect(validateAuthMethod(AuthType.QWEN_OAUTH)).toBeNull();
});
@@ -116,115 +45,4 @@ describe('validateAuthMethod', () => {
'Invalid auth method selected.',
);
});
it('should return null for USE_ANTHROPIC with custom envKey and baseUrl', () => {
vi.mocked(settings.loadSettings).mockReturnValue({
merged: {
model: { name: 'claude-3' },
modelProviders: {
anthropic: [
{
id: 'claude-3',
envKey: 'CUSTOM_ANTHROPIC_KEY',
baseUrl: 'https://api.anthropic.com',
},
],
},
},
} as unknown as ReturnType<typeof settings.loadSettings>);
process.env['CUSTOM_ANTHROPIC_KEY'] = 'custom-anthropic-key';
expect(validateAuthMethod(AuthType.USE_ANTHROPIC)).toBeNull();
});
it('should return error for USE_ANTHROPIC when baseUrl is missing', () => {
vi.mocked(settings.loadSettings).mockReturnValue({
merged: {
model: { name: 'claude-3' },
modelProviders: {
anthropic: [{ id: 'claude-3', envKey: 'CUSTOM_ANTHROPIC_KEY' }],
},
},
} as unknown as ReturnType<typeof settings.loadSettings>);
process.env['CUSTOM_ANTHROPIC_KEY'] = 'custom-key';
const result = validateAuthMethod(AuthType.USE_ANTHROPIC);
expect(result).toContain('modelProviders[].baseUrl');
});
it('should return null for USE_VERTEX_AI with custom envKey', () => {
vi.mocked(settings.loadSettings).mockReturnValue({
merged: {
model: { name: 'vertex-model' },
modelProviders: {
'vertex-ai': [
{ id: 'vertex-model', envKey: 'GOOGLE_API_KEY_VERTEX' },
],
},
},
} as unknown as ReturnType<typeof settings.loadSettings>);
process.env['GOOGLE_API_KEY_VERTEX'] = 'vertex-key';
expect(validateAuthMethod(AuthType.USE_VERTEX_AI)).toBeNull();
});
it('should use config.modelsConfig.getModel() when Config is provided', () => {
// Settings has a different model
vi.mocked(settings.loadSettings).mockReturnValue({
merged: {
model: { name: 'settings-model' },
modelProviders: {
openai: [
{ id: 'settings-model', envKey: 'SETTINGS_API_KEY' },
{ id: 'cli-model', envKey: 'CLI_API_KEY' },
],
},
},
} as unknown as ReturnType<typeof settings.loadSettings>);
// Mock Config object that returns a different model (e.g., from CLI args)
const mockConfig = {
modelsConfig: {
getModel: vi.fn().mockReturnValue('cli-model'),
},
} as unknown as import('@qwen-code/qwen-code-core').Config;
// Set the env key for the CLI model, not the settings model
process.env['CLI_API_KEY'] = 'cli-key';
// Should use 'cli-model' from config.modelsConfig.getModel(), not 'settings-model'
const result = validateAuthMethod(AuthType.USE_OPENAI, mockConfig);
expect(result).toBeNull();
expect(mockConfig.modelsConfig.getModel).toHaveBeenCalled();
});
it('should fail validation when Config provides different model without matching env key', () => {
// Clean up any existing env keys first
delete process.env['CLI_API_KEY'];
delete process.env['SETTINGS_API_KEY'];
delete process.env['OPENAI_API_KEY'];
vi.mocked(settings.loadSettings).mockReturnValue({
merged: {
model: { name: 'settings-model' },
modelProviders: {
openai: [
{ id: 'settings-model', envKey: 'SETTINGS_API_KEY' },
{ id: 'cli-model', envKey: 'CLI_API_KEY' },
],
},
},
} as unknown as ReturnType<typeof settings.loadSettings>);
const mockConfig = {
modelsConfig: {
getModel: vi.fn().mockReturnValue('cli-model'),
},
} as unknown as import('@qwen-code/qwen-code-core').Config;
// Don't set CLI_API_KEY - validation should fail
const result = validateAuthMethod(AuthType.USE_OPENAI, mockConfig);
expect(result).not.toBeNull();
expect(result).toContain('CLI_API_KEY');
});
});


@@ -1,169 +1,21 @@
/**
* @license
* Copyright 2025 Qwen Team
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import {
AuthType,
type Config,
type ModelProvidersConfig,
type ProviderModelConfig,
} from '@qwen-code/qwen-code-core';
import { loadEnvironment, loadSettings, type Settings } from './settings.js';
import { t } from '../i18n/index.js';
import { AuthType } from '@qwen-code/qwen-code-core';
import { loadEnvironment, loadSettings } from './settings.js';
/**
* Default environment variable names for each auth type
*/
const DEFAULT_ENV_KEYS: Record<string, string> = {
[AuthType.USE_OPENAI]: 'OPENAI_API_KEY',
[AuthType.USE_ANTHROPIC]: 'ANTHROPIC_API_KEY',
[AuthType.USE_GEMINI]: 'GEMINI_API_KEY',
[AuthType.USE_VERTEX_AI]: 'GOOGLE_API_KEY',
};
/**
* Find model configuration from modelProviders by authType and modelId
*/
function findModelConfig(
modelProviders: ModelProvidersConfig | undefined,
authType: string,
modelId: string | undefined,
): ProviderModelConfig | undefined {
if (!modelProviders || !modelId) {
return undefined;
}
const models = modelProviders[authType];
if (!Array.isArray(models)) {
return undefined;
}
return models.find((m) => m.id === modelId);
}
/**
* Check if API key is available for the given auth type and model configuration.
* Prioritizes custom envKey from modelProviders over default environment variables.
*/
function hasApiKeyForAuth(
authType: string,
settings: Settings,
config?: Config,
): {
hasKey: boolean;
checkedEnvKey: string | undefined;
isExplicitEnvKey: boolean;
} {
const modelProviders = settings.modelProviders as
| ModelProvidersConfig
| undefined;
// Use config.modelsConfig.getModel() if available for accurate model ID resolution
// that accounts for CLI args, env vars, and settings. Fall back to settings.model.name.
const modelId = config?.modelsConfig.getModel() ?? settings.model?.name;
// Try to find model-specific envKey from modelProviders
const modelConfig = findModelConfig(modelProviders, authType, modelId);
if (modelConfig?.envKey) {
// Explicit envKey configured - only check this env var, no apiKey fallback
const hasKey = !!process.env[modelConfig.envKey];
return {
hasKey,
checkedEnvKey: modelConfig.envKey,
isExplicitEnvKey: true,
};
}
// Using default environment variable - apiKey fallback is allowed
const defaultEnvKey = DEFAULT_ENV_KEYS[authType];
if (defaultEnvKey) {
const hasKey = !!process.env[defaultEnvKey];
if (hasKey) {
return { hasKey, checkedEnvKey: defaultEnvKey, isExplicitEnvKey: false };
}
}
// Also check settings.security.auth.apiKey as fallback (only for default env key)
if (settings.security?.auth?.apiKey) {
return {
hasKey: true,
checkedEnvKey: defaultEnvKey || undefined,
isExplicitEnvKey: false,
};
}
return {
hasKey: false,
checkedEnvKey: defaultEnvKey,
isExplicitEnvKey: false,
};
}
/**
* Generate API key error message based on auth check result.
* Returns null if API key is present, otherwise returns the appropriate error message.
*/
function getApiKeyError(
authMethod: string,
settings: Settings,
config?: Config,
): string | null {
const { hasKey, checkedEnvKey, isExplicitEnvKey } = hasApiKeyForAuth(
authMethod,
settings,
config,
);
if (hasKey) {
return null;
}
const envKeyHint = checkedEnvKey || DEFAULT_ENV_KEYS[authMethod];
if (isExplicitEnvKey) {
return t(
'{{envKeyHint}} environment variable not found. Please set it in your .env file or environment variables.',
{ envKeyHint },
);
}
return t(
'{{envKeyHint}} environment variable not found (or set settings.security.auth.apiKey). Please set it in your .env file or environment variables.',
{ envKeyHint },
);
}
/**
* Validate that the required credentials and configuration exist for the given auth method.
*/
export function validateAuthMethod(
authMethod: string,
config?: Config,
): string | null {
export function validateAuthMethod(authMethod: string): string | null {
const settings = loadSettings();
loadEnvironment(settings.merged);
if (authMethod === AuthType.USE_OPENAI) {
const { hasKey, checkedEnvKey, isExplicitEnvKey } = hasApiKeyForAuth(
authMethod,
settings.merged,
config,
);
if (!hasKey) {
const envKeyHint = checkedEnvKey
? `'${checkedEnvKey}'`
: "'OPENAI_API_KEY'";
if (isExplicitEnvKey) {
// Explicit envKey configured - only suggest setting the env var
return t(
'Missing API key for OpenAI-compatible auth. Set the {{envKeyHint}} environment variable.',
{ envKeyHint },
);
}
// Default env key - can use either apiKey or env var
return t(
'Missing API key for OpenAI-compatible auth. Set settings.security.auth.apiKey, or set the {{envKeyHint}} environment variable.',
{ envKeyHint },
);
const hasApiKey =
process.env['OPENAI_API_KEY'] || settings.merged.security?.auth?.apiKey;
if (!hasApiKey) {
return 'OPENAI_API_KEY environment variable not found. You can enter it interactively or add it to your .env file.';
}
return null;
}
@@ -175,49 +27,36 @@ export function validateAuthMethod(
}
if (authMethod === AuthType.USE_ANTHROPIC) {
const apiKeyError = getApiKeyError(authMethod, settings.merged, config);
if (apiKeyError) {
return apiKeyError;
const hasApiKey = process.env['ANTHROPIC_API_KEY'];
if (!hasApiKey) {
return 'ANTHROPIC_API_KEY environment variable not found.';
}
// Check baseUrl - can come from modelProviders or environment
const modelProviders = settings.merged.modelProviders as
| ModelProvidersConfig
| undefined;
// Use config.modelsConfig.getModel() if available for accurate model ID
const modelId =
config?.modelsConfig.getModel() ?? settings.merged.model?.name;
const modelConfig = findModelConfig(modelProviders, authMethod, modelId);
if (modelConfig && !modelConfig.baseUrl) {
return t(
'Anthropic provider missing required baseUrl in modelProviders[].baseUrl.',
);
}
if (!modelConfig && !process.env['ANTHROPIC_BASE_URL']) {
return t('ANTHROPIC_BASE_URL environment variable not found.');
const hasBaseUrl = process.env['ANTHROPIC_BASE_URL'];
if (!hasBaseUrl) {
return 'ANTHROPIC_BASE_URL environment variable not found.';
}
return null;
}
if (authMethod === AuthType.USE_GEMINI) {
const apiKeyError = getApiKeyError(authMethod, settings.merged, config);
if (apiKeyError) {
return apiKeyError;
const hasApiKey = process.env['GEMINI_API_KEY'];
if (!hasApiKey) {
return 'GEMINI_API_KEY environment variable not found. Please set it in your .env file or environment variables.';
}
return null;
}
if (authMethod === AuthType.USE_VERTEX_AI) {
const apiKeyError = getApiKeyError(authMethod, settings.merged, config);
if (apiKeyError) {
return apiKeyError;
const hasApiKey = process.env['GOOGLE_API_KEY'];
if (!hasApiKey) {
return 'GOOGLE_API_KEY environment variable not found. Please set it in your .env file or environment variables.';
}
process.env['GOOGLE_GENAI_USE_VERTEXAI'] = 'true';
return null;
}
return t('Invalid auth method selected.');
return 'Invalid auth method selected.';
}


@@ -77,8 +77,10 @@ vi.mock('read-package-up', () => ({
),
}));
vi.mock('@qwen-code/qwen-code-core', async (importOriginal) => {
const actualServer = await importOriginal<typeof ServerConfig>();
vi.mock('@qwen-code/qwen-code-core', async () => {
const actualServer = await vi.importActual<typeof ServerConfig>(
'@qwen-code/qwen-code-core',
);
return {
...actualServer,
IdeClient: {
@@ -1595,58 +1597,6 @@ describe('Approval mode tool exclusion logic', () => {
expect(excludedTools).toContain(WriteFileTool.Name);
});
it('should not exclude a tool explicitly allowed in tools.allowed', async () => {
process.argv = ['node', 'script.js', '-p', 'test'];
const argv = await parseArguments({} as Settings);
const settings: Settings = {
tools: {
allowed: [ShellTool.Name],
},
};
const extensions: Extension[] = [];
const config = await loadCliConfig(
settings,
extensions,
new ExtensionEnablementManager(
ExtensionStorage.getUserExtensionsDir(),
argv.extensions,
),
argv,
);
const excludedTools = config.getExcludeTools();
expect(excludedTools).not.toContain(ShellTool.Name);
expect(excludedTools).toContain(EditTool.Name);
expect(excludedTools).toContain(WriteFileTool.Name);
});
it('should not exclude a tool explicitly allowed in tools.core', async () => {
process.argv = ['node', 'script.js', '-p', 'test'];
const argv = await parseArguments({} as Settings);
const settings: Settings = {
tools: {
core: [ShellTool.Name],
},
};
const extensions: Extension[] = [];
const config = await loadCliConfig(
settings,
extensions,
new ExtensionEnablementManager(
ExtensionStorage.getUserExtensionsDir(),
argv.extensions,
),
argv,
);
const excludedTools = config.getExcludeTools();
expect(excludedTools).not.toContain(ShellTool.Name);
expect(excludedTools).toContain(EditTool.Name);
expect(excludedTools).toContain(WriteFileTool.Name);
});
it('should exclude only shell tools in non-interactive mode with auto-edit approval mode', async () => {
process.argv = [
'node',


@@ -10,31 +10,25 @@ import {
Config,
DEFAULT_QWEN_EMBEDDING_MODEL,
DEFAULT_MEMORY_FILE_FILTERING_OPTIONS,
EditTool,
FileDiscoveryService,
getCurrentGeminiMdFilename,
loadServerHierarchicalMemory,
setGeminiMdFilename as setServerGeminiMdFilename,
ShellTool,
WriteFileTool,
resolveTelemetrySettings,
FatalConfigError,
Storage,
InputFormat,
OutputFormat,
isToolEnabled,
SessionService,
type ResumedSessionData,
type FileFilteringOptions,
type MCPServerConfig,
type ToolName,
EditTool,
ShellTool,
WriteFileTool,
} from '@qwen-code/qwen-code-core';
import { extensionsCommand } from '../commands/extensions.js';
import type { Settings } from './settings.js';
import {
resolveCliGenerationConfig,
getAuthTypeFromEnv,
} from '../utils/modelConfigUtils.js';
import yargs, { type Argv } from 'yargs';
import { hideBin } from 'yargs/helpers';
import * as fs from 'node:fs';
@@ -170,17 +164,7 @@ function normalizeOutputFormat(
}
export async function parseArguments(settings: Settings): Promise<CliArgs> {
let rawArgv = hideBin(process.argv);
// hack: if the first argument is the CLI entry point, remove it
if (
rawArgv.length > 0 &&
(rawArgv[0].endsWith('/dist/qwen-cli/cli.js') ||
rawArgv[0].endsWith('/dist/cli.js'))
) {
rawArgv = rawArgv.slice(1);
}
const rawArgv = hideBin(process.argv);
const yargsInstance = yargs(rawArgv)
.locale('en')
.scriptName('qwen')
@@ -852,28 +836,6 @@ export async function loadCliConfig(
// However, if stream-json input is used, control can be requested via JSON messages,
// so tools should not be excluded in that case.
const extraExcludes: string[] = [];
const resolvedCoreTools = argv.coreTools || settings.tools?.core || [];
const resolvedAllowedTools =
argv.allowedTools || settings.tools?.allowed || [];
const isExplicitlyEnabled = (toolName: ToolName): boolean => {
if (resolvedCoreTools.length > 0) {
if (isToolEnabled(toolName, resolvedCoreTools, [])) {
return true;
}
}
if (resolvedAllowedTools.length > 0) {
if (isToolEnabled(toolName, resolvedAllowedTools, [])) {
return true;
}
}
return false;
};
const excludeUnlessExplicit = (toolName: ToolName): void => {
if (!isExplicitlyEnabled(toolName)) {
extraExcludes.push(toolName);
}
};
if (
!interactive &&
!argv.experimentalAcp &&
@@ -882,15 +844,12 @@ export async function loadCliConfig(
switch (approvalMode) {
case ApprovalMode.PLAN:
case ApprovalMode.DEFAULT:
// In default non-interactive mode, all tools that require approval are excluded,
// unless explicitly enabled via coreTools/allowedTools.
excludeUnlessExplicit(ShellTool.Name as ToolName);
excludeUnlessExplicit(EditTool.Name as ToolName);
excludeUnlessExplicit(WriteFileTool.Name as ToolName);
// In default non-interactive mode, all tools that require approval are excluded.
extraExcludes.push(ShellTool.Name, EditTool.Name, WriteFileTool.Name);
break;
case ApprovalMode.AUTO_EDIT:
// In auto-edit non-interactive mode, only tools that still require a prompt are excluded.
excludeUnlessExplicit(ShellTool.Name as ToolName);
extraExcludes.push(ShellTool.Name);
break;
case ApprovalMode.YOLO:
// No extra excludes for YOLO mode.
@@ -938,25 +897,28 @@ export async function loadCliConfig(
const selectedAuthType =
(argv.authType as AuthType | undefined) ||
settings.security?.auth?.selectedType ||
/* getAuthTypeFromEnv means no authType was explicitly provided, we infer the authType from env vars */
getAuthTypeFromEnv();
settings.security?.auth?.selectedType;
// Unified resolution of generation config with source attribution
const resolvedCliConfig = resolveCliGenerationConfig({
argv: {
model: argv.model,
openaiApiKey: argv.openaiApiKey,
openaiBaseUrl: argv.openaiBaseUrl,
openaiLogging: argv.openaiLogging,
openaiLoggingDir: argv.openaiLoggingDir,
},
settings,
selectedAuthType,
env: process.env as Record<string, string | undefined>,
});
const { model: resolvedModel } = resolvedCliConfig;
const apiKey =
(selectedAuthType === AuthType.USE_OPENAI
? argv.openaiApiKey ||
process.env['OPENAI_API_KEY'] ||
settings.security?.auth?.apiKey
: '') || '';
const baseUrl =
(selectedAuthType === AuthType.USE_OPENAI
? argv.openaiBaseUrl ||
process.env['OPENAI_BASE_URL'] ||
settings.security?.auth?.baseUrl
: '') || '';
const resolvedModel =
argv.model ||
(selectedAuthType === AuthType.USE_OPENAI
? process.env['OPENAI_MODEL'] ||
process.env['QWEN_MODEL'] ||
settings.model?.name
: '') ||
'';
const sandboxConfig = await loadSandboxConfig(settings, argv);
const screenReader =
@@ -990,8 +952,6 @@ export async function loadCliConfig(
}
}
const modelProvidersConfig = settings.modelProviders;
return new Config({
sessionId,
sessionData,
@@ -1049,11 +1009,24 @@ export async function loadCliConfig(
inputFormat,
outputFormat,
includePartialMessages,
modelProvidersConfig,
generationConfigSources: resolvedCliConfig.sources,
generationConfig: resolvedCliConfig.generationConfig,
generationConfig: {
...(settings.model?.generationConfig || {}),
model: resolvedModel,
apiKey,
baseUrl,
enableOpenAILogging:
(typeof argv.openaiLogging === 'undefined'
? settings.model?.enableOpenAILogging
: argv.openaiLogging) ?? false,
openAILoggingDir:
argv.openaiLoggingDir || settings.model?.openAILoggingDir,
},
cliVersion: await getCliVersion(),
webSearch: buildWebSearchConfig(argv, settings, selectedAuthType),
webSearch: buildWebSearchConfig(
argv,
settings,
settings.security?.auth?.selectedType,
),
summarizeToolOutput: settings.model?.summarizeToolOutput,
ideMode,
chatCompression: settings.model?.chatCompression,


@@ -1,87 +0,0 @@
/**
* @license
* Copyright 2025 Qwen Team
* SPDX-License-Identifier: Apache-2.0
*/
import { describe, expect, it } from 'vitest';
import { SettingScope } from './settings.js';
import { getPersistScopeForModelSelection } from './modelProvidersScope.js';
function makeSettings({
isTrusted,
userModelProviders,
workspaceModelProviders,
}: {
isTrusted: boolean;
userModelProviders?: unknown;
workspaceModelProviders?: unknown;
}) {
const userSettings: Record<string, unknown> = {};
const workspaceSettings: Record<string, unknown> = {};
// When undefined, treat as "not present in this scope" (the key is omitted),
// matching how LoadedSettings is shaped when a settings file doesn't define it.
if (userModelProviders !== undefined) {
userSettings['modelProviders'] = userModelProviders;
}
if (workspaceModelProviders !== undefined) {
workspaceSettings['modelProviders'] = workspaceModelProviders;
}
return {
isTrusted,
user: { settings: userSettings },
workspace: { settings: workspaceSettings },
} as unknown as import('./settings.js').LoadedSettings;
}
describe('getPersistScopeForModelSelection', () => {
it('prefers workspace when trusted and workspace defines modelProviders', () => {
const settings = makeSettings({
isTrusted: true,
workspaceModelProviders: {},
userModelProviders: { anything: true },
});
expect(getPersistScopeForModelSelection(settings)).toBe(
SettingScope.Workspace,
);
});
it('falls back to user when workspace does not define modelProviders', () => {
const settings = makeSettings({
isTrusted: true,
workspaceModelProviders: undefined,
userModelProviders: {},
});
expect(getPersistScopeForModelSelection(settings)).toBe(SettingScope.User);
});
it('ignores workspace modelProviders when workspace is untrusted', () => {
const settings = makeSettings({
isTrusted: false,
workspaceModelProviders: {},
userModelProviders: undefined,
});
expect(getPersistScopeForModelSelection(settings)).toBe(SettingScope.User);
});
it('falls back to legacy trust heuristic when neither scope defines modelProviders', () => {
const trusted = makeSettings({
isTrusted: true,
userModelProviders: undefined,
workspaceModelProviders: undefined,
});
expect(getPersistScopeForModelSelection(trusted)).toBe(SettingScope.User);
const untrusted = makeSettings({
isTrusted: false,
userModelProviders: undefined,
workspaceModelProviders: undefined,
});
expect(getPersistScopeForModelSelection(untrusted)).toBe(SettingScope.User);
});
});


@@ -1,48 +0,0 @@
/**
* @license
* Copyright 2025 Qwen Team
* SPDX-License-Identifier: Apache-2.0
*/
import { SettingScope, type LoadedSettings } from './settings.js';
function hasOwnModelProviders(settingsObj: unknown): boolean {
if (!settingsObj || typeof settingsObj !== 'object') {
return false;
}
const obj = settingsObj as Record<string, unknown>;
// Treat an explicitly configured empty object (modelProviders: {}) as "owned"
// by this scope, which is important when mergeStrategy is REPLACE.
return Object.prototype.hasOwnProperty.call(obj, 'modelProviders');
}
/**
* Returns which writable scope (Workspace/User) owns the effective modelProviders
* configuration.
*
* Note: Workspace scope is only considered when the workspace is trusted.
*/
export function getModelProvidersOwnerScope(
settings: LoadedSettings,
): SettingScope | undefined {
if (settings.isTrusted && hasOwnModelProviders(settings.workspace.settings)) {
return SettingScope.Workspace;
}
if (hasOwnModelProviders(settings.user.settings)) {
return SettingScope.User;
}
return undefined;
}
/**
* Choose the settings scope to persist a model selection.
* Prefer persisting back to the scope that contains the effective modelProviders
* config, otherwise fall back to the legacy trust-based heuristic.
*/
export function getPersistScopeForModelSelection(
settings: LoadedSettings,
): SettingScope {
return getModelProvidersOwnerScope(settings) ?? SettingScope.User;
}


@@ -55,7 +55,6 @@ import { disableExtension } from './extension.js';
// These imports will get the versions from the vi.mock('./settings.js', ...) factory.
import {
getSettingsWarnings,
loadSettings,
USER_SETTINGS_PATH, // This IS the mocked path.
getSystemSettingsPath,
@@ -419,86 +418,6 @@ describe('Settings Loading and Merging', () => {
});
});
it('should warn about ignored legacy keys in a v2 settings file', () => {
(mockFsExistsSync as Mock).mockImplementation(
(p: fs.PathLike) => p === USER_SETTINGS_PATH,
);
const userSettingsContent = {
[SETTINGS_VERSION_KEY]: SETTINGS_VERSION,
usageStatisticsEnabled: false,
};
(fs.readFileSync as Mock).mockImplementation(
(p: fs.PathOrFileDescriptor) => {
if (p === USER_SETTINGS_PATH)
return JSON.stringify(userSettingsContent);
return '{}';
},
);
const settings = loadSettings(MOCK_WORKSPACE_DIR);
expect(getSettingsWarnings(settings)).toEqual(
expect.arrayContaining([
expect.stringContaining(
"Legacy setting 'usageStatisticsEnabled' will be ignored",
),
]),
);
expect(getSettingsWarnings(settings)).toEqual(
expect.arrayContaining([
expect.stringContaining("'privacy.usageStatisticsEnabled'"),
]),
);
});
it('should warn about unknown top-level keys in a v2 settings file', () => {
(mockFsExistsSync as Mock).mockImplementation(
(p: fs.PathLike) => p === USER_SETTINGS_PATH,
);
const userSettingsContent = {
[SETTINGS_VERSION_KEY]: SETTINGS_VERSION,
someUnknownKey: 'value',
};
(fs.readFileSync as Mock).mockImplementation(
(p: fs.PathOrFileDescriptor) => {
if (p === USER_SETTINGS_PATH)
return JSON.stringify(userSettingsContent);
return '{}';
},
);
const settings = loadSettings(MOCK_WORKSPACE_DIR);
expect(getSettingsWarnings(settings)).toEqual(
expect.arrayContaining([
expect.stringContaining(
"Unknown setting 'someUnknownKey' will be ignored",
),
]),
);
});
it('should not warn for valid v2 container keys', () => {
(mockFsExistsSync as Mock).mockImplementation(
(p: fs.PathLike) => p === USER_SETTINGS_PATH,
);
const userSettingsContent = {
[SETTINGS_VERSION_KEY]: SETTINGS_VERSION,
model: { name: 'qwen-coder' },
};
(fs.readFileSync as Mock).mockImplementation(
(p: fs.PathOrFileDescriptor) => {
if (p === USER_SETTINGS_PATH)
return JSON.stringify(userSettingsContent);
return '{}';
},
);
const settings = loadSettings(MOCK_WORKSPACE_DIR);
expect(getSettingsWarnings(settings)).toEqual([]);
});
it('should rewrite allowedTools to tools.allowed during migration', () => {
(mockFsExistsSync as Mock).mockImplementation(
(p: fs.PathLike) => p === USER_SETTINGS_PATH,


@@ -344,97 +344,6 @@ const KNOWN_V2_CONTAINERS = new Set(
Object.values(MIGRATION_MAP).map((path) => path.split('.')[0]),
);
function getSettingsFileKeyWarnings(
settings: Record<string, unknown>,
settingsFilePath: string,
): string[] {
const version = settings[SETTINGS_VERSION_KEY];
if (typeof version !== 'number' || version < SETTINGS_VERSION) {
return [];
}
const warnings: string[] = [];
const ignoredLegacyKeys = new Set<string>();
// Ignored legacy keys (V1 top-level keys that moved to a nested V2 path).
for (const [oldKey, newPath] of Object.entries(MIGRATION_MAP)) {
if (oldKey === newPath) {
continue;
}
if (!(oldKey in settings)) {
continue;
}
const oldValue = settings[oldKey];
// If this key is a V2 container (like 'model') and it's already an object,
// it's likely already in V2 format. Don't warn.
if (
KNOWN_V2_CONTAINERS.has(oldKey) &&
typeof oldValue === 'object' &&
oldValue !== null &&
!Array.isArray(oldValue)
) {
continue;
}
ignoredLegacyKeys.add(oldKey);
warnings.push(
`⚠️ Legacy setting '${oldKey}' will be ignored in ${settingsFilePath}. Please use '${newPath}' instead.`,
);
}
// Unknown top-level keys.
const schemaKeys = new Set(Object.keys(getSettingsSchema()));
for (const key of Object.keys(settings)) {
if (key === SETTINGS_VERSION_KEY) {
continue;
}
if (ignoredLegacyKeys.has(key)) {
continue;
}
if (schemaKeys.has(key)) {
continue;
}
warnings.push(
`⚠️ Unknown setting '${key}' will be ignored in ${settingsFilePath}.`,
);
}
return warnings;
}
/**
* Collects warnings for ignored legacy and unknown settings keys.
*
* For `$version: 2` settings files, we do not apply implicit migrations.
* Instead, we surface actionable, de-duplicated warnings in the terminal UI.
*/
export function getSettingsWarnings(loadedSettings: LoadedSettings): string[] {
const warningSet = new Set<string>();
for (const scope of [SettingScope.User, SettingScope.Workspace]) {
const settingsFile = loadedSettings.forScope(scope);
if (settingsFile.rawJson === undefined) {
continue; // File not present / not loaded.
}
const settingsObject = settingsFile.originalSettings as unknown as Record<
string,
unknown
>;
for (const warning of getSettingsFileKeyWarnings(
settingsObject,
settingsFile.path,
)) {
warningSet.add(warning);
}
}
return [...warningSet];
}
export function migrateSettingsToV1(
v2Settings: Record<string, unknown>,
): Record<string, unknown> {


@@ -10,7 +10,6 @@ import type {
TelemetrySettings,
AuthType,
ChatCompressionSettings,
ModelProvidersConfig,
} from '@qwen-code/qwen-code-core';
import {
ApprovalMode,
@@ -103,19 +102,6 @@ const SETTINGS_SCHEMA = {
mergeStrategy: MergeStrategy.SHALLOW_MERGE,
},
// Model providers configuration grouped by authType
modelProviders: {
type: 'object',
label: 'Model Providers',
category: 'Model',
requiresRestart: false,
default: {} as ModelProvidersConfig,
description:
'Model providers configuration grouped by authType. Each authType contains an array of model configurations.',
showInDialog: false,
mergeStrategy: MergeStrategy.REPLACE,
},
general: {
type: 'object',
label: 'General',


@@ -45,9 +45,7 @@ export async function initializeApp(
// Auto-detect and set LLM output language on first use
initializeLlmOutputLanguage();
// Use authType from modelsConfig which respects CLI --auth-type argument
// over settings.security.auth.selectedType
const authType = config.modelsConfig.getCurrentAuthType();
const authType = settings.merged.security?.auth?.selectedType;
const authError = await performInitialAuth(config, authType);
// Fallback to user select when initial authentication fails
@@ -61,7 +59,7 @@ export async function initializeApp(
const themeError = validateTheme(settings);
const shouldOpenAuthDialog =
!config.modelsConfig.wasAuthTypeExplicitlyProvided() || !!authError;
settings.merged.security?.auth?.selectedType === undefined || !!authError;
if (config.getIdeMode()) {
const ideClient = await IdeClient.getInstance();


@@ -87,15 +87,6 @@ vi.mock('./config/sandboxConfig.js', () => ({
loadSandboxConfig: vi.fn(),
}));
vi.mock('./core/initializer.js', () => ({
initializeApp: vi.fn().mockResolvedValue({
authError: null,
themeError: null,
shouldOpenAuthDialog: false,
geminiMdFileCount: 0,
}),
}));
describe('gemini.tsx main function', () => {
let originalEnvGeminiSandbox: string | undefined;
let originalEnvSandbox: string | undefined;
@@ -371,6 +362,7 @@ describe('gemini.tsx main function', () => {
expect(inputArg).toBe('hello stream');
expect(validateAuthSpy).toHaveBeenCalledWith(
undefined,
undefined,
configStub,
expect.any(Object),


@@ -4,7 +4,7 @@
* SPDX-License-Identifier: Apache-2.0
*/
import type { Config } from '@qwen-code/qwen-code-core';
import type { Config, AuthType } from '@qwen-code/qwen-code-core';
import { InputFormat, logUserPrompt } from '@qwen-code/qwen-code-core';
import { render } from 'ink';
import dns from 'node:dns';
@@ -17,11 +17,7 @@ import * as cliConfig from './config/config.js';
import { loadCliConfig, parseArguments } from './config/config.js';
import { ExtensionStorage, loadExtensions } from './config/extension.js';
import type { DnsResolutionOrder, LoadedSettings } from './config/settings.js';
import {
getSettingsWarnings,
loadSettings,
migrateDeprecatedSettings,
} from './config/settings.js';
import { loadSettings, migrateDeprecatedSettings } from './config/settings.js';
import {
initializeApp,
type InitializationResult,
@@ -256,20 +252,22 @@ export async function main() {
argv,
);
if (!settings.merged.security?.auth?.useExternal) {
if (
settings.merged.security?.auth?.selectedType &&
!settings.merged.security?.auth?.useExternal
) {
// Validate authentication here because the sandbox will interfere with the Oauth2 web redirect.
try {
const authType = partialConfig.modelsConfig.getCurrentAuthType();
// Fresh users may not have selected/persisted an authType yet.
// In that case, defer auth prompting/selection to the main interactive flow.
if (authType) {
const err = validateAuthMethod(authType, partialConfig);
if (err) {
throw new Error(err);
}
await partialConfig.refreshAuth(authType);
const err = validateAuthMethod(
settings.merged.security.auth.selectedType,
);
if (err) {
throw new Error(err);
}
await partialConfig.refreshAuth(
settings.merged.security.auth.selectedType,
);
} catch (err) {
console.error('Error authenticating:', err);
process.exit(1);
@@ -346,7 +344,6 @@ export async function main() {
extensionEnablementManager,
argv,
);
registerCleanup(() => config.shutdown());
if (config.getListExtensions()) {
console.log('Installed extensions:');
@@ -405,15 +402,12 @@ export async function main() {
let input = config.getQuestion();
const startupWarnings = [
...new Set([
...(await getStartupWarnings()),
...(await getUserStartupWarnings({
workspaceRoot: process.cwd(),
useRipgrep: settings.merged.tools?.useRipgrep ?? true,
useBuiltinRipgrep: settings.merged.tools?.useBuiltinRipgrep ?? true,
})),
...getSettingsWarnings(settings),
]),
...(await getStartupWarnings()),
...(await getUserStartupWarnings({
workspaceRoot: process.cwd(),
useRipgrep: settings.merged.tools?.useRipgrep ?? true,
useBuiltinRipgrep: settings.merged.tools?.useBuiltinRipgrep ?? true,
})),
];
// Render UI, passing necessary config values. Check that there is no command line question.
@@ -446,6 +440,8 @@ export async function main() {
}
const nonInteractiveConfig = await validateNonInteractiveAuth(
(argv.authType as AuthType) ||
settings.merged.security?.auth?.selectedType,
settings.merged.security?.auth?.useExternal,
config,
settings,


@@ -45,8 +45,7 @@ export default {
'Initializing...': 'Initialisierung...',
'Connecting to MCP servers... ({{connected}}/{{total}})':
'Verbindung zu MCP-Servern wird hergestellt... ({{connected}}/{{total}})',
'Type your message or @path/to/file':
'Nachricht eingeben oder @Pfad/zur/Datei',
'Type your message or @path/to/file': 'Nachricht eingeben oder @Pfad/zur/Datei',
"Press 'i' for INSERT mode and 'Esc' for NORMAL mode.":
"Drücken Sie 'i' für den EINFÜGE-Modus und 'Esc' für den NORMAL-Modus.",
'Cancel operation / Clear input (double press)':
@@ -90,8 +89,7 @@ export default {
'No tools available': 'Keine Werkzeuge verfügbar',
'View or change the approval mode for tool usage':
'Genehmigungsmodus für Werkzeugnutzung anzeigen oder ändern',
'View or change the language setting':
'Spracheinstellung anzeigen oder ändern',
'View or change the language setting': 'Spracheinstellung anzeigen oder ändern',
'change the theme': 'Design ändern',
'Select Theme': 'Design auswählen',
Preview: 'Vorschau',
@@ -215,16 +213,14 @@ export default {
'All Tools': 'Alle Werkzeuge',
'Read-only Tools': 'Nur-Lese-Werkzeuge',
'Read & Edit Tools': 'Lese- und Bearbeitungswerkzeuge',
'Read & Edit & Execution Tools':
'Lese-, Bearbeitungs- und Ausführungswerkzeuge',
'Read & Edit & Execution Tools': 'Lese-, Bearbeitungs- und Ausführungswerkzeuge',
'All tools selected, including MCP tools':
'Alle Werkzeuge ausgewählt, einschließlich MCP-Werkzeuge',
'Selected tools:': 'Ausgewählte Werkzeuge:',
'Read-only tools:': 'Nur-Lese-Werkzeuge:',
'Edit tools:': 'Bearbeitungswerkzeuge:',
'Execution tools:': 'Ausführungswerkzeuge:',
'Step {{n}}: Choose Background Color':
'Schritt {{n}}: Hintergrundfarbe wählen',
'Step {{n}}: Choose Background Color': 'Schritt {{n}}: Hintergrundfarbe wählen',
'Step {{n}}: Confirm and Save': 'Schritt {{n}}: Bestätigen und Speichern',
// Agents - Navigation & Instructions
'Esc to cancel': 'Esc zum Abbrechen',
@@ -249,16 +245,14 @@ export default {
'e.g., Reviews code for best practices and potential bugs.':
'z.B. Überprüft Code auf Best Practices und mögliche Fehler.',
'Description cannot be empty.': 'Beschreibung darf nicht leer sein.',
'Failed to launch editor: {{error}}':
'Fehler beim Starten des Editors: {{error}}',
'Failed to launch editor: {{error}}': 'Fehler beim Starten des Editors: {{error}}',
'Failed to save and edit subagent: {{error}}':
'Fehler beim Speichern und Bearbeiten des Unteragenten: {{error}}',
// ============================================================================
// Commands - General (continued)
// ============================================================================
'View and edit Qwen Code settings':
'Qwen Code Einstellungen anzeigen und bearbeiten',
'View and edit Qwen Code settings': 'Qwen Code Einstellungen anzeigen und bearbeiten',
Settings: 'Einstellungen',
'(Use Enter to select{{tabText}})': '(Enter zum Auswählen{{tabText}})',
', Tab to change focus': ', Tab zum Fokuswechsel',
@@ -314,8 +308,7 @@ export default {
'Use Ripgrep': 'Ripgrep verwenden',
'Use Builtin Ripgrep': 'Integriertes Ripgrep verwenden',
'Enable Tool Output Truncation': 'Werkzeugausgabe-Kürzung aktivieren',
'Tool Output Truncation Threshold':
'Schwellenwert für Werkzeugausgabe-Kürzung',
'Tool Output Truncation Threshold': 'Schwellenwert für Werkzeugausgabe-Kürzung',
'Tool Output Truncation Lines': 'Zeilen für Werkzeugausgabe-Kürzung',
'Folder Trust': 'Ordnervertrauen',
'Vision Model Preview': 'Vision-Modell-Vorschau',
@@ -371,8 +364,7 @@ export default {
'Failed to parse {{terminalName}} keybindings.json. The file contains invalid JSON. Please fix the file manually or delete it to allow automatic configuration.':
'Fehler beim Parsen von {{terminalName}} keybindings.json. Die Datei enthält ungültiges JSON. Bitte korrigieren Sie die Datei manuell oder löschen Sie sie, um automatische Konfiguration zu ermöglichen.',
'Error: {{error}}': 'Fehler: {{error}}',
'Shift+Enter binding already exists':
'Umschalt+Enter-Belegung existiert bereits',
'Shift+Enter binding already exists': 'Umschalt+Enter-Belegung existiert bereits',
'Ctrl+Enter binding already exists': 'Strg+Enter-Belegung existiert bereits',
'Existing keybindings detected. Will not modify to avoid conflicts.':
'Bestehende Tastenbelegungen erkannt. Keine Änderungen, um Konflikte zu vermeiden.',
@@ -406,8 +398,7 @@ export default {
'Set UI language': 'UI-Sprache festlegen',
'Set LLM output language': 'LLM-Ausgabesprache festlegen',
'Usage: /language ui [zh-CN|en-US]': 'Verwendung: /language ui [zh-CN|en-US]',
'Usage: /language output <language>':
'Verwendung: /language output <Sprache>',
'Usage: /language output <language>': 'Verwendung: /language output <Sprache>',
'Example: /language output 中文': 'Beispiel: /language output Deutsch',
'Example: /language output English': 'Beispiel: /language output English',
'Example: /language output 日本語': 'Beispiel: /language output Japanisch',
@@ -428,8 +419,7 @@ export default {
' - en-US: English': ' - en-US: Englisch',
'Set UI language to Simplified Chinese (zh-CN)':
'UI-Sprache auf Vereinfachtes Chinesisch (zh-CN) setzen',
'Set UI language to English (en-US)':
'UI-Sprache auf Englisch (en-US) setzen',
'Set UI language to English (en-US)': 'UI-Sprache auf Englisch (en-US) setzen',
// ============================================================================
// Commands - Approval Mode
@@ -437,8 +427,7 @@ export default {
'Approval Mode': 'Genehmigungsmodus',
'Current approval mode: {{mode}}': 'Aktueller Genehmigungsmodus: {{mode}}',
'Available approval modes:': 'Verfügbare Genehmigungsmodi:',
'Approval mode changed to: {{mode}}':
'Genehmigungsmodus geändert zu: {{mode}}',
'Approval mode changed to: {{mode}}': 'Genehmigungsmodus geändert zu: {{mode}}',
'Approval mode changed to: {{mode}} (saved to {{scope}} settings{{location}})':
'Genehmigungsmodus geändert zu: {{mode}} (gespeichert in {{scope}} Einstellungen{{location}})',
'Usage: /approval-mode <mode> [--session|--user|--project]':
@@ -463,16 +452,14 @@ export default {
'Fehler beim Ändern des Genehmigungsmodus: {{error}}',
'Apply to current session only (temporary)':
'Nur auf aktuelle Sitzung anwenden (temporär)',
'Persist for this project/workspace':
'Für dieses Projekt/Arbeitsbereich speichern',
'Persist for this project/workspace': 'Für dieses Projekt/Arbeitsbereich speichern',
'Persist for this user on this machine':
'Für diesen Benutzer auf diesem Computer speichern',
'Analyze only, do not modify files or execute commands':
'Nur analysieren, keine Dateien ändern oder Befehle ausführen',
'Require approval for file edits or shell commands':
'Genehmigung für Dateibearbeitungen oder Shell-Befehle erforderlich',
'Automatically approve file edits':
'Dateibearbeitungen automatisch genehmigen',
'Automatically approve file edits': 'Dateibearbeitungen automatisch genehmigen',
'Automatically approve all tools': 'Alle Werkzeuge automatisch genehmigen',
'Workspace approval mode exists and takes priority. User-level change will have no effect.':
'Arbeitsbereich-Genehmigungsmodus existiert und hat Vorrang. Benutzerebene-Änderung hat keine Wirkung.',
@@ -488,14 +475,12 @@ export default {
'Commands for interacting with memory.':
'Befehle für die Interaktion mit dem Speicher.',
'Show the current memory contents.': 'Aktuellen Speicherinhalt anzeigen.',
'Show project-level memory contents.':
'Projektebene-Speicherinhalt anzeigen.',
'Show project-level memory contents.': 'Projektebene-Speicherinhalt anzeigen.',
'Show global memory contents.': 'Globalen Speicherinhalt anzeigen.',
'Add content to project-level memory.':
'Inhalt zum Projektebene-Speicher hinzufügen.',
'Add content to global memory.': 'Inhalt zum globalen Speicher hinzufügen.',
'Refresh the memory from the source.':
'Speicher aus der Quelle aktualisieren.',
'Refresh the memory from the source.': 'Speicher aus der Quelle aktualisieren.',
'Usage: /memory add --project <text to remember>':
'Verwendung: /memory add --project <zu merkender Text>',
'Usage: /memory add --global <text to remember>':
@@ -535,8 +520,7 @@ export default {
'Konfigurierte MCP-Server und Werkzeuge auflisten',
'Restarts MCP servers.': 'MCP-Server neu starten.',
'Config not loaded.': 'Konfiguration nicht geladen.',
'Could not retrieve tool registry.':
'Werkzeugregister konnte nicht abgerufen werden.',
'Could not retrieve tool registry.': 'Werkzeugregister konnte nicht abgerufen werden.',
'No MCP servers configured with OAuth authentication.':
'Keine MCP-Server mit OAuth-Authentifizierung konfiguriert.',
'MCP servers with OAuth authentication:':
@@ -555,8 +539,7 @@ export default {
// Commands - Chat
// ============================================================================
'Manage conversation history.': 'Gesprächsverlauf verwalten.',
'List saved conversation checkpoints':
'Gespeicherte Gesprächsprüfpunkte auflisten',
'List saved conversation checkpoints': 'Gespeicherte Gesprächsprüfpunkte auflisten',
'No saved conversation checkpoints found.':
'Keine gespeicherten Gesprächsprüfpunkte gefunden.',
'List of saved conversations:': 'Liste gespeicherter Gespräche:',
@@ -606,8 +589,7 @@ export default {
'Kein Chat-Client verfügbar, um Zusammenfassung zu generieren.',
'Already generating summary, wait for previous request to complete':
'Zusammenfassung wird bereits generiert, warten Sie auf Abschluss der vorherigen Anfrage',
'No conversation found to summarize.':
'Kein Gespräch zum Zusammenfassen gefunden.',
'No conversation found to summarize.': 'Kein Gespräch zum Zusammenfassen gefunden.',
'Failed to generate project context summary: {{error}}':
'Fehler beim Generieren der Projektkontextzusammenfassung: {{error}}',
'Saved project summary to {{filePathForDisplay}}.':
@@ -623,8 +605,7 @@ export default {
'Switch the model for this session': 'Modell für diese Sitzung wechseln',
'Content generator configuration not available.':
'Inhaltsgenerator-Konfiguration nicht verfügbar.',
'Authentication type not available.':
'Authentifizierungstyp nicht verfügbar.',
'Authentication type not available.': 'Authentifizierungstyp nicht verfügbar.',
'No models available for the current authentication type ({{authType}}).':
'Keine Modelle für den aktuellen Authentifizierungstyp ({{authType}}) verfügbar.',
@@ -641,8 +622,7 @@ export default {
// ============================================================================
'Already compressing, wait for previous request to complete':
'Komprimierung läuft bereits, warten Sie auf Abschluss der vorherigen Anfrage',
'Failed to compress chat history.':
'Fehler beim Komprimieren des Chatverlaufs.',
'Failed to compress chat history.': 'Fehler beim Komprimieren des Chatverlaufs.',
'Failed to compress chat history: {{error}}':
'Fehler beim Komprimieren des Chatverlaufs: {{error}}',
'Compressing chat history': 'Chatverlauf wird komprimiert',
@@ -664,12 +644,10 @@ export default {
'Bitte geben Sie mindestens einen Pfad zum Hinzufügen an.',
'The /directory add command is not supported in restrictive sandbox profiles. Please use --include-directories when starting the session instead.':
'Der Befehl /directory add wird in restriktiven Sandbox-Profilen nicht unterstützt. Bitte verwenden Sie --include-directories beim Starten der Sitzung.',
"Error adding '{{path}}': {{error}}":
"Fehler beim Hinzufügen von '{{path}}': {{error}}",
"Error adding '{{path}}': {{error}}": "Fehler beim Hinzufügen von '{{path}}': {{error}}",
'Successfully added QWEN.md files from the following directories if there are:\n- {{directories}}':
'QWEN.md-Dateien aus folgenden Verzeichnissen erfolgreich hinzugefügt, falls vorhanden:\n- {{directories}}',
'Error refreshing memory: {{error}}':
'Fehler beim Aktualisieren des Speichers: {{error}}',
'Error refreshing memory: {{error}}': 'Fehler beim Aktualisieren des Speichers: {{error}}',
'Successfully added directories:\n- {{directories}}':
'Verzeichnisse erfolgreich hinzugefügt:\n- {{directories}}',
'Current workspace directories:\n{{directories}}':
@@ -699,8 +677,7 @@ export default {
'Yes, allow always': 'Ja, immer erlauben',
'Modify with external editor': 'Mit externem Editor bearbeiten',
'No, suggest changes (esc)': 'Nein, Änderungen vorschlagen (Esc)',
"Allow execution of: '{{command}}'?":
"Ausführung erlauben von: '{{command}}'?",
"Allow execution of: '{{command}}'?": "Ausführung erlauben von: '{{command}}'?",
'Yes, allow always ...': 'Ja, immer erlauben ...',
'Yes, and auto-accept edits': 'Ja, und Änderungen automatisch akzeptieren',
'Yes, and manually approve edits': 'Ja, und Änderungen manuell genehmigen',
@@ -772,14 +749,12 @@ export default {
'Qwen OAuth authentication cancelled.':
'Qwen OAuth-Authentifizierung abgebrochen.',
'Qwen OAuth Authentication': 'Qwen OAuth-Authentifizierung',
'Please visit this URL to authorize:':
'Bitte besuchen Sie diese URL zur Autorisierung:',
'Please visit this URL to authorize:': 'Bitte besuchen Sie diese URL zur Autorisierung:',
'Or scan the QR code below:': 'Oder scannen Sie den QR-Code unten:',
'Waiting for authorization': 'Warten auf Autorisierung',
'Time remaining:': 'Verbleibende Zeit:',
'(Press ESC or CTRL+C to cancel)': '(ESC oder STRG+C zum Abbrechen drücken)',
'Qwen OAuth Authentication Timeout':
'Qwen OAuth-Authentifizierung abgelaufen',
'Qwen OAuth Authentication Timeout': 'Qwen OAuth-Authentifizierung abgelaufen',
'OAuth token expired (over {{seconds}} seconds). Please select authentication method again.':
'OAuth-Token abgelaufen (über {{seconds}} Sekunden). Bitte wählen Sie erneut eine Authentifizierungsmethode.',
'Press any key to return to authentication type selection.':
@@ -792,22 +767,6 @@ export default {
'Authentifizierung abgelaufen. Bitte versuchen Sie es erneut.',
'Waiting for auth... (Press ESC or CTRL+C to cancel)':
'Warten auf Authentifizierung... (ESC oder STRG+C zum Abbrechen drücken)',
'Missing API key for OpenAI-compatible auth. Set settings.security.auth.apiKey, or set the {{envKeyHint}} environment variable.':
'API-Schlüssel für OpenAI-kompatible Authentifizierung fehlt. Setzen Sie settings.security.auth.apiKey oder die Umgebungsvariable {{envKeyHint}}.',
'{{envKeyHint}} environment variable not found.':
'Umgebungsvariable {{envKeyHint}} wurde nicht gefunden.',
'{{envKeyHint}} environment variable not found. Please set it in your .env file or environment variables.':
'Umgebungsvariable {{envKeyHint}} wurde nicht gefunden. Bitte legen Sie sie in Ihrer .env-Datei oder den Systemumgebungsvariablen fest.',
'{{envKeyHint}} environment variable not found (or set settings.security.auth.apiKey). Please set it in your .env file or environment variables.':
'Umgebungsvariable {{envKeyHint}} wurde nicht gefunden (oder setzen Sie settings.security.auth.apiKey). Bitte legen Sie sie in Ihrer .env-Datei oder den Systemumgebungsvariablen fest.',
'Missing API key for OpenAI-compatible auth. Set the {{envKeyHint}} environment variable.':
'API-Schlüssel für OpenAI-kompatible Authentifizierung fehlt. Setzen Sie die Umgebungsvariable {{envKeyHint}}.',
'Anthropic provider missing required baseUrl in modelProviders[].baseUrl.':
'Anthropic-Anbieter fehlt erforderliche baseUrl in modelProviders[].baseUrl.',
'ANTHROPIC_BASE_URL environment variable not found.':
'Umgebungsvariable ANTHROPIC_BASE_URL wurde nicht gefunden.',
'Invalid auth method selected.':
'Ungültige Authentifizierungsmethode ausgewählt.',
'Failed to authenticate. Message: {{message}}':
'Authentifizierung fehlgeschlagen. Meldung: {{message}}',
'Authenticated successfully with {{authType}} credentials.':
@@ -820,8 +779,7 @@ export default {
'API Key:': 'API-Schlüssel:',
'Invalid credentials: {{errorMessage}}':
'Ungültige Anmeldedaten: {{errorMessage}}',
'Failed to validate credentials':
'Anmeldedaten konnten nicht validiert werden',
'Failed to validate credentials': 'Anmeldedaten konnten nicht validiert werden',
'Press Enter to continue, Tab/↑↓ to navigate, Esc to cancel':
'Enter zum Fortfahren, Tab/↑↓ zum Navigieren, Esc zum Abbrechen',
@@ -830,15 +788,6 @@ export default {
// ============================================================================
'Select Model': 'Modell auswählen',
'(Press Esc to close)': '(Esc zum Schließen drücken)',
'Current (effective) configuration': 'Aktuelle (wirksame) Konfiguration',
AuthType: 'Authentifizierungstyp',
'API Key': 'API-Schlüssel',
unset: 'nicht gesetzt',
'(default)': '(Standard)',
'(set)': '(gesetzt)',
'(not set)': '(nicht gesetzt)',
"Failed to switch model to '{{modelId}}'.\n\n{{error}}":
"Modell konnte nicht auf '{{modelId}}' umgestellt werden.\n\n{{error}}",
'The latest Qwen Coder model from Alibaba Cloud ModelStudio (version: qwen3-coder-plus-2025-09-23)':
'Das neueste Qwen Coder Modell von Alibaba Cloud ModelStudio (Version: qwen3-coder-plus-2025-09-23)',
'The latest Qwen Vision model from Alibaba Cloud ModelStudio (version: qwen3-vl-plus-2025-09-23)':
@@ -928,10 +877,8 @@ export default {
// ============================================================================
// Exit Screen / Stats
// ============================================================================
'Agent powering down. Goodbye!':
'Agent wird heruntergefahren. Auf Wiedersehen!',
'To continue this session, run':
'Um diese Sitzung fortzusetzen, führen Sie aus',
'Agent powering down. Goodbye!': 'Agent wird heruntergefahren. Auf Wiedersehen!',
'To continue this session, run': 'Um diese Sitzung fortzusetzen, führen Sie aus',
'Interaction Summary': 'Interaktionszusammenfassung',
'Session ID:': 'Sitzungs-ID:',
'Tool Calls:': 'Werkzeugaufrufe:',
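The locale files in this part of the diff map English source strings, many of which contain `{{placeholder}}` tokens, to translated text; `t()` calls elsewhere in the diff interpolate runtime values into those tokens. Below is a minimal sketch of that lookup-and-interpolate pattern, assuming a flat key/value map per locale; the real implementation in `i18n/index.js` may differ.

```ts
// Minimal sketch of how the {{placeholder}} keys in these locale maps are consumed.
// Assumes a flat Record<string, string> per locale; the actual i18n module may differ.
type LocaleMap = Record<string, string>;

function translate(
  locale: LocaleMap,
  key: string,
  params: Record<string, string | number> = {},
): string {
  const template = locale[key] ?? key; // fall back to the English source key
  return template.replace(/\{\{(\w+)\}\}/g, (match, name) =>
    name in params ? String(params[name]) : match,
  );
}

// Example, mirroring a key from the German map above:
// translate(de, 'Approval mode changed to: {{mode}}', { mode: 'yolo' })
//   -> 'Genehmigungsmodus geändert zu: yolo'
```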

View File

@@ -89,9 +89,6 @@ export default {
'No tools available': 'No tools available',
'View or change the approval mode for tool usage':
'View or change the approval mode for tool usage',
'Invalid approval mode "{{arg}}". Valid modes: {{modes}}':
'Invalid approval mode "{{arg}}". Valid modes: {{modes}}',
'Approval mode set to "{{mode}}"': 'Approval mode set to "{{mode}}"',
'View or change the language setting': 'View or change the language setting',
'change the theme': 'change the theme',
'Select Theme': 'Select Theme',
@@ -770,21 +767,6 @@ export default {
'Authentication timed out. Please try again.',
'Waiting for auth... (Press ESC or CTRL+C to cancel)':
'Waiting for auth... (Press ESC or CTRL+C to cancel)',
'Missing API key for OpenAI-compatible auth. Set settings.security.auth.apiKey, or set the {{envKeyHint}} environment variable.':
'Missing API key for OpenAI-compatible auth. Set settings.security.auth.apiKey, or set the {{envKeyHint}} environment variable.',
'{{envKeyHint}} environment variable not found.':
'{{envKeyHint}} environment variable not found.',
'{{envKeyHint}} environment variable not found. Please set it in your .env file or environment variables.':
'{{envKeyHint}} environment variable not found. Please set it in your .env file or environment variables.',
'{{envKeyHint}} environment variable not found (or set settings.security.auth.apiKey). Please set it in your .env file or environment variables.':
'{{envKeyHint}} environment variable not found (or set settings.security.auth.apiKey). Please set it in your .env file or environment variables.',
'Missing API key for OpenAI-compatible auth. Set the {{envKeyHint}} environment variable.':
'Missing API key for OpenAI-compatible auth. Set the {{envKeyHint}} environment variable.',
'Anthropic provider missing required baseUrl in modelProviders[].baseUrl.':
'Anthropic provider missing required baseUrl in modelProviders[].baseUrl.',
'ANTHROPIC_BASE_URL environment variable not found.':
'ANTHROPIC_BASE_URL environment variable not found.',
'Invalid auth method selected.': 'Invalid auth method selected.',
'Failed to authenticate. Message: {{message}}':
'Failed to authenticate. Message: {{message}}',
'Authenticated successfully with {{authType}} credentials.':
@@ -806,15 +788,6 @@ export default {
// ============================================================================
'Select Model': 'Select Model',
'(Press Esc to close)': '(Press Esc to close)',
'Current (effective) configuration': 'Current (effective) configuration',
AuthType: 'AuthType',
'API Key': 'API Key',
unset: 'unset',
'(default)': '(default)',
'(set)': '(set)',
'(not set)': '(not set)',
"Failed to switch model to '{{modelId}}'.\n\n{{error}}":
"Failed to switch model to '{{modelId}}'.\n\n{{error}}",
'The latest Qwen Coder model from Alibaba Cloud ModelStudio (version: qwen3-coder-plus-2025-09-23)':
'The latest Qwen Coder model from Alibaba Cloud ModelStudio (version: qwen3-coder-plus-2025-09-23)',
'The latest Qwen Vision model from Alibaba Cloud ModelStudio (version: qwen3-vl-plus-2025-09-23)':

View File

@@ -89,10 +89,6 @@ export default {
'No tools available': 'Нет доступных инструментов',
'View or change the approval mode for tool usage':
'Просмотр или изменение режима подтверждения для использования инструментов',
'Invalid approval mode "{{arg}}". Valid modes: {{modes}}':
'Недопустимый режим подтверждения "{{arg}}". Допустимые режимы: {{modes}}',
'Approval mode set to "{{mode}}"':
'Режим подтверждения установлен на "{{mode}}"',
'View or change the language setting':
'Просмотр или изменение настроек языка',
'change the theme': 'Изменение темы',
@@ -786,21 +782,6 @@ export default {
'Время ожидания авторизации истекло. Пожалуйста, попробуйте снова.',
'Waiting for auth... (Press ESC or CTRL+C to cancel)':
'Ожидание авторизации... (Нажмите ESC или CTRL+C для отмены)',
'Missing API key for OpenAI-compatible auth. Set settings.security.auth.apiKey, or set the {{envKeyHint}} environment variable.':
'Отсутствует API-ключ для аутентификации, совместимой с OpenAI. Укажите settings.security.auth.apiKey или переменную окружения {{envKeyHint}}.',
'{{envKeyHint}} environment variable not found.':
'Переменная окружения {{envKeyHint}} не найдена.',
'{{envKeyHint}} environment variable not found. Please set it in your .env file or environment variables.':
'Переменная окружения {{envKeyHint}} не найдена. Укажите её в файле .env или среди системных переменных.',
'{{envKeyHint}} environment variable not found (or set settings.security.auth.apiKey). Please set it in your .env file or environment variables.':
'Переменная окружения {{envKeyHint}} не найдена (или установите settings.security.auth.apiKey). Укажите её в файле .env или среди системных переменных.',
'Missing API key for OpenAI-compatible auth. Set the {{envKeyHint}} environment variable.':
'Отсутствует API-ключ для аутентификации, совместимой с OpenAI. Установите переменную окружения {{envKeyHint}}.',
'Anthropic provider missing required baseUrl in modelProviders[].baseUrl.':
'У провайдера Anthropic отсутствует обязательный baseUrl в modelProviders[].baseUrl.',
'ANTHROPIC_BASE_URL environment variable not found.':
'Переменная окружения ANTHROPIC_BASE_URL не найдена.',
'Invalid auth method selected.': 'Выбран недопустимый метод авторизации.',
'Failed to authenticate. Message: {{message}}':
'Не удалось авторизоваться. Сообщение: {{message}}',
'Authenticated successfully with {{authType}} credentials.':
@@ -822,15 +803,6 @@ export default {
// ============================================================================
'Select Model': 'Выбрать модель',
'(Press Esc to close)': '(Нажмите Esc для закрытия)',
'Current (effective) configuration': 'Текущая (фактическая) конфигурация',
AuthType: 'Тип авторизации',
'API Key': 'API-ключ',
unset: 'не задано',
'(default)': '(по умолчанию)',
'(set)': '(установлено)',
'(not set)': '(не задано)',
"Failed to switch model to '{{modelId}}'.\n\n{{error}}":
"Не удалось переключиться на модель '{{modelId}}'.\n\n{{error}}",
'The latest Qwen Coder model from Alibaba Cloud ModelStudio (version: qwen3-coder-plus-2025-09-23)':
'Последняя модель Qwen Coder от Alibaba Cloud ModelStudio (версия: qwen3-coder-plus-2025-09-23)',
'The latest Qwen Vision model from Alibaba Cloud ModelStudio (version: qwen3-vl-plus-2025-09-23)':

View File

@@ -88,9 +88,6 @@ export default {
'No tools available': '没有可用工具',
'View or change the approval mode for tool usage':
'查看或更改工具使用的审批模式',
'Invalid approval mode "{{arg}}". Valid modes: {{modes}}':
'无效的审批模式 "{{arg}}"。有效模式:{{modes}}',
'Approval mode set to "{{mode}}"': '审批模式已设置为 "{{mode}}"',
'View or change the language setting': '查看或更改语言设置',
'change the theme': '更改主题',
'Select Theme': '选择主题',
@@ -728,21 +725,6 @@ export default {
'Authentication timed out. Please try again.': '认证超时。请重试。',
'Waiting for auth... (Press ESC or CTRL+C to cancel)':
'正在等待认证...(按 ESC 或 CTRL+C 取消)',
'Missing API key for OpenAI-compatible auth. Set settings.security.auth.apiKey, or set the {{envKeyHint}} environment variable.':
'缺少 OpenAI 兼容认证的 API 密钥。请设置 settings.security.auth.apiKey 或设置 {{envKeyHint}} 环境变量。',
'{{envKeyHint}} environment variable not found.':
'未找到 {{envKeyHint}} 环境变量。',
'{{envKeyHint}} environment variable not found. Please set it in your .env file or environment variables.':
'未找到 {{envKeyHint}} 环境变量。请在 .env 文件或系统环境变量中进行设置。',
'{{envKeyHint}} environment variable not found (or set settings.security.auth.apiKey). Please set it in your .env file or environment variables.':
'未找到 {{envKeyHint}} 环境变量(或设置 settings.security.auth.apiKey。请在 .env 文件或系统环境变量中进行设置。',
'Missing API key for OpenAI-compatible auth. Set the {{envKeyHint}} environment variable.':
'缺少 OpenAI 兼容认证的 API 密钥。请设置 {{envKeyHint}} 环境变量。',
'Anthropic provider missing required baseUrl in modelProviders[].baseUrl.':
'Anthropic 提供商缺少必需的 baseUrl请在 modelProviders[].baseUrl 中配置。',
'ANTHROPIC_BASE_URL environment variable not found.':
'未找到 ANTHROPIC_BASE_URL 环境变量。',
'Invalid auth method selected.': '选择了无效的认证方式。',
'Failed to authenticate. Message: {{message}}': '认证失败。消息:{{message}}',
'Authenticated successfully with {{authType}} credentials.':
'使用 {{authType}} 凭据成功认证。',
@@ -762,15 +744,6 @@ export default {
// ============================================================================
'Select Model': '选择模型',
'(Press Esc to close)': '(按 Esc 关闭)',
'Current (effective) configuration': '当前(实际生效)配置',
AuthType: '认证方式',
'API Key': 'API 密钥',
unset: '未设置',
'(default)': '(默认)',
'(set)': '(已设置)',
'(not set)': '(未设置)',
"Failed to switch model to '{{modelId}}'.\n\n{{error}}":
"无法切换到模型 '{{modelId}}'.\n\n{{error}}",
'The latest Qwen Coder model from Alibaba Cloud ModelStudio (version: qwen3-coder-plus-2025-09-23)':
'来自阿里云 ModelStudio 的最新 Qwen Coder 模型版本qwen3-coder-plus-2025-09-23',
'The latest Qwen Vision model from Alibaba Cloud ModelStudio (version: qwen3-vl-plus-2025-09-23)':

View File

@@ -31,7 +31,6 @@ import { quitCommand } from '../ui/commands/quitCommand.js';
import { restoreCommand } from '../ui/commands/restoreCommand.js';
import { resumeCommand } from '../ui/commands/resumeCommand.js';
import { settingsCommand } from '../ui/commands/settingsCommand.js';
import { skillsCommand } from '../ui/commands/skillsCommand.js';
import { statsCommand } from '../ui/commands/statsCommand.js';
import { summaryCommand } from '../ui/commands/summaryCommand.js';
import { terminalSetupCommand } from '../ui/commands/terminalSetupCommand.js';
@@ -79,7 +78,6 @@ export class BuiltinCommandLoader implements ICommandLoader {
quitCommand,
restoreCommand(this.config),
resumeCommand,
...(this.config?.getExperimentalSkills?.() ? [skillsCommand] : []),
statsCommand,
summaryCommand,
themeCommand,

View File

@@ -72,7 +72,6 @@ describe('ShellProcessor', () => {
getApprovalMode: vi.fn().mockReturnValue(ApprovalMode.DEFAULT),
getShouldUseNodePtyShell: vi.fn().mockReturnValue(false),
getShellExecutionConfig: vi.fn().mockReturnValue({}),
getAllowedTools: vi.fn().mockReturnValue([]),
};
context = createMockCommandContext({
@@ -197,35 +196,6 @@ describe('ShellProcessor', () => {
);
});
it('should NOT throw ConfirmationRequiredError when a command matches allowedTools', async () => {
const processor = new ShellProcessor('test-command');
const prompt: PromptPipelineContent = createPromptPipelineContent(
'Do something dangerous: !{rm -rf /}',
);
mockCheckCommandPermissions.mockReturnValue({
allAllowed: false,
disallowedCommands: ['rm -rf /'],
});
(mockConfig.getAllowedTools as Mock).mockReturnValue([
'ShellTool(rm -rf /)',
]);
mockShellExecute.mockReturnValue({
result: Promise.resolve({ ...SUCCESS_RESULT, output: 'deleted' }),
});
const result = await processor.process(prompt, context);
expect(mockShellExecute).toHaveBeenCalledWith(
'rm -rf /',
expect.any(String),
expect.any(Function),
expect.any(Object),
false,
expect.any(Object),
);
expect(result).toEqual([{ text: 'Do something dangerous: deleted' }]);
});
it('should NOT throw ConfirmationRequiredError if a command is not allowed but approval mode is YOLO', async () => {
const processor = new ShellProcessor('test-command');
const prompt: PromptPipelineContent = createPromptPipelineContent(

View File

@@ -7,13 +7,11 @@
import {
ApprovalMode,
checkCommandPermissions,
doesToolInvocationMatch,
escapeShellArg,
getShellConfiguration,
ShellExecutionService,
flatMapTextParts,
} from '@qwen-code/qwen-code-core';
import type { AnyToolInvocation } from '@qwen-code/qwen-code-core';
import type { CommandContext } from '../../ui/commands/types.js';
import type { IPromptProcessor, PromptPipelineContent } from './types.js';
@@ -126,15 +124,6 @@ export class ShellProcessor implements IPromptProcessor {
// Security check on the final, escaped command string.
const { allAllowed, disallowedCommands, blockReason, isHardDenial } =
checkCommandPermissions(command, config, sessionShellAllowlist);
const allowedTools = config.getAllowedTools() || [];
const invocation = {
params: { command },
} as AnyToolInvocation;
const isAllowedBySettings = doesToolInvocationMatch(
'run_shell_command',
invocation,
allowedTools,
);
if (!allAllowed) {
if (isHardDenial) {
@@ -143,17 +132,10 @@ export class ShellProcessor implements IPromptProcessor {
);
}
// If the command is allowed by settings, skip confirmation.
if (isAllowedBySettings) {
continue;
}
// If not a hard denial, respect YOLO mode and auto-approve.
if (config.getApprovalMode() === ApprovalMode.YOLO) {
continue;
if (config.getApprovalMode() !== ApprovalMode.YOLO) {
disallowedCommands.forEach((uc) => commandsToConfirm.add(uc));
}
disallowedCommands.forEach((uc) => commandsToConfirm.add(uc));
}
}
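The hunk above changes the gate that decides whether a `!{...}` shell injection in a custom command needs interactive confirmation: one variant consults the `allowedTools` setting via `doesToolInvocationMatch('run_shell_command', ...)` before considering YOLO mode, the other checks YOLO mode only. A standalone sketch of the allow-list variant follows; the `PermissionCheck` shape is assumed from the destructuring of `checkCommandPermissions` above, and the function name is illustrative.

```ts
// Sketch of the allow-list gate touched by this hunk. doesToolInvocationMatch is
// called with the same signature as in the hunk; PermissionCheck is an assumed shape.
import {
  ApprovalMode,
  doesToolInvocationMatch,
  type AnyToolInvocation,
} from '@qwen-code/qwen-code-core';

interface PermissionCheck {
  allAllowed: boolean;
  isHardDenial: boolean;
  disallowedCommands: string[];
}

function commandsNeedingConfirmation(
  command: string,
  check: PermissionCheck,
  approvalMode: ApprovalMode,
  allowedTools: string[],
): string[] {
  if (check.allAllowed) return [];
  if (check.isHardDenial) {
    // The real processor raises a hard error here rather than asking for confirmation.
    throw new Error(`Blocked command: ${command}`);
  }
  const invocation = { params: { command } } as AnyToolInvocation;
  // Settings entries such as 'ShellTool(rm -rf /)' pre-approve a specific invocation.
  if (doesToolInvocationMatch('run_shell_command', invocation, allowedTools)) {
    return [];
  }
  if (approvalMode === ApprovalMode.YOLO) return [];
  return check.disallowedCommands;
}
```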

View File

@@ -6,12 +6,10 @@
import { render } from 'ink-testing-library';
import type React from 'react';
import type { Config } from '@qwen-code/qwen-code-core';
import { LoadedSettings } from '../config/settings.js';
import { KeypressProvider } from '../ui/contexts/KeypressContext.js';
import { SettingsContext } from '../ui/contexts/SettingsContext.js';
import { ShellFocusContext } from '../ui/contexts/ShellFocusContext.js';
import { ConfigContext } from '../ui/contexts/ConfigContext.js';
const mockSettings = new LoadedSettings(
{ path: '', settings: {}, originalSettings: {} },
@@ -24,24 +22,14 @@ const mockSettings = new LoadedSettings(
export const renderWithProviders = (
component: React.ReactElement,
{
shellFocus = true,
settings = mockSettings,
config = undefined,
}: {
shellFocus?: boolean;
settings?: LoadedSettings;
config?: Config;
} = {},
{ shellFocus = true, settings = mockSettings } = {},
): ReturnType<typeof render> =>
render(
<SettingsContext.Provider value={settings}>
<ConfigContext.Provider value={config}>
<ShellFocusContext.Provider value={shellFocus}>
<KeypressProvider kittyProtocolEnabled={true}>
{component}
</KeypressProvider>
</ShellFocusContext.Provider>
</ConfigContext.Provider>
<ShellFocusContext.Provider value={shellFocus}>
<KeypressProvider kittyProtocolEnabled={true}>
{component}
</KeypressProvider>
</ShellFocusContext.Provider>
</SettingsContext.Provider>,
);
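One side of this hunk gives the test helper an optional `config` that is provided through `ConfigContext`. A hedged usage sketch for that variant, with a hypothetical test and an illustrative `Config` stub (only the methods a component actually reads need to be mocked):

```tsx
// Hypothetical test showing the renderWithProviders variant that accepts `config`.
import { Text } from 'ink';
import { expect, it, vi } from 'vitest';
import type { Config } from '@qwen-code/qwen-code-core';
import { renderWithProviders } from '../test-utils/render.js';

it('renders with an injected Config', () => {
  // Illustrative stub; real tests only mock what the component under test calls.
  const mockConfig = { getModel: vi.fn(() => 'qwen3-coder-plus') } as unknown as Config;

  const { lastFrame, unmount } = renderWithProviders(<Text>hello</Text>, {
    config: mockConfig,
    shellFocus: false,
  });

  expect(lastFrame()).toContain('hello');
  unmount();
});
```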

View File

@@ -32,6 +32,7 @@ import {
type Config,
type IdeInfo,
type IdeContext,
DEFAULT_GEMINI_FLASH_MODEL,
IdeClient,
ideContextStore,
getErrorMessage,
@@ -179,10 +180,15 @@ export const AppContainer = (props: AppContainerProps) => {
[],
);
// Helper to determine the current model (polled, since Config has no model-change event).
const getCurrentModel = useCallback(() => config.getModel(), [config]);
// Helper to determine the effective model, considering the fallback state.
const getEffectiveModel = useCallback(() => {
if (config.isInFallbackMode()) {
return DEFAULT_GEMINI_FLASH_MODEL;
}
return config.getModel();
}, [config]);
const [currentModel, setCurrentModel] = useState(getCurrentModel());
const [currentModel, setCurrentModel] = useState(getEffectiveModel());
const [isConfigInitialized, setConfigInitialized] = useState(false);
@@ -235,12 +241,12 @@ export const AppContainer = (props: AppContainerProps) => {
[historyManager.addItem],
);
// Watch for model changes (e.g., user switches model via /model)
// Watch for model changes (e.g., from Flash fallback)
useEffect(() => {
const checkModelChange = () => {
const model = getCurrentModel();
if (model !== currentModel) {
setCurrentModel(model);
const effectiveModel = getEffectiveModel();
if (effectiveModel !== currentModel) {
setCurrentModel(effectiveModel);
}
};
@@ -248,7 +254,7 @@ export const AppContainer = (props: AppContainerProps) => {
const interval = setInterval(checkModelChange, 1000); // Check every second
return () => clearInterval(interval);
}, [config, currentModel, getCurrentModel]);
}, [config, currentModel, getEffectiveModel]);
const {
consoleMessages,
@@ -370,36 +376,37 @@ export const AppContainer = (props: AppContainerProps) => {
// Check for enforced auth type mismatch
useEffect(() => {
// Check for initialization error first
const currentAuthType = config.modelsConfig.getCurrentAuthType();
if (
settings.merged.security?.auth?.enforcedType &&
currentAuthType &&
settings.merged.security?.auth.enforcedType !== currentAuthType
settings.merged.security?.auth.selectedType &&
settings.merged.security?.auth.enforcedType !==
settings.merged.security?.auth.selectedType
) {
onAuthError(
t(
'Authentication is enforced to be {{enforcedType}}, but you are currently using {{currentType}}.',
{
enforcedType: String(settings.merged.security?.auth.enforcedType),
currentType: String(currentAuthType),
enforcedType: settings.merged.security?.auth.enforcedType,
currentType: settings.merged.security?.auth.selectedType,
},
),
);
} else if (!settings.merged.security?.auth?.useExternal) {
// If no authType is selected yet, allow the auth UI flow to prompt the user.
// Only validate credentials once a concrete authType exists.
if (currentAuthType) {
const error = validateAuthMethod(currentAuthType, config);
if (error) {
onAuthError(error);
}
} else if (
settings.merged.security?.auth?.selectedType &&
!settings.merged.security?.auth?.useExternal
) {
const error = validateAuthMethod(
settings.merged.security.auth.selectedType,
);
if (error) {
onAuthError(error);
}
}
}, [
settings.merged.security?.auth?.selectedType,
settings.merged.security?.auth?.enforcedType,
settings.merged.security?.auth?.useExternal,
config,
onAuthError,
]);
@@ -918,12 +925,7 @@ export const AppContainer = (props: AppContainerProps) => {
const handleIdePromptComplete = useCallback(
(result: IdeIntegrationNudgeResult) => {
if (result.userSelection === 'yes') {
// Check whether the extension has been pre-installed
if (result.isExtensionPreInstalled) {
handleSlashCommand('/ide enable');
} else {
handleSlashCommand('/ide install');
}
handleSlashCommand('/ide install');
settings.setValue(SettingScope.User, 'ide.hasSeenNudge', true);
} else if (result.userSelection === 'dismiss') {
settings.setValue(SettingScope.User, 'ide.hasSeenNudge', true);
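Because `Config` exposes no model-change event, `AppContainer` polls it once per second; the two sides of the hunk differ only in whether the polled value is the raw `config.getModel()` or an effective model that falls back to `DEFAULT_GEMINI_FLASH_MODEL` while fallback mode is active. A condensed sketch of the polling pattern as a custom hook (the hook name is illustrative):

```ts
// Condensed sketch of the model-polling pattern from AppContainer. It uses the same
// Config accessors that appear in the hunk; only the packaging as a hook is new.
import { useCallback, useEffect, useState } from 'react';
import {
  DEFAULT_GEMINI_FLASH_MODEL,
  type Config,
} from '@qwen-code/qwen-code-core';

export function usePolledModel(config: Config, intervalMs = 1000): string {
  const getEffectiveModel = useCallback(
    () =>
      config.isInFallbackMode() ? DEFAULT_GEMINI_FLASH_MODEL : config.getModel(),
    [config],
  );

  const [model, setModel] = useState(getEffectiveModel());

  useEffect(() => {
    // No model-change event exists, so poll and only update state when the value changes.
    const id = setInterval(() => {
      const next = getEffectiveModel();
      setModel((prev) => (prev === next ? prev : next));
    }, intervalMs);
    return () => clearInterval(id);
  }, [getEffectiveModel, intervalMs]);

  return model;
}
```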

View File

@@ -38,7 +38,6 @@ export function IdeIntegrationNudge({
);
const { displayName: ideName } = ide;
const isInSandbox = !!process.env['SANDBOX'];
// Assume extension is already installed if the env variables are set.
const isExtensionPreInstalled =
!!process.env['QWEN_CODE_IDE_SERVER_PORT'] &&
@@ -71,15 +70,13 @@ export function IdeIntegrationNudge({
},
];
const installText = isInSandbox
? `Note: In sandbox environments, IDE integration requires manual setup on the host system. If you select Yes, you'll receive instructions on how to set this up.`
: isExtensionPreInstalled
? `If you select Yes, the CLI will connect to your ${
ideName ?? 'editor'
} and have access to your open files and display diffs directly.`
: `If you select Yes, we'll install an extension that allows the CLI to access your open files and display diffs directly in ${
ideName ?? 'your editor'
}.`;
const installText = isExtensionPreInstalled
? `If you select Yes, the CLI will have access to your open files and display diffs directly in ${
ideName ?? 'your editor'
}.`
: `If you select Yes, we'll install an extension that allows the CLI to access your open files and display diffs directly in ${
ideName ?? 'your editor'
}.`;
return (
<Box

View File

@@ -6,8 +6,7 @@
import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
import { AuthDialog } from './AuthDialog.js';
import { LoadedSettings } from '../../config/settings.js';
import type { Config } from '@qwen-code/qwen-code-core';
import { LoadedSettings, SettingScope } from '../../config/settings.js';
import { AuthType } from '@qwen-code/qwen-code-core';
import { renderWithProviders } from '../../test-utils/render.js';
import { UIStateContext } from '../contexts/UIStateContext.js';
@@ -44,24 +43,17 @@ const renderAuthDialog = (
settings: LoadedSettings,
uiStateOverrides: Partial<UIState> = {},
uiActionsOverrides: Partial<UIActions> = {},
configAuthType: AuthType | undefined = undefined,
configApiKey: string | undefined = undefined,
) => {
const uiState = createMockUIState(uiStateOverrides);
const uiActions = createMockUIActions(uiActionsOverrides);
const mockConfig = {
getAuthType: vi.fn(() => configAuthType),
getContentGeneratorConfig: vi.fn(() => ({ apiKey: configApiKey })),
} as unknown as Config;
return renderWithProviders(
<UIStateContext.Provider value={uiState}>
<UIActionsContext.Provider value={uiActions}>
<AuthDialog />
</UIActionsContext.Provider>
</UIStateContext.Provider>,
{ settings, config: mockConfig },
{ settings },
);
};
@@ -429,7 +421,6 @@ describe('AuthDialog', () => {
settings,
{},
{ handleAuthSelect },
undefined, // config.getAuthType() returns undefined
);
await wait();
@@ -484,7 +475,6 @@ describe('AuthDialog', () => {
settings,
{ authError: 'Initial error' },
{ handleAuthSelect },
undefined, // config.getAuthType() returns undefined
);
await wait();
@@ -538,7 +528,6 @@ describe('AuthDialog', () => {
settings,
{},
{ handleAuthSelect },
AuthType.USE_OPENAI, // config.getAuthType() returns USE_OPENAI
);
await wait();
@@ -547,7 +536,7 @@ describe('AuthDialog', () => {
await wait();
// Should call handleAuthSelect with undefined to exit
expect(handleAuthSelect).toHaveBeenCalledWith(undefined);
expect(handleAuthSelect).toHaveBeenCalledWith(undefined, SettingScope.User);
unmount();
});
});

View File

@@ -8,12 +8,13 @@ import type React from 'react';
import { useState } from 'react';
import { AuthType } from '@qwen-code/qwen-code-core';
import { Box, Text } from 'ink';
import { SettingScope } from '../../config/settings.js';
import { Colors } from '../colors.js';
import { useKeypress } from '../hooks/useKeypress.js';
import { RadioButtonSelect } from '../components/shared/RadioButtonSelect.js';
import { useUIState } from '../contexts/UIStateContext.js';
import { useUIActions } from '../contexts/UIActionsContext.js';
import { useConfig } from '../contexts/ConfigContext.js';
import { useSettings } from '../contexts/SettingsContext.js';
import { t } from '../../i18n/index.js';
function parseDefaultAuthType(
@@ -31,7 +32,7 @@ function parseDefaultAuthType(
export function AuthDialog(): React.JSX.Element {
const { pendingAuthType, authError } = useUIState();
const { handleAuthSelect: onAuthSelect } = useUIActions();
const config = useConfig();
const settings = useSettings();
const [errorMessage, setErrorMessage] = useState<string | null>(null);
const [selectedIndex, setSelectedIndex] = useState<number | null>(null);
@@ -57,10 +58,9 @@ export function AuthDialog(): React.JSX.Element {
return item.value === pendingAuthType;
}
// Priority 2: config.getAuthType() - the source of truth
const currentAuthType = config.getAuthType();
if (currentAuthType) {
return item.value === currentAuthType;
// Priority 2: settings.merged.security?.auth?.selectedType
if (settings.merged.security?.auth?.selectedType) {
return item.value === settings.merged.security?.auth?.selectedType;
}
// Priority 3: QWEN_DEFAULT_AUTH_TYPE env var
@@ -76,7 +76,7 @@ export function AuthDialog(): React.JSX.Element {
}),
);
const hasApiKey = Boolean(config.getContentGeneratorConfig()?.apiKey);
const hasApiKey = Boolean(settings.merged.security?.auth?.apiKey);
const currentSelectedAuthType =
selectedIndex !== null
? items[selectedIndex]?.value
@@ -84,7 +84,7 @@ export function AuthDialog(): React.JSX.Element {
const handleAuthSelect = async (authMethod: AuthType) => {
setErrorMessage(null);
await onAuthSelect(authMethod);
await onAuthSelect(authMethod, SettingScope.User);
};
const handleHighlight = (authMethod: AuthType) => {
@@ -100,7 +100,7 @@ export function AuthDialog(): React.JSX.Element {
if (errorMessage) {
return;
}
if (config.getAuthType() === undefined) {
if (settings.merged.security?.auth?.selectedType === undefined) {
// Prevent exiting if no auth method is set
setErrorMessage(
t(
@@ -109,7 +109,7 @@ export function AuthDialog(): React.JSX.Element {
);
return;
}
onAuthSelect(undefined);
onAuthSelect(undefined, SettingScope.User);
}
},
{ isActive: true },
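Both versions of `AuthDialog` pick the initially highlighted auth method by the same priority chain: a selection already in flight (`pendingAuthType`), then the currently configured auth type (read from `config.getAuthType()` on one side and from `settings.merged.security?.auth?.selectedType` on the other), then the `QWEN_DEFAULT_AUTH_TYPE` environment variable. A sketch of that resolution follows; the body of `parseDefaultAuthType` and the final fallback to `AuthType.QWEN_OAUTH` are assumptions, since the hunk truncates those parts.

```ts
// Sketch of AuthDialog's initial-selection priority. parseDefaultAuthType's body and
// the final QWEN_OAUTH fallback are assumptions; the component may default differently.
import { AuthType } from '@qwen-code/qwen-code-core';

function parseDefaultAuthType(value: string | undefined): AuthType | undefined {
  return value && Object.values(AuthType).includes(value as AuthType)
    ? (value as AuthType)
    : undefined;
}

function resolveInitialAuthType(
  pendingAuthType: AuthType | undefined,
  configuredAuthType: AuthType | undefined,
): AuthType {
  return (
    pendingAuthType ?? // 1. a selection already in progress
    configuredAuthType ?? // 2. the configured/persisted auth type
    parseDefaultAuthType(process.env['QWEN_DEFAULT_AUTH_TYPE']) ?? // 3. env default
    AuthType.QWEN_OAUTH // assumed fallback
  );
}
```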

View File

@@ -4,16 +4,16 @@
* SPDX-License-Identifier: Apache-2.0
*/
import type { Config, ModelProvidersConfig } from '@qwen-code/qwen-code-core';
import type { Config } from '@qwen-code/qwen-code-core';
import {
AuthEvent,
AuthType,
clearCachedCredentialFile,
getErrorMessage,
logAuth,
} from '@qwen-code/qwen-code-core';
import { useCallback, useEffect, useState } from 'react';
import type { LoadedSettings } from '../../config/settings.js';
import { getPersistScopeForModelSelection } from '../../config/modelProvidersScope.js';
import type { LoadedSettings, SettingScope } from '../../config/settings.js';
import type { OpenAICredentials } from '../components/OpenAIKeyPrompt.js';
import { useQwenAuth } from '../hooks/useQwenAuth.js';
import { AuthState, MessageType } from '../types.js';
@@ -27,7 +27,8 @@ export const useAuthCommand = (
config: Config,
addItem: (item: Omit<HistoryItem, 'id'>, timestamp: number) => void,
) => {
const unAuthenticated = config.getAuthType() === undefined;
const unAuthenticated =
settings.merged.security?.auth?.selectedType === undefined;
const [authState, setAuthState] = useState<AuthState>(
unAuthenticated ? AuthState.Updating : AuthState.Unauthenticated,
@@ -80,35 +81,35 @@ export const useAuthCommand = (
);
const handleAuthSuccess = useCallback(
async (authType: AuthType, credentials?: OpenAICredentials) => {
async (
authType: AuthType,
scope: SettingScope,
credentials?: OpenAICredentials,
) => {
try {
const authTypeScope = getPersistScopeForModelSelection(settings);
settings.setValue(
authTypeScope,
'security.auth.selectedType',
authType,
);
settings.setValue(scope, 'security.auth.selectedType', authType);
// Only update credentials if not switching to QWEN_OAUTH,
// so that OpenAI credentials are preserved when switching to QWEN_OAUTH.
if (authType !== AuthType.QWEN_OAUTH && credentials) {
if (credentials?.apiKey != null) {
settings.setValue(
authTypeScope,
scope,
'security.auth.apiKey',
credentials.apiKey,
);
}
if (credentials?.baseUrl != null) {
settings.setValue(
authTypeScope,
scope,
'security.auth.baseUrl',
credentials.baseUrl,
);
}
if (credentials?.model != null) {
settings.setValue(authTypeScope, 'model.name', credentials.model);
settings.setValue(scope, 'model.name', credentials.model);
}
await clearCachedCredentialFile();
}
} catch (error) {
handleAuthFailure(error);
@@ -140,10 +141,14 @@ export const useAuthCommand = (
);
const performAuth = useCallback(
async (authType: AuthType, credentials?: OpenAICredentials) => {
async (
authType: AuthType,
scope: SettingScope,
credentials?: OpenAICredentials,
) => {
try {
await config.refreshAuth(authType);
handleAuthSuccess(authType, credentials);
handleAuthSuccess(authType, scope, credentials);
} catch (e) {
handleAuthFailure(e);
}
@@ -151,51 +156,18 @@ export const useAuthCommand = (
[config, handleAuthSuccess, handleAuthFailure],
);
const isProviderManagedModel = useCallback(
(authType: AuthType, modelId: string | undefined) => {
if (!modelId) {
return false;
}
const modelProviders = settings.merged.modelProviders as
| ModelProvidersConfig
| undefined;
if (!modelProviders) {
return false;
}
const providerModels = modelProviders[authType];
if (!Array.isArray(providerModels)) {
return false;
}
return providerModels.some(
(providerModel) => providerModel.id === modelId,
);
},
[settings],
);
const handleAuthSelect = useCallback(
async (authType: AuthType | undefined, credentials?: OpenAICredentials) => {
async (
authType: AuthType | undefined,
scope: SettingScope,
credentials?: OpenAICredentials,
) => {
if (!authType) {
setIsAuthDialogOpen(false);
setAuthError(null);
return;
}
if (
authType === AuthType.USE_OPENAI &&
credentials?.model &&
isProviderManagedModel(authType, credentials.model)
) {
onAuthError(
t(
'Model "{{modelName}}" is managed via settings.modelProviders. Please complete the fields in settings, or use another model id.',
{ modelName: credentials.model },
),
);
return;
}
setPendingAuthType(authType);
setAuthError(null);
setIsAuthDialogOpen(false);
@@ -208,14 +180,14 @@ export const useAuthCommand = (
baseUrl: credentials.baseUrl,
model: credentials.model,
});
await performAuth(authType, credentials);
await performAuth(authType, scope, credentials);
}
return;
}
await performAuth(authType);
await performAuth(authType, scope);
},
[config, performAuth, isProviderManagedModel, onAuthError],
[config, performAuth],
);
const openAuthDialog = useCallback(() => {
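The two variants of this hook differ mainly in where a successful auth selection is persisted: one threads an explicit `SettingScope` from the dialog, the other derives it via `getPersistScopeForModelSelection(settings)`. A reduced sketch of the persistence step for the explicit-scope variant, with `OpenAICredentials` fields mirrored from their usage above (cache clearing and error handling omitted):

```ts
// Reduced sketch of the credential-persistence step (explicit-scope variant).
// OpenAICredentials mirrors its usage in the hunk; clearCachedCredentialFile is omitted.
import { AuthType } from '@qwen-code/qwen-code-core';
import type { LoadedSettings, SettingScope } from '../../config/settings.js';

interface OpenAICredentials {
  apiKey?: string;
  baseUrl?: string;
  model?: string;
}

function persistAuthSelection(
  settings: LoadedSettings,
  scope: SettingScope,
  authType: AuthType,
  credentials?: OpenAICredentials,
): void {
  settings.setValue(scope, 'security.auth.selectedType', authType);
  // Switching to Qwen OAuth leaves any stored OpenAI credentials untouched.
  if (authType === AuthType.QWEN_OAUTH || !credentials) return;
  if (credentials.apiKey != null) {
    settings.setValue(scope, 'security.auth.apiKey', credentials.apiKey);
  }
  if (credentials.baseUrl != null) {
    settings.setValue(scope, 'security.auth.baseUrl', credentials.baseUrl);
  }
  if (credentials.model != null) {
    settings.setValue(scope, 'model.name', credentials.model);
  }
}
```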

View File

@@ -4,28 +4,31 @@
* SPDX-License-Identifier: Apache-2.0
*/
import { describe, it, expect, vi, beforeEach } from 'vitest';
import { describe, it, expect } from 'vitest';
import { approvalModeCommand } from './approvalModeCommand.js';
import {
type CommandContext,
CommandKind,
type OpenDialogActionReturn,
type MessageActionReturn,
} from './types.js';
import { createMockCommandContext } from '../../test-utils/mockCommandContext.js';
import type { LoadedSettings } from '../../config/settings.js';
describe('approvalModeCommand', () => {
let mockContext: CommandContext;
let mockSetApprovalMode: ReturnType<typeof vi.fn>;
beforeEach(() => {
mockSetApprovalMode = vi.fn();
mockContext = createMockCommandContext({
services: {
config: {
getApprovalMode: () => 'default',
setApprovalMode: mockSetApprovalMode,
setApprovalMode: () => {},
},
settings: {
merged: {},
setValue: () => {},
forScope: () => ({}),
} as unknown as LoadedSettings,
},
});
});
@@ -38,7 +41,7 @@ describe('approvalModeCommand', () => {
expect(approvalModeCommand.kind).toBe(CommandKind.BUILT_IN);
});
it('should open approval mode dialog when invoked without arguments', async () => {
it('should open approval mode dialog when invoked', async () => {
const result = (await approvalModeCommand.action?.(
mockContext,
'',
@@ -48,123 +51,16 @@ describe('approvalModeCommand', () => {
expect(result.dialog).toBe('approval-mode');
});
it('should open approval mode dialog when invoked with whitespace only', async () => {
it('should open approval mode dialog with arguments (ignored)', async () => {
const result = (await approvalModeCommand.action?.(
mockContext,
' ',
'some arguments',
)) as OpenDialogActionReturn;
expect(result.type).toBe('dialog');
expect(result.dialog).toBe('approval-mode');
});
describe('direct mode setting (session-only)', () => {
it('should set approval mode to "plan" when argument is "plan"', async () => {
const result = (await approvalModeCommand.action?.(
mockContext,
'plan',
)) as MessageActionReturn;
expect(result.type).toBe('message');
expect(result.messageType).toBe('info');
expect(result.content).toContain('plan');
expect(mockSetApprovalMode).toHaveBeenCalledWith('plan');
});
it('should set approval mode to "yolo" when argument is "yolo"', async () => {
const result = (await approvalModeCommand.action?.(
mockContext,
'yolo',
)) as MessageActionReturn;
expect(result.type).toBe('message');
expect(result.messageType).toBe('info');
expect(result.content).toContain('yolo');
expect(mockSetApprovalMode).toHaveBeenCalledWith('yolo');
});
it('should set approval mode to "auto-edit" when argument is "auto-edit"', async () => {
const result = (await approvalModeCommand.action?.(
mockContext,
'auto-edit',
)) as MessageActionReturn;
expect(result.type).toBe('message');
expect(result.messageType).toBe('info');
expect(result.content).toContain('auto-edit');
expect(mockSetApprovalMode).toHaveBeenCalledWith('auto-edit');
});
it('should set approval mode to "default" when argument is "default"', async () => {
const result = (await approvalModeCommand.action?.(
mockContext,
'default',
)) as MessageActionReturn;
expect(result.type).toBe('message');
expect(result.messageType).toBe('info');
expect(result.content).toContain('default');
expect(mockSetApprovalMode).toHaveBeenCalledWith('default');
});
it('should be case-insensitive for mode argument', async () => {
const result = (await approvalModeCommand.action?.(
mockContext,
'YOLO',
)) as MessageActionReturn;
expect(result.type).toBe('message');
expect(result.messageType).toBe('info');
expect(mockSetApprovalMode).toHaveBeenCalledWith('yolo');
});
it('should handle argument with leading/trailing whitespace', async () => {
const result = (await approvalModeCommand.action?.(
mockContext,
' plan ',
)) as MessageActionReturn;
expect(result.type).toBe('message');
expect(result.messageType).toBe('info');
expect(mockSetApprovalMode).toHaveBeenCalledWith('plan');
});
});
describe('invalid mode argument', () => {
it('should return error for invalid mode', async () => {
const result = (await approvalModeCommand.action?.(
mockContext,
'invalid-mode',
)) as MessageActionReturn;
expect(result.type).toBe('message');
expect(result.messageType).toBe('error');
expect(result.content).toContain('invalid-mode');
expect(result.content).toContain('plan');
expect(result.content).toContain('yolo');
expect(mockSetApprovalMode).not.toHaveBeenCalled();
});
});
describe('untrusted folder handling', () => {
it('should return error when setApprovalMode throws (e.g., untrusted folder)', async () => {
const errorMessage =
'Cannot enable privileged approval modes in an untrusted folder.';
mockSetApprovalMode.mockImplementation(() => {
throw new Error(errorMessage);
});
const result = (await approvalModeCommand.action?.(
mockContext,
'yolo',
)) as MessageActionReturn;
expect(result.type).toBe('message');
expect(result.messageType).toBe('error');
expect(result.content).toBe(errorMessage);
});
});
it('should not have subcommands', () => {
expect(approvalModeCommand.subCommands).toBeUndefined();
});

View File

@@ -8,25 +8,9 @@ import type {
SlashCommand,
CommandContext,
OpenDialogActionReturn,
MessageActionReturn,
} from './types.js';
import { CommandKind } from './types.js';
import { t } from '../../i18n/index.js';
import type { ApprovalMode } from '@qwen-code/qwen-code-core';
import { APPROVAL_MODES } from '@qwen-code/qwen-code-core';
/**
* Parses the argument string and returns the corresponding ApprovalMode if valid.
* Returns undefined if the argument is empty or not a valid mode.
*/
function parseApprovalModeArg(arg: string): ApprovalMode | undefined {
const trimmed = arg.trim().toLowerCase();
if (!trimmed) {
return undefined;
}
// Match against valid approval modes (case-insensitive)
return APPROVAL_MODES.find((mode) => mode.toLowerCase() === trimmed);
}
export const approvalModeCommand: SlashCommand = {
name: 'approval-mode',
@@ -35,49 +19,10 @@ export const approvalModeCommand: SlashCommand = {
},
kind: CommandKind.BUILT_IN,
action: async (
context: CommandContext,
args: string,
): Promise<OpenDialogActionReturn | MessageActionReturn> => {
const mode = parseApprovalModeArg(args);
// If no argument provided, open the dialog
if (!args.trim()) {
return {
type: 'dialog',
dialog: 'approval-mode',
};
}
// If invalid argument, return error message with valid options
if (!mode) {
return {
type: 'message',
messageType: 'error',
content: t('Invalid approval mode "{{arg}}". Valid modes: {{modes}}', {
arg: args.trim(),
modes: APPROVAL_MODES.join(', '),
}),
};
}
// Set the mode for current session only (not persisted)
const { config } = context.services;
if (config) {
try {
config.setApprovalMode(mode);
} catch (e) {
return {
type: 'message',
messageType: 'error',
content: (e as Error).message,
};
}
}
return {
type: 'message',
messageType: 'info',
content: t('Approval mode set to "{{mode}}"', { mode }),
};
},
_context: CommandContext,
_args: string,
): Promise<OpenDialogActionReturn> => ({
type: 'dialog',
dialog: 'approval-mode',
}),
};
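In the richer variant above, `/approval-mode <mode>` validates its argument case-insensitively against `APPROVAL_MODES` and applies the result to the current session only (nothing is persisted), while an empty argument still opens the dialog. A hypothetical usage sketch, reusing `createMockCommandContext` from the test file shown earlier in this diff:

```ts
// Hypothetical usage of the argument-handling variant of /approval-mode.
import { approvalModeCommand } from './approvalModeCommand.js';
import { createMockCommandContext } from '../../test-utils/mockCommandContext.js';

const context = createMockCommandContext();

// No argument: open the approval-mode dialog.
await approvalModeCommand.action?.(context, '');
// -> { type: 'dialog', dialog: 'approval-mode' }

// Valid mode: applied to the session via config.setApprovalMode('yolo')
// (when context.services.config is present), then confirmed with an info message.
await approvalModeCommand.action?.(context, 'YOLO');
// -> { type: 'message', messageType: 'info', content: 'Approval mode set to "yolo"' }

// Unknown mode: error listing the valid modes from APPROVAL_MODES.
await approvalModeCommand.action?.(context, 'turbo');
// -> { type: 'message', messageType: 'error', content: 'Invalid approval mode "turbo". ...' }
```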

View File

@@ -191,23 +191,11 @@ export const ideCommand = async (): Promise<SlashCommand> => {
kind: CommandKind.BUILT_IN,
action: async (context) => {
const installer = getIdeInstaller(currentIDE);
const isSandBox = !!process.env['SANDBOX'];
if (isSandBox) {
context.ui.addItem(
{
type: 'info',
text: `IDE integration needs to be installed on the host. If you have already installed it, you can connect to the IDE directly.`,
},
Date.now(),
);
return;
}
if (!installer) {
const ideName = ideClient.getDetectedIdeDisplayName();
context.ui.addItem(
{
type: 'error',
text: `Automatic installation is not supported for ${ideName}. Please install the '${QWEN_CODE_COMPANION_EXTENSION_NAME}' extension manually from the marketplace.`,
text: `No installer is available for ${ideClient.getDetectedIdeDisplayName()}. Please install the '${QWEN_CODE_COMPANION_EXTENSION_NAME}' extension manually from the marketplace.`,
},
Date.now(),
);

View File

@@ -11,14 +11,9 @@ import type { SlashCommand, type CommandContext } from './types.js';
import { createMockCommandContext } from '../../test-utils/mockCommandContext.js';
import { MessageType } from '../types.js';
import type { LoadedSettings } from '../../config/settings.js';
import { readFile } from 'node:fs/promises';
import os from 'node:os';
import path from 'node:path';
import {
getErrorMessage,
loadServerHierarchicalMemory,
QWEN_DIR,
setGeminiMdFilename,
type FileDiscoveryService,
type LoadServerHierarchicalMemoryResponse,
} from '@qwen-code/qwen-code-core';
@@ -36,18 +31,7 @@ vi.mock('@qwen-code/qwen-code-core', async (importOriginal) => {
};
});
vi.mock('node:fs/promises', () => {
const readFile = vi.fn();
return {
readFile,
default: {
readFile,
},
};
});
const mockLoadServerHierarchicalMemory = loadServerHierarchicalMemory as Mock;
const mockReadFile = readFile as unknown as Mock;
describe('memoryCommand', () => {
let mockContext: CommandContext;
@@ -68,10 +52,6 @@ describe('memoryCommand', () => {
let mockGetGeminiMdFileCount: Mock;
beforeEach(() => {
setGeminiMdFilename('QWEN.md');
mockReadFile.mockReset();
vi.restoreAllMocks();
showCommand = getSubCommand('show');
mockGetUserMemory = vi.fn();
@@ -122,52 +102,6 @@ describe('memoryCommand', () => {
expect.any(Number),
);
});
it('should show project memory from the configured context file', async () => {
const projectCommand = showCommand.subCommands?.find(
(cmd) => cmd.name === '--project',
);
if (!projectCommand?.action) throw new Error('Command has no action');
setGeminiMdFilename('AGENTS.md');
vi.spyOn(process, 'cwd').mockReturnValue('/test/project');
mockReadFile.mockResolvedValue('project memory');
await projectCommand.action(mockContext, '');
const expectedProjectPath = path.join('/test/project', 'AGENTS.md');
expect(mockReadFile).toHaveBeenCalledWith(expectedProjectPath, 'utf-8');
expect(mockContext.ui.addItem).toHaveBeenCalledWith(
{
type: MessageType.INFO,
text: expect.stringContaining(expectedProjectPath),
},
expect.any(Number),
);
});
it('should show global memory from the configured context file', async () => {
const globalCommand = showCommand.subCommands?.find(
(cmd) => cmd.name === '--global',
);
if (!globalCommand?.action) throw new Error('Command has no action');
setGeminiMdFilename('AGENTS.md');
vi.spyOn(os, 'homedir').mockReturnValue('/home/user');
mockReadFile.mockResolvedValue('global memory');
await globalCommand.action(mockContext, '');
const expectedGlobalPath = path.join('/home/user', QWEN_DIR, 'AGENTS.md');
expect(mockReadFile).toHaveBeenCalledWith(expectedGlobalPath, 'utf-8');
expect(mockContext.ui.addItem).toHaveBeenCalledWith(
{
type: MessageType.INFO,
text: expect.stringContaining('Global memory content'),
},
expect.any(Number),
);
});
});
describe('/memory add', () => {

View File

@@ -6,13 +6,12 @@
import {
getErrorMessage,
getCurrentGeminiMdFilename,
loadServerHierarchicalMemory,
QWEN_DIR,
} from '@qwen-code/qwen-code-core';
import path from 'node:path';
import os from 'node:os';
import fs from 'node:fs/promises';
import os from 'os';
import fs from 'fs/promises';
import { MessageType } from '../types.js';
import type { SlashCommand, SlashCommandActionReturn } from './types.js';
import { CommandKind } from './types.js';
@@ -57,12 +56,7 @@ export const memoryCommand: SlashCommand = {
kind: CommandKind.BUILT_IN,
action: async (context) => {
try {
const workingDir =
context.services.config?.getWorkingDir?.() ?? process.cwd();
const projectMemoryPath = path.join(
workingDir,
getCurrentGeminiMdFilename(),
);
const projectMemoryPath = path.join(process.cwd(), 'QWEN.md');
const memoryContent = await fs.readFile(
projectMemoryPath,
'utf-8',
@@ -110,7 +104,7 @@ export const memoryCommand: SlashCommand = {
const globalMemoryPath = path.join(
os.homedir(),
QWEN_DIR,
getCurrentGeminiMdFilename(),
'QWEN.md',
);
const globalMemoryContent = await fs.readFile(
globalMemoryPath,

View File

@@ -13,6 +13,12 @@ import {
type ContentGeneratorConfig,
type Config,
} from '@qwen-code/qwen-code-core';
import * as availableModelsModule from '../models/availableModels.js';
// Mock the availableModels module
vi.mock('../models/availableModels.js', () => ({
getAvailableModelsForAuthType: vi.fn(),
}));
// Helper function to create a mock config
function createMockConfig(
@@ -25,6 +31,9 @@ function createMockConfig(
describe('modelCommand', () => {
let mockContext: CommandContext;
const mockGetAvailableModelsForAuthType = vi.mocked(
availableModelsModule.getAvailableModelsForAuthType,
);
beforeEach(() => {
mockContext = createMockCommandContext();
@@ -78,6 +87,10 @@ describe('modelCommand', () => {
});
it('should return dialog action for QWEN_OAUTH auth type', async () => {
mockGetAvailableModelsForAuthType.mockReturnValue([
{ id: 'qwen3-coder-plus', label: 'qwen3-coder-plus' },
]);
const mockConfig = createMockConfig({
model: 'test-model',
authType: AuthType.QWEN_OAUTH,
@@ -92,7 +105,11 @@ describe('modelCommand', () => {
});
});
it('should return dialog action for USE_OPENAI auth type', async () => {
it('should return dialog action for USE_OPENAI auth type when model is available', async () => {
mockGetAvailableModelsForAuthType.mockReturnValue([
{ id: 'gpt-4', label: 'gpt-4' },
]);
const mockConfig = createMockConfig({
model: 'test-model',
authType: AuthType.USE_OPENAI,
@@ -107,7 +124,28 @@ describe('modelCommand', () => {
});
});
it('should return dialog action for unsupported auth types', async () => {
it('should return error for USE_OPENAI auth type when no model is available', async () => {
mockGetAvailableModelsForAuthType.mockReturnValue([]);
const mockConfig = createMockConfig({
model: 'test-model',
authType: AuthType.USE_OPENAI,
});
mockContext.services.config = mockConfig as Config;
const result = await modelCommand.action!(mockContext, '');
expect(result).toEqual({
type: 'message',
messageType: 'error',
content:
'No models available for the current authentication type (openai).',
});
});
it('should return error for unsupported auth types', async () => {
mockGetAvailableModelsForAuthType.mockReturnValue([]);
const mockConfig = createMockConfig({
model: 'test-model',
authType: 'UNSUPPORTED_AUTH_TYPE' as AuthType,
@@ -117,8 +155,10 @@ describe('modelCommand', () => {
const result = await modelCommand.action!(mockContext, '');
expect(result).toEqual({
type: 'dialog',
dialog: 'model',
type: 'message',
messageType: 'error',
content:
'No models available for the current authentication type (UNSUPPORTED_AUTH_TYPE).',
});
});

View File

@@ -11,6 +11,7 @@ import type {
MessageActionReturn,
} from './types.js';
import { CommandKind } from './types.js';
import { getAvailableModelsForAuthType } from '../models/availableModels.js';
import { t } from '../../i18n/index.js';
export const modelCommand: SlashCommand = {
@@ -29,7 +30,7 @@ export const modelCommand: SlashCommand = {
return {
type: 'message',
messageType: 'error',
content: t('Configuration not available.'),
content: 'Configuration not available.',
};
}
@@ -51,6 +52,22 @@ export const modelCommand: SlashCommand = {
};
}
const availableModels = getAvailableModelsForAuthType(authType);
if (availableModels.length === 0) {
return {
type: 'message',
messageType: 'error',
content: t(
'No models available for the current authentication type ({{authType}}).',
{
authType,
},
),
};
}
// Trigger model selection dialog
return {
type: 'dialog',
dialog: 'model',

View File

@@ -1,132 +0,0 @@
/**
* @license
* Copyright 2025 Qwen
* SPDX-License-Identifier: Apache-2.0
*/
import {
CommandKind,
type CommandCompletionItem,
type CommandContext,
type SlashCommand,
} from './types.js';
import { MessageType, type HistoryItemSkillsList } from '../types.js';
import { t } from '../../i18n/index.js';
import { AsyncFzf } from 'fzf';
import type { SkillConfig } from '@qwen-code/qwen-code-core';
export const skillsCommand: SlashCommand = {
name: 'skills',
get description() {
return t('List available skills.');
},
kind: CommandKind.BUILT_IN,
action: async (context: CommandContext, args?: string) => {
const rawArgs = args?.trim() ?? '';
const [skillName = ''] = rawArgs.split(/\s+/);
const skillManager = context.services.config?.getSkillManager();
if (!skillManager) {
context.ui.addItem(
{
type: MessageType.ERROR,
text: t('Could not retrieve skill manager.'),
},
Date.now(),
);
return;
}
const skills = await skillManager.listSkills();
if (skills.length === 0) {
context.ui.addItem(
{
type: MessageType.INFO,
text: t('No skills are currently available.'),
},
Date.now(),
);
return;
}
if (!skillName) {
const sortedSkills = [...skills].sort((left, right) =>
left.name.localeCompare(right.name),
);
const skillsListItem: HistoryItemSkillsList = {
type: MessageType.SKILLS_LIST,
skills: sortedSkills.map((skill) => ({ name: skill.name })),
};
context.ui.addItem(skillsListItem, Date.now());
return;
}
const normalizedName = skillName.toLowerCase();
const hasSkill = skills.some(
(skill) => skill.name.toLowerCase() === normalizedName,
);
if (!hasSkill) {
context.ui.addItem(
{
type: MessageType.ERROR,
text: t('Unknown skill: {{name}}', { name: skillName }),
},
Date.now(),
);
return;
}
const rawInput = context.invocation?.raw ?? `/skills ${rawArgs}`;
return {
type: 'submit_prompt',
content: [{ text: rawInput }],
};
},
completion: async (
context: CommandContext,
partialArg: string,
): Promise<CommandCompletionItem[]> => {
const skillManager = context.services.config?.getSkillManager();
if (!skillManager) {
return [];
}
const skills = await skillManager.listSkills();
const normalizedPartial = partialArg.trim();
const matches = await getSkillMatches(skills, normalizedPartial);
return matches.map((skill) => ({
value: skill.name,
description: skill.description,
}));
},
};
async function getSkillMatches(
skills: SkillConfig[],
query: string,
): Promise<SkillConfig[]> {
if (!query) {
return skills;
}
const names = skills.map((skill) => skill.name);
const skillMap = new Map(skills.map((skill) => [skill.name, skill]));
try {
const fzf = new AsyncFzf(names, {
fuzzy: 'v2',
casing: 'case-insensitive',
});
const results = (await fzf.find(query)) as Array<{ item: string }>;
return results
.map((result) => skillMap.get(result.item))
.filter((skill): skill is SkillConfig => !!skill);
} catch (error) {
console.error('[skillsCommand] Fuzzy match failed:', error);
const lowerQuery = query.toLowerCase();
return skills.filter((skill) =>
skill.name.toLowerCase().startsWith(lowerQuery),
);
}
}

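For reference, the skillsCommand removed above pairs fzf-based fuzzy matching with a case-insensitive prefix fallback. A minimal standalone sketch of that pattern follows; the helper name `findMatches` is illustrative and not part of the codebase.

```ts
import { AsyncFzf } from 'fzf';

// Fuzzy-match a query against a list of names, falling back to a simple
// case-insensitive prefix match if fzf throws for any reason.
async function findMatches(names: string[], query: string): Promise<string[]> {
  if (!query) {
    return names;
  }
  try {
    const fzf = new AsyncFzf(names, { casing: 'case-insensitive' });
    const results = await fzf.find(query);
    return results.map((result) => result.item);
  } catch {
    const lowerQuery = query.toLowerCase();
    return names.filter((name) => name.toLowerCase().startsWith(lowerQuery));
  }
}

// Example: findMatches(['pdf', 'xlsx', 'docx'], 'x').then(console.log);
// matches entries containing 'x', e.g. 'xlsx' and 'docx'
```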
View File

@@ -209,12 +209,6 @@ export enum CommandKind {
MCP_PROMPT = 'mcp-prompt',
}
export interface CommandCompletionItem {
value: string;
label?: string;
description?: string;
}
// The standardized contract for any command in the system.
export interface SlashCommand {
name: string;
@@ -240,7 +234,7 @@ export interface SlashCommand {
completion?: (
context: CommandContext,
partialArg: string,
) => Promise<Array<string | CommandCompletionItem> | null>;
) => Promise<string[]>;
subCommands?: SlashCommand[];
}

View File

@@ -54,7 +54,7 @@ export function ApprovalModeDialog({
}: ApprovalModeDialogProps): React.JSX.Element {
// Start with User scope by default
const [selectedScope, setSelectedScope] = useState<SettingScope>(
SettingScope.Workspace,
SettingScope.User,
);
// Track the currently highlighted approval mode

View File

@@ -25,6 +25,7 @@ import { useUIState } from '../contexts/UIStateContext.js';
import { useUIActions } from '../contexts/UIActionsContext.js';
import { useConfig } from '../contexts/ConfigContext.js';
import { useSettings } from '../contexts/SettingsContext.js';
import { SettingScope } from '../../config/settings.js';
import { AuthState } from '../types.js';
import { AuthType } from '@qwen-code/qwen-code-core';
import process from 'node:process';
@@ -201,7 +202,7 @@ export const DialogManager = ({
return (
<OpenAIKeyPrompt
onSubmit={(apiKey, baseUrl, model) => {
uiActions.handleAuthSelect(AuthType.USE_OPENAI, {
uiActions.handleAuthSelect(AuthType.USE_OPENAI, SettingScope.User, {
apiKey,
baseUrl,
model,

View File

@@ -30,7 +30,6 @@ import { Help } from './Help.js';
import type { SlashCommand } from '../commands/types.js';
import { ExtensionsList } from './views/ExtensionsList.js';
import { getMCPServerStatus } from '@qwen-code/qwen-code-core';
import { SkillsList } from './views/SkillsList.js';
import { ToolsList } from './views/ToolsList.js';
import { McpStatus } from './views/McpStatus.js';
@@ -154,9 +153,6 @@ const HistoryItemDisplayComponent: React.FC<HistoryItemDisplayProps> = ({
showDescriptions={itemForDisplay.showDescriptions}
/>
)}
{itemForDisplay.type === 'skills_list' && (
<SkillsList skills={itemForDisplay.skills} />
)}
{itemForDisplay.type === 'mcp_status' && (
<McpStatus {...itemForDisplay} serverStatus={getMCPServerStatus} />
)}

View File

@@ -10,11 +10,7 @@ import { ModelDialog } from './ModelDialog.js';
import { useKeypress } from '../hooks/useKeypress.js';
import { DescriptiveRadioButtonSelect } from './shared/DescriptiveRadioButtonSelect.js';
import { ConfigContext } from '../contexts/ConfigContext.js';
import { SettingsContext } from '../contexts/SettingsContext.js';
import type { Config } from '@qwen-code/qwen-code-core';
import { AuthType } from '@qwen-code/qwen-code-core';
import type { LoadedSettings } from '../../config/settings.js';
import { SettingScope } from '../../config/settings.js';
import {
AVAILABLE_MODELS_QWEN,
MAINLINE_CODER,
@@ -40,29 +36,18 @@ const renderComponent = (
};
const combinedProps = { ...defaultProps, ...props };
const mockSettings = {
isTrusted: true,
user: { settings: {} },
workspace: { settings: {} },
setValue: vi.fn(),
} as unknown as LoadedSettings;
const mockConfig = contextValue
? ({
// --- Functions used by ModelDialog ---
getModel: vi.fn(() => MAINLINE_CODER),
setModel: vi.fn().mockResolvedValue(undefined),
switchModel: vi.fn().mockResolvedValue(undefined),
setModel: vi.fn(),
getAuthType: vi.fn(() => 'qwen-oauth'),
// --- Functions used by ClearcutLogger ---
getUsageStatisticsEnabled: vi.fn(() => true),
getSessionId: vi.fn(() => 'mock-session-id'),
getDebugMode: vi.fn(() => false),
getContentGeneratorConfig: vi.fn(() => ({
authType: AuthType.QWEN_OAUTH,
model: MAINLINE_CODER,
})),
getContentGeneratorConfig: vi.fn(() => ({ authType: 'mock' })),
getUseSmartEdit: vi.fn(() => false),
getUseModelRouter: vi.fn(() => false),
getProxy: vi.fn(() => undefined),
@@ -73,27 +58,21 @@ const renderComponent = (
: undefined;
const renderResult = render(
<SettingsContext.Provider value={mockSettings}>
<ConfigContext.Provider value={mockConfig}>
<ModelDialog {...combinedProps} />
</ConfigContext.Provider>
</SettingsContext.Provider>,
<ConfigContext.Provider value={mockConfig}>
<ModelDialog {...combinedProps} />
</ConfigContext.Provider>,
);
return {
...renderResult,
props: combinedProps,
mockConfig,
mockSettings,
};
};
describe('<ModelDialog />', () => {
beforeEach(() => {
vi.clearAllMocks();
// Ensure env-based fallback models don't leak into this suite from the developer environment.
delete process.env['OPENAI_MODEL'];
delete process.env['ANTHROPIC_MODEL'];
});
afterEach(() => {
@@ -112,12 +91,8 @@ describe('<ModelDialog />', () => {
const props = mockedSelect.mock.calls[0][0];
expect(props.items).toHaveLength(AVAILABLE_MODELS_QWEN.length);
expect(props.items[0].value).toBe(
`${AuthType.QWEN_OAUTH}::${MAINLINE_CODER}`,
);
expect(props.items[1].value).toBe(
`${AuthType.QWEN_OAUTH}::${MAINLINE_VLM}`,
);
expect(props.items[0].value).toBe(MAINLINE_CODER);
expect(props.items[1].value).toBe(MAINLINE_VLM);
expect(props.showNumbers).toBe(true);
});
@@ -164,93 +139,16 @@ describe('<ModelDialog />', () => {
expect(mockedSelect).toHaveBeenCalledTimes(1);
});
it('calls config.switchModel and onClose when DescriptiveRadioButtonSelect.onSelect is triggered', async () => {
const { props, mockConfig, mockSettings } = renderComponent({}, {}); // Pass empty object for contextValue
it('calls config.setModel and onClose when DescriptiveRadioButtonSelect.onSelect is triggered', () => {
const { props, mockConfig } = renderComponent({}, {}); // Pass empty object for contextValue
const childOnSelect = mockedSelect.mock.calls[0][0].onSelect;
expect(childOnSelect).toBeDefined();
await childOnSelect(`${AuthType.QWEN_OAUTH}::${MAINLINE_CODER}`);
childOnSelect(MAINLINE_CODER);
expect(mockConfig?.switchModel).toHaveBeenCalledWith(
AuthType.QWEN_OAUTH,
MAINLINE_CODER,
undefined,
{
reason: 'user_manual',
context: 'Model switched via /model dialog',
},
);
expect(mockSettings.setValue).toHaveBeenCalledWith(
SettingScope.User,
'model.name',
MAINLINE_CODER,
);
expect(mockSettings.setValue).toHaveBeenCalledWith(
SettingScope.User,
'security.auth.selectedType',
AuthType.QWEN_OAUTH,
);
expect(props.onClose).toHaveBeenCalledTimes(1);
});
it('calls config.switchModel and persists authType+model when selecting a different authType', async () => {
const switchModel = vi.fn().mockResolvedValue(undefined);
const getAuthType = vi.fn(() => AuthType.USE_OPENAI);
const getAvailableModelsForAuthType = vi.fn((t: AuthType) => {
if (t === AuthType.USE_OPENAI) {
return [{ id: 'gpt-4', label: 'GPT-4', authType: t }];
}
if (t === AuthType.QWEN_OAUTH) {
return AVAILABLE_MODELS_QWEN.map((m) => ({
id: m.id,
label: m.label,
authType: AuthType.QWEN_OAUTH,
}));
}
return [];
});
const mockConfigWithSwitchAuthType = {
getAuthType,
getModel: vi.fn(() => 'gpt-4'),
getContentGeneratorConfig: vi.fn(() => ({
authType: AuthType.QWEN_OAUTH,
model: MAINLINE_CODER,
})),
// Add switchModel to the mock object (not the type)
switchModel,
getAvailableModelsForAuthType,
};
const { props, mockSettings } = renderComponent(
{},
// Cast to Config to bypass type checking, matching the runtime behavior
mockConfigWithSwitchAuthType as unknown as Partial<Config>,
);
const childOnSelect = mockedSelect.mock.calls[0][0].onSelect;
await childOnSelect(`${AuthType.QWEN_OAUTH}::${MAINLINE_CODER}`);
expect(switchModel).toHaveBeenCalledWith(
AuthType.QWEN_OAUTH,
MAINLINE_CODER,
{ requireCachedCredentials: true },
{
reason: 'user_manual',
context: 'AuthType+model switched via /model dialog',
},
);
expect(mockSettings.setValue).toHaveBeenCalledWith(
SettingScope.User,
'model.name',
MAINLINE_CODER,
);
expect(mockSettings.setValue).toHaveBeenCalledWith(
SettingScope.User,
'security.auth.selectedType',
AuthType.QWEN_OAUTH,
);
// Assert against the default mock provided by renderComponent
expect(mockConfig?.setModel).toHaveBeenCalledWith(MAINLINE_CODER);
expect(props.onClose).toHaveBeenCalledTimes(1);
});
@@ -295,25 +193,17 @@ describe('<ModelDialog />', () => {
it('updates initialIndex when config context changes', () => {
const mockGetModel = vi.fn(() => MAINLINE_CODER);
const mockGetAuthType = vi.fn(() => 'qwen-oauth');
const mockSettings = {
isTrusted: true,
user: { settings: {} },
workspace: { settings: {} },
setValue: vi.fn(),
} as unknown as LoadedSettings;
const { rerender } = render(
<SettingsContext.Provider value={mockSettings}>
<ConfigContext.Provider
value={
{
getModel: mockGetModel,
getAuthType: mockGetAuthType,
} as unknown as Config
}
>
<ModelDialog onClose={vi.fn()} />
</ConfigContext.Provider>
</SettingsContext.Provider>,
<ConfigContext.Provider
value={
{
getModel: mockGetModel,
getAuthType: mockGetAuthType,
} as unknown as Config
}
>
<ModelDialog onClose={vi.fn()} />
</ConfigContext.Provider>,
);
expect(mockedSelect.mock.calls[0][0].initialIndex).toBe(0);
@@ -325,11 +215,9 @@ describe('<ModelDialog />', () => {
} as unknown as Config;
rerender(
<SettingsContext.Provider value={mockSettings}>
<ConfigContext.Provider value={newMockConfig}>
<ModelDialog onClose={vi.fn()} />
</ConfigContext.Provider>
</SettingsContext.Provider>,
<ConfigContext.Provider value={newMockConfig}>
<ModelDialog onClose={vi.fn()} />
</ConfigContext.Provider>,
);
// Should be called at least twice: initial render + re-render after context change

View File

@@ -5,210 +5,52 @@
*/
import type React from 'react';
import { useCallback, useContext, useMemo, useState } from 'react';
import { useCallback, useContext, useMemo } from 'react';
import { Box, Text } from 'ink';
import {
AuthType,
ModelSlashCommandEvent,
logModelSlashCommand,
type ContentGeneratorConfig,
type ContentGeneratorConfigSource,
type ContentGeneratorConfigSources,
} from '@qwen-code/qwen-code-core';
import { useKeypress } from '../hooks/useKeypress.js';
import { theme } from '../semantic-colors.js';
import { DescriptiveRadioButtonSelect } from './shared/DescriptiveRadioButtonSelect.js';
import { ConfigContext } from '../contexts/ConfigContext.js';
import { UIStateContext } from '../contexts/UIStateContext.js';
import { useSettings } from '../contexts/SettingsContext.js';
import {
getAvailableModelsForAuthType,
MAINLINE_CODER,
} from '../models/availableModels.js';
import { getPersistScopeForModelSelection } from '../../config/modelProvidersScope.js';
import { t } from '../../i18n/index.js';
interface ModelDialogProps {
onClose: () => void;
}
function formatSourceBadge(
source: ContentGeneratorConfigSource | undefined,
): string | undefined {
if (!source) return undefined;
switch (source.kind) {
case 'cli':
return source.detail ? `CLI ${source.detail}` : 'CLI';
case 'env':
return source.envKey ? `ENV ${source.envKey}` : 'ENV';
case 'settings':
return source.settingsPath
? `Settings ${source.settingsPath}`
: 'Settings';
case 'modelProviders': {
const suffix =
source.authType && source.modelId
? `${source.authType}:${source.modelId}`
: source.authType
? `${source.authType}`
: source.modelId
? `${source.modelId}`
: '';
return suffix ? `ModelProviders ${suffix}` : 'ModelProviders';
}
case 'default':
return source.detail ? `Default ${source.detail}` : 'Default';
case 'computed':
return source.detail ? `Computed ${source.detail}` : 'Computed';
case 'programmatic':
return source.detail ? `Programmatic ${source.detail}` : 'Programmatic';
case 'unknown':
default:
return undefined;
}
}
function readSourcesFromConfig(config: unknown): ContentGeneratorConfigSources {
if (!config) {
return {};
}
const maybe = config as {
getContentGeneratorConfigSources?: () => ContentGeneratorConfigSources;
};
return maybe.getContentGeneratorConfigSources?.() ?? {};
}
function maskApiKey(apiKey: string | undefined): string {
if (!apiKey) return '(not set)';
const trimmed = apiKey.trim();
if (trimmed.length === 0) return '(not set)';
if (trimmed.length <= 6) return '***';
const head = trimmed.slice(0, 3);
const tail = trimmed.slice(-4);
return `${head}${tail}`;
}
function persistModelSelection(
settings: ReturnType<typeof useSettings>,
modelId: string,
): void {
const scope = getPersistScopeForModelSelection(settings);
settings.setValue(scope, 'model.name', modelId);
}
function persistAuthTypeSelection(
settings: ReturnType<typeof useSettings>,
authType: AuthType,
): void {
const scope = getPersistScopeForModelSelection(settings);
settings.setValue(scope, 'security.auth.selectedType', authType);
}
function ConfigRow({
label,
value,
badge,
}: {
label: string;
value: React.ReactNode;
badge?: string;
}): React.JSX.Element {
return (
<Box flexDirection="column">
<Box>
<Box minWidth={12} flexShrink={0}>
<Text color={theme.text.secondary}>{label}:</Text>
</Box>
<Box flexGrow={1} flexDirection="row" flexWrap="wrap">
<Text>{value}</Text>
</Box>
</Box>
{badge ? (
<Box>
<Box minWidth={12} flexShrink={0}>
<Text> </Text>
</Box>
<Box flexGrow={1}>
<Text color={theme.text.secondary}>{badge}</Text>
</Box>
</Box>
) : null}
</Box>
);
}
export function ModelDialog({ onClose }: ModelDialogProps): React.JSX.Element {
const config = useContext(ConfigContext);
const uiState = useContext(UIStateContext);
const settings = useSettings();
// Local error state for displaying errors within the dialog
const [errorMessage, setErrorMessage] = useState<string | null>(null);
// Get auth type from config, default to QWEN_OAUTH if not available
const authType = config?.getAuthType() ?? AuthType.QWEN_OAUTH;
const authType = config?.getAuthType();
const effectiveConfig =
(config?.getContentGeneratorConfig?.() as
| ContentGeneratorConfig
| undefined) ?? undefined;
const sources = readSourcesFromConfig(config);
const availableModelEntries = useMemo(() => {
const allAuthTypes = Object.values(AuthType) as AuthType[];
const modelsByAuthType = allAuthTypes
.map((t) => ({
authType: t,
models: getAvailableModelsForAuthType(t, config ?? undefined),
}))
.filter((x) => x.models.length > 0);
// Fixed order: qwen-oauth first, then others in a stable order
const authTypeOrder: AuthType[] = [
AuthType.QWEN_OAUTH,
AuthType.USE_OPENAI,
AuthType.USE_ANTHROPIC,
AuthType.USE_GEMINI,
AuthType.USE_VERTEX_AI,
];
// Filter to only include authTypes that have models
const availableAuthTypes = new Set(modelsByAuthType.map((x) => x.authType));
const orderedAuthTypes = authTypeOrder.filter((t) =>
availableAuthTypes.has(t),
);
return orderedAuthTypes.flatMap((t) => {
const models =
modelsByAuthType.find((x) => x.authType === t)?.models ?? [];
return models.map((m) => ({ authType: t, model: m }));
});
}, [config]);
// Get available models based on auth type
const availableModels = useMemo(
() => getAvailableModelsForAuthType(authType),
[authType],
);
const MODEL_OPTIONS = useMemo(
() =>
availableModelEntries.map(({ authType: t2, model }) => {
const value = `${t2}::${model.id}`;
const title = (
<Text>
<Text bold color={theme.text.accent}>
[{t2}]
</Text>
<Text>{` ${model.label}`}</Text>
</Text>
);
const description = model.description || '';
return {
value,
title,
description,
key: value,
};
}),
[availableModelEntries],
availableModels.map((model) => ({
value: model.id,
title: model.label,
description: model.description || '',
key: model.id,
})),
[availableModels],
);
const preferredModelId = config?.getModel() || MAINLINE_CODER;
const preferredKey = authType ? `${authType}::${preferredModelId}` : '';
// Determine the Preferred Model (read once when the dialog opens).
const preferredModel = config?.getModel() || MAINLINE_CODER;
useKeypress(
(key) => {
@@ -219,83 +61,25 @@ export function ModelDialog({ onClose }: ModelDialogProps): React.JSX.Element {
{ isActive: true },
);
const initialIndex = useMemo(() => {
const index = MODEL_OPTIONS.findIndex(
(option) => option.value === preferredKey,
);
return index === -1 ? 0 : index;
}, [MODEL_OPTIONS, preferredKey]);
// Calculate the initial index based on the preferred model.
const initialIndex = useMemo(
() => MODEL_OPTIONS.findIndex((option) => option.value === preferredModel),
[MODEL_OPTIONS, preferredModel],
);
// Handle selection internally (Autonomous Dialog).
const handleSelect = useCallback(
async (selected: string) => {
// Clear any previous error
setErrorMessage(null);
const sep = '::';
const idx = selected.indexOf(sep);
const selectedAuthType = (
idx >= 0 ? selected.slice(0, idx) : authType
) as AuthType;
const modelId = idx >= 0 ? selected.slice(idx + sep.length) : selected;
(model: string) => {
if (config) {
try {
await config.switchModel(
selectedAuthType,
modelId,
selectedAuthType !== authType &&
selectedAuthType === AuthType.QWEN_OAUTH
? { requireCachedCredentials: true }
: undefined,
{
reason: 'user_manual',
context:
selectedAuthType === authType
? 'Model switched via /model dialog'
: 'AuthType+model switched via /model dialog',
},
);
} catch (e) {
const baseErrorMessage = e instanceof Error ? e.message : String(e);
setErrorMessage(
`Failed to switch model to '${modelId}'.\n\n${baseErrorMessage}`,
);
return;
}
const event = new ModelSlashCommandEvent(modelId);
config.setModel(model);
const event = new ModelSlashCommandEvent(model);
logModelSlashCommand(config, event);
const after = config.getContentGeneratorConfig?.() as
| ContentGeneratorConfig
| undefined;
const effectiveAuthType =
after?.authType ?? selectedAuthType ?? authType;
const effectiveModelId = after?.model ?? modelId;
persistModelSelection(settings, effectiveModelId);
persistAuthTypeSelection(settings, effectiveAuthType);
const baseUrl = after?.baseUrl ?? '(default)';
const maskedKey = maskApiKey(after?.apiKey);
uiState?.historyManager.addItem(
{
type: 'info',
text:
`authType: ${effectiveAuthType}\n` +
`Using model: ${effectiveModelId}\n` +
`Base URL: ${baseUrl}\n` +
`API key: ${maskedKey}`,
},
Date.now(),
);
}
onClose();
},
[authType, config, onClose, settings, uiState, setErrorMessage],
[config, onClose],
);
const hasModels = MODEL_OPTIONS.length > 0;
return (
<Box
borderStyle="round"
@@ -305,73 +89,14 @@ export function ModelDialog({ onClose }: ModelDialogProps): React.JSX.Element {
width="100%"
>
<Text bold>{t('Select Model')}</Text>
<Box marginTop={1} flexDirection="column">
<Text color={theme.text.secondary}>
{t('Current (effective) configuration')}
</Text>
<Box flexDirection="column" marginTop={1}>
<ConfigRow label="AuthType" value={authType} />
<ConfigRow
label="Model"
value={effectiveConfig?.model ?? config?.getModel?.() ?? ''}
badge={formatSourceBadge(sources['model'])}
/>
{authType !== AuthType.QWEN_OAUTH && (
<>
<ConfigRow
label="Base URL"
value={effectiveConfig?.baseUrl ?? ''}
badge={formatSourceBadge(sources['baseUrl'])}
/>
<ConfigRow
label="API Key"
value={effectiveConfig?.apiKey ? t('(set)') : t('(not set)')}
badge={formatSourceBadge(sources['apiKey'])}
/>
</>
)}
</Box>
<Box marginTop={1}>
<DescriptiveRadioButtonSelect
items={MODEL_OPTIONS}
onSelect={handleSelect}
initialIndex={initialIndex}
showNumbers={true}
/>
</Box>
{!hasModels ? (
<Box marginTop={1} flexDirection="column">
<Text color={theme.status.warning}>
{t(
'No models available for the current authentication type ({{authType}}).',
{
authType: authType ? String(authType) : t('(none)'),
},
)}
</Text>
<Box marginTop={1}>
<Text color={theme.text.secondary}>
{t(
'Please configure models in settings.modelProviders or use environment variables.',
)}
</Text>
</Box>
</Box>
) : (
<Box marginTop={1}>
<DescriptiveRadioButtonSelect
items={MODEL_OPTIONS}
onSelect={handleSelect}
initialIndex={initialIndex}
showNumbers={true}
/>
</Box>
)}
{errorMessage && (
<Box marginTop={1} flexDirection="column" paddingX={1}>
<Text color={theme.status.error} wrap="wrap">
{errorMessage}
</Text>
</Box>
)}
<Box marginTop={1} flexDirection="column">
<Text color={theme.text.secondary}>{t('(Press Esc to close)')}</Text>
</Box>

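The handleSelect logic shown above encodes each dialog option as an `authType::modelId` composite value and splits it back apart on selection. A small sketch of that encoding, using plain strings and hypothetical helper names rather than the real AuthType enum:

```ts
const SEPARATOR = '::';

// Hypothetical helpers illustrating the composite option value format.
function encodeSelection(authType: string, modelId: string): string {
  return `${authType}${SEPARATOR}${modelId}`;
}

function decodeSelection(
  selected: string,
  fallbackAuthType: string,
): { authType: string; modelId: string } {
  const idx = selected.indexOf(SEPARATOR);
  if (idx < 0) {
    // Plain model ids (no separator) keep the current auth type.
    return { authType: fallbackAuthType, modelId: selected };
  }
  return {
    authType: selected.slice(0, idx),
    modelId: selected.slice(idx + SEPARATOR.length),
  };
}

// Example:
// decodeSelection(encodeSelection('qwen-oauth', 'qwen3-coder-plus'), 'openai')
//   -> { authType: 'qwen-oauth', modelId: 'qwen3-coder-plus' }
```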
View File

@@ -87,13 +87,7 @@ export async function showResumeSessionPicker(
let selectedId: string | undefined;
const { unmount, waitUntilExit } = render(
<KeypressProvider
kittyProtocolEnabled={false}
pasteWorkaround={
process.platform === 'win32' ||
parseInt(process.versions.node.split('.')[0], 10) < 20
}
>
<KeypressProvider kittyProtocolEnabled={false}>
<StandalonePickerScreen
sessionService={sessionService}
onSelect={(id) => {

View File

@@ -106,7 +106,7 @@ export function SuggestionsDisplay({
</Box>
{suggestion.description && (
<Box flexGrow={1} paddingLeft={2}>
<Box flexGrow={1} paddingLeft={3}>
<Text color={textColor} wrap="truncate">
{suggestion.description}
</Text>

View File

@@ -23,7 +23,7 @@ export const InfoMessage: React.FC<InfoMessageProps> = ({ text }) => {
const prefixWidth = prefix.length;
return (
<Box flexDirection="row" marginBottom={1}>
<Box flexDirection="row" marginTop={1}>
<Box width={prefixWidth}>
<Text color={theme.status.warning}>{prefix}</Text>
</Box>

View File

@@ -18,7 +18,7 @@ export const WarningMessage: React.FC<WarningMessageProps> = ({ text }) => {
const prefixWidth = 3;
return (
<Box flexDirection="row" marginBottom={1}>
<Box flexDirection="row" marginTop={1}>
<Box width={prefixWidth}>
<Text color={Colors.AccentYellow}>{prefix}</Text>
</Box>

View File

@@ -11,7 +11,7 @@ import { BaseSelectionList } from './BaseSelectionList.js';
import type { SelectionListItem } from '../../hooks/useSelectionList.js';
export interface DescriptiveRadioSelectItem<T> extends SelectionListItem<T> {
title: React.ReactNode;
title: string;
description: string;
}

View File

@@ -1,36 +0,0 @@
/**
* @license
* Copyright 2025 Qwen
* SPDX-License-Identifier: Apache-2.0
*/
import type React from 'react';
import { Box, Text } from 'ink';
import { theme } from '../../semantic-colors.js';
import { type SkillDefinition } from '../../types.js';
import { t } from '../../../i18n/index.js';
interface SkillsListProps {
skills: readonly SkillDefinition[];
}
export const SkillsList: React.FC<SkillsListProps> = ({ skills }) => (
<Box flexDirection="column" marginBottom={1}>
<Text bold color={theme.text.primary}>
{t('Available skills:')}
</Text>
<Box height={1} />
{skills.length > 0 ? (
skills.map((skill) => (
<Box key={skill.name} flexDirection="row">
<Text color={theme.text.primary}>{' '}- </Text>
<Text bold color={theme.text.accent}>
{skill.name}
</Text>
</Box>
))
) : (
<Text color={theme.text.primary}> {t('No skills available')}</Text>
)}
</Box>
);

View File

@@ -30,6 +30,7 @@ export interface UIActions {
) => void;
handleAuthSelect: (
authType: AuthType | undefined,
scope: SettingScope,
credentials?: OpenAICredentials,
) => Promise<void>;
setAuthState: (state: AuthState) => void;

View File

@@ -25,6 +25,7 @@ export interface DialogCloseOptions {
isAuthDialogOpen: boolean;
handleAuthSelect: (
authType: AuthType | undefined,
scope: SettingScope,
credentials?: OpenAICredentials,
) => Promise<void>;
pendingAuthType: AuthType | undefined;

View File

@@ -912,7 +912,7 @@ export const useGeminiStream = (
// Reset quota error flag when starting a new query (not a continuation)
if (!options?.isContinuation) {
setModelSwitchedFromQuotaError(false);
// No quota-error / fallback routing mechanism currently; keep state minimal.
config.setQuotaErrorOccurred(false);
}
abortControllerRef.current = new AbortController();

View File

@@ -1,58 +1,21 @@
/**
* @license
* Copyright 2025 Qwen
* SPDX-License-Identifier: Apache-2.0
*/
import { useCallback } from 'react';
import { useStdin } from 'ink';
import type { EditorType } from '@qwen-code/qwen-code-core';
import {
editorCommands,
commandExists as coreCommandExists,
} from '@qwen-code/qwen-code-core';
import { spawnSync } from 'child_process';
import { useSettings } from '../contexts/SettingsContext.js';
/**
* Cache for command existence checks to avoid repeated execSync calls.
*/
const commandExistsCache = new Map<string, boolean>();
/**
* Check if a command exists in the system with caching.
* Results are cached to improve performance in test environments.
*/
function commandExists(cmd: string): boolean {
if (commandExistsCache.has(cmd)) {
return commandExistsCache.get(cmd)!;
}
const exists = coreCommandExists(cmd);
commandExistsCache.set(cmd, exists);
return exists;
}
/**
* Get the actual executable command for an editor type.
*/
function getExecutableCommand(editorType: EditorType): string {
const commandConfig = editorCommands[editorType];
const commands =
process.platform === 'win32' ? commandConfig.win32 : commandConfig.default;
const availableCommand = commands.find((cmd) => commandExists(cmd));
if (!availableCommand) {
throw new Error(
`No available editor command found for ${editorType}. ` +
`Tried: ${commands.join(', ')}. ` +
`Please install one of these editors or set a different preferredEditor in settings.`,
);
}
return availableCommand;
}
/**
* Determines the editor command to use based on user preferences and platform.
*/
function getEditorCommand(preferredEditor?: EditorType): string {
if (preferredEditor) {
return getExecutableCommand(preferredEditor);
return preferredEditor;
}
// Platform-specific defaults with UI preference for macOS
@@ -100,14 +63,8 @@ export function useLaunchEditor() {
try {
setRawMode?.(false);
// On Windows, .cmd and .bat files need shell: true
const needsShell =
process.platform === 'win32' &&
(editorCommand.endsWith('.cmd') || editorCommand.endsWith('.bat'));
const { status, error } = spawnSync(editorCommand, editorArgs, {
stdio: 'inherit',
shell: needsShell,
});
if (error) throw error;

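The hook above ultimately runs the resolved editor with spawnSync, inheriting stdio and adding a shell only for Windows `.cmd`/`.bat` wrappers. A self-contained sketch of that launch step; `launchEditorSync` and the example command are illustrative:

```ts
import { spawnSync } from 'node:child_process';

// Run an editor command synchronously, blocking until the editor exits.
function launchEditorSync(editorCommand: string, args: string[]): void {
  // On Windows, .cmd and .bat wrappers must be executed through a shell.
  const needsShell =
    process.platform === 'win32' &&
    (editorCommand.endsWith('.cmd') || editorCommand.endsWith('.bat'));

  const { status, error } = spawnSync(editorCommand, args, {
    stdio: 'inherit',
    shell: needsShell,
  });

  if (error) throw error;
  if (typeof status === 'number' && status !== 0) {
    throw new Error(`${editorCommand} exited with status ${status}`);
  }
}

// Example: launchEditorSync('vim', ['notes.txt']);
```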
View File

@@ -573,45 +573,6 @@ describe('useSlashCompletion', () => {
});
});
it('should map completion items with descriptions for argument suggestions', async () => {
const mockCompletionFn = vi.fn().mockResolvedValue([
{ value: 'pdf', description: 'Create PDF documents' },
{ value: 'xlsx', description: 'Work with spreadsheets' },
]);
const slashCommands = [
createTestCommand({
name: 'skills',
description: 'List available skills',
completion: mockCompletionFn,
}),
];
const { result } = renderHook(() =>
useTestHarnessForSlashCompletion(
true,
'/skills ',
slashCommands,
mockCommandContext,
),
);
await waitFor(() => {
expect(result.current.suggestions).toEqual([
{
label: 'pdf',
value: 'pdf',
description: 'Create PDF documents',
},
{
label: 'xlsx',
value: 'xlsx',
description: 'Work with spreadsheets',
},
]);
});
});
it('should call command.completion with an empty string when args start with a space', async () => {
const mockCompletionFn = vi
.fn()

View File

@@ -9,7 +9,6 @@ import { AsyncFzf } from 'fzf';
import type { Suggestion } from '../components/SuggestionsDisplay.js';
import {
CommandKind,
type CommandCompletionItem,
type CommandContext,
type SlashCommand,
} from '../commands/types.js';
@@ -216,9 +215,10 @@ function useCommandSuggestions(
)) || [];
if (!signal.aborted) {
const finalSuggestions = results
.map((item) => toSuggestion(item))
.filter((suggestion): suggestion is Suggestion => !!suggestion);
const finalSuggestions = results.map((s) => ({
label: s,
value: s,
}));
setSuggestions(finalSuggestions);
setIsLoading(false);
}
@@ -310,20 +310,6 @@ function useCommandSuggestions(
return { suggestions, isLoading };
}
function toSuggestion(item: string | CommandCompletionItem): Suggestion | null {
if (typeof item === 'string') {
return { label: item, value: item };
}
if (!item.value) {
return null;
}
return {
label: item.label ?? item.value,
value: item.value,
description: item.description,
};
}
function useCompletionPositions(
query: string | null,
parserResult: CommandParserResult,

View File

@@ -62,7 +62,7 @@ const mockConfig = {
getAllowedTools: vi.fn(() => []),
getContentGeneratorConfig: () => ({
model: 'test-model',
authType: 'gemini',
authType: 'gemini-api-key',
}),
getUseSmartEdit: () => false,
getUseModelRouter: () => false,

View File

@@ -1,205 +0,0 @@
/**
* @license
* Copyright 2025 Qwen Team
* SPDX-License-Identifier: Apache-2.0
*/
import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
import {
getAvailableModelsForAuthType,
getFilteredQwenModels,
getOpenAIAvailableModelFromEnv,
isVisionModel,
getDefaultVisionModel,
AVAILABLE_MODELS_QWEN,
MAINLINE_VLM,
MAINLINE_CODER,
} from './availableModels.js';
import { AuthType, type Config } from '@qwen-code/qwen-code-core';
describe('availableModels', () => {
describe('AVAILABLE_MODELS_QWEN', () => {
it('should include coder model', () => {
const coderModel = AVAILABLE_MODELS_QWEN.find(
(m) => m.id === MAINLINE_CODER,
);
expect(coderModel).toBeDefined();
expect(coderModel?.isVision).toBeFalsy();
});
it('should include vision model', () => {
const visionModel = AVAILABLE_MODELS_QWEN.find(
(m) => m.id === MAINLINE_VLM,
);
expect(visionModel).toBeDefined();
expect(visionModel?.isVision).toBe(true);
});
});
describe('getFilteredQwenModels', () => {
it('should return all models when vision preview is enabled', () => {
const models = getFilteredQwenModels(true);
expect(models.length).toBe(AVAILABLE_MODELS_QWEN.length);
});
it('should filter out vision models when preview is disabled', () => {
const models = getFilteredQwenModels(false);
expect(models.every((m) => !m.isVision)).toBe(true);
});
});
describe('getOpenAIAvailableModelFromEnv', () => {
const originalEnv = process.env;
beforeEach(() => {
process.env = { ...originalEnv };
});
afterEach(() => {
process.env = originalEnv;
});
it('should return null when OPENAI_MODEL is not set', () => {
delete process.env['OPENAI_MODEL'];
expect(getOpenAIAvailableModelFromEnv()).toBeNull();
});
it('should return model from OPENAI_MODEL env var', () => {
process.env['OPENAI_MODEL'] = 'gpt-4-turbo';
const model = getOpenAIAvailableModelFromEnv();
expect(model?.id).toBe('gpt-4-turbo');
expect(model?.label).toBe('gpt-4-turbo');
});
it('should trim whitespace from env var', () => {
process.env['OPENAI_MODEL'] = ' gpt-4 ';
const model = getOpenAIAvailableModelFromEnv();
expect(model?.id).toBe('gpt-4');
});
});
describe('getAvailableModelsForAuthType', () => {
const originalEnv = process.env;
beforeEach(() => {
process.env = { ...originalEnv };
});
afterEach(() => {
process.env = originalEnv;
});
it('should return hard-coded qwen models for qwen-oauth', () => {
const models = getAvailableModelsForAuthType(AuthType.QWEN_OAUTH);
expect(models).toEqual(AVAILABLE_MODELS_QWEN);
});
it('should return hard-coded qwen models even when config is provided', () => {
const mockConfig = {
getAvailableModels: vi
.fn()
.mockReturnValue([
{ id: 'custom', label: 'Custom', authType: AuthType.QWEN_OAUTH },
]),
} as unknown as Config;
const models = getAvailableModelsForAuthType(
AuthType.QWEN_OAUTH,
mockConfig,
);
expect(models).toEqual(AVAILABLE_MODELS_QWEN);
});
it('should use config.getAvailableModels for openai authType when available', () => {
const mockModels = [
{
id: 'gpt-4',
label: 'GPT-4',
description: 'Test',
authType: AuthType.USE_OPENAI,
isVision: false,
},
];
const getAvailableModelsForAuthType = vi.fn().mockReturnValue(mockModels);
const mockConfigWithMethod = {
// Prefer the newer API when available.
getAvailableModelsForAuthType,
};
const models = getAvailableModelsForAuthType(
AuthType.USE_OPENAI,
mockConfigWithMethod as unknown as Config,
);
expect(getAvailableModelsForAuthType).toHaveBeenCalled();
expect(models[0].id).toBe('gpt-4');
});
it('should fallback to env var for openai when config returns empty', () => {
process.env['OPENAI_MODEL'] = 'fallback-model';
const mockConfig = {
getAvailableModelsForAuthType: vi.fn().mockReturnValue([]),
} as unknown as Config;
const models = getAvailableModelsForAuthType(
AuthType.USE_OPENAI,
mockConfig,
);
expect(models).toEqual([]);
});
it('should fallback to env var for openai when config throws', () => {
process.env['OPENAI_MODEL'] = 'fallback-model';
const mockConfig = {
getAvailableModelsForAuthType: vi.fn().mockImplementation(() => {
throw new Error('Registry not initialized');
}),
} as unknown as Config;
const models = getAvailableModelsForAuthType(
AuthType.USE_OPENAI,
mockConfig,
);
expect(models).toEqual([]);
});
it('should return env model for openai without config', () => {
process.env['OPENAI_MODEL'] = 'gpt-4-turbo';
const models = getAvailableModelsForAuthType(AuthType.USE_OPENAI);
expect(models[0].id).toBe('gpt-4-turbo');
});
it('should return empty array for openai without config or env', () => {
delete process.env['OPENAI_MODEL'];
const models = getAvailableModelsForAuthType(AuthType.USE_OPENAI);
expect(models).toEqual([]);
});
it('should return empty array for other auth types', () => {
const models = getAvailableModelsForAuthType(AuthType.USE_GEMINI);
expect(models).toEqual([]);
});
});
describe('isVisionModel', () => {
it('should return true for vision model', () => {
expect(isVisionModel(MAINLINE_VLM)).toBe(true);
});
it('should return false for non-vision model', () => {
expect(isVisionModel(MAINLINE_CODER)).toBe(false);
});
it('should return false for unknown model', () => {
expect(isVisionModel('unknown-model')).toBe(false);
});
});
describe('getDefaultVisionModel', () => {
it('should return the vision model ID', () => {
expect(getDefaultVisionModel()).toBe(MAINLINE_VLM);
});
});
});

View File

@@ -4,12 +4,7 @@
* SPDX-License-Identifier: Apache-2.0
*/
import {
AuthType,
DEFAULT_QWEN_MODEL,
type Config,
type AvailableModel as CoreAvailableModel,
} from '@qwen-code/qwen-code-core';
import { AuthType, DEFAULT_QWEN_MODEL } from '@qwen-code/qwen-code-core';
import { t } from '../../i18n/index.js';
export type AvailableModel = {
@@ -62,78 +57,20 @@ export function getFilteredQwenModels(
*/
export function getOpenAIAvailableModelFromEnv(): AvailableModel | null {
const id = process.env['OPENAI_MODEL']?.trim();
return id
? {
id,
label: id,
get description() {
return t('Configured via OPENAI_MODEL environment variable');
},
}
: null;
return id ? { id, label: id } : null;
}
export function getAnthropicAvailableModelFromEnv(): AvailableModel | null {
const id = process.env['ANTHROPIC_MODEL']?.trim();
return id
? {
id,
label: id,
get description() {
return t('Configured via ANTHROPIC_MODEL environment variable');
},
}
: null;
return id ? { id, label: id } : null;
}
/**
* Convert core AvailableModel to CLI AvailableModel format
*/
function convertCoreModelToCliModel(
coreModel: CoreAvailableModel,
): AvailableModel {
return {
id: coreModel.id,
label: coreModel.label,
description: coreModel.description,
isVision: coreModel.isVision ?? coreModel.capabilities?.vision ?? false,
};
}
/**
* Get available models for the given authType.
*
* If a Config object is provided, uses config.getAvailableModelsForAuthType().
* For qwen-oauth, always returns the hard-coded models.
* Falls back to environment variables only when no config is provided.
*/
export function getAvailableModelsForAuthType(
authType: AuthType,
config?: Config,
): AvailableModel[] {
// For qwen-oauth, always use hard-coded models; this aligns with the API gateway.
if (authType === AuthType.QWEN_OAUTH) {
return AVAILABLE_MODELS_QWEN;
}
// Use config's model registry when available
if (config) {
try {
const models = config.getAvailableModelsForAuthType(authType);
if (models.length > 0) {
return models.map(convertCoreModelToCliModel);
}
} catch {
// If config throws (e.g., not initialized), return empty array
}
// When a Config object is provided, we intentionally do NOT fall back to env-based
// "raw" models. These may reflect the currently effective config but should not be
// presented as selectable options in /model.
return [];
}
// Fall back to environment variables for specific auth types (no config provided)
switch (authType) {
case AuthType.QWEN_OAUTH:
return AVAILABLE_MODELS_QWEN;
case AuthType.USE_OPENAI: {
const openAIModel = getOpenAIAvailableModelFromEnv();
return openAIModel ? [openAIModel] : [];
@@ -143,10 +80,13 @@ export function getAvailableModelsForAuthType(
return anthropicModel ? [anthropicModel] : [];
}
default:
// For other auth types, return empty array for now
// This can be expanded later according to the design doc
return [];
}
}
/**
/**
* Hard code the default vision model as a string literal,
* until our coding model supports multimodal.

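As the comments above describe, model lookup prefers a Config-backed registry and only falls back to environment variables when no Config is supplied at all. A simplified sketch of that resolution order, with stand-in types instead of the real Config interface:

```ts
type AvailableModel = { id: string; label: string };

interface ModelRegistry {
  getAvailableModelsForAuthType(authType: string): AvailableModel[];
}

function resolveModels(
  authType: string,
  registry?: ModelRegistry,
): AvailableModel[] {
  if (registry) {
    try {
      return registry.getAvailableModelsForAuthType(authType);
    } catch {
      // Registry not initialized: deliberately no env fallback when a Config exists.
      return [];
    }
  }
  // No Config at all: fall back to an env var for OpenAI-compatible auth.
  if (authType === 'openai') {
    const id = process.env['OPENAI_MODEL']?.trim();
    return id ? [{ id, label: id }] : [];
  }
  return [];
}
```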
View File

@@ -201,21 +201,12 @@ export interface ToolDefinition {
description?: string;
}
export interface SkillDefinition {
name: string;
}
export type HistoryItemToolsList = HistoryItemBase & {
type: 'tools_list';
tools: ToolDefinition[];
showDescriptions: boolean;
};
export type HistoryItemSkillsList = HistoryItemBase & {
type: 'skills_list';
skills: SkillDefinition[];
};
// JSON-friendly types for using as a simple data model showing info about an
// MCP Server.
export interface JsonMcpTool {
@@ -277,7 +268,6 @@ export type HistoryItemWithoutId =
| HistoryItemCompression
| HistoryItemExtensionsList
| HistoryItemToolsList
| HistoryItemSkillsList
| HistoryItemMcpStatus;
export type HistoryItem = HistoryItemWithoutId & { id: number };
@@ -299,7 +289,6 @@ export enum MessageType {
SUMMARY = 'summary',
EXTENSIONS_LIST = 'extensions_list',
TOOLS_LIST = 'tools_list',
SKILLS_LIST = 'skills_list',
MCP_STATUS = 'mcp_status',
}

View File

@@ -1,133 +0,0 @@
/**
* @license
* Copyright 2025 Qwen Team
* SPDX-License-Identifier: Apache-2.0
*/
import {
AuthType,
type ContentGeneratorConfig,
type ContentGeneratorConfigSources,
resolveModelConfig,
type ModelConfigSourcesInput,
} from '@qwen-code/qwen-code-core';
import type { Settings } from '../config/settings.js';
export interface CliGenerationConfigInputs {
argv: {
model?: string | undefined;
openaiApiKey?: string | undefined;
openaiBaseUrl?: string | undefined;
openaiLogging?: boolean | undefined;
openaiLoggingDir?: string | undefined;
};
settings: Settings;
selectedAuthType: AuthType | undefined;
/**
* Injectable env for testability. Defaults to process.env at callsites.
*/
env?: Record<string, string | undefined>;
}
export interface ResolvedCliGenerationConfig {
/** The resolved model id (may be empty string if not resolvable at CLI layer) */
model: string;
/** API key for OpenAI-compatible auth */
apiKey: string;
/** Base URL for OpenAI-compatible auth */
baseUrl: string;
/** The full generation config to pass to core Config */
generationConfig: Partial<ContentGeneratorConfig>;
/** Source attribution for each resolved field */
sources: ContentGeneratorConfigSources;
}
export function getAuthTypeFromEnv(): AuthType | undefined {
if (process.env['OPENAI_API_KEY']) {
return AuthType.USE_OPENAI;
}
if (process.env['QWEN_OAUTH']) {
return AuthType.QWEN_OAUTH;
}
if (process.env['GEMINI_API_KEY']) {
return AuthType.USE_GEMINI;
}
if (process.env['GOOGLE_API_KEY']) {
return AuthType.USE_VERTEX_AI;
}
if (process.env['ANTHROPIC_API_KEY']) {
return AuthType.USE_ANTHROPIC;
}
return undefined;
}
/**
* Unified resolver for CLI generation config.
*
* Precedence (for OpenAI auth):
* - model: argv.model > OPENAI_MODEL > QWEN_MODEL > settings.model.name
* - apiKey: argv.openaiApiKey > OPENAI_API_KEY > settings.security.auth.apiKey
* - baseUrl: argv.openaiBaseUrl > OPENAI_BASE_URL > settings.security.auth.baseUrl
*
* For non-OpenAI auth, only argv.model override is respected at CLI layer.
*/
export function resolveCliGenerationConfig(
inputs: CliGenerationConfigInputs,
): ResolvedCliGenerationConfig {
const { argv, settings, selectedAuthType } = inputs;
const env = inputs.env ?? (process.env as Record<string, string | undefined>);
const authType = selectedAuthType;
const configSources: ModelConfigSourcesInput = {
authType,
cli: {
model: argv.model,
apiKey: argv.openaiApiKey,
baseUrl: argv.openaiBaseUrl,
},
settings: {
model: settings.model?.name,
apiKey: settings.security?.auth?.apiKey,
baseUrl: settings.security?.auth?.baseUrl,
generationConfig: settings.model?.generationConfig as
| Partial<ContentGeneratorConfig>
| undefined,
},
env,
};
const resolved = resolveModelConfig(configSources);
// Log warnings if any
for (const warning of resolved.warnings) {
console.warn(`[modelProviderUtils] ${warning}`);
}
// Resolve OpenAI logging config (CLI-specific, not part of core resolver)
const enableOpenAILogging =
(typeof argv.openaiLogging === 'undefined'
? settings.model?.enableOpenAILogging
: argv.openaiLogging) ?? false;
const openAILoggingDir =
argv.openaiLoggingDir || settings.model?.openAILoggingDir;
// Build the full generation config
// Note: we merge the resolved config with logging settings
const generationConfig: Partial<ContentGeneratorConfig> = {
...resolved.config,
enableOpenAILogging,
openAILoggingDir,
};
return {
model: resolved.config.model || '',
apiKey: resolved.config.apiKey || '',
baseUrl: resolved.config.baseUrl || '',
generationConfig,
sources: resolved.sources,
};
}

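The precedence documented above (CLI argument, then environment variables, then settings) amounts to picking the first defined value per field. A minimal sketch for the model field under OpenAI auth, with simplified input shapes; the real implementation delegates to core's resolveModelConfig:

```ts
interface ResolveInputs {
  argvModel?: string;
  env: Record<string, string | undefined>;
  settingsModel?: string;
}

// First defined value wins: argv.model > OPENAI_MODEL > QWEN_MODEL > settings.model.name
function resolveModelId({
  argvModel,
  env,
  settingsModel,
}: ResolveInputs): string {
  return (
    argvModel ?? env['OPENAI_MODEL'] ?? env['QWEN_MODEL'] ?? settingsModel ?? ''
  );
}

// Example:
// resolveModelId({ env: { QWEN_MODEL: 'qwen3-coder-plus' } }) === 'qwen3-coder-plus'
```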
View File

@@ -8,6 +8,7 @@ import { exec, execSync, spawn, type ChildProcess } from 'node:child_process';
import os from 'node:os';
import path from 'node:path';
import fs from 'node:fs';
import { readFile } from 'node:fs/promises';
import { fileURLToPath } from 'node:url';
import { quote, parse } from 'shell-quote';
import {
@@ -49,16 +50,16 @@ const BUILTIN_SEATBELT_PROFILES = [
/**
* Determines whether the sandbox container should be run with the current user's UID and GID.
* This is often necessary on Linux systems when using rootful Docker without userns-remap
* configured, to avoid permission issues with
* This is often necessary on Linux systems (especially Debian/Ubuntu based) when using
* rootful Docker without userns-remap configured, to avoid permission issues with
* mounted volumes.
*
* The behavior is controlled by the `SANDBOX_SET_UID_GID` environment variable:
* - If `SANDBOX_SET_UID_GID` is "1" or "true", this function returns `true`.
* - If `SANDBOX_SET_UID_GID` is "0" or "false", this function returns `false`.
* - If `SANDBOX_SET_UID_GID` is not set:
* - On Linux, it defaults to `true`.
* - On other OSes, it defaults to `false`.
* - On Debian/Ubuntu Linux, it defaults to `true`.
* - On other OSes, or if OS detection fails, it defaults to `false`.
*
* For more context on running Docker containers as non-root, see:
* https://medium.com/redbubble/running-a-docker-container-as-a-non-root-user-7d2e00f8ee15
@@ -75,20 +76,31 @@ async function shouldUseCurrentUserInSandbox(): Promise<boolean> {
return false;
}
// If environment variable is not explicitly set, check for Debian/Ubuntu Linux
if (os.platform() === 'linux') {
const debugEnv = [process.env['DEBUG'], process.env['DEBUG_MODE']].some(
(v) => v === 'true' || v === '1',
);
if (debugEnv) {
// Use stderr so it doesn't clutter normal STDOUT output (e.g. in `--prompt` runs).
console.error(
'INFO: Using current user UID/GID in Linux sandbox. Set SANDBOX_SET_UID_GID=false to disable.',
try {
const osReleaseContent = await readFile('/etc/os-release', 'utf8');
if (
osReleaseContent.includes('ID=debian') ||
osReleaseContent.includes('ID=ubuntu') ||
osReleaseContent.match(/^ID_LIKE=.*debian.*/m) || // Covers derivatives
osReleaseContent.match(/^ID_LIKE=.*ubuntu.*/m) // Covers derivatives
) {
// Note: here and below we use console.error for informational messages on stderr.
console.error(
'INFO: Defaulting to use current user UID/GID for Debian/Ubuntu-based Linux.',
);
return true;
}
} catch (_err) {
// If /etc/os-release is not found or unreadable, warn and fall back to
// the default (false).
console.warn(
'Warning: Could not read /etc/os-release to auto-detect Debian/Ubuntu for UID/GID default.',
);
}
return true;
}
return false;
return false; // Default to false if no other condition is met
}
// docker does not allow container names to contain ':' or '/', so we

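The new default above hinges on detecting Debian/Ubuntu-based systems from /etc/os-release (matching ID or ID_LIKE). A standalone sketch of that check; the `isDebianLike` name is illustrative:

```ts
import { readFile } from 'node:fs/promises';

// Returns true when /etc/os-release identifies a Debian/Ubuntu-based distro.
async function isDebianLike(): Promise<boolean> {
  try {
    const osRelease = await readFile('/etc/os-release', 'utf8');
    return (
      osRelease.includes('ID=debian') ||
      osRelease.includes('ID=ubuntu') ||
      /^ID_LIKE=.*debian.*/m.test(osRelease) ||
      /^ID_LIKE=.*ubuntu.*/m.test(osRelease)
    );
  } catch {
    // Missing or unreadable /etc/os-release: treat as not Debian-like.
    return false;
  }
}

// Example: isDebianLike().then((debianLike) => console.log({ debianLike }));
```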
View File

@@ -57,7 +57,6 @@ describe('systemInfo', () => {
getModel: vi.fn().mockReturnValue('test-model'),
getIdeMode: vi.fn().mockReturnValue(true),
getSessionId: vi.fn().mockReturnValue('test-session-id'),
getAuthType: vi.fn().mockReturnValue('test-auth'),
getContentGeneratorConfig: vi.fn().mockReturnValue({
baseUrl: 'https://api.openai.com',
}),
@@ -274,9 +273,6 @@ describe('systemInfo', () => {
// Update the mock context to use OpenAI auth
mockContext.services.settings.merged.security!.auth!.selectedType =
AuthType.USE_OPENAI;
vi.mocked(mockContext.services.config!.getAuthType).mockReturnValue(
AuthType.USE_OPENAI,
);
const extendedInfo = await getExtendedSystemInfo(mockContext);

View File

@@ -115,7 +115,8 @@ export async function getSystemInfo(
const sandboxEnv = getSandboxEnv();
const modelVersion = context.services.config?.getModel() || 'Unknown';
const cliVersion = await getCliVersion();
const selectedAuthType = context.services.config?.getAuthType() || '';
const selectedAuthType =
context.services.settings.merged.security?.auth?.selectedType || '';
const ideClient = await getIdeClientName(context);
const sessionId = context.services.config?.getSessionId() || 'unknown';

View File

@@ -14,20 +14,6 @@ import * as JsonOutputAdapterModule from './nonInteractive/io/JsonOutputAdapter.
import * as StreamJsonOutputAdapterModule from './nonInteractive/io/StreamJsonOutputAdapter.js';
import * as cleanupModule from './utils/cleanup.js';
// Helper to create a mock Config with modelsConfig
function createMockConfig(overrides?: Partial<Config>): Config {
return {
refreshAuth: vi.fn().mockResolvedValue('refreshed'),
getOutputFormat: vi.fn().mockReturnValue(OutputFormat.TEXT),
getContentGeneratorConfig: vi.fn().mockReturnValue({ authType: undefined }),
modelsConfig: {
getModel: vi.fn().mockReturnValue('default-model'),
getCurrentAuthType: vi.fn().mockReturnValue(AuthType.QWEN_OAUTH),
},
...overrides,
} as unknown as Config;
}
describe('validateNonInterActiveAuth', () => {
let originalEnvGeminiApiKey: string | undefined;
let originalEnvVertexAi: string | undefined;
@@ -121,20 +107,17 @@ describe('validateNonInterActiveAuth', () => {
vi.restoreAllMocks();
});
it('exits if validateAuthMethod fails for default auth type', async () => {
// Mock validateAuthMethod to return error (e.g., missing API key)
vi.spyOn(auth, 'validateAuthMethod').mockReturnValue(
'Missing API key for authentication',
);
const nonInteractiveConfig = createMockConfig({
it('exits if no auth type is configured or env vars set', async () => {
const nonInteractiveConfig = {
refreshAuth: refreshAuthMock,
modelsConfig: {
getModel: vi.fn().mockReturnValue('default-model'),
getCurrentAuthType: vi.fn().mockReturnValue(AuthType.QWEN_OAUTH),
},
});
getOutputFormat: vi.fn().mockReturnValue(OutputFormat.TEXT),
getContentGeneratorConfig: vi
.fn()
.mockReturnValue({ authType: undefined }),
} as unknown as Config;
try {
await validateNonInteractiveAuth(
undefined,
undefined,
nonInteractiveConfig,
mockSettings,
@@ -144,21 +127,22 @@ describe('validateNonInterActiveAuth', () => {
expect((e as Error).message).toContain('process.exit(1) called');
}
expect(consoleErrorSpy).toHaveBeenCalledWith(
expect.stringContaining('Missing API key'),
expect.stringContaining('Please set an Auth method'),
);
expect(processExitSpy).toHaveBeenCalledWith(1);
});
it('uses USE_OPENAI if OPENAI_API_KEY is set', async () => {
process.env['OPENAI_API_KEY'] = 'fake-openai-key';
const nonInteractiveConfig = createMockConfig({
const nonInteractiveConfig = {
refreshAuth: refreshAuthMock,
modelsConfig: {
getModel: vi.fn().mockReturnValue('default-model'),
getCurrentAuthType: vi.fn().mockReturnValue(AuthType.USE_OPENAI),
},
});
getOutputFormat: vi.fn().mockReturnValue(OutputFormat.TEXT),
getContentGeneratorConfig: vi
.fn()
.mockReturnValue({ authType: undefined }),
} as unknown as Config;
await validateNonInteractiveAuth(
undefined,
undefined,
nonInteractiveConfig,
mockSettings,
@@ -167,14 +151,15 @@ describe('validateNonInterActiveAuth', () => {
});
it('uses configured QWEN_OAUTH if provided', async () => {
const nonInteractiveConfig = createMockConfig({
const nonInteractiveConfig = {
refreshAuth: refreshAuthMock,
modelsConfig: {
getModel: vi.fn().mockReturnValue('default-model'),
getCurrentAuthType: vi.fn().mockReturnValue(AuthType.QWEN_OAUTH),
},
});
getOutputFormat: vi.fn().mockReturnValue(OutputFormat.TEXT),
getContentGeneratorConfig: vi
.fn()
.mockReturnValue({ authType: undefined }),
} as unknown as Config;
await validateNonInteractiveAuth(
AuthType.QWEN_OAUTH,
undefined,
nonInteractiveConfig,
mockSettings,
@@ -185,11 +170,16 @@ describe('validateNonInterActiveAuth', () => {
it('exits if validateAuthMethod returns error', async () => {
// Mock validateAuthMethod to return error
vi.spyOn(auth, 'validateAuthMethod').mockReturnValue('Auth error!');
const nonInteractiveConfig = createMockConfig({
const nonInteractiveConfig = {
refreshAuth: refreshAuthMock,
});
getOutputFormat: vi.fn().mockReturnValue(OutputFormat.TEXT),
getContentGeneratorConfig: vi
.fn()
.mockReturnValue({ authType: undefined }),
} as unknown as Config;
try {
await validateNonInteractiveAuth(
AuthType.USE_GEMINI,
undefined,
nonInteractiveConfig,
mockSettings,
@@ -207,13 +197,14 @@ describe('validateNonInterActiveAuth', () => {
const validateAuthMethodSpy = vi
.spyOn(auth, 'validateAuthMethod')
.mockReturnValue('Auth error!');
const nonInteractiveConfig = createMockConfig({
const nonInteractiveConfig = {
refreshAuth: refreshAuthMock,
});
} as unknown as Config;
// Even with validation errors, it should not exit
// because validation is skipped when useExternalAuth is true.
// Even with an invalid auth type, it should not exit
// because validation is skipped.
await validateNonInteractiveAuth(
'invalid-auth-type' as AuthType,
true, // useExternalAuth = true
nonInteractiveConfig,
mockSettings,
@@ -222,8 +213,8 @@ describe('validateNonInterActiveAuth', () => {
expect(validateAuthMethodSpy).not.toHaveBeenCalled();
expect(consoleErrorSpy).not.toHaveBeenCalled();
expect(processExitSpy).not.toHaveBeenCalled();
// refreshAuth is called with the authType from config.modelsConfig.getCurrentAuthType()
expect(refreshAuthMock).toHaveBeenCalledWith(AuthType.QWEN_OAUTH);
// We still expect refreshAuth to be called with the (invalid) type
expect(refreshAuthMock).toHaveBeenCalledWith('invalid-auth-type');
});
it('uses enforcedAuthType if provided', async () => {
@@ -231,14 +222,11 @@ describe('validateNonInterActiveAuth', () => {
mockSettings.merged.security!.auth!.selectedType = AuthType.USE_OPENAI;
// Set required env var for USE_OPENAI to ensure enforcedAuthType takes precedence
process.env['OPENAI_API_KEY'] = 'fake-key';
const nonInteractiveConfig = createMockConfig({
const nonInteractiveConfig = {
refreshAuth: refreshAuthMock,
modelsConfig: {
getModel: vi.fn().mockReturnValue('default-model'),
getCurrentAuthType: vi.fn().mockReturnValue(AuthType.USE_OPENAI),
},
});
} as unknown as Config;
await validateNonInteractiveAuth(
AuthType.USE_OPENAI,
undefined,
nonInteractiveConfig,
mockSettings,
@@ -249,15 +237,16 @@ describe('validateNonInterActiveAuth', () => {
it('exits if currentAuthType does not match enforcedAuthType', async () => {
mockSettings.merged.security!.auth!.enforcedType = AuthType.QWEN_OAUTH;
process.env['OPENAI_API_KEY'] = 'fake-key';
const nonInteractiveConfig = createMockConfig({
const nonInteractiveConfig = {
refreshAuth: refreshAuthMock,
modelsConfig: {
getModel: vi.fn().mockReturnValue('default-model'),
getCurrentAuthType: vi.fn().mockReturnValue(AuthType.USE_OPENAI),
},
});
getOutputFormat: vi.fn().mockReturnValue(OutputFormat.TEXT),
getContentGeneratorConfig: vi
.fn()
.mockReturnValue({ authType: undefined }),
} as unknown as Config;
try {
await validateNonInteractiveAuth(
AuthType.USE_OPENAI,
undefined,
nonInteractiveConfig,
mockSettings,
@@ -290,21 +279,18 @@ describe('validateNonInterActiveAuth', () => {
);
});
it('emits error result and exits when validateAuthMethod fails', async () => {
vi.spyOn(auth, 'validateAuthMethod').mockReturnValue(
'Missing API key for authentication',
);
const nonInteractiveConfig = createMockConfig({
it('emits error result and exits when no auth is configured', async () => {
const nonInteractiveConfig = {
refreshAuth: refreshAuthMock,
getOutputFormat: vi.fn().mockReturnValue(OutputFormat.JSON),
modelsConfig: {
getModel: vi.fn().mockReturnValue('default-model'),
getCurrentAuthType: vi.fn().mockReturnValue(AuthType.QWEN_OAUTH),
},
});
getContentGeneratorConfig: vi
.fn()
.mockReturnValue({ authType: undefined }),
} as unknown as Config;
try {
await validateNonInteractiveAuth(
undefined,
undefined,
nonInteractiveConfig,
mockSettings,
@@ -316,7 +302,9 @@ describe('validateNonInterActiveAuth', () => {
expect(emitResultMock).toHaveBeenCalledWith({
isError: true,
errorMessage: expect.stringContaining('Missing API key'),
errorMessage: expect.stringContaining(
'Please set an Auth method in your',
),
durationMs: 0,
apiDurationMs: 0,
numTurns: 0,
@@ -331,17 +319,17 @@ describe('validateNonInterActiveAuth', () => {
mockSettings.merged.security!.auth!.enforcedType = AuthType.QWEN_OAUTH;
process.env['OPENAI_API_KEY'] = 'fake-key';
const nonInteractiveConfig = createMockConfig({
const nonInteractiveConfig = {
refreshAuth: refreshAuthMock,
getOutputFormat: vi.fn().mockReturnValue(OutputFormat.JSON),
modelsConfig: {
getModel: vi.fn().mockReturnValue('default-model'),
getCurrentAuthType: vi.fn().mockReturnValue(AuthType.USE_OPENAI),
},
});
getContentGeneratorConfig: vi
.fn()
.mockReturnValue({ authType: undefined }),
} as unknown as Config;
try {
await validateNonInteractiveAuth(
undefined,
undefined,
nonInteractiveConfig,
mockSettings,
@@ -366,21 +354,21 @@ describe('validateNonInterActiveAuth', () => {
expect(consoleErrorSpy).not.toHaveBeenCalled();
});
it('emits error result and exits when API key validation fails', async () => {
it('emits error result and exits when validateAuthMethod fails', async () => {
vi.spyOn(auth, 'validateAuthMethod').mockReturnValue('Auth error!');
process.env['OPENAI_API_KEY'] = 'fake-key';
const nonInteractiveConfig = createMockConfig({
const nonInteractiveConfig = {
refreshAuth: refreshAuthMock,
getOutputFormat: vi.fn().mockReturnValue(OutputFormat.JSON),
modelsConfig: {
getModel: vi.fn().mockReturnValue('default-model'),
getCurrentAuthType: vi.fn().mockReturnValue(AuthType.USE_OPENAI),
},
});
getContentGeneratorConfig: vi
.fn()
.mockReturnValue({ authType: undefined }),
} as unknown as Config;
try {
await validateNonInteractiveAuth(
AuthType.USE_OPENAI,
undefined,
nonInteractiveConfig,
mockSettings,
@@ -425,22 +413,19 @@ describe('validateNonInterActiveAuth', () => {
);
});
it('emits error result and exits when validateAuthMethod fails', async () => {
vi.spyOn(auth, 'validateAuthMethod').mockReturnValue(
'Missing API key for authentication',
);
const nonInteractiveConfig = createMockConfig({
it('emits error result and exits when no auth is configured', async () => {
const nonInteractiveConfig = {
refreshAuth: refreshAuthMock,
getOutputFormat: vi.fn().mockReturnValue(OutputFormat.STREAM_JSON),
getIncludePartialMessages: vi.fn().mockReturnValue(false),
modelsConfig: {
getModel: vi.fn().mockReturnValue('default-model'),
getCurrentAuthType: vi.fn().mockReturnValue(AuthType.QWEN_OAUTH),
},
});
getContentGeneratorConfig: vi
.fn()
.mockReturnValue({ authType: undefined }),
} as unknown as Config;
try {
await validateNonInteractiveAuth(
undefined,
undefined,
nonInteractiveConfig,
mockSettings,
@@ -452,7 +437,9 @@ describe('validateNonInterActiveAuth', () => {
expect(emitResultMock).toHaveBeenCalledWith({
isError: true,
errorMessage: expect.stringContaining('Missing API key'),
errorMessage: expect.stringContaining(
'Please set an Auth method in your',
),
durationMs: 0,
apiDurationMs: 0,
numTurns: 0,
@@ -467,18 +454,18 @@ describe('validateNonInterActiveAuth', () => {
mockSettings.merged.security!.auth!.enforcedType = AuthType.QWEN_OAUTH;
process.env['OPENAI_API_KEY'] = 'fake-key';
const nonInteractiveConfig = createMockConfig({
const nonInteractiveConfig = {
refreshAuth: refreshAuthMock,
getOutputFormat: vi.fn().mockReturnValue(OutputFormat.STREAM_JSON),
getIncludePartialMessages: vi.fn().mockReturnValue(false),
modelsConfig: {
getModel: vi.fn().mockReturnValue('default-model'),
getCurrentAuthType: vi.fn().mockReturnValue(AuthType.USE_OPENAI),
},
});
getContentGeneratorConfig: vi
.fn()
.mockReturnValue({ authType: undefined }),
} as unknown as Config;
try {
await validateNonInteractiveAuth(
undefined,
undefined,
nonInteractiveConfig,
mockSettings,
@@ -503,22 +490,22 @@ describe('validateNonInterActiveAuth', () => {
expect(consoleErrorSpy).not.toHaveBeenCalled();
});
it('emits error result and exits when API key validation fails', async () => {
it('emits error result and exits when validateAuthMethod fails', async () => {
vi.spyOn(auth, 'validateAuthMethod').mockReturnValue('Auth error!');
process.env['OPENAI_API_KEY'] = 'fake-key';
const nonInteractiveConfig = createMockConfig({
const nonInteractiveConfig = {
refreshAuth: refreshAuthMock,
getOutputFormat: vi.fn().mockReturnValue(OutputFormat.STREAM_JSON),
getIncludePartialMessages: vi.fn().mockReturnValue(false),
modelsConfig: {
getModel: vi.fn().mockReturnValue('default-model'),
getCurrentAuthType: vi.fn().mockReturnValue(AuthType.USE_OPENAI),
},
});
getContentGeneratorConfig: vi
.fn()
.mockReturnValue({ authType: undefined }),
} as unknown as Config;
try {
await validateNonInteractiveAuth(
AuthType.USE_OPENAI,
undefined,
nonInteractiveConfig,
mockSettings,

View File

@@ -5,42 +5,69 @@
*/
import type { Config } from '@qwen-code/qwen-code-core';
import { OutputFormat } from '@qwen-code/qwen-code-core';
import { AuthType, OutputFormat } from '@qwen-code/qwen-code-core';
import { USER_SETTINGS_PATH } from './config/settings.js';
import { validateAuthMethod } from './config/auth.js';
import { type LoadedSettings } from './config/settings.js';
import { JsonOutputAdapter } from './nonInteractive/io/JsonOutputAdapter.js';
import { StreamJsonOutputAdapter } from './nonInteractive/io/StreamJsonOutputAdapter.js';
import { runExitCleanup } from './utils/cleanup.js';
function getAuthTypeFromEnv(): AuthType | undefined {
if (process.env['OPENAI_API_KEY']) {
return AuthType.USE_OPENAI;
}
if (process.env['QWEN_OAUTH']) {
return AuthType.QWEN_OAUTH;
}
if (process.env['GEMINI_API_KEY']) {
return AuthType.USE_GEMINI;
}
if (process.env['GOOGLE_API_KEY']) {
return AuthType.USE_VERTEX_AI;
}
if (process.env['ANTHROPIC_API_KEY']) {
return AuthType.USE_ANTHROPIC;
}
return undefined;
}
export async function validateNonInteractiveAuth(
configuredAuthType: AuthType | undefined,
useExternalAuth: boolean | undefined,
nonInteractiveConfig: Config,
settings: LoadedSettings,
): Promise<Config> {
try {
// Get the actual authType from config which has already resolved CLI args, env vars, and settings
const authType = nonInteractiveConfig.modelsConfig.getCurrentAuthType();
if (!authType) {
throw new Error(
'No auth type is selected. Please configure an auth type (e.g. via settings or `--auth-type`) before running in non-interactive mode.',
);
}
const resolvedAuthType: NonNullable<typeof authType> = authType;
const enforcedType = settings.merged.security?.auth?.enforcedType;
if (enforcedType && enforcedType !== resolvedAuthType) {
const message = `The configured auth type is ${enforcedType}, but the current auth type is ${resolvedAuthType}. Please re-authenticate with the correct type.`;
if (enforcedType) {
const currentAuthType = getAuthTypeFromEnv();
if (currentAuthType !== enforcedType) {
const message = `The configured auth type is ${enforcedType}, but the current auth type is ${currentAuthType}. Please re-authenticate with the correct type.`;
throw new Error(message);
}
}
const effectiveAuthType =
enforcedType || configuredAuthType || getAuthTypeFromEnv();
if (!effectiveAuthType) {
const message = `Please set an Auth method in your ${USER_SETTINGS_PATH} or specify one of the following environment variables before running: QWEN_OAUTH, OPENAI_API_KEY`;
throw new Error(message);
}
const authType: AuthType = effectiveAuthType as AuthType;
if (!useExternalAuth) {
const err = validateAuthMethod(resolvedAuthType, nonInteractiveConfig);
const err = validateAuthMethod(String(authType));
if (err != null) {
throw new Error(err);
}
}
await nonInteractiveConfig.refreshAuth(resolvedAuthType);
await nonInteractiveConfig.refreshAuth(authType);
return nonInteractiveConfig;
} catch (error) {
const outputFormat = nonInteractiveConfig.getOutputFormat();
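For orientation, a minimal sketch of the resolution order implemented above (enforced type first, then the configured type, then environment detection). The `SketchAuthType` helper names are illustrative and not part of the codebase; the real code additionally rejects an enforced type that does not match the environment:

```typescript
// Illustrative helper mirroring the precedence used above:
// enforcedType, then configuredAuthType, then environment detection
// (OPENAI_API_KEY is checked before QWEN_OAUTH, as in getAuthTypeFromEnv).
type SketchAuthType = 'openai' | 'qwen-oauth';

function detectFromEnv(
  env: Record<string, string | undefined>,
): SketchAuthType | undefined {
  if (env['OPENAI_API_KEY']) return 'openai';
  if (env['QWEN_OAUTH']) return 'qwen-oauth';
  return undefined;
}

function resolveEffectiveAuthType(
  enforced: SketchAuthType | undefined,
  configured: SketchAuthType | undefined,
  env: Record<string, string | undefined>,
): SketchAuthType | undefined {
  return enforced ?? configured ?? detectFromEnv(env);
}

// With no enforced or configured type and OPENAI_API_KEY set, OpenAI auth is selected.
console.log(resolveEffectiveAuthType(undefined, undefined, { OPENAI_API_KEY: 'fake-key' }));
// -> 'openai'
```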

View File

@@ -8,8 +8,12 @@ export * from './src/index.js';
export { Storage } from './src/config/storage.js';
export {
DEFAULT_QWEN_MODEL,
DEFAULT_QWEN_FLASH_MODEL,
DEFAULT_QWEN_EMBEDDING_MODEL,
DEFAULT_GEMINI_MODEL,
DEFAULT_GEMINI_MODEL_AUTO,
DEFAULT_GEMINI_FLASH_MODEL,
DEFAULT_GEMINI_FLASH_LITE_MODEL,
DEFAULT_GEMINI_EMBEDDING_MODEL,
} from './src/config/models.js';
export {
serializeTerminalToObject,

View File

@@ -1,6 +1,6 @@
{
"name": "@qwen-code/qwen-code-core",
"version": "0.7.0",
"version": "0.6.0-preview.3",
"description": "Qwen Code Core",
"repository": {
"type": "git",
@@ -27,6 +27,7 @@
"@google/genai": "1.30.0",
"@modelcontextprotocol/sdk": "^1.25.1",
"@opentelemetry/api": "^1.9.0",
"async-mutex": "^0.5.0",
"@opentelemetry/exporter-logs-otlp-grpc": "^0.203.0",
"@opentelemetry/exporter-logs-otlp-http": "^0.203.0",
"@opentelemetry/exporter-metrics-otlp-grpc": "^0.203.0",
@@ -39,9 +40,7 @@
"@xterm/headless": "5.5.0",
"ajv": "^8.17.1",
"ajv-formats": "^3.0.0",
"async-mutex": "^0.5.0",
"chardet": "^2.1.0",
"chokidar": "^4.0.3",
"diff": "^7.0.0",
"dotenv": "^17.1.0",
"fast-levenshtein": "^2.0.6",

View File

@@ -15,16 +15,10 @@ import {
DEFAULT_OTLP_ENDPOINT,
QwenLogger,
} from '../telemetry/index.js';
import type {
ContentGenerator,
ContentGeneratorConfig,
} from '../core/contentGenerator.js';
import { DEFAULT_DASHSCOPE_BASE_URL } from '../core/openaiContentGenerator/constants.js';
import type { ContentGeneratorConfig } from '../core/contentGenerator.js';
import {
AuthType,
createContentGenerator,
createContentGeneratorConfig,
resolveContentGeneratorConfigWithSources,
} from '../core/contentGenerator.js';
import { GeminiClient } from '../core/client.js';
import { GitService } from '../services/gitService.js';
@@ -214,19 +208,6 @@ describe('Server Config (config.ts)', () => {
vi.spyOn(QwenLogger.prototype, 'logStartSessionEvent').mockImplementation(
async () => undefined,
);
// Setup default mock for resolveContentGeneratorConfigWithSources
vi.mocked(resolveContentGeneratorConfigWithSources).mockImplementation(
(_config, authType, generationConfig) => ({
config: {
...generationConfig,
authType,
model: generationConfig?.model || MODEL,
apiKey: 'test-key',
} as ContentGeneratorConfig,
sources: {},
}),
);
});
describe('initialize', () => {
@@ -274,28 +255,31 @@ describe('Server Config (config.ts)', () => {
const mockContentConfig = {
apiKey: 'test-key',
model: 'qwen3-coder-plus',
authType,
};
vi.mocked(resolveContentGeneratorConfigWithSources).mockReturnValue({
config: mockContentConfig as ContentGeneratorConfig,
sources: {},
});
vi.mocked(createContentGeneratorConfig).mockReturnValue(
mockContentConfig,
);
// Set fallback mode to true to ensure it gets reset
config.setFallbackMode(true);
expect(config.isInFallbackMode()).toBe(true);
await config.refreshAuth(authType);
expect(resolveContentGeneratorConfigWithSources).toHaveBeenCalledWith(
expect(createContentGeneratorConfig).toHaveBeenCalledWith(
config,
authType,
expect.objectContaining({
{
model: MODEL,
}),
expect.anything(),
expect.anything(),
baseUrl: undefined,
},
);
// Verify that contentGeneratorConfig is updated
expect(config.getContentGeneratorConfig()).toEqual(mockContentConfig);
expect(GeminiClient).toHaveBeenCalledWith(config);
// Verify that fallback mode is reset
expect(config.isInFallbackMode()).toBe(false);
});
it('should not strip thoughts when switching from Vertex to GenAI', async () => {
@@ -316,129 +300,6 @@ describe('Server Config (config.ts)', () => {
});
});
describe('model switching optimization (QWEN_OAUTH)', () => {
it('should switch qwen-oauth model in-place without refreshing auth when safe', async () => {
const config = new Config(baseParams);
const mockContentConfig: ContentGeneratorConfig = {
authType: AuthType.QWEN_OAUTH,
model: 'coder-model',
apiKey: 'QWEN_OAUTH_DYNAMIC_TOKEN',
baseUrl: DEFAULT_DASHSCOPE_BASE_URL,
timeout: 60000,
maxRetries: 3,
} as ContentGeneratorConfig;
vi.mocked(resolveContentGeneratorConfigWithSources).mockImplementation(
(_config, authType, generationConfig) => ({
config: {
...mockContentConfig,
authType,
model: generationConfig?.model ?? mockContentConfig.model,
} as ContentGeneratorConfig,
sources: {},
}),
);
vi.mocked(createContentGenerator).mockResolvedValue({
generateContent: vi.fn(),
generateContentStream: vi.fn(),
countTokens: vi.fn(),
embedContent: vi.fn(),
} as unknown as ContentGenerator);
// Establish initial qwen-oauth content generator config/content generator.
await config.refreshAuth(AuthType.QWEN_OAUTH);
// Spy after initial refresh to ensure model switch does not re-trigger refreshAuth.
const refreshSpy = vi.spyOn(config, 'refreshAuth');
await config.switchModel(AuthType.QWEN_OAUTH, 'vision-model');
expect(config.getModel()).toBe('vision-model');
expect(refreshSpy).not.toHaveBeenCalled();
// Called once during initial refreshAuth + once during handleModelChange diffing.
expect(
vi.mocked(resolveContentGeneratorConfigWithSources),
).toHaveBeenCalledTimes(2);
expect(vi.mocked(createContentGenerator)).toHaveBeenCalledTimes(1);
});
});
describe('model switching with different credentials (OpenAI)', () => {
it('should refresh auth when switching to model with different envKey', async () => {
// This test verifies the fix for switching between modelProvider models
// with different envKeys (e.g., deepseek-chat with DEEPSEEK_API_KEY)
const configWithModelProviders = new Config({
...baseParams,
authType: AuthType.USE_OPENAI,
modelProvidersConfig: {
openai: [
{
id: 'model-a',
name: 'Model A',
baseUrl: 'https://api.example.com/v1',
envKey: 'API_KEY_A',
},
{
id: 'model-b',
name: 'Model B',
baseUrl: 'https://api.example.com/v1',
envKey: 'API_KEY_B',
},
],
},
});
const mockContentConfigA: ContentGeneratorConfig = {
authType: AuthType.USE_OPENAI,
model: 'model-a',
apiKey: 'key-a',
baseUrl: 'https://api.example.com/v1',
} as ContentGeneratorConfig;
const mockContentConfigB: ContentGeneratorConfig = {
authType: AuthType.USE_OPENAI,
model: 'model-b',
apiKey: 'key-b',
baseUrl: 'https://api.example.com/v1',
} as ContentGeneratorConfig;
vi.mocked(resolveContentGeneratorConfigWithSources).mockImplementation(
(_config, _authType, generationConfig) => {
const model = generationConfig?.model;
return {
config:
model === 'model-b' ? mockContentConfigB : mockContentConfigA,
sources: {},
};
},
);
vi.mocked(createContentGenerator).mockResolvedValue({
generateContent: vi.fn(),
generateContentStream: vi.fn(),
countTokens: vi.fn(),
embedContent: vi.fn(),
} as unknown as ContentGenerator);
// Initialize with model-a
await configWithModelProviders.refreshAuth(AuthType.USE_OPENAI);
// Spy on refreshAuth to verify it's called when switching to model-b
const refreshSpy = vi.spyOn(configWithModelProviders, 'refreshAuth');
// Switch to model-b (different envKey)
await configWithModelProviders.switchModel(
AuthType.USE_OPENAI,
'model-b',
);
// Should trigger full refresh because envKey changed
expect(refreshSpy).toHaveBeenCalledWith(AuthType.USE_OPENAI);
expect(configWithModelProviders.getModel()).toBe('model-b');
});
});
it('Config constructor should store userMemory correctly', () => {
const config = new Config(baseParams);

View File

@@ -16,8 +16,9 @@ import { ProxyAgent, setGlobalDispatcher } from 'undici';
import type {
ContentGenerator,
ContentGeneratorConfig,
AuthType,
} from '../core/contentGenerator.js';
import type { ContentGeneratorConfigSources } from '../core/contentGenerator.js';
import type { FallbackModelHandler } from '../fallback/types.js';
import type { MCPOAuthConfig } from '../mcp/oauth-provider.js';
import type { ShellExecutionConfig } from '../services/shellExecutionService.js';
import type { AnyToolInvocation } from '../tools/tools.js';
@@ -26,9 +27,8 @@ import type { AnyToolInvocation } from '../tools/tools.js';
import { BaseLlmClient } from '../core/baseLlmClient.js';
import { GeminiClient } from '../core/client.js';
import {
AuthType,
createContentGenerator,
resolveContentGeneratorConfigWithSources,
createContentGeneratorConfig,
} from '../core/contentGenerator.js';
import { tokenLimit } from '../core/tokenLimits.js';
@@ -94,7 +94,7 @@ import {
DEFAULT_FILE_FILTERING_OPTIONS,
DEFAULT_MEMORY_FILE_FILTERING_OPTIONS,
} from './constants.js';
import { DEFAULT_QWEN_EMBEDDING_MODEL } from './models.js';
import { DEFAULT_QWEN_EMBEDDING_MODEL, DEFAULT_QWEN_MODEL } from './models.js';
import { Storage } from './storage.js';
import { ChatRecordingService } from '../services/chatRecordingService.js';
import {
@@ -103,12 +103,6 @@ import {
} from '../services/sessionService.js';
import { randomUUID } from 'node:crypto';
import {
ModelsConfig,
type ModelProvidersConfig,
type AvailableModel,
} from '../models/index.js';
// Re-export types
export type { AnyToolInvocation, FileFilteringOptions, MCPOAuthConfig };
export {
@@ -324,11 +318,6 @@ export interface ConfigParameters {
ideMode?: boolean;
authType?: AuthType;
generationConfig?: Partial<ContentGeneratorConfig>;
/**
* Optional source map for generationConfig fields (e.g. CLI/env/settings attribution).
* This is used to produce per-field source badges in the UI.
*/
generationConfigSources?: ContentGeneratorConfigSources;
cliVersion?: string;
loadMemoryFromIncludeDirectories?: boolean;
chatRecording?: boolean;
@@ -364,8 +353,6 @@ export interface ConfigParameters {
sdkMode?: boolean;
sessionSubagents?: SubagentConfig[];
channel?: string;
/** Model providers configuration grouped by authType */
modelProvidersConfig?: ModelProvidersConfig;
}
function normalizeConfigOutputFormat(
@@ -407,12 +394,9 @@ export class Config {
private skillManager!: SkillManager;
private fileSystemService: FileSystemService;
private contentGeneratorConfig!: ContentGeneratorConfig;
private contentGeneratorConfigSources: ContentGeneratorConfigSources = {};
private contentGenerator!: ContentGenerator;
private _generationConfig: Partial<ContentGeneratorConfig>;
private readonly embeddingModel: string;
private _modelsConfig!: ModelsConfig;
private readonly modelProvidersConfig?: ModelProvidersConfig;
private readonly sandbox: SandboxConfig | undefined;
private readonly targetDir: string;
private workspaceContext: WorkspaceContext;
@@ -461,6 +445,7 @@ export class Config {
private readonly folderTrust: boolean;
private ideMode: boolean;
private inFallbackMode = false;
private readonly maxSessionTurns: number;
private readonly sessionTokenLimit: number;
private readonly listExtensions: boolean;
@@ -469,6 +454,8 @@ export class Config {
name: string;
extensionName: string;
}>;
fallbackModelHandler?: FallbackModelHandler;
private quotaErrorOccurred: boolean = false;
private readonly summarizeToolOutput:
| Record<string, SummarizeToolOutputSettings>
| undefined;
@@ -583,7 +570,13 @@ export class Config {
this.folderTrustFeature = params.folderTrustFeature ?? false;
this.folderTrust = params.folderTrust ?? false;
this.ideMode = params.ideMode ?? false;
this.modelProvidersConfig = params.modelProvidersConfig;
this._generationConfig = {
model: params.model,
...(params.generationConfig || {}),
baseUrl: params.generationConfig?.baseUrl,
};
this.contentGeneratorConfig = this
._generationConfig as ContentGeneratorConfig;
this.cliVersion = params.cliVersion;
this.chatRecordingEnabled = params.chatRecording ?? true;
@@ -626,22 +619,6 @@ export class Config {
setGeminiMdFilename(params.contextFileName);
}
// Create ModelsConfig for centralized model management
// Prefer params.authType over generationConfig.authType because:
// - params.authType preserves undefined (user hasn't selected yet)
// - generationConfig.authType may have a default value from resolvers
this._modelsConfig = new ModelsConfig({
initialAuthType: params.authType ?? params.generationConfig?.authType,
modelProvidersConfig: this.modelProvidersConfig,
generationConfig: {
model: params.model,
...(params.generationConfig || {}),
baseUrl: params.generationConfig?.baseUrl,
},
generationConfigSources: params.generationConfigSources,
onModelChange: this.handleModelChange.bind(this),
});
if (this.telemetrySettings.enabled) {
initializeTelemetry(this);
}
@@ -673,7 +650,6 @@ export class Config {
this.promptRegistry = new PromptRegistry();
this.subagentManager = new SubagentManager(this);
this.skillManager = new SkillManager(this);
await this.skillManager.startWatching();
// Load session subagents if they were provided before initialization
if (this.sessionSubagents.length > 0) {
@@ -693,61 +669,45 @@ export class Config {
return this.contentGenerator;
}
/**
* Get the ModelsConfig instance for model-related operations.
* External code (e.g., CLI) can use this to access model configuration.
*/
get modelsConfig(): ModelsConfig {
return this._modelsConfig;
}
/**
* Updates the credentials in the generation config.
* Exclusive for `OpenAIKeyPrompt` to update credentials via `/auth`
* Delegates to ModelsConfig.
* This is needed when credentials are set after Config construction.
*/
updateCredentials(credentials: {
apiKey?: string;
baseUrl?: string;
model?: string;
}): void {
this._modelsConfig.updateCredentials(credentials);
if (credentials.apiKey) {
this._generationConfig.apiKey = credentials.apiKey;
}
if (credentials.baseUrl) {
this._generationConfig.baseUrl = credentials.baseUrl;
}
if (credentials.model) {
this._generationConfig.model = credentials.model;
}
}
/**
* Refresh authentication and rebuild ContentGenerator.
*/
async refreshAuth(authMethod: AuthType, isInitialAuth?: boolean) {
// Sync modelsConfig state for this auth refresh
const modelId = this._modelsConfig.getModel();
this._modelsConfig.syncAfterAuthRefresh(authMethod, modelId);
// Check and consume cached credentials flag
const requireCached =
this._modelsConfig.consumeRequireCachedCredentialsFlag();
const { config, sources } = resolveContentGeneratorConfigWithSources(
const newContentGeneratorConfig = createContentGeneratorConfig(
this,
authMethod,
this._modelsConfig.getGenerationConfig(),
this._modelsConfig.getGenerationConfigSources(),
{
strictModelProvider:
this._modelsConfig.isStrictModelProviderSelection(),
},
this._generationConfig,
);
const newContentGeneratorConfig = config;
this.contentGenerator = await createContentGenerator(
newContentGeneratorConfig,
this,
requireCached ? true : isInitialAuth,
isInitialAuth,
);
// Only assign to instance properties after successful initialization
this.contentGeneratorConfig = newContentGeneratorConfig;
this.contentGeneratorConfigSources = sources;
// Initialize BaseLlmClient now that the ContentGenerator is available
this.baseLlmClient = new BaseLlmClient(this.contentGenerator, this);
// Reset the session flag since we're explicitly changing auth and using default model
this.inFallbackMode = false;
}
/**
@@ -774,13 +734,6 @@ export class Config {
return this.sessionId;
}
/**
* Releases resources owned by the config instance.
*/
async shutdown(): Promise<void> {
this.skillManager?.stopWatching();
}
/**
* Starts a new session and resets session-scoped services.
*/
@@ -814,125 +767,31 @@ export class Config {
return this.contentGeneratorConfig;
}
getContentGeneratorConfigSources(): ContentGeneratorConfigSources {
// If contentGeneratorConfigSources is empty (before initializeAuth),
// get sources from ModelsConfig
if (
Object.keys(this.contentGeneratorConfigSources).length === 0 &&
this._modelsConfig
) {
return this._modelsConfig.getGenerationConfigSources();
}
return this.contentGeneratorConfigSources;
}
getModel(): string {
return this.contentGeneratorConfig?.model || this._modelsConfig.getModel();
return this.contentGeneratorConfig?.model || DEFAULT_QWEN_MODEL;
}
/**
* Set model programmatically (e.g., VLM auto-switch, fallback).
* Delegates to ModelsConfig.
*/
async setModel(
newModel: string,
metadata?: { reason?: string; context?: string },
_metadata?: { reason?: string; context?: string },
): Promise<void> {
await this._modelsConfig.setModel(newModel, metadata);
// Also update contentGeneratorConfig for hot-update compatibility
if (this.contentGeneratorConfig) {
this.contentGeneratorConfig.model = newModel;
}
// TODO: Log _metadata for telemetry if needed
// This _metadata can be used for tracking model switches (reason, context)
}
/**
* Handle model change from ModelsConfig.
* This updates the content generator config with the new model settings.
*/
private async handleModelChange(
authType: AuthType,
requiresRefresh: boolean,
): Promise<void> {
if (!this.contentGeneratorConfig) {
return;
}
// Hot update path: only supported for qwen-oauth.
// For other auth types we always refresh to recreate the ContentGenerator.
//
// Rationale:
// - Non-qwen providers may need to re-validate credentials / baseUrl / envKey.
// - ModelsConfig.applyResolvedModelDefaults can clear or change credentials sources.
// - Refresh keeps runtime behavior consistent and centralized.
if (authType === AuthType.QWEN_OAUTH && !requiresRefresh) {
const { config, sources } = resolveContentGeneratorConfigWithSources(
this,
authType,
this._modelsConfig.getGenerationConfig(),
this._modelsConfig.getGenerationConfigSources(),
{
strictModelProvider:
this._modelsConfig.isStrictModelProviderSelection(),
},
);
// Hot-update fields (qwen-oauth models share the same auth + client).
this.contentGeneratorConfig.model = config.model;
this.contentGeneratorConfig.samplingParams = config.samplingParams;
this.contentGeneratorConfig.disableCacheControl =
config.disableCacheControl;
if ('model' in sources) {
this.contentGeneratorConfigSources['model'] = sources['model'];
}
if ('samplingParams' in sources) {
this.contentGeneratorConfigSources['samplingParams'] =
sources['samplingParams'];
}
if ('disableCacheControl' in sources) {
this.contentGeneratorConfigSources['disableCacheControl'] =
sources['disableCacheControl'];
}
return;
}
// Full refresh path
await this.refreshAuth(authType);
isInFallbackMode(): boolean {
return this.inFallbackMode;
}
/**
* Get available models for the current authType.
* Delegates to ModelsConfig.
*/
getAvailableModels(): AvailableModel[] {
return this._modelsConfig.getAvailableModels();
setFallbackMode(active: boolean): void {
this.inFallbackMode = active;
}
/**
* Get available models for a specific authType.
* Delegates to ModelsConfig.
*/
getAvailableModelsForAuthType(authType: AuthType): AvailableModel[] {
return this._modelsConfig.getAvailableModelsForAuthType(authType);
}
/**
* Switch authType+model via registry-backed selection.
* This triggers a refresh of the ContentGenerator when required (always on authType changes).
* For qwen-oauth model switches that are hot-update safe, this may update in place.
*
* @param authType - Target authentication type
* @param modelId - Target model ID
* @param options - Additional options like requireCachedCredentials
* @param metadata - Metadata for logging/tracking
*/
async switchModel(
authType: AuthType,
modelId: string,
options?: { requireCachedCredentials?: boolean },
metadata?: { reason?: string; context?: string },
): Promise<void> {
await this._modelsConfig.switchModel(authType, modelId, options, metadata);
setFallbackModelHandler(handler: FallbackModelHandler): void {
this.fallbackModelHandler = handler;
}
getMaxSessionTurns(): number {
@@ -943,6 +802,14 @@ export class Config {
return this.sessionTokenLimit;
}
setQuotaErrorOccurred(value: boolean): void {
this.quotaErrorOccurred = value;
}
getQuotaErrorOccurred(): boolean {
return this.quotaErrorOccurred;
}
getEmbeddingModel(): string {
return this.embeddingModel;
}
@@ -1284,7 +1151,7 @@ export class Config {
}
getAuthType(): AuthType | undefined {
return this.contentGeneratorConfig?.authType;
return this.contentGeneratorConfig.authType;
}
getCliVersion(): string | undefined {

View File

@@ -0,0 +1,99 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { describe, it, expect, beforeEach, vi } from 'vitest';
import { Config } from './config.js';
import { DEFAULT_GEMINI_MODEL, DEFAULT_GEMINI_FLASH_MODEL } from './models.js';
import fs from 'node:fs';
vi.mock('node:fs');
describe('Flash Model Fallback Configuration', () => {
let config: Config;
beforeEach(() => {
vi.mocked(fs.existsSync).mockReturnValue(true);
vi.mocked(fs.statSync).mockReturnValue({
isDirectory: () => true,
} as fs.Stats);
config = new Config({
targetDir: '/test',
debugMode: false,
cwd: '/test',
model: DEFAULT_GEMINI_MODEL,
});
// Initialize contentGeneratorConfig for testing
(
config as unknown as { contentGeneratorConfig: unknown }
).contentGeneratorConfig = {
model: DEFAULT_GEMINI_MODEL,
authType: 'gemini-api-key',
};
});
// These tests do not actually test fallback. isInFallbackMode() only returns true
// when setFallbackMode has been called with true. This decouples setting a model
// from the fallback mechanism, which will be necessary when we introduce more
// intelligent model routing.
describe('setModel', () => {
it('should only mark as switched if contentGeneratorConfig exists', async () => {
// Create config without initializing contentGeneratorConfig
const newConfig = new Config({
targetDir: '/test',
debugMode: false,
cwd: '/test',
model: DEFAULT_GEMINI_MODEL,
});
// Should not crash when contentGeneratorConfig is undefined
await newConfig.setModel(DEFAULT_GEMINI_FLASH_MODEL);
expect(newConfig.isInFallbackMode()).toBe(false);
});
});
describe('getModel', () => {
it('should return contentGeneratorConfig model if available', async () => {
// Simulate initialized content generator config
await config.setModel(DEFAULT_GEMINI_FLASH_MODEL);
expect(config.getModel()).toBe(DEFAULT_GEMINI_FLASH_MODEL);
});
it('should fall back to initial model if contentGeneratorConfig is not available', () => {
// Test with fresh config where contentGeneratorConfig might not be set
const newConfig = new Config({
targetDir: '/test',
debugMode: false,
cwd: '/test',
model: 'custom-model',
});
expect(newConfig.getModel()).toBe('custom-model');
});
});
describe('isInFallbackMode', () => {
it('should start as false for new session', () => {
expect(config.isInFallbackMode()).toBe(false);
});
it('should remain false if no model switch occurs', () => {
// Perform other operations that don't involve model switching
expect(config.isInFallbackMode()).toBe(false);
});
it('should persist switched state throughout session', async () => {
await config.setModel(DEFAULT_GEMINI_FLASH_MODEL);
// Set fallback mode explicitly, as clients are expected to do
config.setFallbackMode(true);
expect(config.isInFallbackMode()).toBe(true);
// Should remain true even after getting model
config.getModel();
expect(config.isInFallbackMode()).toBe(true);
});
});
});
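A minimal sketch of the fallback-mode bookkeeping these tests exercise; `MiniConfig` is an illustrative stand-in for the real Config class, not the actual implementation:

```typescript
// Illustrative stand-in for Config's fallback-mode flag (not the real class).
class MiniConfig {
  private inFallbackMode = false;
  private model = 'coder-model';

  isInFallbackMode(): boolean {
    return this.inFallbackMode;
  }

  setFallbackMode(active: boolean): void {
    this.inFallbackMode = active;
  }

  async setModel(newModel: string): Promise<void> {
    // Setting a model no longer implies fallback; the flag is managed separately.
    this.model = newModel;
  }

  async refreshAuth(_authType: string): Promise<void> {
    // As in refreshAuth() above: an explicit auth refresh resets the session flag.
    this.inFallbackMode = false;
  }
}

async function demo(): Promise<void> {
  const config = new MiniConfig();
  await config.setModel('gemini-2.5-flash');
  console.log(config.isInFallbackMode()); // false: switching models alone does not enable fallback
  config.setFallbackMode(true); // e.g. when the client decides to degrade the session
  console.log(config.isInFallbackMode()); // true, and it persists for the session
  await config.refreshAuth('openai');
  console.log(config.isInFallbackMode()); // false again after an explicit auth refresh
}

void demo();
```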

View File

@@ -0,0 +1,83 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { describe, it, expect } from 'vitest';
import {
getEffectiveModel,
DEFAULT_GEMINI_MODEL,
DEFAULT_GEMINI_FLASH_MODEL,
DEFAULT_GEMINI_FLASH_LITE_MODEL,
} from './models.js';
describe('getEffectiveModel', () => {
describe('When NOT in fallback mode', () => {
const isInFallbackMode = false;
it('should return the Pro model when Pro is requested', () => {
const model = getEffectiveModel(isInFallbackMode, DEFAULT_GEMINI_MODEL);
expect(model).toBe(DEFAULT_GEMINI_MODEL);
});
it('should return the Flash model when Flash is requested', () => {
const model = getEffectiveModel(
isInFallbackMode,
DEFAULT_GEMINI_FLASH_MODEL,
);
expect(model).toBe(DEFAULT_GEMINI_FLASH_MODEL);
});
it('should return the Lite model when Lite is requested', () => {
const model = getEffectiveModel(
isInFallbackMode,
DEFAULT_GEMINI_FLASH_LITE_MODEL,
);
expect(model).toBe(DEFAULT_GEMINI_FLASH_LITE_MODEL);
});
it('should return a custom model name when requested', () => {
const customModel = 'custom-model-v1';
const model = getEffectiveModel(isInFallbackMode, customModel);
expect(model).toBe(customModel);
});
});
describe('When IN fallback mode', () => {
const isInFallbackMode = true;
it('should downgrade the Pro model to the Flash model', () => {
const model = getEffectiveModel(isInFallbackMode, DEFAULT_GEMINI_MODEL);
expect(model).toBe(DEFAULT_GEMINI_FLASH_MODEL);
});
it('should return the Flash model when Flash is requested', () => {
const model = getEffectiveModel(
isInFallbackMode,
DEFAULT_GEMINI_FLASH_MODEL,
);
expect(model).toBe(DEFAULT_GEMINI_FLASH_MODEL);
});
it('should HONOR the Lite model when Lite is requested', () => {
const model = getEffectiveModel(
isInFallbackMode,
DEFAULT_GEMINI_FLASH_LITE_MODEL,
);
expect(model).toBe(DEFAULT_GEMINI_FLASH_LITE_MODEL);
});
it('should HONOR any model with "lite" in its name', () => {
const customLiteModel = 'gemini-2.5-custom-lite-vNext';
const model = getEffectiveModel(isInFallbackMode, customLiteModel);
expect(model).toBe(customLiteModel);
});
it('should downgrade any other custom model to the Flash model', () => {
const customModel = 'custom-model-v1-unlisted';
const model = getEffectiveModel(isInFallbackMode, customModel);
expect(model).toBe(DEFAULT_GEMINI_FLASH_MODEL);
});
});
});

View File

@@ -7,3 +7,46 @@
export const DEFAULT_QWEN_MODEL = 'coder-model';
export const DEFAULT_QWEN_FLASH_MODEL = 'coder-model';
export const DEFAULT_QWEN_EMBEDDING_MODEL = 'text-embedding-v4';
export const DEFAULT_GEMINI_MODEL = 'coder-model';
export const DEFAULT_GEMINI_FLASH_MODEL = 'gemini-2.5-flash';
export const DEFAULT_GEMINI_FLASH_LITE_MODEL = 'gemini-2.5-flash-lite';
export const DEFAULT_GEMINI_MODEL_AUTO = 'auto';
export const DEFAULT_GEMINI_EMBEDDING_MODEL = 'gemini-embedding-001';
// Some thinking models do not default to dynamic thinking, which is enabled by a value of -1
export const DEFAULT_THINKING_MODE = -1;
/**
* Determines the effective model to use, applying fallback logic if necessary.
*
* When fallback mode is active, this function enforces the use of the standard
* fallback model. However, it makes an exception for "lite" models (any model
* with "lite" in its name), allowing them to be used to preserve cost savings.
* This ensures that "pro" models are always downgraded, while "lite" model
* requests are honored.
*
* @param isInFallbackMode Whether the application is in fallback mode.
* @param requestedModel The model that was originally requested.
* @returns The effective model name.
*/
export function getEffectiveModel(
isInFallbackMode: boolean,
requestedModel: string,
): string {
// If we are not in fallback mode, simply use the requested model.
if (!isInFallbackMode) {
return requestedModel;
}
// If a "lite" model is requested, honor it. This allows for variations of
// lite models without needing to list them all as constants.
if (requestedModel.includes('lite')) {
return requestedModel;
}
// Default fallback for Gemini CLI.
return DEFAULT_GEMINI_FLASH_MODEL;
}
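A short usage sketch of `getEffectiveModel` as defined above (the relative import path is assumed):

```typescript
import {
  getEffectiveModel,
  DEFAULT_GEMINI_MODEL,
  DEFAULT_GEMINI_FLASH_MODEL,
} from './models.js';

// Outside fallback mode the requested model is returned unchanged.
console.log(getEffectiveModel(false, DEFAULT_GEMINI_MODEL)); // DEFAULT_GEMINI_MODEL

// In fallback mode, non-"lite" models are downgraded to the Flash model...
console.log(getEffectiveModel(true, DEFAULT_GEMINI_MODEL)); // DEFAULT_GEMINI_FLASH_MODEL

// ...while any model with "lite" in its name is honored to preserve cost savings.
console.log(getEffectiveModel(true, 'gemini-2.5-flash-lite')); // 'gemini-2.5-flash-lite'
```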

View File

@@ -32,7 +32,7 @@ import {
type ChatCompressionInfo,
} from './turn.js';
import { getCoreSystemPrompt } from './prompts.js';
import { DEFAULT_QWEN_FLASH_MODEL } from '../config/models.js';
import { DEFAULT_GEMINI_FLASH_MODEL } from '../config/models.js';
import { FileDiscoveryService } from '../services/fileDiscoveryService.js';
import { setSimulate429 } from '../utils/testUtils.js';
import { tokenLimit } from './tokenLimits.js';
@@ -302,6 +302,8 @@ describe('Gemini Client (client.ts)', () => {
getFileService: vi.fn().mockReturnValue(fileService),
getMaxSessionTurns: vi.fn().mockReturnValue(0),
getSessionTokenLimit: vi.fn().mockReturnValue(32000),
getQuotaErrorOccurred: vi.fn().mockReturnValue(false),
setQuotaErrorOccurred: vi.fn(),
getNoBrowser: vi.fn().mockReturnValue(false),
getUsageStatisticsEnabled: vi.fn().mockReturnValue(true),
getApprovalMode: vi.fn().mockReturnValue(ApprovalMode.DEFAULT),
@@ -315,6 +317,8 @@ describe('Gemini Client (client.ts)', () => {
getModelRouterService: vi.fn().mockReturnValue({
route: vi.fn().mockResolvedValue({ model: 'default-routed-model' }),
}),
isInFallbackMode: vi.fn().mockReturnValue(false),
setFallbackMode: vi.fn(),
getCliVersion: vi.fn().mockReturnValue('1.0.0'),
getChatCompression: vi.fn().mockReturnValue(undefined),
getSkipNextSpeakerCheck: vi.fn().mockReturnValue(false),
@@ -1058,18 +1062,26 @@ describe('Gemini Client (client.ts)', () => {
// Assert
expect(ideContextStore.get).toHaveBeenCalled();
const expectedContext = `Here is the user's editor context. This is for your information only.
Active file:
Path: /path/to/active/file.ts
Cursor: line 5, character 10
Selected text:
const expectedContext = `
Here is the user's editor context as a JSON object. This is for your information only.
\`\`\`json
${JSON.stringify(
{
activeFile: {
path: '/path/to/active/file.ts',
cursor: {
line: 5,
character: 10,
},
selectedText: 'hello',
},
otherOpenFiles: ['/path/to/recent/file1.ts', '/path/to/recent/file2.ts'],
},
null,
2,
)}
\`\`\`
hello
\`\`\`
Other open files:
- /path/to/recent/file1.ts
- /path/to/recent/file2.ts`;
`.trim();
const expectedRequest = [{ text: expectedContext }];
expect(mockChat.addHistory).toHaveBeenCalledWith({
role: 'user',
@@ -1169,14 +1181,25 @@ Other open files:
// Assert
expect(ideContextStore.get).toHaveBeenCalled();
const expectedContext = `Here is the user's editor context. This is for your information only.
Active file:
Path: /path/to/active/file.ts
Cursor: line 5, character 10
Selected text:
const expectedContext = `
Here is the user's editor context as a JSON object. This is for your information only.
\`\`\`json
${JSON.stringify(
{
activeFile: {
path: '/path/to/active/file.ts',
cursor: {
line: 5,
character: 10,
},
selectedText: 'hello',
},
},
null,
2,
)}
\`\`\`
hello
\`\`\``;
`.trim();
const expectedRequest = [{ text: expectedContext }];
expect(mockChat.addHistory).toHaveBeenCalledWith({
role: 'user',
@@ -1235,10 +1258,18 @@ hello
// Assert
expect(ideContextStore.get).toHaveBeenCalled();
const expectedContext = `Here is the user's editor context. This is for your information only.
Other open files:
- /path/to/recent/file1.ts
- /path/to/recent/file2.ts`;
const expectedContext = `
Here is the user's editor context as a JSON object. This is for your information only.
\`\`\`json
${JSON.stringify(
{
otherOpenFiles: ['/path/to/recent/file1.ts', '/path/to/recent/file2.ts'],
},
null,
2,
)}
\`\`\`
`.trim();
const expectedRequest = [{ text: expectedContext }];
expect(mockChat.addHistory).toHaveBeenCalledWith({
role: 'user',
@@ -1755,9 +1786,11 @@ Other open files:
// Also verify it's the full context, not a delta.
const call = mockChat.addHistory.mock.calls[0][0];
const contextText = call.parts[0].text;
// Verify it contains the active file information in plain text format
expect(contextText).toContain('Active file:');
expect(contextText).toContain('Path: /path/to/active/file.ts');
const contextJson = JSON.parse(
contextText.match(/```json\n(.*)\n```/s)![1],
);
expect(contextJson).toHaveProperty('activeFile');
expect(contextJson.activeFile.path).toBe('/path/to/active/file.ts');
});
});
@@ -1960,7 +1993,7 @@ Other open files:
);
expect(contextCall).toBeDefined();
expect(JSON.stringify(contextCall![0])).toContain(
"Here is the user's editor context.",
"Here is the user's editor context as a JSON object",
);
// Check that the sent context is the new one (fileB.ts)
expect(JSON.stringify(contextCall![0])).toContain('fileB.ts');
@@ -1996,7 +2029,9 @@ Other open files:
// Assert: Full context for fileA.ts was sent and stored.
const initialCall = vi.mocked(mockChat.addHistory!).mock.calls[0][0];
expect(JSON.stringify(initialCall)).toContain("user's editor context.");
expect(JSON.stringify(initialCall)).toContain(
"user's editor context as a JSON object",
);
expect(JSON.stringify(initialCall)).toContain('fileA.ts');
// This implicitly tests that `lastSentIdeContext` is now set internally by the client.
vi.mocked(mockChat.addHistory!).mockClear();
@@ -2094,9 +2129,9 @@ Other open files:
const finalCall = vi.mocked(mockChat.addHistory!).mock.calls[0][0];
expect(JSON.stringify(finalCall)).toContain('summary of changes');
// The delta should reflect fileA being closed and fileC being opened.
expect(JSON.stringify(finalCall)).toContain('Files closed');
expect(JSON.stringify(finalCall)).toContain('filesClosed');
expect(JSON.stringify(finalCall)).toContain('fileA.ts');
expect(JSON.stringify(finalCall)).toContain('Active file changed');
expect(JSON.stringify(finalCall)).toContain('activeFileChanged');
expect(JSON.stringify(finalCall)).toContain('fileC.ts');
});
});
@@ -2227,12 +2262,12 @@ Other open files:
contents,
generationConfig,
abortSignal,
DEFAULT_QWEN_FLASH_MODEL,
DEFAULT_GEMINI_FLASH_MODEL,
);
expect(mockContentGenerator.generateContent).toHaveBeenCalledWith(
expect.objectContaining({
model: DEFAULT_QWEN_FLASH_MODEL,
model: DEFAULT_GEMINI_FLASH_MODEL,
config: expect.objectContaining({
abortSignal,
systemInstruction: getCoreSystemPrompt(''),
@@ -2255,7 +2290,7 @@ Other open files:
contents,
{},
new AbortController().signal,
DEFAULT_QWEN_FLASH_MODEL,
DEFAULT_GEMINI_FLASH_MODEL,
);
expect(mockContentGenerator.generateContent).not.toHaveBeenCalledWith({
@@ -2265,7 +2300,7 @@ Other open files:
});
expect(mockContentGenerator.generateContent).toHaveBeenCalledWith(
{
model: DEFAULT_QWEN_FLASH_MODEL,
model: DEFAULT_GEMINI_FLASH_MODEL,
config: expect.any(Object),
contents,
},
@@ -2273,7 +2308,28 @@ Other open files:
);
});
// Note: there is currently no "fallback mode" model routing; the model used
// is always the one explicitly requested by the caller.
it('should use the Flash model when fallback mode is active', async () => {
const contents = [{ role: 'user', parts: [{ text: 'hello' }] }];
const generationConfig = { temperature: 0.5 };
const abortSignal = new AbortController().signal;
const requestedModel = 'gemini-2.5-pro'; // A non-flash model
// Mock config to be in fallback mode
vi.spyOn(client['config'], 'isInFallbackMode').mockReturnValue(true);
await client.generateContent(
contents,
generationConfig,
abortSignal,
requestedModel,
);
expect(mockGenerateContentFn).toHaveBeenCalledWith(
expect.objectContaining({
model: DEFAULT_GEMINI_FLASH_MODEL,
}),
'test-session-id',
);
});
});
});

View File

@@ -15,6 +15,7 @@ import type {
// Config
import { ApprovalMode, type Config } from '../config/config.js';
import { DEFAULT_GEMINI_FLASH_MODEL } from '../config/models.js';
// Core modules
import type { ContentGenerator } from './contentGenerator.js';
@@ -218,48 +219,42 @@ export class GeminiClient {
}
if (forceFullContext || !this.lastSentIdeContext) {
// Send full context as plain text
// Send full context as JSON
const openFiles = currentIdeContext.workspaceState?.openFiles || [];
const activeFile = openFiles.find((f) => f.isActive);
const otherOpenFiles = openFiles
.filter((f) => !f.isActive)
.map((f) => f.path);
const contextLines: string[] = [];
const contextData: Record<string, unknown> = {};
if (activeFile) {
contextLines.push('Active file:');
contextLines.push(` Path: ${activeFile.path}`);
if (activeFile.cursor) {
contextLines.push(
` Cursor: line ${activeFile.cursor.line}, character ${activeFile.cursor.character}`,
);
}
if (activeFile.selectedText) {
contextLines.push(' Selected text:');
contextLines.push('```');
contextLines.push(activeFile.selectedText);
contextLines.push('```');
}
contextData['activeFile'] = {
path: activeFile.path,
cursor: activeFile.cursor
? {
line: activeFile.cursor.line,
character: activeFile.cursor.character,
}
: undefined,
selectedText: activeFile.selectedText || undefined,
};
}
if (otherOpenFiles.length > 0) {
if (contextLines.length > 0) {
contextLines.push('');
}
contextLines.push('Other open files:');
for (const filePath of otherOpenFiles) {
contextLines.push(` - ${filePath}`);
}
contextData['otherOpenFiles'] = otherOpenFiles;
}
if (contextLines.length === 0) {
if (Object.keys(contextData).length === 0) {
return { contextParts: [], newIdeContext: currentIdeContext };
}
const jsonString = JSON.stringify(contextData, null, 2);
const contextParts = [
"Here is the user's editor context. This is for your information only.",
contextLines.join('\n'),
"Here is the user's editor context as a JSON object. This is for your information only.",
'```json',
jsonString,
'```',
];
if (this.config.getDebugMode()) {
@@ -270,8 +265,9 @@ export class GeminiClient {
newIdeContext: currentIdeContext,
};
} else {
// Calculate and send delta as plain text
const changeLines: string[] = [];
// Calculate and send delta as JSON
const delta: Record<string, unknown> = {};
const changes: Record<string, unknown> = {};
const lastFiles = new Map(
(this.lastSentIdeContext.workspaceState?.openFiles || []).map(
@@ -292,10 +288,7 @@ export class GeminiClient {
}
}
if (openedFiles.length > 0) {
changeLines.push('Files opened:');
for (const filePath of openedFiles) {
changeLines.push(` - ${filePath}`);
}
changes['filesOpened'] = openedFiles;
}
const closedFiles: string[] = [];
@@ -305,13 +298,7 @@ export class GeminiClient {
}
}
if (closedFiles.length > 0) {
if (changeLines.length > 0) {
changeLines.push('');
}
changeLines.push('Files closed:');
for (const filePath of closedFiles) {
changeLines.push(` - ${filePath}`);
}
changes['filesClosed'] = closedFiles;
}
const lastActiveFile = (
@@ -323,22 +310,16 @@ export class GeminiClient {
if (currentActiveFile) {
if (!lastActiveFile || lastActiveFile.path !== currentActiveFile.path) {
if (changeLines.length > 0) {
changeLines.push('');
}
changeLines.push('Active file changed:');
changeLines.push(` Path: ${currentActiveFile.path}`);
if (currentActiveFile.cursor) {
changeLines.push(
` Cursor: line ${currentActiveFile.cursor.line}, character ${currentActiveFile.cursor.character}`,
);
}
if (currentActiveFile.selectedText) {
changeLines.push(' Selected text:');
changeLines.push('```');
changeLines.push(currentActiveFile.selectedText);
changeLines.push('```');
}
changes['activeFileChanged'] = {
path: currentActiveFile.path,
cursor: currentActiveFile.cursor
? {
line: currentActiveFile.cursor.line,
character: currentActiveFile.cursor.character,
}
: undefined,
selectedText: currentActiveFile.selectedText || undefined,
};
} else {
const lastCursor = lastActiveFile.cursor;
const currentCursor = currentActiveFile.cursor;
@@ -348,50 +329,42 @@ export class GeminiClient {
lastCursor.line !== currentCursor.line ||
lastCursor.character !== currentCursor.character)
) {
if (changeLines.length > 0) {
changeLines.push('');
}
changeLines.push('Cursor moved:');
changeLines.push(` Path: ${currentActiveFile.path}`);
changeLines.push(
` New position: line ${currentCursor.line}, character ${currentCursor.character}`,
);
changes['cursorMoved'] = {
path: currentActiveFile.path,
cursor: {
line: currentCursor.line,
character: currentCursor.character,
},
};
}
const lastSelectedText = lastActiveFile.selectedText || '';
const currentSelectedText = currentActiveFile.selectedText || '';
if (lastSelectedText !== currentSelectedText) {
if (changeLines.length > 0) {
changeLines.push('');
}
changeLines.push('Selection changed:');
changeLines.push(` Path: ${currentActiveFile.path}`);
if (currentSelectedText) {
changeLines.push(' Selected text:');
changeLines.push('```');
changeLines.push(currentSelectedText);
changeLines.push('```');
} else {
changeLines.push(' Selected text: (none)');
}
changes['selectionChanged'] = {
path: currentActiveFile.path,
selectedText: currentSelectedText,
};
}
}
} else if (lastActiveFile) {
if (changeLines.length > 0) {
changeLines.push('');
}
changeLines.push('Active file changed:');
changeLines.push(' No active file');
changeLines.push(` Previous path: ${lastActiveFile.path}`);
changes['activeFileChanged'] = {
path: null,
previousPath: lastActiveFile.path,
};
}
if (changeLines.length === 0) {
if (Object.keys(changes).length === 0) {
return { contextParts: [], newIdeContext: currentIdeContext };
}
delta['changes'] = changes;
const jsonString = JSON.stringify(delta, null, 2);
const contextParts = [
"Here is a summary of changes in the user's editor context. This is for your information only.",
changeLines.join('\n'),
"Here is a summary of changes in the user's editor context, in JSON format. This is for your information only.",
'```json',
jsonString,
'```',
];
if (this.config.getDebugMode()) {
@@ -569,6 +542,11 @@ export class GeminiClient {
}
}
if (!turn.pendingToolCalls.length && signal && !signal.aborted) {
// Check if next speaker check is needed
if (this.config.getQuotaErrorOccurred()) {
return turn;
}
if (this.config.getSkipNextSpeakerCheck()) {
return turn;
}
@@ -624,11 +602,14 @@ export class GeminiClient {
};
const apiCall = () => {
currentAttemptModel = model;
const modelToUse = this.config.isInFallbackMode()
? DEFAULT_GEMINI_FLASH_MODEL
: model;
currentAttemptModel = modelToUse;
return this.getContentGeneratorOrFail().generateContent(
{
model,
model: modelToUse,
config: requestConfig,
contents,
},
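For reference, a hedged sketch of the delta payload the JSON-based path above produces when a file is closed and the active file changes; the paths and positions are made up:

```typescript
// Illustrative delta object; the keys match those assigned above
// (filesOpened, filesClosed, activeFileChanged, cursorMoved, selectionChanged).
const delta: Record<string, unknown> = {
  changes: {
    filesClosed: ['/workspace/fileA.ts'],
    activeFileChanged: {
      path: '/workspace/fileC.ts',
      cursor: { line: 12, character: 4 },
      selectedText: undefined,
    },
  },
};

// The client wraps this JSON in a fenced json code block, prefixed with
// "Here is a summary of changes in the user's editor context, in JSON format."
console.log(JSON.stringify(delta, null, 2));
```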

View File

@@ -5,11 +5,7 @@
*/
import { describe, it, expect, vi } from 'vitest';
import {
createContentGenerator,
createContentGeneratorConfig,
AuthType,
} from './contentGenerator.js';
import { createContentGenerator, AuthType } from './contentGenerator.js';
import { GoogleGenAI } from '@google/genai';
import type { Config } from '../config/config.js';
import { LoggingContentGenerator } from './loggingContentGenerator/index.js';
@@ -82,32 +78,3 @@ describe('createContentGenerator', () => {
expect(generator).toBeInstanceOf(LoggingContentGenerator);
});
});
describe('createContentGeneratorConfig', () => {
const mockConfig = {
getProxy: () => undefined,
} as unknown as Config;
it('should preserve provided fields and set authType for QWEN_OAUTH', () => {
const cfg = createContentGeneratorConfig(mockConfig, AuthType.QWEN_OAUTH, {
model: 'vision-model',
apiKey: 'QWEN_OAUTH_DYNAMIC_TOKEN',
});
expect(cfg.authType).toBe(AuthType.QWEN_OAUTH);
expect(cfg.model).toBe('vision-model');
expect(cfg.apiKey).toBe('QWEN_OAUTH_DYNAMIC_TOKEN');
});
it('should not warn or fallback for QWEN_OAUTH (resolution handled by ModelConfigResolver)', () => {
const warnSpy = vi
.spyOn(console, 'warn')
.mockImplementation(() => undefined);
const cfg = createContentGeneratorConfig(mockConfig, AuthType.QWEN_OAUTH, {
model: 'some-random-model',
});
expect(cfg.model).toBe('some-random-model');
expect(cfg.apiKey).toBeUndefined();
expect(warnSpy).not.toHaveBeenCalled();
warnSpy.mockRestore();
});
});

View File

@@ -12,24 +12,9 @@ import type {
GenerateContentParameters,
GenerateContentResponse,
} from '@google/genai';
import { DEFAULT_QWEN_MODEL } from '../config/models.js';
import type { Config } from '../config/config.js';
import { LoggingContentGenerator } from './loggingContentGenerator/index.js';
import type {
ConfigSource,
ConfigSourceKind,
ConfigSources,
} from '../utils/configResolver.js';
import {
getDefaultApiKeyEnvVar,
getDefaultModelEnvVar,
MissingAnthropicBaseUrlEnvError,
MissingApiKeyError,
MissingBaseUrlError,
MissingModelError,
StrictMissingCredentialsError,
StrictMissingModelIdError,
} from '../models/modelConfigErrors.js';
import { PROVIDER_SOURCED_FIELDS } from '../models/modelsConfig.js';
/**
* Interface abstracting the core functionalities for generating content and counting tokens.
@@ -63,7 +48,6 @@ export enum AuthType {
export type ContentGeneratorConfig = {
model: string;
apiKey?: string;
apiKeyEnvKey?: string;
baseUrl?: string;
vertexai?: boolean;
authType?: AuthType | undefined;
@@ -93,178 +77,102 @@ export type ContentGeneratorConfig = {
schemaCompliance?: 'auto' | 'openapi_30';
};
// Keep the public ContentGeneratorConfigSources API, but reuse the generic
// source-tracking types from utils/configResolver to avoid duplication.
export type ContentGeneratorConfigSourceKind = ConfigSourceKind;
export type ContentGeneratorConfigSource = ConfigSource;
export type ContentGeneratorConfigSources = ConfigSources;
export type ResolvedContentGeneratorConfig = {
config: ContentGeneratorConfig;
sources: ContentGeneratorConfigSources;
};
function setSource(
sources: ContentGeneratorConfigSources,
path: string,
source: ContentGeneratorConfigSource,
): void {
sources[path] = source;
}
function getSeedSource(
seed: ContentGeneratorConfigSources | undefined,
path: string,
): ContentGeneratorConfigSource | undefined {
return seed?.[path];
}
/**
* Resolve ContentGeneratorConfig while tracking the source of each effective field.
*
* This function now primarily validates and finalizes the configuration that has
* already been resolved by ModelConfigResolver. The env fallback logic has been
* moved to the unified resolver to eliminate duplication.
*
* Note: The generationConfig passed here should already be fully resolved with
* proper source tracking from the caller (CLI/SDK layer).
*/
export function resolveContentGeneratorConfigWithSources(
config: Config,
authType: AuthType | undefined,
generationConfig?: Partial<ContentGeneratorConfig>,
seedSources?: ContentGeneratorConfigSources,
options?: { strictModelProvider?: boolean },
): ResolvedContentGeneratorConfig {
const sources: ContentGeneratorConfigSources = { ...(seedSources || {}) };
const strictModelProvider = options?.strictModelProvider === true;
// Build config with computed fields
const newContentGeneratorConfig: Partial<ContentGeneratorConfig> = {
...(generationConfig || {}),
authType,
proxy: config?.getProxy(),
};
// Set sources for computed fields
setSource(sources, 'authType', {
kind: 'computed',
detail: 'provided by caller',
});
if (config?.getProxy()) {
setSource(sources, 'proxy', {
kind: 'computed',
detail: 'Config.getProxy()',
});
}
// Preserve seed sources for fields that were passed in
const seedOrUnknown = (path: string): ContentGeneratorConfigSource =>
getSeedSource(seedSources, path) ?? { kind: 'unknown' };
for (const field of PROVIDER_SOURCED_FIELDS) {
if (generationConfig && field in generationConfig && !sources[field]) {
setSource(sources, field, seedOrUnknown(field));
}
}
// Validate required fields based on authType. This does not perform any
// fallback resolution (resolution is handled by ModelConfigResolver).
const validation = validateModelConfig(
newContentGeneratorConfig as ContentGeneratorConfig,
strictModelProvider,
);
if (!validation.valid) {
throw new Error(validation.errors.map((e) => e.message).join('\n'));
}
return {
config: newContentGeneratorConfig as ContentGeneratorConfig,
sources,
};
}
export interface ModelConfigValidationResult {
valid: boolean;
errors: Error[];
}
/**
* Validate a resolved model configuration.
* This is the single validation entry point used across Core.
*/
export function validateModelConfig(
config: ContentGeneratorConfig,
isStrictModelProvider: boolean = false,
): ModelConfigValidationResult {
const errors: Error[] = [];
// Qwen OAuth doesn't need validation - it uses dynamic tokens
if (config.authType === AuthType.QWEN_OAUTH) {
return { valid: true, errors: [] };
}
// API key is required for all other auth types
if (!config.apiKey) {
if (isStrictModelProvider) {
errors.push(
new StrictMissingCredentialsError(
config.authType,
config.model,
config.apiKeyEnvKey,
),
);
} else {
const envKey =
config.apiKeyEnvKey || getDefaultApiKeyEnvVar(config.authType);
errors.push(
new MissingApiKeyError({
authType: config.authType,
model: config.model,
baseUrl: config.baseUrl,
envKey,
}),
);
}
}
// Model is required
if (!config.model) {
if (isStrictModelProvider) {
errors.push(new StrictMissingModelIdError(config.authType));
} else {
const envKey = getDefaultModelEnvVar(config.authType);
errors.push(new MissingModelError({ authType: config.authType, envKey }));
}
}
// Explicit baseUrl is required for Anthropic; Migrated from existing code.
if (config.authType === AuthType.USE_ANTHROPIC && !config.baseUrl) {
if (isStrictModelProvider) {
errors.push(
new MissingBaseUrlError({
authType: config.authType,
model: config.model,
}),
);
} else if (config.authType === AuthType.USE_ANTHROPIC) {
errors.push(new MissingAnthropicBaseUrlEnvError());
}
}
return { valid: errors.length === 0, errors };
}
export function createContentGeneratorConfig(
config: Config,
authType: AuthType | undefined,
generationConfig?: Partial<ContentGeneratorConfig>,
): ContentGeneratorConfig {
return resolveContentGeneratorConfigWithSources(
config,
let newContentGeneratorConfig: Partial<ContentGeneratorConfig> = {
...(generationConfig || {}),
authType,
generationConfig,
).config;
proxy: config?.getProxy(),
};
if (authType === AuthType.QWEN_OAUTH) {
// For Qwen OAuth, we'll handle the API key dynamically in createContentGenerator
// Set a special marker to indicate this is Qwen OAuth
return {
...newContentGeneratorConfig,
model: DEFAULT_QWEN_MODEL,
apiKey: 'QWEN_OAUTH_DYNAMIC_TOKEN',
} as ContentGeneratorConfig;
}
if (authType === AuthType.USE_OPENAI) {
newContentGeneratorConfig = {
...newContentGeneratorConfig,
apiKey: newContentGeneratorConfig.apiKey || process.env['OPENAI_API_KEY'],
baseUrl:
newContentGeneratorConfig.baseUrl || process.env['OPENAI_BASE_URL'],
model: newContentGeneratorConfig.model || process.env['OPENAI_MODEL'],
};
if (!newContentGeneratorConfig.apiKey) {
throw new Error('OPENAI_API_KEY environment variable not found.');
}
return {
...newContentGeneratorConfig,
model: newContentGeneratorConfig?.model || 'qwen3-coder-plus',
} as ContentGeneratorConfig;
}
if (authType === AuthType.USE_ANTHROPIC) {
newContentGeneratorConfig = {
...newContentGeneratorConfig,
apiKey:
newContentGeneratorConfig.apiKey || process.env['ANTHROPIC_API_KEY'],
baseUrl:
newContentGeneratorConfig.baseUrl || process.env['ANTHROPIC_BASE_URL'],
model: newContentGeneratorConfig.model || process.env['ANTHROPIC_MODEL'],
};
if (!newContentGeneratorConfig.apiKey) {
throw new Error('ANTHROPIC_API_KEY environment variable not found.');
}
if (!newContentGeneratorConfig.baseUrl) {
throw new Error('ANTHROPIC_BASE_URL environment variable not found.');
}
if (!newContentGeneratorConfig.model) {
throw new Error('ANTHROPIC_MODEL environment variable not found.');
}
}
if (authType === AuthType.USE_GEMINI) {
newContentGeneratorConfig = {
...newContentGeneratorConfig,
apiKey: newContentGeneratorConfig.apiKey || process.env['GEMINI_API_KEY'],
model: newContentGeneratorConfig.model || process.env['GEMINI_MODEL'],
};
if (!newContentGeneratorConfig.apiKey) {
throw new Error('GEMINI_API_KEY environment variable not found.');
}
if (!newContentGeneratorConfig.model) {
throw new Error('GEMINI_MODEL environment variable not found.');
}
}
if (authType === AuthType.USE_VERTEX_AI) {
newContentGeneratorConfig = {
...newContentGeneratorConfig,
apiKey: newContentGeneratorConfig.apiKey || process.env['GOOGLE_API_KEY'],
model: newContentGeneratorConfig.model || process.env['GOOGLE_MODEL'],
};
if (!newContentGeneratorConfig.apiKey) {
throw new Error('GOOGLE_API_KEY environment variable not found.');
}
if (!newContentGeneratorConfig.model) {
throw new Error('GOOGLE_MODEL environment variable not found.');
}
}
return newContentGeneratorConfig as ContentGeneratorConfig;
}
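For context (not part of the diff), a hedged sketch of the environment-variable-driven branch above for the OpenAI path; the key and base URL values are placeholders, and only the env var names and the 'qwen3-coder-plus' fallback come from the code shown.
// Hypothetical placeholder values, for illustration only.
process.env['OPENAI_API_KEY'] = 'sk-placeholder';
process.env['OPENAI_BASE_URL'] = 'https://example.com/v1';
// OPENAI_MODEL deliberately left unset.
const generatorConfig = createContentGeneratorConfig(config, AuthType.USE_OPENAI);
// apiKey and baseUrl are read from the env vars; model falls back to 'qwen3-coder-plus'.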
export async function createContentGenerator(
@@ -272,12 +180,11 @@ export async function createContentGenerator(
gcConfig: Config,
isInitialAuth?: boolean,
): Promise<ContentGenerator> {
const validation = validateModelConfig(config, false);
if (!validation.valid) {
throw new Error(validation.errors.map((e) => e.message).join('\n'));
}
if (config.authType === AuthType.USE_OPENAI) {
if (!config.apiKey) {
throw new Error('OPENAI_API_KEY environment variable not found.');
}
// Import OpenAIContentGenerator dynamically to avoid circular dependencies
const { createOpenAIContentGenerator } = await import(
'./openaiContentGenerator/index.js'
@@ -316,6 +223,10 @@ export async function createContentGenerator(
}
if (config.authType === AuthType.USE_ANTHROPIC) {
if (!config.apiKey) {
throw new Error('ANTHROPIC_API_KEY environment variable not found.');
}
const { createAnthropicContentGenerator } = await import(
'./anthropicContentGenerator/index.js'
);

View File

@@ -240,7 +240,7 @@ describe('CoreToolScheduler', () => {
getAllowedTools: () => [],
getContentGeneratorConfig: () => ({
model: 'test-model',
authType: 'gemini',
authType: 'gemini-api-key',
}),
getShellExecutionConfig: () => ({
terminalWidth: 90,
@@ -318,7 +318,7 @@ describe('CoreToolScheduler', () => {
getAllowedTools: () => [],
getContentGeneratorConfig: () => ({
model: 'test-model',
authType: 'gemini',
authType: 'gemini-api-key',
}),
getShellExecutionConfig: () => ({
terminalWidth: 90,
@@ -497,7 +497,7 @@ describe('CoreToolScheduler', () => {
getExcludeTools: () => ['write_file', 'edit', 'run_shell_command'],
getContentGeneratorConfig: () => ({
model: 'test-model',
authType: 'gemini',
authType: 'gemini-api-key',
}),
getShellExecutionConfig: () => ({
terminalWidth: 90,
@@ -584,7 +584,7 @@ describe('CoreToolScheduler', () => {
getExcludeTools: () => ['write_file', 'edit'], // Different excluded tools
getContentGeneratorConfig: () => ({
model: 'test-model',
authType: 'gemini',
authType: 'gemini-api-key',
}),
getShellExecutionConfig: () => ({
terminalWidth: 90,
@@ -674,7 +674,7 @@ describe('CoreToolScheduler with payload', () => {
getAllowedTools: () => [],
getContentGeneratorConfig: () => ({
model: 'test-model',
authType: 'gemini',
authType: 'gemini-api-key',
}),
getShellExecutionConfig: () => ({
terminalWidth: 90,
@@ -1001,7 +1001,7 @@ describe('CoreToolScheduler edit cancellation', () => {
getAllowedTools: () => [],
getContentGeneratorConfig: () => ({
model: 'test-model',
authType: 'gemini',
authType: 'gemini-api-key',
}),
getShellExecutionConfig: () => ({
terminalWidth: 90,
@@ -1108,7 +1108,7 @@ describe('CoreToolScheduler YOLO mode', () => {
getAllowedTools: () => [],
getContentGeneratorConfig: () => ({
model: 'test-model',
authType: 'gemini',
authType: 'gemini-api-key',
}),
getShellExecutionConfig: () => ({
terminalWidth: 90,
@@ -1258,7 +1258,7 @@ describe('CoreToolScheduler cancellation during executing with live output', ()
getApprovalMode: () => ApprovalMode.DEFAULT,
getContentGeneratorConfig: () => ({
model: 'test-model',
authType: 'gemini',
authType: 'gemini-api-key',
}),
getToolRegistry: () => mockToolRegistry,
getShellExecutionConfig: () => ({
@@ -1350,7 +1350,7 @@ describe('CoreToolScheduler request queueing', () => {
getAllowedTools: () => [],
getContentGeneratorConfig: () => ({
model: 'test-model',
authType: 'gemini',
authType: 'gemini-api-key',
}),
getShellExecutionConfig: () => ({
terminalWidth: 90,
@@ -1482,7 +1482,7 @@ describe('CoreToolScheduler request queueing', () => {
getToolRegistry: () => toolRegistry,
getContentGeneratorConfig: () => ({
model: 'test-model',
authType: 'gemini',
authType: 'gemini-api-key',
}),
getShellExecutionConfig: () => ({
terminalWidth: 80,
@@ -1586,7 +1586,7 @@ describe('CoreToolScheduler request queueing', () => {
getAllowedTools: () => [],
getContentGeneratorConfig: () => ({
model: 'test-model',
authType: 'gemini',
authType: 'gemini-api-key',
}),
getShellExecutionConfig: () => ({
terminalWidth: 90,
@@ -1854,7 +1854,7 @@ describe('CoreToolScheduler Sequential Execution', () => {
getAllowedTools: () => [],
getContentGeneratorConfig: () => ({
model: 'test-model',
authType: 'gemini',
authType: 'gemini-api-key',
}),
getShellExecutionConfig: () => ({
terminalWidth: 90,
@@ -1975,7 +1975,7 @@ describe('CoreToolScheduler Sequential Execution', () => {
getAllowedTools: () => [],
getContentGeneratorConfig: () => ({
model: 'test-model',
authType: 'gemini',
authType: 'gemini-api-key',
}),
getShellExecutionConfig: () => ({
terminalWidth: 90,

View File

@@ -824,6 +824,7 @@ export class CoreToolScheduler {
*/
const shouldAutoDeny =
!this.config.isInteractive() &&
!this.config.getIdeMode() &&
!this.config.getExperimentalZedIntegration() &&
this.config.getInputFormat() !== InputFormat.STREAM_JSON;

View File

@@ -20,6 +20,7 @@ import {
} from './geminiChat.js';
import type { Config } from '../config/config.js';
import { setSimulate429 } from '../utils/testUtils.js';
import { DEFAULT_GEMINI_FLASH_MODEL } from '../config/models.js';
import { AuthType } from './contentGenerator.js';
import { type RetryOptions } from '../utils/retry.js';
import { uiTelemetryService } from '../telemetry/uiTelemetry.js';
@@ -111,11 +112,15 @@ describe('GeminiChat', () => {
getUsageStatisticsEnabled: () => true,
getDebugMode: () => false,
getContentGeneratorConfig: vi.fn().mockReturnValue({
authType: 'gemini', // Ensure this is set for fallback tests
authType: 'gemini-api-key', // Ensure this is set for fallback tests
model: 'test-model',
}),
getModel: vi.fn().mockReturnValue('gemini-pro'),
setModel: vi.fn(),
isInFallbackMode: vi.fn().mockReturnValue(false),
getQuotaErrorOccurred: vi.fn().mockReturnValue(false),
setQuotaErrorOccurred: vi.fn(),
flashFallbackHandler: undefined,
getProjectRoot: vi.fn().mockReturnValue('/test/project/root'),
getCliVersion: vi.fn().mockReturnValue('1.0.0'),
storage: {
@@ -1344,8 +1349,9 @@ describe('GeminiChat', () => {
],
} as unknown as GenerateContentResponse;
it('should pass the requested model through to generateContentStream', async () => {
it('should use the FLASH model when in fallback mode (sendMessageStream)', async () => {
vi.mocked(mockConfig.getModel).mockReturnValue('gemini-pro');
vi.mocked(mockConfig.isInFallbackMode).mockReturnValue(true);
vi.mocked(mockContentGenerator.generateContentStream).mockImplementation(
async () =>
(async function* () {
@@ -1364,7 +1370,7 @@ describe('GeminiChat', () => {
expect(mockContentGenerator.generateContentStream).toHaveBeenCalledWith(
expect.objectContaining({
model: 'test-model',
model: DEFAULT_GEMINI_FLASH_MODEL,
}),
'prompt-id-res3',
);
@@ -1416,6 +1422,9 @@ describe('GeminiChat', () => {
authType,
});
const isInFallbackModeSpy = vi.spyOn(mockConfig, 'isInFallbackMode');
isInFallbackModeSpy.mockReturnValue(false);
vi.mocked(mockContentGenerator.generateContentStream)
.mockRejectedValueOnce(error429) // Attempt 1 fails
.mockResolvedValueOnce(
@@ -1432,7 +1441,10 @@ describe('GeminiChat', () => {
})(),
);
mockHandleFallback.mockImplementation(async () => true);
mockHandleFallback.mockImplementation(async () => {
isInFallbackModeSpy.mockReturnValue(true);
return true; // Signal retry
});
const stream = await chat.sendMessageStream(
'test-model',

View File

@@ -19,6 +19,10 @@ import type {
import { ApiError, createUserContent } from '@google/genai';
import { retryWithBackoff } from '../utils/retry.js';
import type { Config } from '../config/config.js';
import {
DEFAULT_GEMINI_FLASH_MODEL,
getEffectiveModel,
} from '../config/models.js';
import { hasCycleInSchema } from '../tools/tools.js';
import type { StructuredError } from './turn.js';
import {
@@ -348,15 +352,31 @@ export class GeminiChat {
params: SendMessageParameters,
prompt_id: string,
): Promise<AsyncGenerator<GenerateContentResponse>> {
const apiCall = () =>
this.config.getContentGenerator().generateContentStream(
const apiCall = () => {
const modelToUse = getEffectiveModel(
this.config.isInFallbackMode(),
model,
);
if (
this.config.getQuotaErrorOccurred() &&
modelToUse === DEFAULT_GEMINI_FLASH_MODEL
) {
throw new Error(
'Please submit a new query to continue with the Flash model.',
);
}
return this.config.getContentGenerator().generateContentStream(
{
model,
model: modelToUse,
contents: requestContents,
config: { ...this.generationConfig, ...params.config },
},
prompt_id,
);
};
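As a reading aid (not part of the diff), getEffectiveModel is assumed to behave roughly as sketched below, substituting the Flash model while fallback mode is active and otherwise passing the requested model through; this is an assumption about its contract, not the actual implementation in ../config/models.js.
// Assumed behavior only.
function getEffectiveModelSketch(isInFallbackMode: boolean, requestedModel: string): string {
  return isInFallbackMode ? DEFAULT_GEMINI_FLASH_MODEL : requestedModel;
}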
const onPersistent429Callback = async (
authType?: string,
error?: unknown,

View File

@@ -47,7 +47,7 @@ describe('executeToolCall', () => {
getDebugMode: () => false,
getContentGeneratorConfig: () => ({
model: 'test-model',
authType: 'gemini',
authType: 'gemini-api-key',
}),
getShellExecutionConfig: () => ({
terminalWidth: 90,

View File

@@ -207,27 +207,6 @@ describe('OpenAIContentConverter', () => {
expect.objectContaining({ text: 'visible text' }),
);
});
it('should not throw when streaming chunk has no delta', () => {
const chunk = converter.convertOpenAIChunkToGemini({
object: 'chat.completion.chunk',
id: 'chunk-2',
created: 456,
choices: [
{
index: 0,
// Some OpenAI-compatible providers may omit delta entirely.
delta: undefined,
finish_reason: null,
logprobs: null,
},
],
model: 'gpt-test',
} as unknown as OpenAI.Chat.ChatCompletionChunk);
const parts = chunk.candidates?.[0]?.content?.parts;
expect(parts).toEqual([]);
});
});
describe('convertGeminiToolsToOpenAI', () => {

View File

@@ -93,14 +93,6 @@ export class OpenAIContentConverter {
this.schemaCompliance = schemaCompliance;
}
/**
* Update the model used for response metadata (modelVersion/logging) and any
* model-specific conversion behavior.
*/
setModel(model: string): void {
this.model = model;
}
/**
* Reset streaming tool calls parser for new stream processing
* This should be called at the beginning of each stream to prevent
@@ -760,8 +752,6 @@ export class OpenAIContentConverter {
usage.prompt_tokens_details?.cached_tokens ??
extendedUsage.cached_tokens ??
0;
const thinkingTokens =
usage.completion_tokens_details?.reasoning_tokens || 0;
// If we only have total tokens but no breakdown, estimate the split
// Typically input is ~70% and output is ~30% for most conversations
@@ -779,7 +769,6 @@ export class OpenAIContentConverter {
candidatesTokenCount: finalCompletionTokens,
totalTokenCount: totalTokens,
cachedContentTokenCount: cachedTokens,
thoughtsTokenCount: thinkingTokens,
};
}
@@ -799,7 +788,7 @@ export class OpenAIContentConverter {
const parts: Part[] = [];
const reasoningText = (choice.delta as ExtendedCompletionChunkDelta)
?.reasoning_content;
.reasoning_content;
if (reasoningText) {
parts.push({ text: reasoningText, thought: true });
}

View File

@@ -46,7 +46,6 @@ describe('ContentGenerationPipeline', () => {
// Mock converter
mockConverter = {
setModel: vi.fn(),
convertGeminiRequestToOpenAI: vi.fn(),
convertOpenAIResponseToGemini: vi.fn(),
convertOpenAIChunkToGemini: vi.fn(),
@@ -100,7 +99,6 @@ describe('ContentGenerationPipeline', () => {
describe('constructor', () => {
it('should initialize with correct configuration', () => {
expect(mockProvider.buildClient).toHaveBeenCalled();
// Converter is constructed once and the model is updated per-request via setModel().
expect(OpenAIContentConverter).toHaveBeenCalledWith(
'test-model',
undefined,
@@ -146,9 +144,6 @@ describe('ContentGenerationPipeline', () => {
// Assert
expect(result).toBe(mockGeminiResponse);
expect(
(mockConverter as unknown as { setModel: Mock }).setModel,
).toHaveBeenCalledWith('test-model');
expect(mockConverter.convertGeminiRequestToOpenAI).toHaveBeenCalledWith(
request,
);
@@ -169,53 +164,6 @@ describe('ContentGenerationPipeline', () => {
);
});
it('should ignore request.model override and always use configured model', async () => {
// Arrange
const request: GenerateContentParameters = {
model: 'override-model',
contents: [{ parts: [{ text: 'Hello' }], role: 'user' }],
};
const userPromptId = 'test-prompt-id';
const mockMessages = [
{ role: 'user', content: 'Hello' },
] as OpenAI.Chat.ChatCompletionMessageParam[];
const mockOpenAIResponse = {
id: 'response-id',
choices: [
{ message: { content: 'Hello response' }, finish_reason: 'stop' },
],
created: Date.now(),
model: 'override-model',
} as OpenAI.Chat.ChatCompletion;
const mockGeminiResponse = new GenerateContentResponse();
(mockConverter.convertGeminiRequestToOpenAI as Mock).mockReturnValue(
mockMessages,
);
(mockConverter.convertOpenAIResponseToGemini as Mock).mockReturnValue(
mockGeminiResponse,
);
(mockClient.chat.completions.create as Mock).mockResolvedValue(
mockOpenAIResponse,
);
// Act
const result = await pipeline.execute(request, userPromptId);
// Assert
expect(result).toBe(mockGeminiResponse);
expect(
(mockConverter as unknown as { setModel: Mock }).setModel,
).toHaveBeenCalledWith('test-model');
expect(mockClient.chat.completions.create).toHaveBeenCalledWith(
expect.objectContaining({
model: 'test-model',
}),
expect.any(Object),
);
});
it('should handle tools in request', async () => {
// Arrange
const request: GenerateContentParameters = {
@@ -269,9 +217,6 @@ describe('ContentGenerationPipeline', () => {
// Assert
expect(result).toBe(mockGeminiResponse);
expect(
(mockConverter as unknown as { setModel: Mock }).setModel,
).toHaveBeenCalledWith('test-model');
expect(mockConverter.convertGeminiToolsToOpenAI).toHaveBeenCalledWith(
request.config!.tools,
);

View File

@@ -40,16 +40,10 @@ export class ContentGenerationPipeline {
request: GenerateContentParameters,
userPromptId: string,
): Promise<GenerateContentResponse> {
// For OpenAI-compatible providers, the configured model is the single source of truth.
// We intentionally ignore request.model because upstream callers may pass a model string
// that is not valid/available for the OpenAI-compatible backend.
const effectiveModel = this.contentGeneratorConfig.model;
this.converter.setModel(effectiveModel);
return this.executeWithErrorHandling(
request,
userPromptId,
false,
effectiveModel,
async (openaiRequest) => {
const openaiResponse = (await this.client.chat.completions.create(
openaiRequest,
@@ -70,13 +64,10 @@ export class ContentGenerationPipeline {
request: GenerateContentParameters,
userPromptId: string,
): Promise<AsyncGenerator<GenerateContentResponse>> {
const effectiveModel = this.contentGeneratorConfig.model;
this.converter.setModel(effectiveModel);
return this.executeWithErrorHandling(
request,
userPromptId,
true,
effectiveModel,
async (openaiRequest, context) => {
// Stage 1: Create OpenAI stream
const stream = (await this.client.chat.completions.create(
@@ -233,13 +224,12 @@ export class ContentGenerationPipeline {
request: GenerateContentParameters,
userPromptId: string,
streaming: boolean = false,
effectiveModel: string,
): Promise<OpenAI.Chat.ChatCompletionCreateParams> {
const messages = this.converter.convertGeminiRequestToOpenAI(request);
// Apply provider-specific enhancements
const baseRequest: OpenAI.Chat.ChatCompletionCreateParams = {
model: effectiveModel,
model: this.contentGeneratorConfig.model,
messages,
...this.buildGenerateContentConfig(request),
};
@@ -327,22 +317,15 @@ export class ContentGenerationPipeline {
}
private buildReasoningConfig(): Record<string, unknown> {
// Reasoning configuration for OpenAI-compatible endpoints is highly fragmented.
// For example, across common providers and models:
//
// - deepseek-reasoner — thinking is enabled by default and cannot be disabled
// - glm-4.7 — thinking is enabled by default; can be disabled via `extra_body.thinking.enabled`
// - kimi-k2-thinking — thinking is enabled by default and cannot be disabled
// - gpt-5.x series — thinking is enabled by default; can be disabled via `reasoning.effort`
// - qwen3 series — model-dependent; can be manually disabled via `extra_body.enable_thinking`
//
// Given this inconsistency, we choose not to set any reasoning config here and
// instead rely on each model's default behavior.
const reasoning = this.contentGeneratorConfig.reasoning;
// We plan to introduce provider- and model-specific settings to enable more
// fine-grained control over reasoning configuration.
if (reasoning === false) {
return {};
}
return {};
return {
reasoning_effort: reasoning?.effort ?? 'medium',
};
}
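For clarity (not part of the diff), the variant of buildReasoningConfig that keeps the reasoning_effort field maps the configured reasoning setting as follows:
// reasoning === false               -> {}                              (reasoning disabled)
// reasoning === undefined           -> { reasoning_effort: 'medium' }  (default effort)
// reasoning === { effort: 'high' }  -> { reasoning_effort: 'high' }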
/**
@@ -352,24 +335,18 @@ export class ContentGenerationPipeline {
request: GenerateContentParameters,
userPromptId: string,
isStreaming: boolean,
effectiveModel: string,
executor: (
openaiRequest: OpenAI.Chat.ChatCompletionCreateParams,
context: RequestContext,
) => Promise<T>,
): Promise<T> {
const context = this.createRequestContext(
userPromptId,
isStreaming,
effectiveModel,
);
const context = this.createRequestContext(userPromptId, isStreaming);
try {
const openaiRequest = await this.buildRequest(
request,
userPromptId,
isStreaming,
effectiveModel,
);
const result = await executor(openaiRequest, context);
@@ -401,11 +378,10 @@ export class ContentGenerationPipeline {
private createRequestContext(
userPromptId: string,
isStreaming: boolean,
effectiveModel: string,
): RequestContext {
return {
userPromptId,
model: effectiveModel,
model: this.contentGeneratorConfig.model,
authType: this.contentGeneratorConfig.authType || 'unknown',
startTime: Date.now(),
duration: 0,

View File

@@ -58,6 +58,8 @@ export class DefaultOpenAICompatibleProvider
}
getDefaultGenerationConfig(): GenerateContentConfig {
return {};
return {
topP: 0.95,
};
}
}
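For context (not part of the diff), the variant that returns topP: 0.95 enables nucleus sampling by default; presumably a per-request generation config can still override it.
// topP: 0.95 restricts sampling to the smallest set of candidate tokens whose
// cumulative probability reaches 95%, trading a little diversity for stability.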

View File

@@ -0,0 +1,23 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
/**
* Defines the intent returned by the UI layer during a fallback scenario.
*/
export type FallbackIntent =
| 'retry' // Immediately retry the current request with the fallback model.
| 'stop' // Switch to fallback for future requests, but stop the current request.
| 'auth'; // Stop the current request; user intends to change authentication.
/**
* The interface for the handler provided by the UI layer (e.g., the CLI)
* to interact with the user during a fallback scenario.
*/
export type FallbackModelHandler = (
failedModel: string,
fallbackModel: string,
error?: unknown,
) => Promise<FallbackIntent | null>;
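For context (not part of the diff), a minimal sketch of a handler a UI layer might register against this interface; promptUser is a hypothetical helper standing in for whatever prompt mechanism the CLI actually uses.
// Sketch only; promptUser is a hypothetical prompt helper.
declare function promptUser(options: FallbackIntent[]): Promise<FallbackIntent | null>;
const fallbackHandler: FallbackModelHandler = async (failedModel, fallbackModel, error) => {
  console.warn(`Request to ${failedModel} failed; ${fallbackModel} is available as a fallback.`, error);
  return promptUser(['retry', 'stop', 'auth']);
};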

View File

@@ -9,30 +9,6 @@ export * from './config/config.js';
export * from './output/types.js';
export * from './output/json-formatter.js';
// Export models
export {
type ModelCapabilities,
type ModelGenerationConfig,
type ModelConfig as ProviderModelConfig,
type ModelProvidersConfig,
type ResolvedModelConfig,
type AvailableModel,
type ModelSwitchMetadata,
QWEN_OAUTH_MODELS,
ModelRegistry,
ModelsConfig,
type ModelsConfigOptions,
type OnModelChangeCallback,
// Model configuration resolver
resolveModelConfig,
validateModelConfig,
type ModelConfigSourcesInput,
type ModelConfigCliInput,
type ModelConfigSettingsInput,
type ModelConfigResolutionResult,
type ModelConfigValidationResult,
} from './models/index.js';
// Export Core Logic
export * from './core/client.js';
export * from './core/contentGenerator.js';
@@ -45,6 +21,8 @@ export * from './core/geminiRequest.js';
export * from './core/coreToolScheduler.js';
export * from './core/nonInteractiveToolExecutor.js';
export * from './fallback/types.js';
export * from './qwen/qwenOAuth2.js';
// Export utilities
@@ -60,7 +38,6 @@ export * from './utils/quotaErrorDetection.js';
export * from './utils/fileUtils.js';
export * from './utils/retry.js';
export * from './utils/shell-utils.js';
export * from './utils/tool-utils.js';
export * from './utils/terminalSerializer.js';
export * from './utils/systemEncoding.js';
export * from './utils/textUtils.js';
@@ -77,9 +54,6 @@ export * from './utils/projectSummary.js';
export * from './utils/promptIdContext.js';
export * from './utils/thoughtUtils.js';
// Config resolution utilities
export * from './utils/configResolver.js';
// Export services
export * from './services/fileDiscoveryService.js';
export * from './services/gitService.js';

Some files were not shown because too many files have changed in this diff.