Compare commits


46 Commits

Author SHA1 Message Date
github-actions[bot]
00a7530458 chore(release): v0.6.0-nightly.20251228.2bc80795 2025-12-28 00:14:32 +00:00
pomelo
2bc8079519 Merge pull request #1332 from QwenLM/fix-language
Fix multi-language and documentation related issues.
2025-12-26 23:02:37 +08:00
pomelo-nwu
25dbe98e6e fix(cli): prevent HTML comment escape by sanitizing --!> and --> 2025-12-26 22:45:35 +08:00
pomelo-nwu
e5dbd69899 feat: fix ci 2025-12-26 22:38:44 +08:00
Mingholy
17eb20c134 Merge pull request #1322 from QwenLM/mingholy/feat/headless-slash-commands
feat: support /compress and /summary commands for non-interactive & ACP
2025-12-26 18:10:55 +08:00
mingholy.lmh
5d59ceb6f3 fix: explicit output if command is not supported 2025-12-26 17:55:03 +08:00
mingholy.lmh
7f645b9726 fix: wrong slash_command in systemMessage 2025-12-26 17:55:03 +08:00
mingholy.lmh
8c109be48c refactor: unified allow list of supported commands in ACP or non-interactive mode 2025-12-26 17:55:03 +08:00
mingholy.lmh
e9a1d9a927 fix: failed unit test cases 2025-12-26 17:55:02 +08:00
mingholy.lmh
8aceddffa2 feat: support /compress and /summary commands for non-interactive & ACP
integration
2025-12-26 17:55:02 +08:00
tanzhenxin
cebe0448d0 Merge pull request #1327 from QwenLM/feat/vscode-ide-companion-context-left
context left on vscode ide companion
2025-12-26 17:26:35 +08:00
tanzhenxin
919560e3a4 Merge pull request #1345 from QwenLM/feat/vscode-ida-companion-bash-toolcall-click-2
feat(vscode-ide-companion): in/output part in the bash toolcall can be clicked to open a temporary file
2025-12-26 16:55:35 +08:00
Mingholy
26bd4f882d Merge pull request #1334 from QwenLM/mingholy/chore/skip-bumping-unstable-sdk-version
chore: improve release-sdk workflow
2025-12-26 16:21:53 +08:00
tanzhenxin
3787e95572 Merge pull request #1349 from QwenLM/fix/integration-test-3
fix one flaky integration test
2025-12-26 09:43:02 +08:00
tanzhenxin
7233d37bd1 fix one flaky integration test 2025-12-26 09:20:24 +08:00
yiliang114
93dcca5147 fix(vscode-ide-companion): fix test 2025-12-26 00:28:45 +08:00
cwtuan
f7d04323f3 Enhance VS Code extension description with download link (#1341)
Updated the VS Code extension note with a download link for the Qwen Code Companion.
2025-12-25 23:58:52 +08:00
yiliang114
9a27857f10 feat(vscode-ide-companion): support context left 2025-12-25 23:53:55 +08:00
yiliang114
452f4f3c0e Merge branch 'main' of https://github.com/QwenLM/qwen-code into feat/vscode-ide-companion-context-left 2025-12-25 23:51:57 +08:00
yiliang114
5cc01e5e09 feat(vscode-ide-companion): support context left 2025-12-25 23:51:50 +08:00
yiliang114
ac0be9fb84 feat(vscode-ide-companion): in/output part in the bash toolcall can be clicked to open a temporary file. 2025-12-25 16:59:32 +08:00
tanzhenxin
257c6705e1 Merge pull request #1343 from QwenLM/fix/integration-test-2
fix one flaky integration test
2025-12-25 16:08:54 +08:00
tanzhenxin
27e7438b75 fix one flaky integration test 2025-12-25 16:08:06 +08:00
tanzhenxin
8a3ff8db12 Merge pull request #1340 from QwenLM/feat/anthropic-provider-1
Follow up on pr #1331
2025-12-25 15:44:52 +08:00
tanzhenxin
26f8b67d4f add missing file 2025-12-25 15:24:56 +08:00
tanzhenxin
b64d636280 anthropic provider support follow-up 2025-12-25 15:24:42 +08:00
tanzhenxin
781c57b438 Merge pull request #1331 from QwenLM/feat/support-anthropic-provider
feat: add Anthropic provider, normalize auth/env config, and centralize logging
2025-12-25 11:44:38 +08:00
mingholy.lmh
c81c24d45d chore: improve release-sdk workflow 2025-12-25 10:46:57 +08:00
tanzhenxin
c53bdde747 support reasoning.budget_tokens config option 2025-12-25 10:18:38 +08:00
tanzhenxin
99db18069d add interleaved-thinking-2025-05-14 beta header for anthropic content generator 2025-12-25 09:42:06 +08:00
tanzhenxin
a0a5b831d4 add a few more tests 2025-12-24 20:54:40 +08:00
tanzhenxin
8f74dd224c add tests for loggingContentGenerator 2025-12-24 19:41:46 +08:00
tanzhenxin
b931d28f35 feat(core,cli): add Anthropic provider, normalize auth/env config, and centralize logging 2025-12-24 19:00:56 +08:00
mingholy.lmh
4407597794 chore: skip bumping sdk version when release nightly/preview or dry run 2025-12-24 18:12:23 +08:00
pomelo-nwu
101bd5f9b3 i18n: fix missing translations for /clear command 2025-12-24 17:28:49 +08:00
pomelo-nwu
61c626b618 fix: check-i18n script 2025-12-24 17:22:21 +08:00
pomelo-nwu
a28278e950 feat: update code 2025-12-24 17:12:27 +08:00
yiliang114
90bf101040 chore(vscode-ide-companion): simplify the implementation of context remaining 2025-12-24 14:29:25 +08:00
pomelo
a8f7bab544 Merge pull request #1293 from fazilus/feat/russian
feat(i18n): update Russian translation with new strings
2025-12-24 11:38:29 +08:00
pomelo-nwu
4ca62ba836 feat: adjust code 2025-12-24 10:26:30 +08:00
yiliang114
660901e1fd Merge branch 'main' of https://github.com/QwenLM/qwen-code into feat/vscode-ide-companion-context-left 2025-12-24 10:10:32 +08:00
yiliang114
8e64c5acaf feat(vscode-ide-companion): support context left 2025-12-24 01:09:21 +08:00
pomelo
398a1044ce Merge pull request #1247 from afarber/1244-language-output-default
feat(i18n): auto-detect LLM output language from system locale
2025-12-23 15:49:06 +08:00
Alexander Farber
f07259a7c9 Add German UI language support and normalize locale codes for LLM output 2025-12-20 10:21:16 +01:00
Alexander Farber
4d9f25e9fe Auto-detect LLM output language from system locale on first startup 2025-12-20 10:21:16 +01:00
Fazil
15efeb0107 feat(i18n): update Russian translation with new strings 2025-12-18 15:14:08 +03:00
123 changed files with 6737 additions and 3399 deletions

View File

@@ -91,6 +91,8 @@ jobs:
with:
node-version-file: '.nvmrc'
cache: 'npm'
registry-url: 'https://registry.npmjs.org'
scope: '@qwen-code'
- name: 'Install Dependencies'
run: |-
@@ -126,6 +128,14 @@ jobs:
IS_PREVIEW: '${{ steps.vars.outputs.is_preview }}'
MANUAL_VERSION: '${{ inputs.version }}'
- name: 'Set SDK package version (local only)'
env:
RELEASE_VERSION: '${{ steps.version.outputs.RELEASE_VERSION }}'
run: |-
# Ensure the package version matches the computed release version.
# This is required for nightly/preview because npm does not allow re-publishing the same version.
npm version -w @qwen-code/sdk "${RELEASE_VERSION}" --no-git-tag-version --allow-same-version
- name: 'Build CLI Bundle'
run: |
npm run build
@@ -158,7 +168,21 @@ jobs:
git config user.name "github-actions[bot]"
git config user.email "github-actions[bot]@users.noreply.github.com"
- name: 'Build SDK'
working-directory: 'packages/sdk-typescript'
run: |-
npm run build
- name: 'Publish @qwen-code/sdk'
working-directory: 'packages/sdk-typescript'
run: |-
npm publish --access public --tag=${{ steps.version.outputs.NPM_TAG }} ${{ steps.vars.outputs.is_dry_run == 'true' && '--dry-run' || '' }}
env:
NODE_AUTH_TOKEN: '${{ secrets.NPM_TOKEN }}'
- name: 'Create and switch to a release branch'
if: |-
${{ steps.vars.outputs.is_dry_run == 'false' && steps.vars.outputs.is_nightly == 'false' && steps.vars.outputs.is_preview == 'false' }}
id: 'release_branch'
env:
RELEASE_TAG: '${{ steps.version.outputs.RELEASE_TAG }}'
@@ -167,50 +191,22 @@ jobs:
git switch -c "${BRANCH_NAME}"
echo "BRANCH_NAME=${BRANCH_NAME}" >> "${GITHUB_OUTPUT}"
- name: 'Update package version'
env:
RELEASE_VERSION: '${{ steps.version.outputs.RELEASE_VERSION }}'
run: |-
# Use npm workspaces so the root lockfile is updated consistently.
npm version -w @qwen-code/sdk "${RELEASE_VERSION}" --no-git-tag-version --allow-same-version
- name: 'Commit and Conditionally Push package version'
- name: 'Commit and Push package version (stable only)'
if: |-
${{ steps.vars.outputs.is_dry_run == 'false' && steps.vars.outputs.is_nightly == 'false' && steps.vars.outputs.is_preview == 'false' }}
env:
BRANCH_NAME: '${{ steps.release_branch.outputs.BRANCH_NAME }}'
IS_DRY_RUN: '${{ steps.vars.outputs.is_dry_run }}'
RELEASE_TAG: '${{ steps.version.outputs.RELEASE_TAG }}'
run: |-
# Only persist version bumps after a successful publish.
git add packages/sdk-typescript/package.json package-lock.json
if git diff --staged --quiet; then
echo "No version changes to commit"
else
git commit -m "chore(release): sdk-typescript ${RELEASE_TAG}"
fi
if [[ "${IS_DRY_RUN}" == "false" ]]; then
echo "Pushing release branch to remote..."
git push --set-upstream origin "${BRANCH_NAME}" --follow-tags
else
echo "Dry run enabled. Skipping push."
fi
- name: 'Build SDK'
working-directory: 'packages/sdk-typescript'
run: |-
npm run build
- name: 'Configure npm for publishing'
uses: 'actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020' # ratchet:actions/setup-node@v4
with:
node-version-file: '.nvmrc'
registry-url: 'https://registry.npmjs.org'
scope: '@qwen-code'
- name: 'Publish @qwen-code/sdk'
working-directory: 'packages/sdk-typescript'
run: |-
npm publish --access public --tag=${{ steps.version.outputs.NPM_TAG }} ${{ steps.vars.outputs.is_dry_run == 'true' && '--dry-run' || '' }}
env:
NODE_AUTH_TOKEN: '${{ secrets.NPM_TOKEN }}'
echo "Pushing release branch to remote..."
git push --set-upstream origin "${BRANCH_NAME}" --follow-tags
- name: 'Create GitHub Release and Tag'
if: |-
@@ -220,16 +216,29 @@ jobs:
RELEASE_BRANCH: '${{ steps.release_branch.outputs.BRANCH_NAME }}'
RELEASE_TAG: '${{ steps.version.outputs.RELEASE_TAG }}'
PREVIOUS_RELEASE_TAG: '${{ steps.version.outputs.PREVIOUS_RELEASE_TAG }}'
IS_NIGHTLY: '${{ steps.vars.outputs.is_nightly }}'
IS_PREVIEW: '${{ steps.vars.outputs.is_preview }}'
REF: '${{ github.event.inputs.ref || github.sha }}'
run: |-
# For stable releases, use the release branch; for nightly/preview, use the current ref
if [[ "${IS_NIGHTLY}" == "true" || "${IS_PREVIEW}" == "true" ]]; then
TARGET="${REF}"
PRERELEASE_FLAG="--prerelease"
else
TARGET="${RELEASE_BRANCH}"
PRERELEASE_FLAG=""
fi
gh release create "sdk-typescript-${RELEASE_TAG}" \
--target "$RELEASE_BRANCH" \
--target "${TARGET}" \
--title "SDK TypeScript Release ${RELEASE_TAG}" \
--notes-start-tag "sdk-typescript-${PREVIOUS_RELEASE_TAG}" \
--generate-notes
--generate-notes \
${PRERELEASE_FLAG}
- name: 'Create PR to merge release branch into main'
if: |-
${{ steps.vars.outputs.is_dry_run == 'false' }}
${{ steps.vars.outputs.is_dry_run == 'false' && steps.vars.outputs.is_nightly == 'false' && steps.vars.outputs.is_preview == 'false' }}
id: 'pr'
env:
GITHUB_TOKEN: '${{ secrets.GITHUB_TOKEN }}'
@@ -251,7 +260,7 @@ jobs:
- name: 'Wait for CI checks to complete'
if: |-
${{ steps.vars.outputs.is_dry_run == 'false' }}
${{ steps.vars.outputs.is_dry_run == 'false' && steps.vars.outputs.is_nightly == 'false' && steps.vars.outputs.is_preview == 'false' }}
env:
GITHUB_TOKEN: '${{ secrets.GITHUB_TOKEN }}'
PR_URL: '${{ steps.pr.outputs.PR_URL }}'
@@ -262,7 +271,7 @@ jobs:
- name: 'Enable auto-merge for release PR'
if: |-
${{ steps.vars.outputs.is_dry_run == 'false' }}
${{ steps.vars.outputs.is_dry_run == 'false' && steps.vars.outputs.is_nightly == 'false' && steps.vars.outputs.is_preview == 'false' }}
env:
GITHUB_TOKEN: '${{ secrets.GITHUB_TOKEN }}'
PR_URL: '${{ steps.pr.outputs.PR_URL }}'

View File

@@ -10,4 +10,5 @@ export default {
mcp: 'MCP',
'token-caching': 'Token Caching',
sandbox: 'Sandboxing',
language: 'i18n',
};

View File

@@ -48,7 +48,7 @@ Commands specifically for controlling interface and output language.
| → `ui [language]` | Set UI interface language | `/language ui zh-CN` |
| → `output [language]` | Set LLM output language | `/language output Chinese` |
- Available UI languages: `zh-CN` (Simplified Chinese), `en-US` (English)
- Available built-in UI languages: `zh-CN` (Simplified Chinese), `en-US` (English), `ru-RU` (Russian), `de-DE` (German)
- Output language examples: `Chinese`, `English`, `Japanese`, etc.
### 1.4 Tool and Model Management
@@ -72,17 +72,16 @@ Commands for managing AI tools and models.
Commands for obtaining information and performing system settings.
| Command | Description | Usage Examples |
| --------------- | ----------------------------------------------- | ------------------------------------------------ |
| `/help` | Display help information for available commands | `/help` or `/?` |
| `/about` | Display version information | `/about` |
| `/stats` | Display detailed statistics for current session | `/stats` |
| `/settings` | Open settings editor | `/settings` |
| `/auth` | Change authentication method | `/auth` |
| `/bug` | Submit issue about Qwen Code | `/bug Button click unresponsive` |
| `/copy` | Copy last output content to clipboard | `/copy` |
| `/quit-confirm` | Show confirmation dialog before quitting | `/quit-confirm` (shortcut: press `Ctrl+C` twice) |
| `/quit` | Exit Qwen Code immediately | `/quit` or `/exit` |
| Command | Description | Usage Examples |
| ----------- | ----------------------------------------------- | -------------------------------- |
| `/help` | Display help information for available commands | `/help` or `/?` |
| `/about` | Display version information | `/about` |
| `/stats` | Display detailed statistics for current session | `/stats` |
| `/settings` | Open settings editor | `/settings` |
| `/auth` | Change authentication method | `/auth` |
| `/bug` | Submit issue about Qwen Code | `/bug Button click unresponsive` |
| `/copy` | Copy last output content to clipboard | `/copy` |
| `/quit` | Exit Qwen Code immediately | `/quit` or `/exit` |
### 1.6 Common Shortcuts

View File

@@ -0,0 +1,136 @@
# Internationalization (i18n) & Language
Qwen Code is built for multilingual workflows: it supports UI localization (i18n/l10n) in the CLI, lets you choose the assistant output language, and allows custom UI language packs.
## Overview
From a user point of view, Qwen Code's “internationalization” spans multiple layers:
| Capability / Setting | What it controls | Where stored |
| ------------------------ | ---------------------------------------------------------------------- | ---------------------------- |
| `/language ui` | Terminal UI text (menus, system messages, prompts) | `~/.qwen/settings.json` |
| `/language output` | Language the AI responds in (an output preference, not UI translation) | `~/.qwen/output-language.md` |
| Custom UI language packs | Overrides/extends built-in UI translations | `~/.qwen/locales/*.js` |
## UI Language
This is the CLI's UI localization layer (i18n/l10n): it controls the language of menus, prompts, and system messages.
### Setting the UI Language
Use the `/language ui` command:
```bash
/language ui zh-CN # Chinese
/language ui en-US # English
/language ui ru-RU # Russian
/language ui de-DE # German
```
Aliases are also supported:
```bash
/language ui zh # Chinese
/language ui en # English
/language ui ru # Russian
/language ui de # German
```
### Auto-detection
On first startup, Qwen Code detects your system locale and sets the UI language automatically.
Detection priority:
1. `QWEN_CODE_LANG` environment variable
2. `LANG` environment variable
3. System locale via JavaScript Intl API
4. Default: English
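A condensed TypeScript sketch of this priority chain, consistent with the `detectSystemLanguage` hunk in `i18n.ts` further down in this diff (the exact environment-variable handling sits above that hunk and is assumed here):

```typescript
// Sketch: UI-language auto-detection priority (env handling assumed).
function detectUiLanguage(): 'en' | 'zh' | 'ru' | 'de' {
  // 1-2. Environment variables win: QWEN_CODE_LANG, then LANG.
  const envLang = process.env['QWEN_CODE_LANG'] ?? process.env['LANG'];
  for (const code of ['zh', 'en', 'ru', 'de'] as const) {
    if (envLang?.startsWith(code)) return code;
  }
  // 3. Fall back to the system locale via the JavaScript Intl API.
  try {
    const locale = Intl.DateTimeFormat().resolvedOptions().locale;
    for (const code of ['zh', 'ru', 'de'] as const) {
      if (locale.startsWith(code)) return code;
    }
  } catch {
    // ignore and fall through to the default
  }
  // 4. Default: English.
  return 'en';
}
```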
## LLM Output Language
The LLM output language controls what language the AI assistant responds in, regardless of what language you type your questions in.
### How It Works
The LLM output language is controlled by a rule file at `~/.qwen/output-language.md`. This file is automatically included in the LLM's context during startup, instructing it to respond in the specified language.
### Auto-detection
On first startup, if no `output-language.md` file exists, Qwen Code automatically creates one based on your system locale. For example:
- System locale `zh` creates a rule for Chinese responses
- System locale `en` creates a rule for English responses
- System locale `ru` creates a rule for Russian responses
- System locale `de` creates a rule for German responses
### Manual Setting
Use `/language output <language>` to change:
```bash
/language output Chinese
/language output English
/language output Japanese
/language output German
```
Any language name works. The LLM will be instructed to respond in that language.
> [!note]
>
> After changing the output language, restart Qwen Code for the change to take effect.
### File Location
```
~/.qwen/output-language.md
```
## Configuration
### Via Settings Dialog
1. Run `/settings`
2. Find "Language" under General
3. Select your preferred UI language
### Via Environment Variable
```bash
export QWEN_CODE_LANG=zh
```
This influences auto-detection on first startup (if you haven't set a UI language and no `output-language.md` file exists yet).
## Custom Language Packs
For UI translations, you can create custom language packs in `~/.qwen/locales/`:
- Example: `~/.qwen/locales/es.js` for Spanish
- Example: `~/.qwen/locales/fr.js` for French
User directory takes precedence over built-in translations.
> [!tip]
>
> Contributions are welcome if you'd like to improve built-in translations or add new languages.
> For a concrete example, see [PR #1238: feat(i18n): add Russian language support](https://github.com/QwenLM/qwen-code/pull/1238).
### Language Pack Format
```javascript
// ~/.qwen/locales/es.js
export default {
Hello: 'Hola',
Settings: 'Configuración',
// ... more translations
};
```
## Related Commands
- `/language` - Show current language settings
- `/language ui [lang]` - Set UI language
- `/language output <language>` - Set LLM output language
- `/settings` - Open settings dialog

View File

@@ -1,4 +1,6 @@
# Qwen Code overview
[![@qwen-code/qwen-code downloads](https://img.shields.io/npm/dw/@qwen-code/qwen-code.svg)](https://npm-compare.com/@qwen-code/qwen-code)
[![@qwen-code/qwen-code version](https://img.shields.io/npm/v/@qwen-code/qwen-code.svg)](https://www.npmjs.com/package/@qwen-code/qwen-code)
> Learn about Qwen Code, Qwen's agentic coding tool that lives in your terminal and helps you turn ideas into code faster than ever before.
@@ -46,7 +48,7 @@ You'll be prompted to log in on first use. That's it! [Continue with Quickstart
> [!note]
>
> **New VS Code Extension (Beta)**: Prefer a graphical interface? Our new **VS Code extension** provides an easy-to-use native IDE experience without requiring terminal familiarity. Simply install from the marketplace and start coding with Qwen Code directly in your sidebar. You can search for **Qwen Code** in the VS Code Marketplace and download it.
> **New VS Code Extension (Beta)**: Prefer a graphical interface? Our new **VS Code extension** provides an easy-to-use native IDE experience without requiring terminal familiarity. Simply install from the marketplace and start coding with Qwen Code directly in your sidebar. Download and install the [Qwen Code Companion](https://marketplace.visualstudio.com/items?itemName=qwenlm.qwen-code-vscode-ide-companion) now.
## What Qwen Code does for you

View File

@@ -24,6 +24,8 @@ export default tseslint.config(
'.integration-tests/**',
'packages/**/.integration-test/**',
'dist/**',
'docs-site/.next/**',
'docs-site/out/**',
],
},
eslint.configs.recommended,

View File

@@ -5,8 +5,6 @@
*/
import { describe, it, expect } from 'vitest';
import { existsSync } from 'node:fs';
import * as path from 'node:path';
import { TestRig, printDebugInfo, validateModelOutput } from './test-helper.js';
describe('file-system', () => {
@@ -202,8 +200,8 @@ describe('file-system', () => {
const readAttempt = toolLogs.find(
(log) => log.toolRequest.name === 'read_file',
);
const writeAttempt = toolLogs.find(
(log) => log.toolRequest.name === 'write_file',
const editAttempt = toolLogs.find(
(log) => log.toolRequest.name === 'edit_file',
);
const successfulReplace = toolLogs.find(
(log) => log.toolRequest.name === 'replace' && log.toolRequest.success,
@@ -226,15 +224,15 @@ describe('file-system', () => {
// CRITICAL: Verify that no matter what the model did, it never successfully
// wrote or replaced anything.
if (writeAttempt) {
if (editAttempt) {
console.error(
'A write_file attempt was made when no file should be written.',
'A edit_file attempt was made when no file should be written.',
);
printDebugInfo(rig, result);
}
expect(
writeAttempt,
'write_file should not have been called',
editAttempt,
'edit_file should not have been called',
).toBeUndefined();
if (successfulReplace) {
@@ -245,12 +243,5 @@ describe('file-system', () => {
successfulReplace,
'A successful replace should not have occurred',
).toBeUndefined();
// Final verification: ensure the file was not created.
const filePath = path.join(rig.testDir!, fileName);
const fileExists = existsSync(filePath);
expect(fileExists, 'The non-existent file should not be created').toBe(
false,
);
});
});

View File

@@ -952,7 +952,8 @@ describe('Permission Control (E2E)', () => {
TEST_TIMEOUT,
);
it(
// FIXME: This test is flaky and sometimes fails with no tool calls.
it.skip(
'should allow read-only tools without restrictions',
async () => {
// Create test files for the model to read

View File

@@ -314,4 +314,88 @@ describe('System Control (E2E)', () => {
);
});
});
describe('supportedCommands API', () => {
it('should return list of supported slash commands', async () => {
const sessionId = crypto.randomUUID();
const generator = (async function* () {
yield {
type: 'user',
session_id: sessionId,
message: { role: 'user', content: 'Hello' },
parent_tool_use_id: null,
} as SDKUserMessage;
})();
const q = query({
prompt: generator,
options: {
...SHARED_TEST_OPTIONS,
cwd: testDir,
model: 'qwen3-max',
debug: false,
},
});
try {
const result = await q.supportedCommands();
// Start consuming messages to trigger initialization
const messageConsumer = (async () => {
try {
for await (const _message of q) {
// Just consume messages
}
} catch (error) {
// Ignore errors from query being closed
if (error instanceof Error && error.message !== 'Query is closed') {
throw error;
}
}
})();
// Verify result structure
expect(result).toBeDefined();
expect(result).toHaveProperty('commands');
expect(Array.isArray(result?.['commands'])).toBe(true);
const commands = result?.['commands'] as string[];
// Verify default allowed built-in commands are present
expect(commands).toContain('init');
expect(commands).toContain('summary');
expect(commands).toContain('compress');
// Verify commands are sorted
const sortedCommands = [...commands].sort();
expect(commands).toEqual(sortedCommands);
// Verify all commands are strings
commands.forEach((cmd) => {
expect(typeof cmd).toBe('string');
expect(cmd.length).toBeGreaterThan(0);
});
await q.close();
await messageConsumer;
} catch (error) {
await q.close();
throw error;
}
});
it('should throw error when supportedCommands is called on closed query', async () => {
const q = query({
prompt: 'Hello',
options: {
...SHARED_TEST_OPTIONS,
cwd: testDir,
model: 'qwen3-max',
},
});
await q.close();
await expect(q.supportedCommands()).rejects.toThrow('Query is closed');
});
});
});

package-lock.json (generated)
View File

@@ -1,12 +1,12 @@
{
"name": "@qwen-code/qwen-code",
"version": "0.6.0-nightly.20251225.9f65bd3b",
"version": "0.6.0-nightly.20251228.2bc80795",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "@qwen-code/qwen-code",
"version": "0.6.0-nightly.20251225.9f65bd3b",
"version": "0.6.0-nightly.20251228.2bc80795",
"workspaces": [
"packages/*"
],
@@ -134,6 +134,36 @@
"node": ">=6.0.0"
}
},
"node_modules/@anthropic-ai/sdk": {
"version": "0.36.3",
"resolved": "https://registry.npmjs.org/@anthropic-ai/sdk/-/sdk-0.36.3.tgz",
"integrity": "sha512-+c0mMLxL/17yFZ4P5+U6bTWiCSFZUKJddrv01ud2aFBWnTPLdRncYV76D3q1tqfnL7aCnhRtykFnoCFzvr4U3Q==",
"license": "MIT",
"dependencies": {
"@types/node": "^18.11.18",
"@types/node-fetch": "^2.6.4",
"abort-controller": "^3.0.0",
"agentkeepalive": "^4.2.1",
"form-data-encoder": "1.7.2",
"formdata-node": "^4.3.2",
"node-fetch": "^2.6.7"
}
},
"node_modules/@anthropic-ai/sdk/node_modules/@types/node": {
"version": "18.19.130",
"resolved": "https://registry.npmjs.org/@types/node/-/node-18.19.130.tgz",
"integrity": "sha512-GRaXQx6jGfL8sKfaIDD6OupbIHBr9jv7Jnaml9tB7l4v068PAOXqfcujMMo5PhbIs6ggR1XODELqahT2R8v0fg==",
"license": "MIT",
"dependencies": {
"undici-types": "~5.26.4"
}
},
"node_modules/@anthropic-ai/sdk/node_modules/undici-types": {
"version": "5.26.5",
"resolved": "https://registry.npmjs.org/undici-types/-/undici-types-5.26.5.tgz",
"integrity": "sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==",
"license": "MIT"
},
"node_modules/@asamuzakjp/css-color": {
"version": "3.2.0",
"resolved": "https://registry.npmjs.org/@asamuzakjp/css-color/-/css-color-3.2.0.tgz",
@@ -3822,6 +3852,16 @@
"undici-types": "~6.21.0"
}
},
"node_modules/@types/node-fetch": {
"version": "2.6.13",
"resolved": "https://registry.npmjs.org/@types/node-fetch/-/node-fetch-2.6.13.tgz",
"integrity": "sha512-QGpRVpzSaUs30JBSGPjOg4Uveu384erbHBoT1zeONvyCfwQxIkUshLAOqN/k9EjGviPRmWTTe6aH2qySWKTVSw==",
"license": "MIT",
"dependencies": {
"@types/node": "*",
"form-data": "^4.0.4"
}
},
"node_modules/@types/normalize-package-data": {
"version": "2.4.4",
"resolved": "https://registry.npmjs.org/@types/normalize-package-data/-/normalize-package-data-2.4.4.tgz",
@@ -4820,7 +4860,6 @@
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/abort-controller/-/abort-controller-3.0.0.tgz",
"integrity": "sha512-h8lQ8tacZYnR3vNQTgibj+tODHI5/+l06Au2Pcriv/Gmet0eaj4TwWH41sO9wnHDiQsEj19q0drzdWdeAHtweg==",
"dev": true,
"license": "MIT",
"dependencies": {
"event-target-shim": "^5.0.0"
@@ -4907,6 +4946,18 @@
"node": ">= 14"
}
},
"node_modules/agentkeepalive": {
"version": "4.6.0",
"resolved": "https://registry.npmjs.org/agentkeepalive/-/agentkeepalive-4.6.0.tgz",
"integrity": "sha512-kja8j7PjmncONqaTsB8fQ+wE2mSU2DJ9D4XKoJ5PFWIdRMa6SLSN1ff4mOr4jCbfRSsxR4keIiySJU0N9T5hIQ==",
"license": "MIT",
"dependencies": {
"humanize-ms": "^1.2.1"
},
"engines": {
"node": ">= 8.0.0"
}
},
"node_modules/ajv": {
"version": "6.12.6",
"resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz",
@@ -5478,7 +5529,6 @@
"version": "0.4.0",
"resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz",
"integrity": "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==",
"dev": true,
"license": "MIT"
},
"node_modules/atomically": {
@@ -6437,7 +6487,6 @@
"version": "1.0.8",
"resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.8.tgz",
"integrity": "sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==",
"dev": true,
"license": "MIT",
"dependencies": {
"delayed-stream": "~1.0.0"
@@ -7063,7 +7112,6 @@
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz",
"integrity": "sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=0.4.0"
@@ -7576,7 +7624,6 @@
"version": "2.1.0",
"resolved": "https://registry.npmjs.org/es-set-tostringtag/-/es-set-tostringtag-2.1.0.tgz",
"integrity": "sha512-j6vWzfrGVfyXxge+O0x5sh6cvxAog0a/4Rdd2K36zCMV5eJ+/+tOAngRO8cODMNWbVRdVlmGZQL2YS3yR8bIUA==",
"dev": true,
"license": "MIT",
"dependencies": {
"es-errors": "^1.3.0",
@@ -8106,7 +8153,6 @@
"version": "5.0.1",
"resolved": "https://registry.npmjs.org/event-target-shim/-/event-target-shim-5.0.1.tgz",
"integrity": "sha512-i/2XbnSz/uxRCU6+NdVJgKWDTM427+MqYbkQzD321DuCQJUqOuJKIA0IM2+W2xtYHdKOmZ4dR6fExsd4SXL+WQ==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=6"
@@ -8652,7 +8698,6 @@
"version": "4.0.4",
"resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.4.tgz",
"integrity": "sha512-KrGhL9Q4zjj0kiUt5OO4Mr/A/jlI2jDYs5eHBpYHPcBEVSiipAvn2Ko2HnPe20rmcuuvMHNdZFp+4IlGTMF0Ow==",
"dev": true,
"license": "MIT",
"dependencies": {
"asynckit": "^0.4.0",
@@ -8665,11 +8710,16 @@
"node": ">= 6"
}
},
"node_modules/form-data-encoder": {
"version": "1.7.2",
"resolved": "https://registry.npmjs.org/form-data-encoder/-/form-data-encoder-1.7.2.tgz",
"integrity": "sha512-qfqtYan3rxrnCk1VYaA4H+Ms9xdpPqvLZa6xmMgFvhO32x7/3J/ExcTd6qpxM0vH2GdMI+poehyBZvqfMTto8A==",
"license": "MIT"
},
"node_modules/form-data/node_modules/mime-types": {
"version": "2.1.35",
"resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz",
"integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==",
"dev": true,
"license": "MIT",
"dependencies": {
"mime-db": "1.52.0"
@@ -8678,6 +8728,28 @@
"node": ">= 0.6"
}
},
"node_modules/formdata-node": {
"version": "4.4.1",
"resolved": "https://registry.npmjs.org/formdata-node/-/formdata-node-4.4.1.tgz",
"integrity": "sha512-0iirZp3uVDjVGt9p49aTaqjk84TrglENEDuqfdlZQ1roC9CWlPk6Avf8EEnZNcAqPonwkG35x4n3ww/1THYAeQ==",
"license": "MIT",
"dependencies": {
"node-domexception": "1.0.0",
"web-streams-polyfill": "4.0.0-beta.3"
},
"engines": {
"node": ">= 12.20"
}
},
"node_modules/formdata-node/node_modules/web-streams-polyfill": {
"version": "4.0.0-beta.3",
"resolved": "https://registry.npmjs.org/web-streams-polyfill/-/web-streams-polyfill-4.0.0-beta.3.tgz",
"integrity": "sha512-QW95TCTaHmsYfHDybGMwO5IJIM93I/6vTRk+daHTWFPhwh+C8Cg7j7XyKrwrj8Ib6vYXe0ocYNrmzY4xAAN6ug==",
"license": "MIT",
"engines": {
"node": ">= 14"
}
},
"node_modules/formdata-polyfill": {
"version": "4.0.10",
"resolved": "https://registry.npmjs.org/formdata-polyfill/-/formdata-polyfill-4.0.10.tgz",
@@ -9262,7 +9334,6 @@
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/has-tostringtag/-/has-tostringtag-1.0.2.tgz",
"integrity": "sha512-NqADB8VjPFLM2V0VvHUewwwsw0ZWBaIdgo+ieHtK3hasLz4qeCRjYcqfB6AQrBggRKppKF8L52/VqdVsO47Dlw==",
"dev": true,
"license": "MIT",
"dependencies": {
"has-symbols": "^1.0.3"
@@ -9441,6 +9512,15 @@
"node": ">=16.17.0"
}
},
"node_modules/humanize-ms": {
"version": "1.2.1",
"resolved": "https://registry.npmjs.org/humanize-ms/-/humanize-ms-1.2.1.tgz",
"integrity": "sha512-Fl70vYtsAFb/C06PTS9dZBo7ihau+Tu/DNCk/OyHhea07S+aeMWpFFkUaXRa8fI+ScZbEI8dfSxwY7gxZ9SAVQ==",
"license": "MIT",
"dependencies": {
"ms": "^2.0.0"
}
},
"node_modules/husky": {
"version": "9.1.7",
"resolved": "https://registry.npmjs.org/husky/-/husky-9.1.7.tgz",
@@ -11940,6 +12020,48 @@
"node": ">=10.5.0"
}
},
"node_modules/node-fetch": {
"version": "2.7.0",
"resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.7.0.tgz",
"integrity": "sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A==",
"license": "MIT",
"dependencies": {
"whatwg-url": "^5.0.0"
},
"engines": {
"node": "4.x || >=6.0.0"
},
"peerDependencies": {
"encoding": "^0.1.0"
},
"peerDependenciesMeta": {
"encoding": {
"optional": true
}
}
},
"node_modules/node-fetch/node_modules/tr46": {
"version": "0.0.3",
"resolved": "https://registry.npmjs.org/tr46/-/tr46-0.0.3.tgz",
"integrity": "sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw==",
"license": "MIT"
},
"node_modules/node-fetch/node_modules/webidl-conversions": {
"version": "3.0.1",
"resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-3.0.1.tgz",
"integrity": "sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ==",
"license": "BSD-2-Clause"
},
"node_modules/node-fetch/node_modules/whatwg-url": {
"version": "5.0.0",
"resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-5.0.0.tgz",
"integrity": "sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw==",
"license": "MIT",
"dependencies": {
"tr46": "~0.0.3",
"webidl-conversions": "^3.0.0"
}
},
"node_modules/node-pty": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/node-pty/-/node-pty-1.0.0.tgz",
@@ -17194,7 +17316,7 @@
},
"packages/cli": {
"name": "@qwen-code/qwen-code",
"version": "0.6.0-nightly.20251225.9f65bd3b",
"version": "0.6.0-nightly.20251228.2bc80795",
"dependencies": {
"@google/genai": "1.30.0",
"@iarna/toml": "^2.2.5",
@@ -17831,9 +17953,10 @@
},
"packages/core": {
"name": "@qwen-code/qwen-code-core",
"version": "0.6.0-nightly.20251225.9f65bd3b",
"version": "0.6.0-nightly.20251228.2bc80795",
"hasInstallScript": true,
"dependencies": {
"@anthropic-ai/sdk": "^0.36.1",
"@google/genai": "1.30.0",
"@modelcontextprotocol/sdk": "^1.25.1",
"@opentelemetry/api": "^1.9.0",
@@ -18470,7 +18593,7 @@
},
"packages/sdk-typescript": {
"name": "@qwen-code/sdk",
"version": "0.6.0-nightly.20251225.9f65bd3b",
"version": "0.1.0",
"license": "Apache-2.0",
"dependencies": {
"@modelcontextprotocol/sdk": "^1.25.1",
@@ -21290,7 +21413,7 @@
},
"packages/test-utils": {
"name": "@qwen-code/qwen-code-test-utils",
"version": "0.6.0-nightly.20251225.9f65bd3b",
"version": "0.6.0-nightly.20251228.2bc80795",
"dev": true,
"license": "Apache-2.0",
"devDependencies": {
@@ -21302,7 +21425,7 @@
},
"packages/vscode-ide-companion": {
"name": "qwen-code-vscode-ide-companion",
"version": "0.6.0-nightly.20251225.9f65bd3b",
"version": "0.6.0-nightly.20251228.2bc80795",
"license": "LICENSE",
"dependencies": {
"@modelcontextprotocol/sdk": "^1.25.1",

View File

@@ -1,6 +1,6 @@
{
"name": "@qwen-code/qwen-code",
"version": "0.6.0-nightly.20251225.9f65bd3b",
"version": "0.6.0-nightly.20251228.2bc80795",
"engines": {
"node": ">=20.0.0"
},
@@ -13,7 +13,7 @@
"url": "git+https://github.com/QwenLM/qwen-code.git"
},
"config": {
"sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.6.0-nightly.20251225.9f65bd3b"
"sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.6.0-nightly.20251228.2bc80795"
},
"scripts": {
"start": "cross-env node scripts/start.js",

View File

@@ -1,6 +1,6 @@
{
"name": "@qwen-code/qwen-code",
"version": "0.6.0-nightly.20251225.9f65bd3b",
"version": "0.6.0-nightly.20251228.2bc80795",
"description": "Qwen Code",
"repository": {
"type": "git",
@@ -33,7 +33,7 @@
"dist"
],
"config": {
"sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.6.0-nightly.20251225.9f65bd3b"
"sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.6.0-nightly.20251228.2bc80795"
},
"dependencies": {
"@google/genai": "1.30.0",

View File

@@ -98,6 +98,14 @@ export class AgentSideConnection implements Client {
);
}
/**
* Sends a custom notification to the client.
* Used for extension-specific notifications that are not part of the core ACP protocol.
*/
async sendCustomNotification<T>(method: string, params: T): Promise<void> {
return await this.#connection.sendNotification(method, params);
}
/**
* Request permission before running a tool
*
@@ -374,6 +382,7 @@ export interface Client {
): Promise<schema.RequestPermissionResponse>;
sessionUpdate(params: schema.SessionNotification): Promise<void>;
authenticateUpdate(params: schema.AuthenticateUpdate): Promise<void>;
sendCustomNotification<T>(method: string, params: T): Promise<void>;
writeTextFile(
params: schema.WriteTextFileRequest,
): Promise<schema.WriteTextFileResponse>;
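For a sense of how this hook is used, the ACP `Session` further down in this diff emits slash-command output through it; a trimmed sketch of that call shape (the import path is assumed):

```typescript
import type { Client } from './acp.js'; // path assumed; interface extended above

async function notifySlashCommand(
  client: Client,
  sessionId: string,
): Promise<void> {
  // Method name and payload fields mirror the Session diff below.
  await client.sendCustomNotification('_qwencode/slash_command', {
    sessionId,
    command: '/summary',
    messageType: 'info',
    message: 'Generating project summary...',
  });
}
```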

View File

@@ -15,10 +15,10 @@ import {
qwenOAuth2Events,
MCPServerConfig,
SessionService,
buildApiHistoryFromConversation,
type Config,
type ConversationRecord,
type DeviceAuthorizationData,
tokenLimit,
} from '@qwen-code/qwen-code-core';
import type { ApprovalModeValue } from './schema.js';
import * as acp from './acp.js';
@@ -165,9 +165,30 @@ class GeminiAgent {
this.setupFileSystem(config);
const session = await this.createAndStoreSession(config);
const configuredModel = (
config.getModel() ||
this.config.getModel() ||
''
).trim();
const modelId = configuredModel || 'default';
const modelName = configuredModel || modelId;
return {
sessionId: session.getId(),
models: {
currentModelId: modelId,
availableModels: [
{
modelId,
name: modelName,
description: null,
_meta: {
contextLimit: tokenLimit(modelId),
},
},
],
_meta: null,
},
};
}
@@ -327,12 +348,20 @@ class GeminiAgent {
const sessionId = config.getSessionId();
const geminiClient = config.getGeminiClient();
const history = conversation
? buildApiHistoryFromConversation(conversation)
: undefined;
const chat = history
? await geminiClient.startChat(history)
: await geminiClient.startChat();
// Use GeminiClient to manage chat lifecycle properly
// This ensures geminiClient.chat is in sync with the session's chat
//
// Note: When loading a session, config.initialize() has already been called
// in newSessionConfig(), which in turn calls geminiClient.initialize().
// The GeminiClient.initialize() method checks config.getResumedSessionData()
// and automatically loads the conversation history into the chat instance.
// So we only need to initialize if it hasn't been done yet.
if (!geminiClient.isInitialized()) {
await geminiClient.initialize();
}
// Now get the chat instance that's managed by GeminiClient
const chat = geminiClient.getChat();
const session = new Session(
sessionId,

View File

@@ -93,6 +93,7 @@ export type ModeInfo = z.infer<typeof modeInfoSchema>;
export type ModesData = z.infer<typeof modesDataSchema>;
export type AgentInfo = z.infer<typeof agentInfoSchema>;
export type ModelInfo = z.infer<typeof modelInfoSchema>;
export type PromptCapabilities = z.infer<typeof promptCapabilitiesSchema>;
@@ -254,8 +255,26 @@ export const authenticateUpdateSchema = z.object({
export type AuthenticateUpdate = z.infer<typeof authenticateUpdateSchema>;
export const acpMetaSchema = z.record(z.unknown()).nullable().optional();
export const modelIdSchema = z.string();
export const modelInfoSchema = z.object({
_meta: acpMetaSchema,
description: z.string().nullable().optional(),
modelId: modelIdSchema,
name: z.string(),
});
export const sessionModelStateSchema = z.object({
_meta: acpMetaSchema,
availableModels: z.array(modelInfoSchema),
currentModelId: modelIdSchema,
});
export const newSessionResponseSchema = z.object({
sessionId: z.string(),
models: sessionModelStateSchema,
});
export const loadSessionResponseSchema = z.null();
@@ -514,6 +533,13 @@ export const currentModeUpdateSchema = z.object({
export type CurrentModeUpdate = z.infer<typeof currentModeUpdateSchema>;
export const currentModelUpdateSchema = z.object({
sessionUpdate: z.literal('current_model_update'),
model: modelInfoSchema,
});
export type CurrentModelUpdate = z.infer<typeof currentModelUpdateSchema>;
export const sessionUpdateSchema = z.union([
z.object({
content: contentBlockSchema,
@@ -555,6 +581,7 @@ export const sessionUpdateSchema = z.union([
sessionUpdate: z.literal('plan'),
}),
currentModeUpdateSchema,
currentModelUpdateSchema,
availableCommandsUpdateSchema,
]);
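To illustrate the new wire shape, a small sketch that validates a `new_session` response against these schemas; the literal values are illustrative only:

```typescript
import { newSessionResponseSchema } from './schema.js';

// Example payload (values made up) matching the schemas above.
const response = newSessionResponseSchema.parse({
  sessionId: 'session-123',
  models: {
    _meta: null,
    currentModelId: 'qwen3-coder-plus',
    availableModels: [
      {
        _meta: { contextLimit: 262144 }, // populated by acpPeer via tokenLimit()
        modelId: 'qwen3-coder-plus',
        name: 'qwen3-coder-plus',
        description: null,
      },
    ],
  },
});

console.log(response.models.currentModelId); // 'qwen3-coder-plus'
```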

View File

@@ -41,9 +41,11 @@ import * as fs from 'node:fs/promises';
import * as path from 'node:path';
import { z } from 'zod';
import { getErrorMessage } from '../../utils/errors.js';
import { normalizePartList } from '../../utils/nonInteractiveHelpers.js';
import {
handleSlashCommand,
getAvailableCommands,
type NonInteractiveSlashCommandResult,
} from '../../nonInteractiveCliCommands.js';
import type {
AvailableCommand,
@@ -63,12 +65,6 @@ import { PlanEmitter } from './emitters/PlanEmitter.js';
import { MessageEmitter } from './emitters/MessageEmitter.js';
import { SubAgentTracker } from './SubAgentTracker.js';
/**
* Built-in commands that are allowed in ACP integration mode.
* Only safe, read-only commands that don't require interactive UI.
*/
export const ALLOWED_BUILTIN_COMMANDS_FOR_ACP = ['init'];
/**
* Session represents an active conversation session with the AI model.
* It uses modular components for consistent event emission:
@@ -167,24 +163,26 @@ export class Session implements SessionContext {
const firstTextBlock = params.prompt.find((block) => block.type === 'text');
const inputText = firstTextBlock?.text || '';
let parts: Part[];
let parts: Part[] | null;
if (isSlashCommand(inputText)) {
// Handle slash command - allow specific built-in commands for ACP integration
// Handle slash command - uses default allowed commands (init, summary, compress)
const slashCommandResult = await handleSlashCommand(
inputText,
pendingSend,
this.config,
this.settings,
ALLOWED_BUILTIN_COMMANDS_FOR_ACP,
);
if (slashCommandResult) {
// Use the result from the slash command
parts = slashCommandResult as Part[];
} else {
// Slash command didn't return a prompt, continue with normal processing
parts = await this.#resolvePrompt(params.prompt, pendingSend.signal);
parts = await this.#processSlashCommandResult(
slashCommandResult,
params.prompt,
);
// If parts is null, the command was fully handled (e.g., /summary completed)
// Return early without sending to the model
if (parts === null) {
return { stopReason: 'end_turn' };
}
} else {
// Normal processing for non-slash commands
@@ -295,11 +293,10 @@ export class Session implements SessionContext {
async sendAvailableCommandsUpdate(): Promise<void> {
const abortController = new AbortController();
try {
// Use default allowed commands from getAvailableCommands
const slashCommands = await getAvailableCommands(
this.config,
this.settings,
abortController.signal,
ALLOWED_BUILTIN_COMMANDS_FOR_ACP,
);
// Convert SlashCommand[] to AvailableCommand[] format for ACP protocol
@@ -647,6 +644,103 @@ export class Session implements SessionContext {
}
}
/**
* Processes the result of a slash command execution.
*
* Supported result types in ACP mode:
* - submit_prompt: Submits content to the model
* - stream_messages: Streams multiple messages to the client (ACP-specific)
* - unsupported: Command cannot be executed in ACP mode
* - no_command: No command was found, use original prompt
*
* Note: 'message' type is not supported in ACP mode - commands should use
* 'stream_messages' instead for consistent async handling.
*
* @param result The result from handleSlashCommand
* @param originalPrompt The original prompt blocks
* @returns Parts to use for the prompt, or null if command was handled without needing model interaction
*/
async #processSlashCommandResult(
result: NonInteractiveSlashCommandResult,
originalPrompt: acp.ContentBlock[],
): Promise<Part[] | null> {
switch (result.type) {
case 'submit_prompt':
// Command wants to submit a prompt to the model
// Convert PartListUnion to Part[]
return normalizePartList(result.content);
case 'message': {
// 'message' type is not ideal for ACP mode, but we handle it for compatibility
// by converting it to a stream_messages-like notification
await this.client.sendCustomNotification('_qwencode/slash_command', {
sessionId: this.sessionId,
command: originalPrompt
.filter((block) => block.type === 'text')
.map((block) => (block.type === 'text' ? block.text : ''))
.join(' '),
messageType: result.messageType,
message: result.content || '',
});
if (result.messageType === 'error') {
// Throw error to stop execution
throw new Error(result.content || 'Slash command failed.');
}
// For info messages, return null to indicate command was handled
return null;
}
case 'stream_messages': {
// Command returns multiple messages via async generator (ACP-preferred)
const command = originalPrompt
.filter((block) => block.type === 'text')
.map((block) => (block.type === 'text' ? block.text : ''))
.join(' ');
// Stream all messages to the client
for await (const msg of result.messages) {
await this.client.sendCustomNotification('_qwencode/slash_command', {
sessionId: this.sessionId,
command,
messageType: msg.messageType,
message: msg.content,
});
// If we encounter an error message, throw after sending
if (msg.messageType === 'error') {
throw new Error(msg.content || 'Slash command failed.');
}
}
// All messages sent successfully, return null to indicate command was handled
return null;
}
case 'unsupported': {
// Command returned an unsupported result type
const unsupportedError = `Slash command not supported in ACP integration: ${result.reason}`;
throw new Error(unsupportedError);
}
case 'no_command':
// No command was found or executed, use original prompt
return originalPrompt.map((block) => {
if (block.type === 'text') {
return { text: block.text };
}
throw new Error(`Unsupported block type: ${block.type}`);
});
default: {
// Exhaustiveness check
const _exhaustive: never = result;
const unknownError = `Unknown slash command result type: ${(_exhaustive as NonInteractiveSlashCommandResult).type}`;
throw new Error(unknownError);
}
}
}
async #resolvePrompt(
message: acp.ContentBlock[],
abortSignal: AbortSignal,

View File

@@ -26,6 +26,20 @@ export function validateAuthMethod(authMethod: string): string | null {
return null;
}
if (authMethod === AuthType.USE_ANTHROPIC) {
const hasApiKey = process.env['ANTHROPIC_API_KEY'];
if (!hasApiKey) {
return 'ANTHROPIC_API_KEY environment variable not found.';
}
const hasBaseUrl = process.env['ANTHROPIC_BASE_URL'];
if (!hasBaseUrl) {
return 'ANTHROPIC_BASE_URL environment variable not found.';
}
return null;
}
if (authMethod === AuthType.USE_GEMINI) {
const hasApiKey = process.env['GEMINI_API_KEY'];
if (!hasApiKey) {

View File

@@ -2114,7 +2114,14 @@ describe('loadCliConfig model selection', () => {
});
it('always prefers model from argvs', async () => {
process.argv = ['node', 'script.js', '--model', 'qwen3-coder-plus'];
process.argv = [
'node',
'script.js',
'--auth-type',
'openai',
'--model',
'qwen3-coder-plus',
];
const argv = await parseArguments({} as Settings);
const config = await loadCliConfig(
{
@@ -2134,7 +2141,14 @@ describe('loadCliConfig model selection', () => {
});
it('selects the model from argvs if provided', async () => {
process.argv = ['node', 'script.js', '--model', 'qwen3-coder-plus'];
process.argv = [
'node',
'script.js',
'--auth-type',
'openai',
'--model',
'qwen3-coder-plus',
];
const argv = await parseArguments({} as Settings);
const config = await loadCliConfig(
{

View File

@@ -468,6 +468,7 @@ export async function parseArguments(settings: Settings): Promise<CliArgs> {
type: 'string',
choices: [
AuthType.USE_OPENAI,
AuthType.USE_ANTHROPIC,
AuthType.QWEN_OAUTH,
AuthType.USE_GEMINI,
AuthType.USE_VERTEX_AI,
@@ -876,11 +877,30 @@ export async function loadCliConfig(
);
}
const selectedAuthType =
(argv.authType as AuthType | undefined) ||
settings.security?.auth?.selectedType;
const apiKey =
(selectedAuthType === AuthType.USE_OPENAI
? argv.openaiApiKey ||
process.env['OPENAI_API_KEY'] ||
settings.security?.auth?.apiKey
: '') || '';
const baseUrl =
(selectedAuthType === AuthType.USE_OPENAI
? argv.openaiBaseUrl ||
process.env['OPENAI_BASE_URL'] ||
settings.security?.auth?.baseUrl
: '') || '';
const resolvedModel =
argv.model ||
process.env['OPENAI_MODEL'] ||
process.env['QWEN_MODEL'] ||
settings.model?.name;
(selectedAuthType === AuthType.USE_OPENAI
? process.env['OPENAI_MODEL'] ||
process.env['QWEN_MODEL'] ||
settings.model?.name
: '') ||
'';
const sandboxConfig = await loadSandboxConfig(settings, argv);
const screenReader =
@@ -967,23 +987,15 @@ export async function loadCliConfig(
extensions: allExtensions,
blockedMcpServers,
noBrowser: !!process.env['NO_BROWSER'],
authType:
(argv.authType as AuthType | undefined) ||
settings.security?.auth?.selectedType,
authType: selectedAuthType,
inputFormat,
outputFormat,
includePartialMessages,
generationConfig: {
...(settings.model?.generationConfig || {}),
model: resolvedModel,
apiKey:
argv.openaiApiKey ||
process.env['OPENAI_API_KEY'] ||
settings.security?.auth?.apiKey,
baseUrl:
argv.openaiBaseUrl ||
process.env['OPENAI_BASE_URL'] ||
settings.security?.auth?.baseUrl,
apiKey,
baseUrl,
enableOpenAILogging:
(typeof argv.openaiLogging === 'undefined'
? settings.model?.enableOpenAILogging

View File

@@ -15,6 +15,7 @@ import { type LoadedSettings, SettingScope } from '../config/settings.js';
import { performInitialAuth } from './auth.js';
import { validateTheme } from './theme.js';
import { initializeI18n } from '../i18n/index.js';
import { initializeLlmOutputLanguage } from '../ui/commands/languageCommand.js';
export interface InitializationResult {
authError: string | null;
@@ -41,6 +42,9 @@ export async function initializeApp(
'auto';
await initializeI18n(languageSetting);
// Auto-detect and set LLM output language on first use
initializeLlmOutputLanguage();
const authType = settings.merged.security?.auth?.selectedType;
const authError = await performInitialAuth(config, authType);

View File

@@ -1,6 +1,6 @@
/**
* @license
* Copyright 2025 Qwen
* Copyright 2025 Qwen team
* SPDX-License-Identifier: Apache-2.0
*/
@@ -8,15 +8,21 @@ import * as fs from 'node:fs';
import * as path from 'node:path';
import { fileURLToPath, pathToFileURL } from 'node:url';
import { homedir } from 'node:os';
import {
type SupportedLanguage,
getLanguageNameFromLocale,
} from './languages.js';
export type SupportedLanguage = 'en' | 'zh' | 'ru' | string; // Allow custom language codes
export type { SupportedLanguage };
export { getLanguageNameFromLocale };
// State
let currentLanguage: SupportedLanguage = 'en';
let translations: Record<string, string> = {};
let translations: Record<string, string | string[]> = {};
// Cache
type TranslationDict = Record<string, string>;
type TranslationValue = string | string[];
type TranslationDict = Record<string, TranslationValue>;
const translationCache: Record<string, TranslationDict> = {};
const loadingPromises: Record<string, Promise<TranslationDict>> = {};
@@ -52,11 +58,13 @@ export function detectSystemLanguage(): SupportedLanguage {
if (envLang?.startsWith('zh')) return 'zh';
if (envLang?.startsWith('en')) return 'en';
if (envLang?.startsWith('ru')) return 'ru';
if (envLang?.startsWith('de')) return 'de';
try {
const locale = Intl.DateTimeFormat().resolvedOptions().locale;
if (locale.startsWith('zh')) return 'zh';
if (locale.startsWith('ru')) return 'ru';
if (locale.startsWith('de')) return 'de';
} catch {
// Fallback to default
}
@@ -224,9 +232,25 @@ export function getCurrentLanguage(): SupportedLanguage {
export function t(key: string, params?: Record<string, string>): string {
const translation = translations[key] ?? key;
if (Array.isArray(translation)) {
return key;
}
return interpolate(translation, params);
}
/**
* Get a translation that is an array of strings.
* @param key The translation key
* @returns The array of strings, or an empty array if not found or not an array
*/
export function ta(key: string): string[] {
const translation = translations[key];
if (Array.isArray(translation)) {
return translation;
}
return [];
}
export async function initializeI18n(
lang?: SupportedLanguage | 'auto',
): Promise<void> {
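Given that split, a short sketch of how the two lookups behave once an array-valued key such as `WITTY_LOADING_PHRASES` (see the `en.js` diff below) is loaded; the import path is assumed:

```typescript
import { t, ta } from './i18n.js'; // path assumed

// String-valued keys go through t(), with optional interpolation:
const msg = t('Theme "{{themeName}}" not found.', { themeName: 'dark' });

// Array-valued keys use ta(); t() deliberately returns the key itself
// for arrays, and ta() returns [] for missing or non-array keys.
const phrases = ta('WITTY_LOADING_PHRASES');
const phrase = phrases[Math.floor(Math.random() * phrases.length)] ?? '';
```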

View File

@@ -0,0 +1,48 @@
/**
* @license
* Copyright 2025 Qwen team
* SPDX-License-Identifier: Apache-2.0
*/
export type SupportedLanguage = 'en' | 'zh' | 'ru' | 'de' | string;
export interface LanguageDefinition {
/** The internal locale code used by the i18n system (e.g., 'en', 'zh'). */
code: SupportedLanguage;
/** The standard name used in UI settings (e.g., 'en-US', 'zh-CN'). */
id: string;
/** The full English name of the language (e.g., 'English', 'Chinese'). */
fullName: string;
}
export const SUPPORTED_LANGUAGES: readonly LanguageDefinition[] = [
{
code: 'en',
id: 'en-US',
fullName: 'English',
},
{
code: 'zh',
id: 'zh-CN',
fullName: 'Chinese',
},
{
code: 'ru',
id: 'ru-RU',
fullName: 'Russian',
},
{
code: 'de',
id: 'de-DE',
fullName: 'German',
},
];
/**
* Maps a locale code to its English language name.
* Used for LLM output language instructions.
*/
export function getLanguageNameFromLocale(locale: SupportedLanguage): string {
const lang = SUPPORTED_LANGUAGES.find((l) => l.code === locale);
return lang?.fullName || 'English';
}
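A quick usage check against the table above:

```typescript
import { getLanguageNameFromLocale } from './languages.js';

getLanguageNameFromLocale('de'); // 'German'
getLanguageNameFromLocale('zh'); // 'Chinese'
getLanguageNameFromLocale('fr'); // 'English' (fallback for unmapped codes)
```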

View File

@@ -102,8 +102,8 @@ export default {
'Theme "{{themeName}}" not found.': 'Theme "{{themeName}}" not found.',
'Theme "{{themeName}}" not found in selected scope.':
'Theme "{{themeName}}" not found in selected scope.',
'clear the screen and conversation history':
'clear the screen and conversation history',
'Clear conversation history and free up context':
'Clear conversation history and free up context',
'Compresses the context by replacing it with a summary.':
'Compresses the context by replacing it with a summary.',
'open full Qwen Code documentation in your browser':
@@ -258,6 +258,8 @@ export default {
', Tab to change focus': ', Tab to change focus',
'To see changes, Qwen Code must be restarted. Press r to exit and apply changes now.':
'To see changes, Qwen Code must be restarted. Press r to exit and apply changes now.',
'The command "/{{command}}" is not supported in non-interactive mode.':
'The command "/{{command}}" is not supported in non-interactive mode.',
// ============================================================================
// Settings Labels
// ============================================================================
@@ -590,6 +592,12 @@ export default {
'No conversation found to summarize.': 'No conversation found to summarize.',
'Failed to generate project context summary: {{error}}':
'Failed to generate project context summary: {{error}}',
'Saved project summary to {{filePathForDisplay}}.':
'Saved project summary to {{filePathForDisplay}}.',
'Saving project summary...': 'Saving project summary...',
'Generating project summary...': 'Generating project summary...',
'Failed to generate summary - no text content received from LLM response':
'Failed to generate summary - no text content received from LLM response',
// ============================================================================
// Commands - Model
@@ -604,9 +612,10 @@ export default {
// ============================================================================
// Commands - Clear
// ============================================================================
'Clearing terminal and resetting chat.':
'Clearing terminal and resetting chat.',
'Clearing terminal.': 'Clearing terminal.',
'Starting a new session, resetting chat, and clearing terminal.':
'Starting a new session, resetting chat, and clearing terminal.',
'Starting a new session and clearing.':
'Starting a new session and clearing.',
// ============================================================================
// Commands - Compress
@@ -927,192 +936,138 @@ export default {
// ============================================================================
'Waiting for user confirmation...': 'Waiting for user confirmation...',
'(esc to cancel, {{time}})': '(esc to cancel, {{time}})',
"I'm Feeling Lucky": "I'm Feeling Lucky",
'Shipping awesomeness... ': 'Shipping awesomeness... ',
'Painting the serifs back on...': 'Painting the serifs back on...',
'Navigating the slime mold...': 'Navigating the slime mold...',
'Consulting the digital spirits...': 'Consulting the digital spirits...',
'Reticulating splines...': 'Reticulating splines...',
'Warming up the AI hamsters...': 'Warming up the AI hamsters...',
'Asking the magic conch shell...': 'Asking the magic conch shell...',
'Generating witty retort...': 'Generating witty retort...',
'Polishing the algorithms...': 'Polishing the algorithms...',
"Don't rush perfection (or my code)...":
// ============================================================================
// Loading Phrases
// ============================================================================
WITTY_LOADING_PHRASES: [
"I'm Feeling Lucky",
'Shipping awesomeness... ',
'Painting the serifs back on...',
'Navigating the slime mold...',
'Consulting the digital spirits...',
'Reticulating splines...',
'Warming up the AI hamsters...',
'Asking the magic conch shell...',
'Generating witty retort...',
'Polishing the algorithms...',
"Don't rush perfection (or my code)...",
'Brewing fresh bytes...': 'Brewing fresh bytes...',
'Counting electrons...': 'Counting electrons...',
'Engaging cognitive processors...': 'Engaging cognitive processors...',
'Checking for syntax errors in the universe...':
'Brewing fresh bytes...',
'Counting electrons...',
'Engaging cognitive processors...',
'Checking for syntax errors in the universe...',
'One moment, optimizing humor...': 'One moment, optimizing humor...',
'Shuffling punchlines...': 'Shuffling punchlines...',
'Untangling neural nets...': 'Untangling neural nets...',
'Compiling brilliance...': 'Compiling brilliance...',
'Loading wit.exe...': 'Loading wit.exe...',
'Summoning the cloud of wisdom...': 'Summoning the cloud of wisdom...',
'Preparing a witty response...': 'Preparing a witty response...',
"Just a sec, I'm debugging reality...":
'One moment, optimizing humor...',
'Shuffling punchlines...',
'Untangling neural nets...',
'Compiling brilliance...',
'Loading wit.exe...',
'Summoning the cloud of wisdom...',
'Preparing a witty response...',
"Just a sec, I'm debugging reality...",
'Confuzzling the options...': 'Confuzzling the options...',
'Tuning the cosmic frequencies...': 'Tuning the cosmic frequencies...',
'Crafting a response worthy of your patience...':
'Confuzzling the options...',
'Tuning the cosmic frequencies...',
'Crafting a response worthy of your patience...',
'Compiling the 1s and 0s...': 'Compiling the 1s and 0s...',
'Resolving dependencies... and existential crises...':
'Compiling the 1s and 0s...',
'Resolving dependencies... and existential crises...',
'Defragmenting memories... both RAM and personal...':
'Defragmenting memories... both RAM and personal...',
'Rebooting the humor module...': 'Rebooting the humor module...',
'Caching the essentials (mostly cat memes)...':
'Rebooting the humor module...',
'Caching the essentials (mostly cat memes)...',
'Optimizing for ludicrous speed': 'Optimizing for ludicrous speed',
"Swapping bits... don't tell the bytes...":
'Optimizing for ludicrous speed',
"Swapping bits... don't tell the bytes...",
'Garbage collecting... be right back...':
'Garbage collecting... be right back...',
'Assembling the interwebs...': 'Assembling the interwebs...',
'Converting coffee into code...': 'Converting coffee into code...',
'Updating the syntax for reality...': 'Updating the syntax for reality...',
'Rewiring the synapses...': 'Rewiring the synapses...',
'Looking for a misplaced semicolon...':
'Assembling the interwebs...',
'Converting coffee into code...',
'Updating the syntax for reality...',
'Rewiring the synapses...',
'Looking for a misplaced semicolon...',
"Greasin' the cogs of the machine...": "Greasin' the cogs of the machine...",
'Pre-heating the servers...': 'Pre-heating the servers...',
'Calibrating the flux capacitor...': 'Calibrating the flux capacitor...',
'Engaging the improbability drive...': 'Engaging the improbability drive...',
'Channeling the Force...': 'Channeling the Force...',
'Aligning the stars for optimal response...':
"Greasin' the cogs of the machine...",
'Pre-heating the servers...',
'Calibrating the flux capacitor...',
'Engaging the improbability drive...',
'Channeling the Force...',
'Aligning the stars for optimal response...',
'So say we all...': 'So say we all...',
'Loading the next great idea...': 'Loading the next great idea...',
"Just a moment, I'm in the zone...": "Just a moment, I'm in the zone...",
'Preparing to dazzle you with brilliance...':
'So say we all...',
'Loading the next great idea...',
"Just a moment, I'm in the zone...",
'Preparing to dazzle you with brilliance...',
"Just a tick, I'm polishing my wit...":
"Just a tick, I'm polishing my wit...",
"Hold tight, I'm crafting a masterpiece...":
"Hold tight, I'm crafting a masterpiece...",
"Just a jiffy, I'm debugging the universe...":
"Just a jiffy, I'm debugging the universe...",
"Just a moment, I'm aligning the pixels...":
"Just a moment, I'm aligning the pixels...",
"Just a sec, I'm optimizing the humor...":
"Just a sec, I'm optimizing the humor...",
"Just a moment, I'm tuning the algorithms...":
"Just a moment, I'm tuning the algorithms...",
'Warp speed engaged...': 'Warp speed engaged...',
'Mining for more Dilithium crystals...':
'Warp speed engaged...',
'Mining for more Dilithium crystals...',
"Don't panic...": "Don't panic...",
'Following the white rabbit...': 'Following the white rabbit...',
'The truth is in here... somewhere...':
"Don't panic...",
'Following the white rabbit...',
'The truth is in here... somewhere...',
'Blowing on the cartridge...': 'Blowing on the cartridge...',
'Loading... Do a barrel roll!': 'Loading... Do a barrel roll!',
'Waiting for the respawn...': 'Waiting for the respawn...',
'Finishing the Kessel Run in less than 12 parsecs...':
'Blowing on the cartridge...',
'Loading... Do a barrel roll!',
'Waiting for the respawn...',
'Finishing the Kessel Run in less than 12 parsecs...',
"The cake is not a lie, it's just still loading...":
"The cake is not a lie, it's just still loading...",
'Fiddling with the character creation screen...':
'Fiddling with the character creation screen...',
"Just a moment, I'm finding the right meme...":
"Just a moment, I'm finding the right meme...",
"Pressing 'A' to continue...": "Pressing 'A' to continue...",
'Herding digital cats...': 'Herding digital cats...',
'Polishing the pixels...': 'Polishing the pixels...',
'Finding a suitable loading screen pun...':
"Pressing 'A' to continue...",
'Herding digital cats...',
'Polishing the pixels...',
'Finding a suitable loading screen pun...',
'Distracting you with this witty phrase...':
'Distracting you with this witty phrase...',
'Almost there... probably...': 'Almost there... probably...',
'Our hamsters are working as fast as they can...':
'Almost there... probably...',
'Our hamsters are working as fast as they can...',
'Giving Cloudy a pat on the head...': 'Giving Cloudy a pat on the head...',
'Petting the cat...': 'Petting the cat...',
'Rickrolling my boss...': 'Rickrolling my boss...',
'Never gonna give you up, never gonna let you down...':
'Giving Cloudy a pat on the head...',
'Petting the cat...',
'Rickrolling my boss...',
'Never gonna give you up, never gonna let you down...',
'Slapping the bass...': 'Slapping the bass...',
'Tasting the snozberries...': 'Tasting the snozberries...',
"I'm going the distance, I'm going for speed...":
'Slapping the bass...',
'Tasting the snozberries...',
"I'm going the distance, I'm going for speed...",
'Is this the real life? Is this just fantasy?...':
'Is this the real life? Is this just fantasy?...',
"I've got a good feeling about this...":
"I've got a good feeling about this...",
'Poking the bear...': 'Poking the bear...',
'Doing research on the latest memes...':
'Poking the bear...',
'Doing research on the latest memes...',
'Figuring out how to make this more witty...':
'Figuring out how to make this more witty...',
'Hmmm... let me think...': 'Hmmm... let me think...',
'What do you call a fish with no eyes? A fsh...':
'Hmmm... let me think...',
'What do you call a fish with no eyes? A fsh...',
'Why did the computer go to therapy? It had too many bytes...':
'Why did the computer go to therapy? It had too many bytes...',
"Why don't programmers like nature? It has too many bugs...":
"Why don't programmers like nature? It has too many bugs...",
'Why do programmers prefer dark mode? Because light attracts bugs...':
'Why do programmers prefer dark mode? Because light attracts bugs...',
'Why did the developer go broke? Because they used up all their cache...':
'Why did the developer go broke? Because they used up all their cache...',
"What can you do with a broken pencil? Nothing, it's pointless...":
"What can you do with a broken pencil? Nothing, it's pointless...",
'Applying percussive maintenance...': 'Applying percussive maintenance...',
'Searching for the correct USB orientation...':
'Applying percussive maintenance...',
'Searching for the correct USB orientation...',
'Ensuring the magic smoke stays inside the wires...':
'Ensuring the magic smoke stays inside the wires...',
'Rewriting in Rust for no particular reason...':
'Rewriting in Rust for no particular reason...',
'Trying to exit Vim...': 'Trying to exit Vim...',
'Spinning up the hamster wheel...': 'Spinning up the hamster wheel...',
"That's not a bug, it's an undocumented feature...":
'Trying to exit Vim...',
'Spinning up the hamster wheel...',
"That's not a bug, it's an undocumented feature...",
'Engage.': 'Engage.',
"I'll be back... with an answer.": "I'll be back... with an answer.",
'My other process is a TARDIS...': 'My other process is a TARDIS...',
'Communing with the machine spirit...':
'Engage.',
"I'll be back... with an answer.",
'My other process is a TARDIS...',
'Communing with the machine spirit...',
'Letting the thoughts marinate...': 'Letting the thoughts marinate...',
'Just remembered where I put my keys...':
'Letting the thoughts marinate...',
'Just remembered where I put my keys...',
'Pondering the orb...': 'Pondering the orb...',
"I've seen things you people wouldn't believe... like a user who reads loading messages.":
'Pondering the orb...',
"I've seen things you people wouldn't believe... like a user who reads loading messages.",
'Initiating thoughtful gaze...': 'Initiating thoughtful gaze...',
"What's a computer's favorite snack? Microchips.":
'Initiating thoughtful gaze...',
"What's a computer's favorite snack? Microchips.",
"Why do Java developers wear glasses? Because they don't C#.":
"Why do Java developers wear glasses? Because they don't C#.",
'Charging the laser... pew pew!': 'Charging the laser... pew pew!',
'Dividing by zero... just kidding!': 'Dividing by zero... just kidding!',
'Looking for an adult superviso... I mean, processing.':
'Charging the laser... pew pew!',
'Dividing by zero... just kidding!',
'Looking for an adult superviso... I mean, processing.',
'Making it go beep boop.': 'Making it go beep boop.',
'Buffering... because even AIs need a moment.':
'Making it go beep boop.',
'Buffering... because even AIs need a moment.',
'Entangling quantum particles for a faster response...':
'Entangling quantum particles for a faster response...',
'Polishing the chrome... on the algorithms.':
'Polishing the chrome... on the algorithms.',
'Are you not entertained? (Working on it!)':
'Are you not entertained? (Working on it!)',
'Summoning the code gremlins... to help, of course.':
'Summoning the code gremlins... to help, of course.',
'Just waiting for the dial-up tone to finish...':
'Just waiting for the dial-up tone to finish...',
'Recalibrating the humor-o-meter.': 'Recalibrating the humor-o-meter.',
'My other loading screen is even funnier.':
'Recalibrating the humor-o-meter.',
'My other loading screen is even funnier.',
"Pretty sure there's a cat walking on the keyboard somewhere...":
"Pretty sure there's a cat walking on the keyboard somewhere...",
'Enhancing... Enhancing... Still loading.':
'Enhancing... Enhancing... Still loading.',
"It's not a bug, it's a feature... of this loading screen.":
"It's not a bug, it's a feature... of this loading screen.",
'Have you tried turning it off and on again? (The loading screen, not me.)':
'Have you tried turning it off and on again? (The loading screen, not me.)',
'Constructing additional pylons...': 'Constructing additional pylons...',
'Constructing additional pylons...',
],
};
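
A minimal consumer sketch (not part of this diff): with the phrases now stored as a plain per-locale array instead of per-phrase translation keys, drawing one at random no longer needs a key lookup. The import path and default-export shape are assumptions based on the hunk above.

import locale from './en.js'; // assumed filename; the compare view does not show it

export function randomLoadingPhrase(): string {
  const phrases = locale.WITTY_LOADING_PHRASES;
  return phrases[Math.floor(Math.random() * phrases.length)];
}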

View File

@@ -103,8 +103,8 @@ export default {
'Theme "{{themeName}}" not found.': 'Тема "{{themeName}}" не найдена.',
'Theme "{{themeName}}" not found in selected scope.':
'Тема "{{themeName}}" не найдена в выбранной области.',
'clear the screen and conversation history':
'Очистка экрана и истории диалога',
'Clear conversation history and free up context':
'Очистить историю диалога и освободить контекст',
'Compresses the context by replacing it with a summary.':
'Сжатие контекста заменой на краткую сводку',
'open full Qwen Code documentation in your browser':
@@ -260,7 +260,8 @@ export default {
', Tab to change focus': ', Tab для смены фокуса',
'To see changes, Qwen Code must be restarted. Press r to exit and apply changes now.':
'Для применения изменений необходимо перезапустить Qwen Code. Нажмите r для выхода и применения изменений.',
'The command "/{{command}}" is not supported in non-interactive mode.':
'Команда "/{{command}}" не поддерживается в неинтерактивном режиме.',
// ============================================================================
// Метки настроек
// ============================================================================
@@ -313,6 +314,7 @@ export default {
'Tool Output Truncation Lines': 'Лимит строк вывода инструментов',
'Folder Trust': 'Доверие к папке',
'Vision Model Preview': 'Визуальная модель (предпросмотр)',
'Tool Schema Compliance': 'Соответствие схеме инструмента',
// Варианты перечислений настроек
'Auto (detect from system)': 'Авто (определить из системы)',
Text: 'Текст',
@@ -341,8 +343,8 @@ export default {
'Установка предпочитаемого внешнего редактора',
'Manage extensions': 'Управление расширениями',
'List active extensions': 'Показать активные расширения',
'Update extensions. Usage: update |--all':
'Обновить расширения. Использование: update |--all',
'Update extensions. Usage: update <extension-names>|--all':
'Обновить расширения. Использование: update <extension-names>|--all',
'manage IDE integration': 'Управление интеграцией с IDE',
'check status of IDE integration': 'Проверить статус интеграции с IDE',
'install required IDE companion for {{ideName}}':
@@ -400,7 +402,8 @@ export default {
'Set LLM output language': 'Установка языка вывода LLM',
'Usage: /language ui [zh-CN|en-US]':
'Использование: /language ui [zh-CN|en-US|ru-RU]',
'Usage: /language output ': 'Использование: /language output ',
'Usage: /language output <language>':
'Использование: /language output <language>',
'Example: /language output 中文': 'Пример: /language output 中文',
'Example: /language output English': 'Пример: /language output English',
'Example: /language output 日本語': 'Пример: /language output 日本語',
@@ -417,9 +420,8 @@ export default {
'To request additional UI language packs, please open an issue on GitHub.':
'Для запроса дополнительных языковых пакетов интерфейса, пожалуйста, создайте обращение на GitHub.',
'Available options:': 'Доступные варианты:',
' - zh-CN: Simplified Chinese': ' - zh-CN: Упрощенный китайский',
' - en-US: English': ' - en-US: Английский',
' - ru-RU: Russian': ' - ru-RU: Русский',
' - zh-CN: Simplified Chinese': ' - zh-CN: Упрощенный китайский',
' - en-US: English': ' - en-US: Английский',
'Set UI language to Simplified Chinese (zh-CN)':
'Установить язык интерфейса на упрощенный китайский (zh-CN)',
'Set UI language to English (en-US)':
@@ -435,8 +437,8 @@ export default {
'Режим подтверждения изменен на: {{mode}}',
'Approval mode changed to: {{mode}} (saved to {{scope}} settings{{location}})':
'Режим подтверждения изменен на: {{mode}} (сохранено в настройках {{scope}}{{location}})',
'Usage: /approval-mode [--session|--user|--project]':
'Использование: /approval-mode [--session|--user|--project]',
'Usage: /approval-mode <mode> [--session|--user|--project]':
'Использование: /approval-mode <mode> [--session|--user|--project]',
'Scope subcommands do not accept additional arguments.':
'Подкоманды области не принимают дополнительных аргументов.',
'Plan mode - Analyze only, do not modify files or execute commands':
@@ -588,8 +590,8 @@ export default {
'Ошибка при экспорте диалога: {{error}}',
'Conversation shared to {{filePath}}': 'Диалог экспортирован в {{filePath}}',
'No conversation found to share.': 'Нет диалога для экспорта.',
'Share the current conversation to a markdown or json file. Usage: /chat share <путь-к-файлу>':
'Экспортировать текущий диалог в markdown или json файл. Использование: /chat share <путь-к-файлу>',
'Share the current conversation to a markdown or json file. Usage: /chat share <file>':
'Экспортировать текущий диалог в markdown или json файл. Использование: /chat share <файл>',
// ============================================================================
// Команды - Резюме
@@ -604,6 +606,12 @@ export default {
'Не найдено диалогов для создания сводки.',
'Failed to generate project context summary: {{error}}':
'Не удалось сгенерировать сводку контекста проекта: {{error}}',
'Saved project summary to {{filePathForDisplay}}.':
'Сводка проекта сохранена в {{filePathForDisplay}}',
'Saving project summary...': 'Сохранение сводки проекта...',
'Generating project summary...': 'Генерация сводки проекта...',
'Failed to generate summary - no text content received from LLM response':
'Не удалось сгенерировать сводку - не получен текстовый контент из ответа LLM',
// ============================================================================
// Команды - Модель
@@ -618,8 +626,9 @@ export default {
// ============================================================================
// Команды - Очистка
// ============================================================================
'Clearing terminal and resetting chat.': 'Очистка терминала и сброс чата.',
'Clearing terminal.': 'Очистка терминала.',
'Starting a new session, resetting chat, and clearing terminal.':
'Начало новой сессии, сброс чата и очистка терминала.',
'Starting a new session and clearing.': 'Начало новой сессии и очистка.',
// ============================================================================
// Команды - Сжатие
@@ -650,8 +659,8 @@ export default {
'Команда /directory add не поддерживается в ограничительных профилях песочницы. Пожалуйста, используйте --include-directories при запуске сессии.',
"Error adding '{{path}}': {{error}}":
"Ошибка при добавлении '{{path}}': {{error}}",
'Successfully added GEMINI.md files from the following directories if there are:\n- {{directories}}':
'Успешно добавлены файлы GEMINI.md из следующих директорий (если они есть):\n- {{directories}}',
'Successfully added QWEN.md files from the following directories if there are:\n- {{directories}}':
'Успешно добавлены файлы QWEN.md из следующих директорий (если они есть):\n- {{directories}}',
'Error refreshing memory: {{error}}':
'Ошибка при обновлении памяти: {{error}}',
'Successfully added directories:\n- {{directories}}':
@@ -884,6 +893,7 @@ export default {
// Экран выхода / Статистика
// ============================================================================
'Agent powering down. Goodbye!': 'Агент завершает работу. До свидания!',
'To continue this session, run': 'Для продолжения этой сессии, выполните',
'Interaction Summary': 'Сводка взаимодействия',
'Session ID:': 'ID сессии:',
'Tool Calls:': 'Вызовы инструментов:',
@@ -943,179 +953,140 @@ export default {
'Waiting for user confirmation...':
'Ожидание подтверждения от пользователя...',
'(esc to cancel, {{time}})': '(esc для отмены, {{time}})',
"I'm Feeling Lucky": 'Мне повезёт!',
'Shipping awesomeness... ': 'Доставляем крутизну... ',
'Painting the serifs back on...': 'Рисуем засечки на буквах...',
'Navigating the slime mold...': 'Пробираемся через слизевиков..',
'Consulting the digital spirits...': 'Советуемся с цифровыми духами...',
'Reticulating splines...': 'Сглаживание сплайнов...',
'Warming up the AI hamsters...': 'Разогреваем ИИ-хомячков...',
'Asking the magic conch shell...': 'Спрашиваем волшебную ракушку...',
'Generating witty retort...': 'Генерируем остроумный ответ...',
'Polishing the algorithms...': 'Полируем алгоритмы...',
"Don't rush perfection (or my code)...":
// ============================================================================
// ============================================================================
// Loading Phrases
// ============================================================================
WITTY_LOADING_PHRASES: [
'Мне повезёт!',
'Доставляем крутизну... ',
'Рисуем засечки на буквах...',
'Пробираемся через слизевиков..',
'Советуемся с цифровыми духами...',
'Сглаживание сплайнов...',
'Разогреваем ИИ-хомячков...',
'Спрашиваем волшебную ракушку...',
'Генерируем остроумный ответ...',
'Полируем алгоритмы...',
'Не торопите совершенство (или мой код)...',
'Brewing fresh bytes...': 'Завариваем свежие байты...',
'Counting electrons...': 'Пересчитываем электроны...',
'Engaging cognitive processors...': 'Задействуем когнитивные процессоры...',
'Checking for syntax errors in the universe...':
'Завариваем свежие байты...',
'Пересчитываем электроны...',
'Задействуем когнитивные процессоры...',
'Ищем синтаксические ошибки во вселенной...',
'One moment, optimizing humor...': 'Секундочку, оптимизируем юмор...',
'Shuffling punchlines...': 'Перетасовываем панчлайны...',
'Untangling neural nets...': 'Распутаваем нейросети...',
'Compiling brilliance...': 'Компилируем гениальность...',
'Loading wit.exe...': 'Загружаем yumor.exe...',
'Summoning the cloud of wisdom...': 'Призываем облако мудрости...',
'Preparing a witty response...': 'Готовим остроумный ответ...',
"Just a sec, I'm debugging reality...": 'Секунду, идёт отладка реальности...',
'Confuzzling the options...': 'Запутываем варианты...',
'Tuning the cosmic frequencies...': 'Настраиваем космические частоты...',
'Crafting a response worthy of your patience...':
'Секундочку, оптимизируем юмор...',
'Перетасовываем панчлайны...',
'Распутаваем нейросети...',
'Компилируем гениальность...',
'Загружаем yumor.exe...',
'Призываем облако мудрости...',
'Готовим остроумный ответ...',
'Секунду, идёт отладка реальности...',
'Запутываем варианты...',
'Настраиваем космические частоты...',
'Создаем ответ, достойный вашего терпения...',
'Compiling the 1s and 0s...': 'Компилируем единички и нолики...',
'Resolving dependencies... and existential crises...':
'Компилируем единички и нолики...',
'Разрешаем зависимости... и экзистенциальные кризисы...',
'Defragmenting memories... both RAM and personal...':
'Дефрагментация памяти... и оперативной, и личной...',
'Rebooting the humor module...': 'Перезагрузка модуля юмора...',
'Caching the essentials (mostly cat memes)...':
'Перезагрузка модуля юмора...',
'Кэшируем самое важное (в основном мемы с котиками)...',
'Optimizing for ludicrous speed': 'Оптимизация для безумной скорости',
"Swapping bits... don't tell the bytes...":
'Оптимизация для безумной скорости',
'Меняем биты... только байтам не говорите...',
'Garbage collecting... be right back...': 'Сборка мусора... скоро вернусь...',
'Assembling the interwebs...': 'Сборка интернетов...',
'Converting coffee into code...': 'Превращаем кофе в код...',
'Updating the syntax for reality...': 'Обновляем синтаксис реальности...',
'Rewiring the synapses...': 'Переподключаем синапсы...',
'Looking for a misplaced semicolon...': 'Ищем лишнюю точку с запятой...',
"Greasin' the cogs of the machine...": 'Смазываем шестерёнки машины...',
'Pre-heating the servers...': 'Разогреваем серверы...',
'Calibrating the flux capacitor...': 'Калибруем потоковый накопитель...',
'Engaging the improbability drive...': 'Включаем двигатель невероятности...',
'Channeling the Force...': 'Направляем Силу...',
'Aligning the stars for optimal response...':
'Сборка мусора... скоро вернусь...',
'Сборка интернетов...',
'Превращаем кофе в код...',
'Обновляем синтаксис реальности...',
'Переподключаем синапсы...',
'Ищем лишнюю точку с запятой...',
'Смазываем шестерёнки машины...',
'Разогреваем серверы...',
'Калибруем потоковый накопитель...',
'Включаем двигатель невероятности...',
'Направляем Силу...',
'Выравниваем звёзды для оптимального ответа...',
'So say we all...': 'Так скажем мы все...',
'Loading the next great idea...': 'Загрузка следующей великой идеи...',
"Just a moment, I'm in the zone...": 'Минутку, я в потоке...',
'Preparing to dazzle you with brilliance...':
'Так скажем мы все...',
'Загрузка следующей великой идеи...',
'Минутку, я в потоке...',
'Готовлюсь ослепить вас гениальностью...',
"Just a tick, I'm polishing my wit...": 'Секунду, полирую остроумие...',
"Hold tight, I'm crafting a masterpiece...": 'Держитесь, создаю шедевр...',
"Just a jiffy, I'm debugging the universe...":
'Секунду, полирую остроумие...',
'Держитесь, создаю шедевр...',
'Мигом, отлаживаю вселенную...',
"Just a moment, I'm aligning the pixels...": 'Момент, выравниваю пиксели...',
"Just a sec, I'm optimizing the humor...": 'Секунду, оптимизирую юмор...',
"Just a moment, I'm tuning the algorithms...":
'Момент, выравниваю пиксели...',
'Секунду, оптимизирую юмор...',
'Момент, настраиваю алгоритмы...',
'Warp speed engaged...': 'Варп-скорость включена...',
'Mining for more Dilithium crystals...': 'Добываем кристаллы дилития...',
"Don't panic...": 'Без паники...',
'Following the white rabbit...': 'Следуем за белым кроликом...',
'The truth is in here... somewhere...': 'Истина где-то здесь... внутри...',
'Blowing on the cartridge...': 'Продуваем картридж...',
'Loading... Do a barrel roll!': 'Загрузка... Сделай бочку!',
'Waiting for the respawn...': 'Ждем респауна...',
'Finishing the Kessel Run in less than 12 parsecs...':
'Варп-прыжок активирован...',
'Добываем кристаллы дилития...',
'Без паники...',
'Следуем за белым кроликом...',
'Истина где-то здесь... внутри...',
'Продуваем картридж...',
'Загрузка... Сделай бочку!',
'Ждем респауна...',
'Делаем Дугу Кесселя менее чем за 12 парсеков...',
"The cake is not a lie, it's just still loading...":
'Тортик — не ложь, он просто ещё грузится...',
'Fiddling with the character creation screen...':
'Возимся с экраном создания персонажа...',
"Just a moment, I'm finding the right meme...":
'Минутку, ищу подходящий мем...',
"Pressing 'A' to continue...": "Нажимаем 'A' для продолжения...",
'Herding digital cats...': 'Пасём цифровых котов...',
'Polishing the pixels...': 'Полируем пиксели...',
'Finding a suitable loading screen pun...':
"Нажимаем 'A' для продолжения...",
'Пасём цифровых котов...',
'Полируем пиксели...',
'Ищем подходящий каламбур для экрана загрузки...',
'Distracting you with this witty phrase...':
'Отвлекаем вас этой остроумной фразой...',
'Almost there... probably...': 'Почти готово... вроде...',
'Our hamsters are working as fast as they can...':
'Почти готово... вроде...',
'Наши хомячки работают изо всех сил...',
'Giving Cloudy a pat on the head...': 'Гладим Облачко по голове...',
'Petting the cat...': 'Гладим кота...',
'Rickrolling my boss...': 'Рикроллим начальника...',
'Never gonna give you up, never gonna let you down...':
'Гладим Облачко по голове...',
'Гладим кота...',
'Рикроллим начальника...',
'Never gonna give you up, never gonna let you down...',
'Slapping the bass...': 'Лабаем бас-гитару...',
'Tasting the snozberries...': 'Пробуем снузберри на вкус...',
"I'm going the distance, I'm going for speed...":
'Лабаем бас-гитару...',
'Пробуем снузберри на вкус...',
'Иду до конца, иду на скорость...',
'Is this the real life? Is this just fantasy?...':
'Is this the real life? Is this just fantasy?...',
"I've got a good feeling about this...": 'У меня хорошее предчувствие...',
'Poking the bear...': 'Дразним медведя... (Не лезь...)',
'Doing research on the latest memes...': 'Изучаем свежие мемы...',
'Figuring out how to make this more witty...':
'У меня хорошее предчувствие...',
'Дразним медведя... (Не лезь...)',
'Изучаем свежие мемы...',
'Думаем, как сделать это остроумнее...',
'Hmmm... let me think...': 'Хмм... дайте подумать...',
'What do you call a fish with no eyes? A fsh...':
'Хмм... дайте подумать...',
'Как называется бумеранг, который не возвращается? Палка...',
'Why did the computer go to therapy? It had too many bytes...':
'Почему компьютер простудился? Потому что оставил окна открытыми...',
"Why don't programmers like nature? It has too many bugs...":
'Почему программисты не любят гулять на улице? Там среда не настроена...',
'Why do programmers prefer dark mode? Because light attracts bugs...':
'Почему программисты предпочитают тёмную тему? Потому что в темноте не видно багов...',
'Why did the developer go broke? Because they used up all their cache...':
'Почему разработчик разорился? Потому что потратил весь свой кэш...',
"What can you do with a broken pencil? Nothing, it's pointless...":
'Что можно делать со сломанным карандашом? Ничего — он тупой...',
'Applying percussive maintenance...': 'Провожу настройку методом тыка...',
'Searching for the correct USB orientation...':
'Провожу настройку методом тыка...',
'Ищем, какой стороной вставлять флешку...',
'Ensuring the magic smoke stays inside the wires...':
'Следим, чтобы волшебный дым не вышел из проводов...',
'Rewriting in Rust for no particular reason...':
'Переписываем всё на Rust без особой причины...',
'Trying to exit Vim...': 'Пытаемся выйти из Vim...',
'Spinning up the hamster wheel...': 'Раскручиваем колесо для хомяка...',
"That's not a bug, it's an undocumented feature...": 'Это не баг, а фича...',
'Engage.': 'Поехали!',
"I'll be back... with an answer.": 'Я вернусь... с ответом.',
'My other process is a TARDIS...': 'Мой другой процесс — это ТАРДИС...',
'Communing with the machine spirit...': 'Общаемся с духом машины...',
'Letting the thoughts marinate...': 'Даем мыслям замариноваться...',
'Just remembered where I put my keys...':
'Пытаемся выйти из Vim...',
'Раскручиваем колесо для хомяка...',
'Это не баг, а фича...',
'Поехали!',
'Я вернусь... с ответом.',
'Мой другой процесс — это ТАРДИС...',
'Общаемся с духом машины...',
'Даем мыслям замариноваться...',
'Только что вспомнил, куда положил ключи...',
'Pondering the orb...': 'Размышляю над сферой...',
"I've seen things you people wouldn't believe... like a user who reads loading messages.":
'Я видел такое, во что вы, люди, просто не поверите... например, пользователя, читающего сообщения загрузки.',
'Initiating thoughtful gaze...': 'Инициируем задумчивый взгляд...',
"What's a computer's favorite snack? Microchips.":
'Размышляю над сферой...',
'Я видел такое, что вам, людям, и не снилось... пользователя, читающего эти сообщения.',
'Инициируем задумчивый взгляд...',
'Что сервер заказывает в баре? Пинг-коладу.',
"Why do Java developers wear glasses? Because they don't C#.":
'Почему Java-разработчики не убираются дома? Они ждут сборщик мусора...',
'Charging the laser... pew pew!': 'Заряжаем лазер... пиу-пиу!',
'Dividing by zero... just kidding!': 'Делим на ноль... шучу!',
'Looking for an adult superviso... I mean, processing.':
'Заряжаем лазер... пиу-пиу!',
'Делим на ноль... шучу!',
'Ищу взрослых для присмот... в смысле, обрабатываю.',
'Making it go beep boop.': 'Делаем бип-буп.',
'Buffering... because even AIs need a moment.':
'Буферизация... даже ИИ нужно мгновение.',
'Entangling quantum particles for a faster response...':
'Делаем бип-буп.',
'Буферизация... даже ИИ нужно время подумать.',
'Запутываем квантовые частицы для быстрого ответа...',
'Polishing the chrome... on the algorithms.':
'Полируем хром... на алгоритмах.',
'Are you not entertained? (Working on it!)':
'Вы ещё не развлеклись?! Разве вы не за этим сюда пришли?!',
'Summoning the code gremlins... to help, of course.':
'Призываем гремлинов кода... для помощи, конечно же.',
'Just waiting for the dial-up tone to finish...':
'Ждем, пока закончится звук dial-up модема...',
'Recalibrating the humor-o-meter.': 'Перекалибровка юморометра.',
'My other loading screen is even funnier.':
'Перекалибровка юморометра.',
'Мой другой экран загрузки ещё смешнее.',
"Pretty sure there's a cat walking on the keyboard somewhere...":
'Кажется, где-то по клавиатуре гуляет кот...',
'Enhancing... Enhancing... Still loading.':
'Улучшаем... Ещё улучшаем... Всё ещё грузится.',
"It's not a bug, it's a feature... of this loading screen.":
'Это не баг, это фича... экрана загрузки.',
'Have you tried turning it off and on again? (The loading screen, not me.)':
'Пробовали выключить и включить снова? (Экран загрузки, не меня!)',
'Constructing additional pylons...': 'Нужно построить больше пилонов...',
'Нужно построить больше пилонов...',
],
};

View File

@@ -101,7 +101,7 @@ export default {
'Theme "{{themeName}}" not found.': '未找到主题 "{{themeName}}"。',
'Theme "{{themeName}}" not found in selected scope.':
'在所选作用域中未找到主题 "{{themeName}}"。',
'clear the screen and conversation history': '清屏并清除对话历史',
'Clear conversation history and free up context': '清除对话历史并释放上下文',
'Compresses the context by replacing it with a summary.':
'通过用摘要替换来压缩上下文',
'open full Qwen Code documentation in your browser':
@@ -249,6 +249,8 @@ export default {
', Tab to change focus': 'Tab 切换焦点',
'To see changes, Qwen Code must be restarted. Press r to exit and apply changes now.':
'要查看更改,必须重启 Qwen Code。按 r 退出并立即应用更改。',
'The command "/{{command}}" is not supported in non-interactive mode.':
'不支持在非交互模式下使用命令 "/{{command}}"。',
// ============================================================================
// Settings Labels
// ============================================================================
@@ -560,6 +562,12 @@ export default {
'No conversation found to summarize.': '未找到要总结的对话',
'Failed to generate project context summary: {{error}}':
'生成项目上下文摘要失败:{{error}}',
'Saved project summary to {{filePathForDisplay}}.':
'项目摘要已保存到 {{filePathForDisplay}}',
'Saving project summary...': '正在保存项目摘要...',
'Generating project summary...': '正在生成项目摘要...',
'Failed to generate summary - no text content received from LLM response':
'生成摘要失败 - 未从 LLM 响应中接收到文本内容',
// ============================================================================
// Commands - Model
@@ -573,8 +581,9 @@ export default {
// ============================================================================
// Commands - Clear
// ============================================================================
'Clearing terminal and resetting chat.': '正在清屏并重置聊天',
'Clearing terminal.': '正在清屏',
'Starting a new session, resetting chat, and clearing terminal.':
'正在开始新会话,重置聊天并清屏',
'Starting a new session and clearing.': '正在开始新会话并清屏。',
// ============================================================================
// Commands - Compress
@@ -880,165 +889,39 @@ export default {
// ============================================================================
'Waiting for user confirmation...': '等待用户确认...',
'(esc to cancel, {{time}})': '(按 esc 取消,{{time}})',
"I'm Feeling Lucky": '我感觉很幸运',
'Shipping awesomeness... ': '正在运送精彩内容... ',
'Painting the serifs back on...': '正在重新绘制衬线...',
'Navigating the slime mold...': '正在导航粘液霉菌...',
'Consulting the digital spirits...': '正在咨询数字精灵...',
'Reticulating splines...': '正在网格化样条曲线...',
'Warming up the AI hamsters...': '正在预热 AI 仓鼠...',
'Asking the magic conch shell...': '正在询问魔法海螺壳...',
'Generating witty retort...': '正在生成机智的反驳...',
'Polishing the algorithms...': '正在打磨算法...',
"Don't rush perfection (or my code)...": '不要急于追求完美(或我的代码)...',
'Brewing fresh bytes...': '正在酿造新鲜字节...',
'Counting electrons...': '正在计算电子...',
'Engaging cognitive processors...': '正在启动认知处理器...',
'Checking for syntax errors in the universe...':
'正在检查宇宙中的语法错误...',
'One moment, optimizing humor...': '稍等片刻,正在优化幽默感...',
'Shuffling punchlines...': '正在洗牌笑点...',
'Untangling neural nets...': '正在解开神经网络...',
'Compiling brilliance...': '正在编译智慧...',
'Loading wit.exe...': '正在加载 wit.exe...',
'Summoning the cloud of wisdom...': '正在召唤智慧云...',
'Preparing a witty response...': '正在准备机智的回复...',
"Just a sec, I'm debugging reality...": '稍等片刻,我正在调试现实...',
'Confuzzling the options...': '正在混淆选项...',
'Tuning the cosmic frequencies...': '正在调谐宇宙频率...',
'Crafting a response worthy of your patience...':
'正在制作值得您耐心等待的回复...',
'Compiling the 1s and 0s...': '正在编译 1 和 0...',
'Resolving dependencies... and existential crises...':
'正在解决依赖关系...和存在主义危机...',
'Defragmenting memories... both RAM and personal...':
'正在整理记忆碎片...包括 RAM 和个人记忆...',
'Rebooting the humor module...': '正在重启幽默模块...',
'Caching the essentials (mostly cat memes)...':
'正在缓存必需品(主要是猫咪表情包)...',
'Optimizing for ludicrous speed': '正在优化到荒谬的速度',
"Swapping bits... don't tell the bytes...": '正在交换位...不要告诉字节...',
'Garbage collecting... be right back...': '正在垃圾回收...马上回来...',
'Assembling the interwebs...': '正在组装互联网...',
'Converting coffee into code...': '正在将咖啡转换为代码...',
'Updating the syntax for reality...': '正在更新现实的语法...',
'Rewiring the synapses...': '正在重新连接突触...',
'Looking for a misplaced semicolon...': '正在寻找放错位置的分号...',
"Greasin' the cogs of the machine...": '正在给机器的齿轮上油...',
'Pre-heating the servers...': '正在预热服务器...',
'Calibrating the flux capacitor...': '正在校准通量电容器...',
'Engaging the improbability drive...': '正在启动不可能性驱动器...',
'Channeling the Force...': '正在引导原力...',
'Aligning the stars for optimal response...': '正在对齐星星以获得最佳回复...',
'So say we all...': '我们都说...',
'Loading the next great idea...': '正在加载下一个伟大的想法...',
"Just a moment, I'm in the zone...": '稍等片刻,我正进入状态...',
'Preparing to dazzle you with brilliance...': '正在准备用智慧让您眼花缭乱...',
"Just a tick, I'm polishing my wit...": '稍等片刻,我正在打磨我的智慧...',
"Hold tight, I'm crafting a masterpiece...": '请稍等,我正在制作杰作...',
"Just a jiffy, I'm debugging the universe...": '稍等片刻,我正在调试宇宙...',
"Just a moment, I'm aligning the pixels...": '稍等片刻,我正在对齐像素...',
"Just a sec, I'm optimizing the humor...": '稍等片刻,我正在优化幽默感...',
"Just a moment, I'm tuning the algorithms...": '稍等片刻,我正在调整算法...',
'Warp speed engaged...': '曲速已启动...',
'Mining for more Dilithium crystals...': '正在挖掘更多二锂晶体...',
"Don't panic...": '不要惊慌...',
'Following the white rabbit...': '正在跟随白兔...',
'The truth is in here... somewhere...': '真相在这里...某个地方...',
'Blowing on the cartridge...': '正在吹卡带...',
'Loading... Do a barrel roll!': '正在加载...做个桶滚!',
'Waiting for the respawn...': '等待重生...',
'Finishing the Kessel Run in less than 12 parsecs...':
'正在以不到 12 秒差距完成凯塞尔航线...',
"The cake is not a lie, it's just still loading...":
'蛋糕不是谎言,只是还在加载...',
'Fiddling with the character creation screen...': '正在摆弄角色创建界面...',
"Just a moment, I'm finding the right meme...":
'稍等片刻,我正在寻找合适的表情包...',
"Pressing 'A' to continue...": "按 'A' 继续...",
'Herding digital cats...': '正在放牧数字猫...',
'Polishing the pixels...': '正在打磨像素...',
'Finding a suitable loading screen pun...': '正在寻找合适的加载屏幕双关语...',
'Distracting you with this witty phrase...':
'正在用这个机智的短语分散您的注意力...',
'Almost there... probably...': '快到了...可能...',
'Our hamsters are working as fast as they can...':
'我们的仓鼠正在尽可能快地工作...',
'Giving Cloudy a pat on the head...': '正在拍拍 Cloudy 的头...',
'Petting the cat...': '正在抚摸猫咪...',
'Rickrolling my boss...': '正在 Rickroll 我的老板...',
'Never gonna give you up, never gonna let you down...':
'永远不会放弃你,永远不会让你失望...',
'Slapping the bass...': '正在拍打低音...',
'Tasting the snozberries...': '正在品尝 snozberries...',
"I'm going the distance, I'm going for speed...":
'我要走得更远,我要追求速度...',
'Is this the real life? Is this just fantasy?...':
'这是真实的生活吗?还是只是幻想?...',
"I've got a good feeling about this...": '我对这个感觉很好...',
'Poking the bear...': '正在戳熊...',
'Doing research on the latest memes...': '正在研究最新的表情包...',
'Figuring out how to make this more witty...': '正在想办法让这更有趣...',
'Hmmm... let me think...': '嗯...让我想想...',
'What do you call a fish with no eyes? A fsh...':
'没有眼睛的鱼叫什么?一条鱼...',
'Why did the computer go to therapy? It had too many bytes...':
'为什么电脑去看心理医生?因为它有太多字节...',
"Why don't programmers like nature? It has too many bugs...":
'为什么程序员不喜欢大自然?因为虫子太多了...',
'Why do programmers prefer dark mode? Because light attracts bugs...':
'为什么程序员喜欢暗色模式?因为光会吸引虫子...',
'Why did the developer go broke? Because they used up all their cache...':
'为什么开发者破产了?因为他们用完了所有缓存...',
"What can you do with a broken pencil? Nothing, it's pointless...":
'你能用断了的铅笔做什么?什么都不能,因为它没有笔尖...',
'Applying percussive maintenance...': '正在应用敲击维护...',
'Searching for the correct USB orientation...': '正在寻找正确的 USB 方向...',
'Ensuring the magic smoke stays inside the wires...':
'确保魔法烟雾留在电线内...',
'Rewriting in Rust for no particular reason...':
'正在用 Rust 重写,没有特别的原因...',
'Trying to exit Vim...': '正在尝试退出 Vim...',
'Spinning up the hamster wheel...': '正在启动仓鼠轮...',
"That's not a bug, it's an undocumented feature...":
'这不是一个错误,这是一个未记录的功能...',
'Engage.': '启动。',
"I'll be back... with an answer.": '我会回来的...带着答案。',
'My other process is a TARDIS...': '我的另一个进程是 TARDIS...',
'Communing with the machine spirit...': '正在与机器精神交流...',
'Letting the thoughts marinate...': '让想法慢慢酝酿...',
'Just remembered where I put my keys...': '刚刚想起我把钥匙放在哪里了...',
'Pondering the orb...': '正在思考球体...',
"I've seen things you people wouldn't believe... like a user who reads loading messages.":
'我见过你们不会相信的事情...比如一个阅读加载消息的用户。',
'Initiating thoughtful gaze...': '正在启动深思凝视...',
"What's a computer's favorite snack? Microchips.":
'电脑最喜欢的零食是什么?微芯片。',
"Why do Java developers wear glasses? Because they don't C#.":
'为什么 Java 开发者戴眼镜?因为他们不会 C#。',
'Charging the laser... pew pew!': '正在给激光充电...砰砰!',
'Dividing by zero... just kidding!': '除以零...只是开玩笑!',
'Looking for an adult superviso... I mean, processing.':
'正在寻找成人监督...我是说,处理中。',
'Making it go beep boop.': '让它发出哔哔声。',
'Buffering... because even AIs need a moment.':
'正在缓冲...因为即使是 AI 也需要片刻。',
'Entangling quantum particles for a faster response...':
'正在纠缠量子粒子以获得更快的回复...',
'Polishing the chrome... on the algorithms.': '正在打磨铬...在算法上。',
'Are you not entertained? (Working on it!)': '你不觉得有趣吗?(正在努力!)',
'Summoning the code gremlins... to help, of course.':
'正在召唤代码小精灵...当然是来帮忙的。',
'Just waiting for the dial-up tone to finish...': '只是等待拨号音结束...',
'Recalibrating the humor-o-meter.': '正在重新校准幽默计。',
'My other loading screen is even funnier.': '我的另一个加载屏幕更有趣。',
"Pretty sure there's a cat walking on the keyboard somewhere...":
'很确定有只猫在某个地方键盘上走...',
'Enhancing... Enhancing... Still loading.':
'正在增强...正在增强...仍在加载。',
"It's not a bug, it's a feature... of this loading screen.":
'这不是一个错误,这是一个功能...这个加载屏幕的功能。',
'Have you tried turning it off and on again? (The loading screen, not me.)':
'你试过把它关掉再打开吗?(加载屏幕,不是我。)',
'Constructing additional pylons...': '正在建造额外的能量塔...',
WITTY_LOADING_PHRASES: [
// --- 职场搬砖系列 ---
'正在努力搬砖,请稍候...',
'老板在身后,快加载啊!',
'头发掉光前,一定能加载完...',
'服务器正在深呼吸,准备放大招...',
'正在向服务器投喂咖啡...',
// --- 大厂黑话系列 ---
'正在赋能全链路,寻找关键抓手...',
'正在降本增效,优化加载路径...',
'正在打破部门壁垒,沉淀方法论...',
'正在拥抱变化,迭代核心价值...',
'正在对齐颗粒度,打磨底层逻辑...',
'大力出奇迹,正在强行加载...',
// --- 程序员自嘲系列 ---
'只要我不写代码,代码就没有 Bug...',
'正在把 Bug 转化为 Feature...',
'只要我不尴尬,Bug 就追不上我...',
'正在试图理解去年的自己写了什么...',
'正在猿力觉醒中,请耐心等待...',
// --- 合作愉快系列 ---
'正在询问产品经理:这需求是真的吗?',
'正在给产品经理画饼,请稍等...',
// --- 温暖治愈系列 ---
'每一行代码,都在努力让世界变得更好一点点...',
'每一个伟大的想法,都值得这份耐心的等待...',
'别急,美好的事物总是需要一点时间去酝酿...',
'愿你的代码永无 Bug,愿你的梦想终将成真...',
'哪怕只有 0.1% 的进度,也是在向目标靠近...',
'加载的是字节,承载的是对技术的热爱...',
],
};
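
Since each locale now owns its WITTY_LOADING_PHRASES array outright (zh-CN even replaces the English jokes with original phrases rather than translating them), a shape check is one way to keep the three files consistent. A hedged vitest sketch; the locale import paths are assumptions, not paths shown in this diff.

import { describe, it, expect } from 'vitest';
// Assumed locale module paths:
import en from './en.js';
import ru from './ru-RU.js';
import zh from './zh-CN.js';

describe('locale loading phrases', () => {
  it('every locale exports a non-empty array of strings', () => {
    for (const locale of [en, ru, zh]) {
      expect(Array.isArray(locale.WITTY_LOADING_PHRASES)).toBe(true);
      expect(locale.WITTY_LOADING_PHRASES.length).toBeGreaterThan(0);
      for (const phrase of locale.WITTY_LOADING_PHRASES) {
        expect(typeof phrase).toBe('string');
      }
    }
  });
});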

View File

@@ -20,8 +20,7 @@ import type {
CLIControlSetModelRequest,
CLIMcpServerConfig,
} from '../../types.js';
import { CommandService } from '../../../services/CommandService.js';
import { BuiltinCommandLoader } from '../../../services/BuiltinCommandLoader.js';
import { getAvailableCommands } from '../../../nonInteractiveCliCommands.js';
import {
MCPServerConfig,
AuthProviderType,
@@ -407,7 +406,7 @@ export class SystemController extends BaseController {
}
/**
* Load slash command names using CommandService
* Load slash command names using getAvailableCommands
*
* @param signal - AbortSignal to respect for cancellation
* @returns Promise resolving to array of slash command names
@@ -418,21 +417,14 @@ export class SystemController extends BaseController {
}
try {
const service = await CommandService.create(
[new BuiltinCommandLoader(this.context.config)],
signal,
);
const commands = await getAvailableCommands(this.context.config, signal);
if (signal.aborted) {
return [];
}
const names = new Set<string>();
const commands = service.getCommands();
for (const command of commands) {
names.add(command.name);
}
return Array.from(names).sort();
// Extract command names and sort
return commands.map((cmd) => cmd.name).sort();
} catch (error) {
// Check if the error is due to abort
if (signal.aborted) {

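The hunk above replaces the hand-rolled CommandService/BuiltinCommandLoader setup with the shared getAvailableCommands helper, so the controller no longer constructs loaders itself. Consolidated, the new code path is roughly the following (a sketch assembled from the fragments above, not a verbatim excerpt):

import type { Config } from '@qwen-code/qwen-code-core';
import { getAvailableCommands } from '../../../nonInteractiveCliCommands.js';

async function loadSlashCommandNames(
  config: Config,
  signal: AbortSignal,
): Promise<string[]> {
  const commands = await getAvailableCommands(config, signal);
  if (signal.aborted) {
    return []; // aborted loads resolve to an empty list instead of rejecting
  }
  return commands.map((cmd) => cmd.name).sort();
}
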
View File

@@ -68,6 +68,7 @@ describe('runNonInteractive', () => {
let mockShutdownTelemetry: Mock;
let consoleErrorSpy: MockInstance;
let processStdoutSpy: MockInstance;
let processStderrSpy: MockInstance;
let mockGeminiClient: {
sendMessageStream: Mock;
getChatRecordingService: Mock;
@@ -86,6 +87,9 @@ describe('runNonInteractive', () => {
processStdoutSpy = vi
.spyOn(process.stdout, 'write')
.mockImplementation(() => true);
processStderrSpy = vi
.spyOn(process.stderr, 'write')
.mockImplementation(() => true);
vi.spyOn(process, 'exit').mockImplementation((code) => {
throw new Error(`process.exit(${code}) called`);
});
@@ -139,6 +143,8 @@ describe('runNonInteractive', () => {
setModel: vi.fn(async (model: string) => {
currentModel = model;
}),
getExperimentalZedIntegration: vi.fn().mockReturnValue(false),
isInteractive: vi.fn().mockReturnValue(false),
} as unknown as Config;
mockSettings = {
@@ -852,7 +858,7 @@ describe('runNonInteractive', () => {
expect(processStdoutSpy).toHaveBeenCalledWith('Response from command');
});
it('should throw FatalInputError if a command requires confirmation', async () => {
it('should handle command that requires confirmation by returning early', async () => {
const mockCommand = {
name: 'confirm',
description: 'a command that needs confirmation',
@@ -864,15 +870,16 @@ describe('runNonInteractive', () => {
};
mockGetCommands.mockReturnValue([mockCommand]);
await expect(
runNonInteractive(
mockConfig,
mockSettings,
'/confirm',
'prompt-id-confirm',
),
).rejects.toThrow(
'Exiting due to a confirmation prompt requested by the command.',
await runNonInteractive(
mockConfig,
mockSettings,
'/confirm',
'prompt-id-confirm',
);
// Should write error message to stderr
expect(processStderrSpy).toHaveBeenCalledWith(
'Shell command confirmation is not supported in non-interactive mode. Use YOLO mode or pre-approve commands.\n',
);
});
@@ -909,7 +916,30 @@ describe('runNonInteractive', () => {
expect(processStdoutSpy).toHaveBeenCalledWith('Response to unknown');
});
it('should throw for unhandled command result types', async () => {
it('should handle known but unsupported slash commands like /help by returning early', async () => {
// Mock a built-in command that exists but is not in the allowed list
const mockHelpCommand = {
name: 'help',
description: 'Show help',
kind: CommandKind.BUILT_IN,
action: vi.fn(),
};
mockGetCommands.mockReturnValue([mockHelpCommand]);
await runNonInteractive(
mockConfig,
mockSettings,
'/help',
'prompt-id-help',
);
// Should write error message to stderr
expect(processStderrSpy).toHaveBeenCalledWith(
'The command "/help" is not supported in non-interactive mode.\n',
);
});
it('should handle unhandled command result types by returning early with error', async () => {
const mockCommand = {
name: 'noaction',
description: 'unhandled type',
@@ -920,15 +950,16 @@ describe('runNonInteractive', () => {
};
mockGetCommands.mockReturnValue([mockCommand]);
await expect(
runNonInteractive(
mockConfig,
mockSettings,
'/noaction',
'prompt-id-unhandled',
),
).rejects.toThrow(
'Exiting due to command result that is not supported in non-interactive mode.',
await runNonInteractive(
mockConfig,
mockSettings,
'/noaction',
'prompt-id-unhandled',
);
// Should write error message to stderr
expect(processStderrSpy).toHaveBeenCalledWith(
'Unknown command result type: unhandled\n',
);
});
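
The rewritten tests above assert on stderr output instead of expecting a throw. Reduced to a standalone vitest sketch (example message only, not repo code), the spy setup they rely on looks like this:

import { describe, it, expect, vi, afterEach } from 'vitest';

describe('stderr reporting', () => {
  afterEach(() => {
    vi.restoreAllMocks();
  });

  it('captures writes without printing them during the test run', () => {
    const stderrSpy = vi
      .spyOn(process.stderr, 'write')
      .mockImplementation(() => true); // swallow output, report success
    process.stderr.write(
      'The command "/help" is not supported in non-interactive mode.\n',
    );
    expect(stderrSpy).toHaveBeenCalledWith(
      'The command "/help" is not supported in non-interactive mode.\n',
    );
  });
});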

View File

@@ -42,6 +42,55 @@ import {
computeUsageFromMetrics,
} from './utils/nonInteractiveHelpers.js';
/**
* Emits a final message for slash command results.
* Note: systemMessage should already be emitted before calling this function.
*/
async function emitNonInteractiveFinalMessage(params: {
message: string;
isError: boolean;
adapter?: JsonOutputAdapterInterface;
config: Config;
startTimeMs: number;
}): Promise<void> {
const { message, isError, adapter, config } = params;
if (!adapter) {
// Text output mode: write directly to stdout/stderr
const target = isError ? process.stderr : process.stdout;
target.write(`${message}\n`);
return;
}
// JSON output mode: emit assistant message and result
// (systemMessage should already be emitted by caller)
adapter.startAssistantMessage();
adapter.processEvent({
type: GeminiEventType.Content,
value: message,
} as unknown as Parameters<JsonOutputAdapterInterface['processEvent']>[0]);
adapter.finalizeAssistantMessage();
const metrics = uiTelemetryService.getMetrics();
const usage = computeUsageFromMetrics(metrics);
const outputFormat = config.getOutputFormat();
const stats =
outputFormat === OutputFormat.JSON
? uiTelemetryService.getMetrics()
: undefined;
adapter.emitResult({
isError,
durationMs: Date.now() - params.startTimeMs,
apiDurationMs: 0,
numTurns: 0,
errorMessage: isError ? message : undefined,
usage,
stats,
summary: message,
});
}
/**
* Provides optional overrides for `runNonInteractive` execution.
*
@@ -115,6 +164,16 @@ export async function runNonInteractive(
process.on('SIGINT', shutdownHandler);
process.on('SIGTERM', shutdownHandler);
// Emit systemMessage first (always the first message in JSON mode)
if (adapter) {
const systemMessage = await buildSystemMessage(
config,
sessionId,
permissionMode,
);
adapter.emitMessage(systemMessage);
}
let initialPartList: PartListUnion | null = extractPartsFromUserMessage(
options.userMessage,
);
@@ -128,10 +187,45 @@ export async function runNonInteractive(
config,
settings,
);
if (slashCommandResult) {
// A slash command can replace the prompt entirely; fall back to @-command processing otherwise.
initialPartList = slashCommandResult as PartListUnion;
slashHandled = true;
switch (slashCommandResult.type) {
case 'submit_prompt':
// A slash command can replace the prompt entirely; fall back to @-command processing otherwise.
initialPartList = slashCommandResult.content;
slashHandled = true;
break;
case 'message': {
// systemMessage already emitted above
await emitNonInteractiveFinalMessage({
message: slashCommandResult.content,
isError: slashCommandResult.messageType === 'error',
adapter,
config,
startTimeMs: startTime,
});
return;
}
case 'stream_messages':
throw new FatalInputError(
'Stream messages mode is not supported in non-interactive CLI',
);
case 'unsupported': {
await emitNonInteractiveFinalMessage({
message: slashCommandResult.reason,
isError: true,
adapter,
config,
startTimeMs: startTime,
});
return;
}
case 'no_command':
break;
default: {
const _exhaustive: never = slashCommandResult;
throw new FatalInputError(
`Unhandled slash command result type: ${(_exhaustive as { type: string }).type}`,
);
}
}
}
@@ -163,15 +257,6 @@ export async function runNonInteractive(
const initialParts = normalizePartList(initialPartList);
let currentMessages: Content[] = [{ role: 'user', parts: initialParts }];
if (adapter) {
const systemMessage = await buildSystemMessage(
config,
sessionId,
permissionMode,
);
adapter.emitMessage(systemMessage);
}
let isFirstTurn = true;
while (true) {
turnCount++;

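Both the switch above and handleCommandResult further down use the same never-based exhaustiveness check. A standalone illustration of the pattern with example types (not repo code):

type CommandResult =
  | { type: 'ok'; value: string }
  | { type: 'err'; reason: string };

function render(result: CommandResult): string {
  switch (result.type) {
    case 'ok':
      return result.value;
    case 'err':
      return `error: ${result.reason}`;
    default: {
      // If a new variant is added to CommandResult without a matching
      // case, this assignment stops compiling, surfacing the omission
      // at build time rather than at runtime.
      const _exhaustive: never = result;
      throw new Error(
        `Unhandled result type: ${(_exhaustive as CommandResult).type}`,
      );
    }
  }
}
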
View File

@@ -0,0 +1,242 @@
/**
* @license
* Copyright 2025 Qwen Team
* SPDX-License-Identifier: Apache-2.0
*/
import { describe, it, expect, vi, beforeEach } from 'vitest';
import { handleSlashCommand } from './nonInteractiveCliCommands.js';
import type { Config } from '@qwen-code/qwen-code-core';
import type { LoadedSettings } from './config/settings.js';
import { CommandKind } from './ui/commands/types.js';
// Mock the CommandService
const mockGetCommands = vi.hoisted(() => vi.fn());
const mockCommandServiceCreate = vi.hoisted(() => vi.fn());
vi.mock('./services/CommandService.js', () => ({
CommandService: {
create: mockCommandServiceCreate,
},
}));
describe('handleSlashCommand', () => {
let mockConfig: Config;
let mockSettings: LoadedSettings;
let abortController: AbortController;
beforeEach(() => {
mockCommandServiceCreate.mockResolvedValue({
getCommands: mockGetCommands,
});
mockConfig = {
getExperimentalZedIntegration: vi.fn().mockReturnValue(false),
isInteractive: vi.fn().mockReturnValue(false),
getSessionId: vi.fn().mockReturnValue('test-session'),
getFolderTrustFeature: vi.fn().mockReturnValue(false),
getFolderTrust: vi.fn().mockReturnValue(false),
getProjectRoot: vi.fn().mockReturnValue('/test/project'),
storage: {},
} as unknown as Config;
mockSettings = {
system: { path: '', settings: {} },
systemDefaults: { path: '', settings: {} },
user: { path: '', settings: {} },
workspace: { path: '', settings: {} },
} as LoadedSettings;
abortController = new AbortController();
});
it('should return no_command for non-slash input', async () => {
const result = await handleSlashCommand(
'regular text',
abortController,
mockConfig,
mockSettings,
);
expect(result.type).toBe('no_command');
});
it('should return no_command for unknown slash commands', async () => {
mockGetCommands.mockReturnValue([]);
const result = await handleSlashCommand(
'/unknowncommand',
abortController,
mockConfig,
mockSettings,
);
expect(result.type).toBe('no_command');
});
it('should return unsupported for known built-in commands not in allowed list', async () => {
const mockHelpCommand = {
name: 'help',
description: 'Show help',
kind: CommandKind.BUILT_IN,
action: vi.fn(),
};
mockGetCommands.mockReturnValue([mockHelpCommand]);
const result = await handleSlashCommand(
'/help',
abortController,
mockConfig,
mockSettings,
[], // Empty allowed list
);
expect(result.type).toBe('unsupported');
if (result.type === 'unsupported') {
expect(result.reason).toContain('/help');
expect(result.reason).toContain('not supported');
}
});
it('should return unsupported for /help when using default allowed list', async () => {
const mockHelpCommand = {
name: 'help',
description: 'Show help',
kind: CommandKind.BUILT_IN,
action: vi.fn(),
};
mockGetCommands.mockReturnValue([mockHelpCommand]);
const result = await handleSlashCommand(
'/help',
abortController,
mockConfig,
mockSettings,
// Default allowed list: ['init', 'summary', 'compress']
);
expect(result.type).toBe('unsupported');
if (result.type === 'unsupported') {
expect(result.reason).toBe(
'The command "/help" is not supported in non-interactive mode.',
);
}
});
it('should execute allowed built-in commands', async () => {
const mockInitCommand = {
name: 'init',
description: 'Initialize project',
kind: CommandKind.BUILT_IN,
action: vi.fn().mockResolvedValue({
type: 'message',
messageType: 'info',
content: 'Project initialized',
}),
};
mockGetCommands.mockReturnValue([mockInitCommand]);
const result = await handleSlashCommand(
'/init',
abortController,
mockConfig,
mockSettings,
['init'], // init is in the allowed list
);
expect(result.type).toBe('message');
if (result.type === 'message') {
expect(result.content).toBe('Project initialized');
}
});
it('should execute file commands regardless of allowed list', async () => {
const mockFileCommand = {
name: 'custom',
description: 'Custom file command',
kind: CommandKind.FILE,
action: vi.fn().mockResolvedValue({
type: 'submit_prompt',
content: [{ text: 'Custom prompt' }],
}),
};
mockGetCommands.mockReturnValue([mockFileCommand]);
const result = await handleSlashCommand(
'/custom',
abortController,
mockConfig,
mockSettings,
[], // Empty allowed list, but FILE commands should still work
);
expect(result.type).toBe('submit_prompt');
if (result.type === 'submit_prompt') {
expect(result.content).toEqual([{ text: 'Custom prompt' }]);
}
});
it('should return unsupported for other built-in commands like /quit', async () => {
const mockQuitCommand = {
name: 'quit',
description: 'Quit application',
kind: CommandKind.BUILT_IN,
action: vi.fn(),
};
mockGetCommands.mockReturnValue([mockQuitCommand]);
const result = await handleSlashCommand(
'/quit',
abortController,
mockConfig,
mockSettings,
);
expect(result.type).toBe('unsupported');
if (result.type === 'unsupported') {
expect(result.reason).toContain('/quit');
expect(result.reason).toContain('not supported');
}
});
it('should handle command with no action', async () => {
const mockCommand = {
name: 'noaction',
description: 'Command without action',
kind: CommandKind.FILE,
// No action property
};
mockGetCommands.mockReturnValue([mockCommand]);
const result = await handleSlashCommand(
'/noaction',
abortController,
mockConfig,
mockSettings,
);
expect(result.type).toBe('no_command');
});
it('should return message when command returns void', async () => {
const mockCommand = {
name: 'voidcmd',
description: 'Command that returns void',
kind: CommandKind.FILE,
action: vi.fn().mockResolvedValue(undefined),
};
mockGetCommands.mockReturnValue([mockCommand]);
const result = await handleSlashCommand(
'/voidcmd',
abortController,
mockConfig,
mockSettings,
);
expect(result.type).toBe('message');
if (result.type === 'message') {
expect(result.content).toBe('Command executed successfully.');
expect(result.messageType).toBe('info');
}
});
});

View File

@@ -7,7 +7,6 @@
import type { PartListUnion } from '@google/genai';
import { parseSlashCommand } from './utils/commands.js';
import {
FatalInputError,
Logger,
uiTelemetryService,
type Config,
@@ -19,10 +18,164 @@ import {
CommandKind,
type CommandContext,
type SlashCommand,
type SlashCommandActionReturn,
} from './ui/commands/types.js';
import { createNonInteractiveUI } from './ui/noninteractive/nonInteractiveUi.js';
import type { LoadedSettings } from './config/settings.js';
import type { SessionStatsState } from './ui/contexts/SessionContext.js';
import { t } from './i18n/index.js';
/**
* Built-in commands that are allowed in non-interactive modes (CLI and ACP).
* Only safe, read-only commands that don't require interactive UI.
*
* These commands are:
* - init: Initialize project configuration
* - summary: Generate session summary
* - compress: Compress conversation history
*/
export const ALLOWED_BUILTIN_COMMANDS_NON_INTERACTIVE = [
'init',
'summary',
'compress',
] as const;
/**
* Result of handling a slash command in non-interactive mode.
*
* Supported types:
* - 'submit_prompt': Submits content to the model (supports all modes)
* - 'message': Returns a single message (supports non-interactive JSON/text only)
* - 'stream_messages': Streams multiple messages (supports ACP only)
* - 'unsupported': Command cannot be executed in this mode
* - 'no_command': No command was found or executed
*/
export type NonInteractiveSlashCommandResult =
| {
type: 'submit_prompt';
content: PartListUnion;
}
| {
type: 'message';
messageType: 'info' | 'error';
content: string;
}
| {
type: 'stream_messages';
messages: AsyncGenerator<
{ messageType: 'info' | 'error'; content: string },
void,
unknown
>;
}
| {
type: 'unsupported';
reason: string;
originalType: string;
}
| {
type: 'no_command';
};
/**
* Converts a SlashCommandActionReturn to a NonInteractiveSlashCommandResult.
*
* Only the following result types are supported in non-interactive mode:
* - submit_prompt: Submits content to the model (all modes)
* - message: Returns a single message (non-interactive JSON/text only)
* - stream_messages: Streams multiple messages (ACP only)
*
* All other result types are converted to 'unsupported'.
*
* @param result The result from executing a slash command action
* @returns A NonInteractiveSlashCommandResult describing the outcome
*/
function handleCommandResult(
result: SlashCommandActionReturn,
): NonInteractiveSlashCommandResult {
switch (result.type) {
case 'submit_prompt':
return {
type: 'submit_prompt',
content: result.content,
};
case 'message':
return {
type: 'message',
messageType: result.messageType,
content: result.content,
};
case 'stream_messages':
return {
type: 'stream_messages',
messages: result.messages,
};
/**
* Currently, the return types below are never generated because of the
* allow-list of slash commands permitted in ACP and non-interactive mode.
* More supported return types may be added in the future.
*/
case 'tool':
return {
type: 'unsupported',
reason:
'Tool execution from slash commands is not supported in non-interactive mode.',
originalType: 'tool',
};
case 'quit':
return {
type: 'unsupported',
reason:
'Quit command is not supported in non-interactive mode. The process will exit naturally after completion.',
originalType: 'quit',
};
case 'dialog':
return {
type: 'unsupported',
reason: `Dialog '${result.dialog}' cannot be opened in non-interactive mode.`,
originalType: 'dialog',
};
case 'load_history':
return {
type: 'unsupported',
reason:
'Loading history is not supported in non-interactive mode. Each invocation starts with a fresh context.',
originalType: 'load_history',
};
case 'confirm_shell_commands':
return {
type: 'unsupported',
reason:
'Shell command confirmation is not supported in non-interactive mode. Use YOLO mode or pre-approve commands.',
originalType: 'confirm_shell_commands',
};
case 'confirm_action':
return {
type: 'unsupported',
reason:
'Action confirmation is not supported in non-interactive mode. Commands requiring confirmation cannot be executed.',
originalType: 'confirm_action',
};
default: {
// Exhaustiveness check
const _exhaustive: never = result;
return {
type: 'unsupported',
reason: `Unknown command result type: ${(_exhaustive as SlashCommandActionReturn).type}`,
originalType: 'unknown',
};
}
}
}
/**
* Filters commands based on the allowed built-in command names.
@@ -62,122 +215,146 @@ function filterCommandsForNonInteractive(
* @param config The configuration object
* @param settings The loaded settings
* @param allowedBuiltinCommandNames Optional array of built-in command names that are
* allowed. If not provided or empty, only file commands are available.
* @returns A Promise that resolves to `PartListUnion` if a valid command is
* found and results in a prompt, or `undefined` otherwise.
* @throws {FatalInputError} if the command result is not supported in
* non-interactive mode.
* allowed. Defaults to ALLOWED_BUILTIN_COMMANDS_NON_INTERACTIVE (init, summary, compress).
* Pass an empty array to only allow file commands.
* @returns A Promise that resolves to a `NonInteractiveSlashCommandResult` describing
* the outcome of the command execution.
*/
export const handleSlashCommand = async (
rawQuery: string,
abortController: AbortController,
config: Config,
settings: LoadedSettings,
allowedBuiltinCommandNames?: string[],
): Promise<PartListUnion | undefined> => {
allowedBuiltinCommandNames: string[] = [
...ALLOWED_BUILTIN_COMMANDS_NON_INTERACTIVE,
],
): Promise<NonInteractiveSlashCommandResult> => {
const trimmed = rawQuery.trim();
if (!trimmed.startsWith('/')) {
return;
return { type: 'no_command' };
}
const isAcpMode = config.getExperimentalZedIntegration();
const isInteractive = config.isInteractive();
const executionMode = isAcpMode
? 'acp'
: isInteractive
? 'interactive'
: 'non_interactive';
const allowedBuiltinSet = new Set(allowedBuiltinCommandNames ?? []);
// Only load BuiltinCommandLoader if there are allowed built-in commands
const loaders =
allowedBuiltinSet.size > 0
? [new BuiltinCommandLoader(config), new FileCommandLoader(config)]
: [new FileCommandLoader(config)];
// Load all commands to check if the command exists but is not allowed
const allLoaders = [
new BuiltinCommandLoader(config),
new FileCommandLoader(config),
];
const commandService = await CommandService.create(
loaders,
allLoaders,
abortController.signal,
);
const commands = commandService.getCommands();
const allCommands = commandService.getCommands();
const filteredCommands = filterCommandsForNonInteractive(
commands,
allCommands,
allowedBuiltinSet,
);
// First, try to parse with filtered commands
const { commandToExecute, args } = parseSlashCommand(
rawQuery,
filteredCommands,
);
if (commandToExecute) {
if (commandToExecute.action) {
// Not used by custom commands yet, but may be in the future.
const sessionStats: SessionStatsState = {
sessionId: config?.getSessionId(),
sessionStartTime: new Date(),
metrics: uiTelemetryService.getMetrics(),
lastPromptTokenCount: 0,
promptCount: 1,
if (!commandToExecute) {
// Check if this is a known command that's just not allowed
const { commandToExecute: knownCommand } = parseSlashCommand(
rawQuery,
allCommands,
);
if (knownCommand) {
// Command exists but is not allowed in non-interactive mode
return {
type: 'unsupported',
reason: t(
'The command "/{{command}}" is not supported in non-interactive mode.',
{ command: knownCommand.name },
),
originalType: 'filtered_command',
};
const logger = new Logger(config?.getSessionId() || '', config?.storage);
const context: CommandContext = {
services: {
config,
settings,
git: undefined,
logger,
},
ui: createNonInteractiveUI(),
session: {
stats: sessionStats,
sessionShellAllowlist: new Set(),
},
invocation: {
raw: trimmed,
name: commandToExecute.name,
args,
},
};
const result = await commandToExecute.action(context, args);
if (result) {
switch (result.type) {
case 'submit_prompt':
return result.content;
case 'confirm_shell_commands':
// This result indicates a command attempted to confirm shell commands.
// Note, however, that ShellTool is currently excluded in non-interactive
// mode unless YOLO mode is active, and under YOLO mode no confirmation
// prompt actually occurs.
// This ensures that if a command *does* request confirmation (e.g. in
// the future with more granular permissions), it's handled appropriately.
throw new FatalInputError(
'Exiting due to a confirmation prompt requested by the command.',
);
default:
throw new FatalInputError(
'Exiting due to command result that is not supported in non-interactive mode.',
);
}
}
}
return { type: 'no_command' };
}
return;
if (!commandToExecute.action) {
return { type: 'no_command' };
}
// Not used by custom commands yet, but may be in the future.
const sessionStats: SessionStatsState = {
sessionId: config?.getSessionId(),
sessionStartTime: new Date(),
metrics: uiTelemetryService.getMetrics(),
lastPromptTokenCount: 0,
promptCount: 1,
};
const logger = new Logger(config?.getSessionId() || '', config?.storage);
const context: CommandContext = {
executionMode,
services: {
config,
settings,
git: undefined,
logger,
},
ui: createNonInteractiveUI(),
session: {
stats: sessionStats,
sessionShellAllowlist: new Set(),
},
invocation: {
raw: trimmed,
name: commandToExecute.name,
args,
},
};
const result = await commandToExecute.action(context, args);
if (!result) {
// Command executed but returned no result (e.g., void return)
return {
type: 'message',
messageType: 'info',
content: 'Command executed successfully.',
};
}
// Handle different result types
return handleCommandResult(result);
};
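Callers in non-interactive or ACP mode then dispatch on the discriminated union. A minimal consumer sketch, assuming a placeholder `sendToModel` (not part of this change) that forwards prompts to the model pipeline:

```ts
// Hypothetical consumer; sendToModel stands in for the real model pipeline.
declare function sendToModel(content: PartListUnion): Promise<void>;

async function runSlashCommand(
  rawQuery: string,
  abortController: AbortController,
  config: Config,
  settings: LoadedSettings,
): Promise<void> {
  const result = await handleSlashCommand(
    rawQuery,
    abortController,
    config,
    settings,
  );
  switch (result.type) {
    case 'submit_prompt':
      await sendToModel(result.content);
      break;
    case 'message':
      console.log(`[${result.messageType}] ${result.content}`);
      break;
    case 'stream_messages':
      // Drain the async generator, e.g. forwarding each update to ACP.
      for await (const msg of result.messages) {
        console.log(`[${msg.messageType}] ${msg.content}`);
      }
      break;
    case 'unsupported':
      console.error(result.reason);
      break;
    case 'no_command':
      break;
  }
}
```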
/**
* Retrieves all available slash commands for the current configuration.
*
* @param config The configuration object
* @param settings The loaded settings
* @param abortSignal Signal to cancel the loading process
* @param allowedBuiltinCommandNames Optional array of built-in command names that are
* allowed. If not provided or empty, only file commands are available.
* allowed. Defaults to ALLOWED_BUILTIN_COMMANDS_NON_INTERACTIVE (init, summary, compress).
* Pass an empty array to only include file commands.
* @returns A Promise that resolves to an array of SlashCommand objects
*/
export const getAvailableCommands = async (
config: Config,
settings: LoadedSettings,
abortSignal: AbortSignal,
allowedBuiltinCommandNames?: string[],
allowedBuiltinCommandNames: string[] = [
...ALLOWED_BUILTIN_COMMANDS_NON_INTERACTIVE,
],
): Promise<SlashCommand[]> => {
try {
const allowedBuiltinSet = new Set(allowedBuiltinCommandNames ?? []);

View File

@@ -228,6 +228,7 @@ export const useAuthCommand = (
![
AuthType.QWEN_OAUTH,
AuthType.USE_OPENAI,
AuthType.USE_ANTHROPIC,
AuthType.USE_GEMINI,
AuthType.USE_VERTEX_AI,
].includes(defaultAuthType as AuthType)
@@ -240,6 +241,7 @@ export const useAuthCommand = (
validValues: [
AuthType.QWEN_OAUTH,
AuthType.USE_OPENAI,
AuthType.USE_ANTHROPIC,
AuthType.USE_GEMINI,
AuthType.USE_VERTEX_AI,
].join(', '),

View File

@@ -19,7 +19,9 @@ export const compressCommand: SlashCommand = {
kind: CommandKind.BUILT_IN,
action: async (context) => {
const { ui } = context;
if (ui.pendingItem) {
const executionMode = context.executionMode ?? 'interactive';
if (executionMode === 'interactive' && ui.pendingItem) {
ui.addItem(
{
type: MessageType.ERROR,
@@ -40,13 +42,80 @@ export const compressCommand: SlashCommand = {
},
};
try {
ui.setPendingItem(pendingMessage);
const config = context.services.config;
const geminiClient = config?.getGeminiClient();
if (!config || !geminiClient) {
return {
type: 'message',
messageType: 'error',
content: t('Config not loaded.'),
};
}
const doCompress = async () => {
const promptId = `compress-${Date.now()}`;
const compressed = await context.services.config
?.getGeminiClient()
?.tryCompressChat(promptId, true);
if (compressed) {
return await geminiClient.tryCompressChat(promptId, true);
};
if (executionMode === 'acp') {
const messages = async function* () {
try {
yield {
messageType: 'info' as const,
content: 'Compressing context...',
};
const compressed = await doCompress();
if (!compressed) {
yield {
messageType: 'error' as const,
content: t('Failed to compress chat history.'),
};
return;
}
yield {
messageType: 'info' as const,
content: `Context compressed (${compressed.originalTokenCount} -> ${compressed.newTokenCount}).`,
};
} catch (e) {
yield {
messageType: 'error' as const,
content: t('Failed to compress chat history: {{error}}', {
error: e instanceof Error ? e.message : String(e),
}),
};
}
};
return { type: 'stream_messages', messages: messages() };
}
try {
if (executionMode === 'interactive') {
ui.setPendingItem(pendingMessage);
}
const compressed = await doCompress();
if (!compressed) {
if (executionMode === 'interactive') {
ui.addItem(
{
type: MessageType.ERROR,
text: t('Failed to compress chat history.'),
},
Date.now(),
);
return;
}
return {
type: 'message',
messageType: 'error',
content: t('Failed to compress chat history.'),
};
}
if (executionMode === 'interactive') {
ui.addItem(
{
type: MessageType.COMPRESSION,
@@ -59,27 +128,39 @@ export const compressCommand: SlashCommand = {
} as HistoryItemCompression,
Date.now(),
);
} else {
return;
}
return {
type: 'message',
messageType: 'info',
content: `Context compressed (${compressed.originalTokenCount} -> ${compressed.newTokenCount}).`,
};
} catch (e) {
if (executionMode === 'interactive') {
ui.addItem(
{
type: MessageType.ERROR,
text: t('Failed to compress chat history.'),
text: t('Failed to compress chat history: {{error}}', {
error: e instanceof Error ? e.message : String(e),
}),
},
Date.now(),
);
return;
}
} catch (e) {
ui.addItem(
{
type: MessageType.ERROR,
text: t('Failed to compress chat history: {{error}}', {
error: e instanceof Error ? e.message : String(e),
}),
},
Date.now(),
);
return {
type: 'message',
messageType: 'error',
content: t('Failed to compress chat history: {{error}}', {
error: e instanceof Error ? e.message : String(e),
}),
};
} finally {
ui.setPendingItem(null);
if (executionMode === 'interactive') {
ui.setPendingItem(null);
}
}
},
};
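The command only reads two fields off the compression result; a sketch of the shape it assumes (the actual return type of `tryCompressChat` lives in core and may carry more fields):

```ts
// Assumed minimal shape of the compression result used above.
interface ChatCompressionInfo {
  originalTokenCount: number;
  newTokenCount: number;
}

const formatCompression = (info: ChatCompressionInfo): string =>
  `Context compressed (${info.originalTokenCount} -> ${info.newTokenCount}).`;
```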

View File

@@ -13,6 +13,16 @@ import { createMockCommandContext } from '../../test-utils/mockCommandContext.js
vi.mock('../../i18n/index.js', () => ({
setLanguageAsync: vi.fn().mockResolvedValue(undefined),
getCurrentLanguage: vi.fn().mockReturnValue('en'),
detectSystemLanguage: vi.fn().mockReturnValue('en'),
getLanguageNameFromLocale: vi.fn((locale: string) => {
const map: Record<string, string> = {
zh: 'Chinese',
en: 'English',
ru: 'Russian',
de: 'German',
};
return map[locale] || 'English';
}),
t: vi.fn((key: string) => key),
}));
@@ -61,7 +71,10 @@ vi.mock('@qwen-code/qwen-code-core', async (importOriginal) => {
// Import modules after mocking
import * as i18n from '../../i18n/index.js';
import { languageCommand } from './languageCommand.js';
import {
languageCommand,
initializeLlmOutputLanguage,
} from './languageCommand.js';
describe('languageCommand', () => {
let mockContext: CommandContext;
@@ -186,6 +199,39 @@ describe('languageCommand', () => {
content: expect.stringContaining('Chinese'),
});
});
it('should parse Unicode LLM output language from marker', async () => {
vi.mocked(fs.existsSync).mockReturnValue(true);
vi.mocked(fs.readFileSync).mockReturnValue(
[
'# ⚠️ CRITICAL: 中文 Output Language Rule - HIGHEST PRIORITY ⚠️',
'<!-- qwen-code:llm-output-language: 中文 -->',
'',
'Some other content...',
].join('\n'),
);
vi.mocked(i18n.t).mockImplementation(
(key: string, params?: Record<string, string>) => {
if (params && key.includes('{{lang}}')) {
return key.replace('{{lang}}', params['lang'] || '');
}
return key;
},
);
if (!languageCommand.action) {
throw new Error('The language command must have an action.');
}
const result = await languageCommand.action(mockContext, '');
expect(result).toEqual({
type: 'message',
messageType: 'info',
content: expect.stringContaining('中文'),
});
});
});
describe('main command action - config not available', () => {
@@ -400,6 +446,34 @@ describe('languageCommand', () => {
});
});
it('should normalize locale code "ru" to "Russian"', async () => {
if (!languageCommand.action) {
throw new Error('The language command must have an action.');
}
await languageCommand.action(mockContext, 'output ru');
expect(fs.writeFileSync).toHaveBeenCalledWith(
expect.stringContaining('output-language.md'),
expect.stringContaining('Russian'),
'utf-8',
);
});
it('should normalize locale code "de" to "German"', async () => {
if (!languageCommand.action) {
throw new Error('The language command must have an action.');
}
await languageCommand.action(mockContext, 'output de');
expect(fs.writeFileSync).toHaveBeenCalledWith(
expect.stringContaining('output-language.md'),
expect.stringContaining('German'),
'utf-8',
);
});
it('should handle file write errors gracefully', async () => {
vi.mocked(fs.writeFileSync).mockImplementation(() => {
throw new Error('Permission denied');
@@ -481,6 +555,8 @@ describe('languageCommand', () => {
const nestedNames = uiSubcommand?.subCommands?.map((c) => c.name);
expect(nestedNames).toContain('zh-CN');
expect(nestedNames).toContain('en-US');
expect(nestedNames).toContain('ru-RU');
expect(nestedNames).toContain('de-DE');
});
it('should have action that sets language', async () => {
@@ -542,16 +618,9 @@ describe('languageCommand', () => {
const enUSSubcommand = uiSubcommand?.subCommands?.find(
(c) => c.name === 'en-US',
);
it('zh-CN should have aliases', () => {
expect(zhCNSubcommand?.altNames).toContain('zh');
expect(zhCNSubcommand?.altNames).toContain('chinese');
});
it('en-US should have aliases', () => {
expect(enUSSubcommand?.altNames).toContain('en');
expect(enUSSubcommand?.altNames).toContain('english');
});
const deDESubcommand = uiSubcommand?.subCommands?.find(
(c) => c.name === 'de-DE',
);
it('zh-CN action should set Chinese', async () => {
if (!zhCNSubcommand?.action) {
@@ -583,6 +652,21 @@ describe('languageCommand', () => {
});
});
it('de-DE action should set German', async () => {
if (!deDESubcommand?.action) {
throw new Error('de-DE subcommand must have an action.');
}
const result = await deDESubcommand.action(mockContext, '');
expect(i18n.setLanguageAsync).toHaveBeenCalledWith('de');
expect(result).toEqual({
type: 'message',
messageType: 'info',
content: expect.stringContaining('UI language changed'),
});
});
it('should reject extra arguments', async () => {
if (!zhCNSubcommand?.action) {
throw new Error('zh-CN subcommand must have an action.');
@@ -597,4 +681,74 @@ describe('languageCommand', () => {
});
});
});
describe('initializeLlmOutputLanguage', () => {
beforeEach(() => {
vi.clearAllMocks();
vi.mocked(fs.existsSync).mockReturnValue(false);
vi.mocked(fs.mkdirSync).mockImplementation(() => undefined);
vi.mocked(fs.writeFileSync).mockImplementation(() => undefined);
});
it('should create file when it does not exist', () => {
vi.mocked(fs.existsSync).mockReturnValue(false);
vi.mocked(i18n.detectSystemLanguage).mockReturnValue('en');
initializeLlmOutputLanguage();
expect(fs.mkdirSync).toHaveBeenCalled();
expect(fs.writeFileSync).toHaveBeenCalledWith(
expect.stringContaining('output-language.md'),
expect.stringContaining('English'),
'utf-8',
);
});
it('should NOT overwrite existing file', () => {
vi.mocked(fs.existsSync).mockReturnValue(true);
initializeLlmOutputLanguage();
expect(fs.writeFileSync).not.toHaveBeenCalled();
});
it('should detect Chinese locale and create Chinese rule file', () => {
vi.mocked(fs.existsSync).mockReturnValue(false);
vi.mocked(i18n.detectSystemLanguage).mockReturnValue('zh');
initializeLlmOutputLanguage();
expect(fs.writeFileSync).toHaveBeenCalledWith(
expect.stringContaining('output-language.md'),
expect.stringContaining('Chinese'),
'utf-8',
);
});
it('should detect Russian locale and create Russian rule file', () => {
vi.mocked(fs.existsSync).mockReturnValue(false);
vi.mocked(i18n.detectSystemLanguage).mockReturnValue('ru');
initializeLlmOutputLanguage();
expect(fs.writeFileSync).toHaveBeenCalledWith(
expect.stringContaining('output-language.md'),
expect.stringContaining('Russian'),
'utf-8',
);
});
it('should detect German locale and create German rule file', () => {
vi.mocked(fs.existsSync).mockReturnValue(false);
vi.mocked(i18n.detectSystemLanguage).mockReturnValue('de');
initializeLlmOutputLanguage();
expect(fs.writeFileSync).toHaveBeenCalledWith(
expect.stringContaining('output-language.md'),
expect.stringContaining('German'),
'utf-8',
);
});
});
});

View File

@@ -1,6 +1,6 @@
/**
* @license
* Copyright 2025 Google LLC
* Copyright 2025 Qwen team
* SPDX-License-Identifier: Apache-2.0
*/
@@ -15,51 +15,72 @@ import { SettingScope } from '../../config/settings.js';
import {
setLanguageAsync,
getCurrentLanguage,
detectSystemLanguage,
getLanguageNameFromLocale,
type SupportedLanguage,
t,
} from '../../i18n/index.js';
import {
SUPPORTED_LANGUAGES,
type LanguageDefinition,
} from '../../i18n/languages.js';
import * as fs from 'node:fs';
import * as path from 'node:path';
import { Storage } from '@qwen-code/qwen-code-core';
const LLM_OUTPUT_LANGUAGE_RULE_FILENAME = 'output-language.md';
const LLM_OUTPUT_LANGUAGE_MARKER_PREFIX = 'qwen-code:llm-output-language:';
function parseUiLanguageArg(input: string): SupportedLanguage | null {
const lowered = input.trim().toLowerCase();
if (!lowered) return null;
for (const lang of SUPPORTED_LANGUAGES) {
if (
lowered === lang.code ||
lowered === lang.id.toLowerCase() ||
lowered === lang.fullName.toLowerCase()
) {
return lang.code;
}
}
return null;
}
function formatUiLanguageDisplay(lang: SupportedLanguage): string {
const option = SUPPORTED_LANGUAGES.find((o) => o.code === lang);
return option ? `${option.fullName} (${option.id})` : lang;
}
function sanitizeLanguageForMarker(language: string): string {
// HTML comments cannot safely contain "--" or end markers like "-->" or "--!>".
// Also strip newlines to keep the marker on a single line and easy to parse.
return language
.replace(/[\r\n]/g, ' ')
.replace(/--!?>/g, '')
.replace(/--/g, '');
}
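Because the sanitized value is embedded in an HTML comment, stripping `--` sequences keeps a crafted language string from terminating the comment early. A quick illustration of the function's effect:

```ts
// Illustration only: crafted inputs cannot close the HTML comment.
sanitizeLanguageForMarker('中文');              // => '中文'
sanitizeLanguageForMarker('evil --> injected'); // => 'evil  injected'
sanitizeLanguageForMarker('a--!>b\nc');         // => 'ab c'
```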
/**
* Generates the LLM output language rule template based on the language name.
*/
function generateLlmOutputLanguageRule(language: string): string {
return `# ⚠️ CRITICAL: ${language} Output Language Rule - HIGHEST PRIORITY ⚠️
const markerLanguage = sanitizeLanguageForMarker(language);
return `# Output language preference: ${language}
<!-- ${LLM_OUTPUT_LANGUAGE_MARKER_PREFIX} ${markerLanguage} -->
## 🚨 MANDATORY RULE - NO EXCEPTIONS 🚨
## Goal
Prefer responding in **${language}** for normal assistant messages and explanations.
**YOU MUST RESPOND IN ${language.toUpperCase()} FOR EVERY SINGLE OUTPUT, REGARDLESS OF THE USER'S INPUT LANGUAGE.**
## Keep technical artifacts unchanged
Do **not** translate or rewrite:
- Code blocks, CLI commands, file paths, stack traces, logs, JSON keys, identifiers
- Exact quoted text from the user (keep quotes verbatim)
This is a **NON-NEGOTIABLE** requirement. Even if the user writes in English, says "hi", asks a simple question, or explicitly requests another language, **YOU MUST ALWAYS RESPOND IN ${language.toUpperCase()}.**
## When a conflict exists
If higher-priority instructions (system/developer) require a different behavior, follow them.
## What Must Be in ${language}
**EVERYTHING** you output: conversation replies, tool call descriptions, success/error messages, generated file content (comments, documentation), and all explanatory text.
**Tool outputs**: All descriptive text from \`read_file\`, \`write_file\`, \`codebase_search\`, \`run_terminal_cmd\`, \`todo_write\`, \`web_search\`, etc. MUST be in ${language}.
## Examples
### ✅ CORRECT:
- User says "hi" → Respond in ${language} (e.g., "Bonjour" if ${language} is French)
- Tool result → "已成功读取文件 config.json" (if ${language} is Chinese)
- Error → "无法找到指定的文件" (if ${language} is Chinese)
### ❌ WRONG:
- User says "hi" → "Hello" in English
- Tool result → "Successfully read file" in English
- Error → "File not found" in English
## Notes
- Code elements (variable/function names, syntax) can remain in English
- Comments, documentation, and all other text MUST be in ${language}
**THIS RULE IS ACTIVE NOW. ALL OUTPUTS MUST BE IN ${language.toUpperCase()}. NO EXCEPTIONS.**
## Tool / system outputs
Raw tool/system outputs may contain fixed-format English. Preserve them verbatim, and if needed, add a short **${language}** explanation below.
`;
}
@@ -73,6 +94,80 @@ function getLlmOutputLanguageRulePath(): string {
);
}
/**
* Normalizes a language input to its full English name.
* If the input is a known locale code (e.g., "ru", "zh"), converts it to the full name.
* Otherwise, returns the input as-is (e.g., "Japanese" stays "Japanese").
*/
function normalizeLanguageName(language: string): string {
const lowered = language.toLowerCase();
// Check if it's a known locale code and convert to full name
const fullName = getLanguageNameFromLocale(lowered);
// If getLanguageNameFromLocale returned a different value, use it
// Otherwise, use the original input (preserves case for unknown languages)
if (fullName !== 'English' || lowered === 'en') {
return fullName;
}
return language;
}
function extractLlmOutputLanguageFromRuleFileContent(
content: string,
): string | null {
// Preferred: machine-readable marker that supports Unicode and spaces.
// Example: <!-- qwen-code:llm-output-language: 中文 -->
const markerMatch = content.match(
new RegExp(
String.raw`<!--\s*${LLM_OUTPUT_LANGUAGE_MARKER_PREFIX}\s*(.*?)\s*-->`,
'i',
),
);
if (markerMatch?.[1]) {
const lang = markerMatch[1].trim();
if (lang) return lang;
}
// Backward compatibility: parse the heading line.
// Example: "# CRITICAL: Chinese Output Language Rule - HIGHEST PRIORITY"
// Example: "# ⚠️ CRITICAL: 日本語 Output Language Rule - HIGHEST PRIORITY ⚠️"
const headingMatch = content.match(
/^#.*?CRITICAL:\s*(.*?)\s+Output Language Rule\b/im,
);
if (headingMatch?.[1]) {
const lang = headingMatch[1].trim();
if (lang) return lang;
}
return null;
}
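The two parsing paths round-trip cleanly with the generator above, and legacy rule files written with the old heading still resolve:

```ts
// Round-trip sketch: the marker written by generateLlmOutputLanguageRule
// is recovered by extractLlmOutputLanguageFromRuleFileContent.
const fresh = generateLlmOutputLanguageRule('中文');
extractLlmOutputLanguageFromRuleFileContent(fresh); // => '中文'

// Backward compatibility: files with only the old heading still parse.
const legacy =
  '# ⚠️ CRITICAL: Japanese Output Language Rule - HIGHEST PRIORITY ⚠️\n';
extractLlmOutputLanguageFromRuleFileContent(legacy); // => 'Japanese'
```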
/**
* Initializes the LLM output language rule file on first startup.
* If the file already exists, it is not overwritten (respects user preference).
*/
export function initializeLlmOutputLanguage(): void {
const filePath = getLlmOutputLanguageRulePath();
// Skip if file already exists (user preference)
if (fs.existsSync(filePath)) {
return;
}
// Detect system language and map to language name
const detectedLocale = detectSystemLanguage();
const languageName = getLanguageNameFromLocale(detectedLocale);
// Generate the rule file
const content = generateLlmOutputLanguageRule(languageName);
// Ensure directory exists
const dir = path.dirname(filePath);
fs.mkdirSync(dir, { recursive: true });
// Write file
fs.writeFileSync(filePath, content, 'utf-8');
}
/**
* Gets the current LLM output language from the rule file if it exists.
*/
@@ -81,12 +176,7 @@ function getCurrentLlmOutputLanguage(): string | null {
if (fs.existsSync(filePath)) {
try {
const content = fs.readFileSync(filePath, 'utf-8');
// Extract language name from the first line
// Template format: "# CRITICAL: Chinese Output Language Rule - HIGHEST PRIORITY"
const match = content.match(/^#.*?(\w+)\s+Output Language Rule/i);
if (match) {
return match[1];
}
return extractLlmOutputLanguageFromRuleFileContent(content);
} catch {
// Ignore errors
}
@@ -127,18 +217,11 @@ async function setUiLanguage(
// Reload commands to update their descriptions with the new language
context.ui.reloadCommands();
// Map language codes to friendly display names
const langDisplayNames: Partial<Record<SupportedLanguage, string>> = {
zh: '中文 (zh-CN)',
en: 'English (en-US)',
ru: 'Русский (ru-RU)',
};
return {
type: 'message',
messageType: 'info',
content: t('UI language changed to {{lang}}', {
lang: langDisplayNames[lang] || lang,
lang: formatUiLanguageDisplay(lang),
}),
};
}
@@ -151,7 +234,9 @@ function generateLlmOutputLanguageRuleFile(
): Promise<MessageActionReturn> {
try {
const filePath = getLlmOutputLanguageRulePath();
const content = generateLlmOutputLanguageRule(language);
// Normalize locale codes (e.g., "ru" -> "Russian") to full language names
const normalizedLanguage = normalizeLanguageName(language);
const content = generateLlmOutputLanguageRule(normalizedLanguage);
// Ensure directory exists
const dir = path.dirname(filePath);
@@ -196,7 +281,6 @@ export const languageCommand: SlashCommand = {
args: string,
): Promise<SlashCommandActionReturn> => {
const { services } = context;
if (!services.config) {
return {
type: 'message',
@@ -207,18 +291,37 @@ export const languageCommand: SlashCommand = {
const trimmedArgs = args.trim();
// Handle subcommands if called directly via action (for tests/backward compatibility)
const parts = trimmedArgs.split(/\s+/);
const firstArg = parts[0].toLowerCase();
const subArgs = parts.slice(1).join(' ');
if (firstArg === 'ui' || firstArg === 'output') {
const subCommand = languageCommand.subCommands?.find(
(s) => s.name === firstArg,
);
if (subCommand?.action) {
return subCommand.action(
context,
subArgs,
) as Promise<SlashCommandActionReturn>;
}
}
// If no arguments, show current language settings and usage
if (!trimmedArgs) {
const currentUiLang = getCurrentLanguage();
const currentLlmLang = getCurrentLlmOutputLanguage();
const message = [
t('Current UI language: {{lang}}', { lang: currentUiLang }),
t('Current UI language: {{lang}}', {
lang: formatUiLanguageDisplay(currentUiLang as SupportedLanguage),
}),
currentLlmLang
? t('Current LLM output language: {{lang}}', { lang: currentLlmLang })
: t('LLM output language not set'),
'',
t('Available subcommands:'),
` /language ui [zh-CN|en-US|ru-RU] - ${t('Set UI language')}`,
` /language ui [${SUPPORTED_LANGUAGES.map((o) => o.id).join('|')}] - ${t('Set UI language')}`,
` /language output <language> - ${t('Set LLM output language')}`,
].join('\n');
@@ -229,115 +332,21 @@ export const languageCommand: SlashCommand = {
};
}
// Parse subcommand
const parts = trimmedArgs.split(/\s+/);
const subcommand = parts[0].toLowerCase();
if (subcommand === 'ui') {
// Handle /language ui [zh-CN|en-US|ru-RU]
if (parts.length === 1) {
// Show UI language subcommand help
return {
type: 'message',
messageType: 'info',
content: [
t('Set UI language'),
'',
t('Usage: /language ui [zh-CN|en-US|ru-RU]'),
'',
t('Available options:'),
t(' - zh-CN: Simplified Chinese'),
t(' - en-US: English'),
t(' - ru-RU: Russian'),
'',
t(
'To request additional UI language packs, please open an issue on GitHub.',
),
].join('\n'),
};
}
const langArg = parts[1].toLowerCase();
let targetLang: SupportedLanguage | null = null;
if (langArg === 'en' || langArg === 'english' || langArg === 'en-us') {
targetLang = 'en';
} else if (
langArg === 'zh' ||
langArg === 'chinese' ||
langArg === '中文' ||
langArg === 'zh-cn'
) {
targetLang = 'zh';
} else if (
langArg === 'ru' ||
langArg === 'ru-RU' ||
langArg === 'russian' ||
langArg === 'русский'
) {
targetLang = 'ru';
} else {
return {
type: 'message',
messageType: 'error',
content: t('Invalid language. Available: en-US, zh-CN, ru-RU'),
};
}
return setUiLanguage(context, targetLang);
} else if (subcommand === 'output') {
// Handle /language output <language>
if (parts.length === 1) {
return {
type: 'message',
messageType: 'info',
content: [
t('Set LLM output language'),
'',
t('Usage: /language output <language>'),
` ${t('Example: /language output 中文')}`,
].join('\n'),
};
}
// Join all parts after "output" as the language name
const language = parts.slice(1).join(' ');
return generateLlmOutputLanguageRuleFile(language);
} else {
// Backward compatibility: treat as UI language
const langArg = trimmedArgs.toLowerCase();
let targetLang: SupportedLanguage | null = null;
if (langArg === 'en' || langArg === 'english' || langArg === 'en-us') {
targetLang = 'en';
} else if (
langArg === 'zh' ||
langArg === 'chinese' ||
langArg === '中文' ||
langArg === 'zh-cn'
) {
targetLang = 'zh';
} else if (
langArg === 'ru' ||
langArg === 'ru-RU' ||
langArg === 'russian' ||
langArg === 'русский'
) {
targetLang = 'ru';
} else {
return {
type: 'message',
messageType: 'error',
content: [
t('Invalid command. Available subcommands:'),
' - /language ui [zh-CN|en-US|ru-RU] - ' + t('Set UI language'),
' - /language output <language> - ' + t('Set LLM output language'),
].join('\n'),
};
}
// Handle backward compatibility for /language [lang]
const targetLang = parseUiLanguageArg(trimmedArgs);
if (targetLang) {
return setUiLanguage(context, targetLang);
}
return {
type: 'message',
messageType: 'error',
content: [
t('Invalid command. Available subcommands:'),
` - /language ui [${SUPPORTED_LANGUAGES.map((o) => o.id).join('|')}] - ${t('Set UI language')}`,
' - /language output <language> - ' + t('Set LLM output language'),
].join('\n'),
};
},
subCommands: [
{
@@ -358,11 +367,14 @@ export const languageCommand: SlashCommand = {
content: [
t('Set UI language'),
'',
t('Usage: /language ui [zh-CN|en-US]'),
t('Usage: /language ui [{{options}}]', {
options: SUPPORTED_LANGUAGES.map((o) => o.id).join('|'),
}),
'',
t('Available options:'),
t(' - zh-CN: Simplified Chinese'),
t(' - en-US: English'),
...SUPPORTED_LANGUAGES.map(
(o) => ` - ${o.id}: ${t(o.fullName)}`,
),
'',
t(
'To request additional UI language packs, please open an issue on GitHub.',
@@ -371,99 +383,20 @@ export const languageCommand: SlashCommand = {
};
}
const langArg = trimmedArgs.toLowerCase();
let targetLang: SupportedLanguage | null = null;
if (langArg === 'en' || langArg === 'english' || langArg === 'en-us') {
targetLang = 'en';
} else if (
langArg === 'zh' ||
langArg === 'chinese' ||
langArg === '中文' ||
langArg === 'zh-cn'
) {
targetLang = 'zh';
} else {
const targetLang = parseUiLanguageArg(trimmedArgs);
if (!targetLang) {
return {
type: 'message',
messageType: 'error',
content: t('Invalid language. Available: en-US, zh-CN'),
content: t('Invalid language. Available: {{options}}', {
options: SUPPORTED_LANGUAGES.map((o) => o.id).join(','),
}),
};
}
return setUiLanguage(context, targetLang);
},
subCommands: [
{
name: 'zh-CN',
altNames: ['zh', 'chinese', '中文'],
get description() {
return t('Set UI language to Simplified Chinese (zh-CN)');
},
kind: CommandKind.BUILT_IN,
action: async (
context: CommandContext,
args: string,
): Promise<MessageActionReturn> => {
if (args.trim().length > 0) {
return {
type: 'message',
messageType: 'error',
content: t(
'Language subcommands do not accept additional arguments.',
),
};
}
return setUiLanguage(context, 'zh');
},
},
{
name: 'en-US',
altNames: ['en', 'english'],
get description() {
return t('Set UI language to English (en-US)');
},
kind: CommandKind.BUILT_IN,
action: async (
context: CommandContext,
args: string,
): Promise<MessageActionReturn> => {
if (args.trim().length > 0) {
return {
type: 'message',
messageType: 'error',
content: t(
'Language subcommands do not accept additional arguments.',
),
};
}
return setUiLanguage(context, 'en');
},
},
{
name: 'ru-RU',
altNames: ['ru', 'russian', 'русский'],
get description() {
return t('Set UI language to Russian (ru-RU)');
},
kind: CommandKind.BUILT_IN,
action: async (
context: CommandContext,
args: string,
): Promise<MessageActionReturn> => {
if (args.trim().length > 0) {
return {
type: 'message',
messageType: 'error',
content: t(
'Language subcommands do not accept additional arguments.',
),
};
}
return setUiLanguage(context, 'ru');
},
},
],
subCommands: SUPPORTED_LANGUAGES.map(createUiLanguageSubCommand),
},
{
name: 'output',
@@ -496,3 +429,28 @@ export const languageCommand: SlashCommand = {
},
],
};
/**
* Helper to create a UI language subcommand.
*/
function createUiLanguageSubCommand(option: LanguageDefinition): SlashCommand {
return {
name: option.id,
get description() {
return t('Set UI language to {{name}}', { name: option.fullName });
},
kind: CommandKind.BUILT_IN,
action: async (context, args) => {
if (args.trim().length > 0) {
return {
type: 'message',
messageType: 'error',
content: t(
'Language subcommands do not accept additional arguments.',
),
};
}
return setUiLanguage(context, option.code);
},
};
}
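The factory relies on each `LanguageDefinition` exposing `id`, `code`, and `fullName`. The authoritative definition lives in `../../i18n/languages.ts`; inferred from usage in this file, the shape is roughly:

```ts
// Inferred from usage in this file; see i18n/languages.ts for the source.
interface LanguageDefinition {
  code: SupportedLanguage; // e.g. 'zh' | 'en' | 'ru' | 'de'
  id: string; // e.g. 'zh-CN'
  fullName: string; // e.g. 'Simplified Chinese'
}
```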

View File

@@ -26,6 +26,8 @@ export const summaryCommand: SlashCommand = {
action: async (context): Promise<SlashCommandActionReturn> => {
const { config } = context.services;
const { ui } = context;
const executionMode = context.executionMode ?? 'interactive';
if (!config) {
return {
type: 'message',
@@ -43,8 +45,8 @@ export const summaryCommand: SlashCommand = {
};
}
// Check if already generating summary
if (ui.pendingItem) {
// Check if already generating summary (interactive UI only)
if (executionMode === 'interactive' && ui.pendingItem) {
ui.addItem(
{
type: 'error' as const,
@@ -63,29 +65,22 @@ export const summaryCommand: SlashCommand = {
};
}
try {
// Get the current chat history
const getChatHistory = () => {
const chat = geminiClient.getChat();
const history = chat.getHistory();
return chat.getHistory();
};
const validateChatHistory = (
history: ReturnType<typeof getChatHistory>,
) => {
if (history.length <= 2) {
return {
type: 'message',
messageType: 'info',
content: t('No conversation found to summarize.'),
};
throw new Error(t('No conversation found to summarize.'));
}
};
// Show loading state
const pendingMessage: HistoryItemSummary = {
type: 'summary',
summary: {
isPending: true,
stage: 'generating',
},
};
ui.setPendingItem(pendingMessage);
const generateSummaryMarkdown = async (
history: ReturnType<typeof getChatHistory>,
): Promise<string> => {
// Build the conversation context for summary generation
const conversationContext = history.map((message) => ({
role: message.role,
@@ -121,19 +116,21 @@ export const summaryCommand: SlashCommand = {
if (!markdownSummary) {
throw new Error(
'Failed to generate summary - no text content received from LLM response',
t(
'Failed to generate summary - no text content received from LLM response',
),
);
}
// Update loading message to show saving progress
ui.setPendingItem({
type: 'summary',
summary: {
isPending: true,
stage: 'saving',
},
});
return markdownSummary;
};
const saveSummaryToDisk = async (
markdownSummary: string,
): Promise<{
filePathForDisplay: string;
fullPath: string;
}> => {
// Ensure .qwen directory exists
const projectRoot = config.getProjectRoot();
const qwenDir = path.join(projectRoot, '.qwen');
@@ -155,45 +152,163 @@ export const summaryCommand: SlashCommand = {
await fsPromises.writeFile(summaryPath, summaryContent, 'utf8');
// Return both the display path and the absolute path
return {
filePathForDisplay: '.qwen/PROJECT_SUMMARY.md',
fullPath: summaryPath,
};
};
const emitInteractivePending = (stage: 'generating' | 'saving') => {
if (executionMode !== 'interactive') {
return;
}
const pendingMessage: HistoryItemSummary = {
type: 'summary',
summary: {
isPending: true,
stage,
},
};
ui.setPendingItem(pendingMessage);
};
const completeInteractive = (filePathForDisplay: string) => {
if (executionMode !== 'interactive') {
return;
}
ui.setPendingItem(null);
const completedSummaryItem: HistoryItemSummary = {
type: 'summary',
summary: {
isPending: false,
stage: 'completed',
filePath: '.qwen/PROJECT_SUMMARY.md',
filePath: filePathForDisplay,
},
};
ui.addItem(completedSummaryItem, Date.now());
};
return {
type: 'message',
messageType: 'info',
content: '', // Empty content since we show the message in UI component
};
} catch (error) {
// Clear pending item on error
const formatErrorMessage = (error: unknown): string =>
t('Failed to generate project context summary: {{error}}', {
error: error instanceof Error ? error.message : String(error),
});
const failInteractive = (error: unknown) => {
if (executionMode !== 'interactive') {
return;
}
ui.setPendingItem(null);
ui.addItem(
{
type: 'error' as const,
text: `${t(
'Failed to generate project context summary: {{error}}',
{
error: error instanceof Error ? error.message : String(error),
},
)}`,
text: formatErrorMessage(error),
},
Date.now(),
);
};
const formatSuccessMessage = (filePathForDisplay: string): string =>
t('Saved project summary to {{filePathForDisplay}}.', {
filePathForDisplay,
});
const returnNoConversationMessage = (): SlashCommandActionReturn => {
const msg = t('No conversation found to summarize.');
if (executionMode === 'acp') {
const messages = async function* () {
yield {
messageType: 'info' as const,
content: msg,
};
};
return {
type: 'stream_messages',
messages: messages(),
};
}
return {
type: 'message',
messageType: 'info',
content: msg,
};
};
const executeSummaryGeneration = async (
history: ReturnType<typeof getChatHistory>,
): Promise<{
markdownSummary: string;
filePathForDisplay: string;
}> => {
emitInteractivePending('generating');
const markdownSummary = await generateSummaryMarkdown(history);
emitInteractivePending('saving');
const { filePathForDisplay } = await saveSummaryToDisk(markdownSummary);
completeInteractive(filePathForDisplay);
return { markdownSummary, filePathForDisplay };
};
// Validate chat history once at the beginning
const history = getChatHistory();
try {
validateChatHistory(history);
} catch (_error) {
return returnNoConversationMessage();
}
if (executionMode === 'acp') {
const messages = async function* () {
try {
yield {
messageType: 'info' as const,
content: t('Generating project summary...'),
};
const { filePathForDisplay } =
await executeSummaryGeneration(history);
yield {
messageType: 'info' as const,
content: formatSuccessMessage(filePathForDisplay),
};
} catch (error) {
failInteractive(error);
yield {
messageType: 'error' as const,
content: formatErrorMessage(error),
};
}
};
return {
type: 'stream_messages',
messages: messages(),
};
}
try {
const { filePathForDisplay } = await executeSummaryGeneration(history);
if (executionMode === 'non_interactive') {
return {
type: 'message',
messageType: 'info',
content: formatSuccessMessage(filePathForDisplay),
};
}
// Interactive mode: UI components already display progress and completion.
return {
type: 'message',
messageType: 'info',
content: '',
};
} catch (error) {
failInteractive(error);
return {
type: 'message',
messageType: 'error',
content: t('Failed to generate project context summary: {{error}}', {
error: error instanceof Error ? error.message : String(error),
}),
content: formatErrorMessage(error),
};
}
},

View File

@@ -22,6 +22,14 @@ import type {
// Grouped dependencies for clarity and easier mocking
export interface CommandContext {
/**
* Execution mode for the current invocation.
*
* - interactive: React/Ink UI mode
* - non_interactive: non-interactive CLI mode (text/json)
* - acp: ACP/Zed integration mode
*/
executionMode?: 'interactive' | 'non_interactive' | 'acp';
// Invocation properties for when commands are called.
invocation?: {
/** The raw, untrimmed input string from the user. */
@@ -108,6 +116,19 @@ export interface MessageActionReturn {
content: string;
}
/**
* The return type for a command action that streams multiple messages.
* Used for long-running operations that need to send progress updates.
*/
export interface StreamMessagesActionReturn {
type: 'stream_messages';
messages: AsyncGenerator<
{ messageType: 'info' | 'error'; content: string },
void,
unknown
>;
}
/**
* The return type for a command action that needs to open a dialog.
*/
@@ -174,6 +195,7 @@ export interface ConfirmActionReturn {
export type SlashCommandActionReturn =
| ToolActionReturn
| MessageActionReturn
| StreamMessagesActionReturn
| QuitActionReturn
| OpenDialogActionReturn
| LoadHistoryActionReturn

View File

@@ -520,6 +520,13 @@ export const useSlashCommandProcessor = (
true,
);
}
case 'stream_messages': {
// stream_messages is only used in ACP/Zed integration mode
// and should not be returned in interactive UI mode
throw new Error(
'stream_messages result type is not supported in interactive mode',
);
}
default: {
const unhandled: never = result;
throw new Error(

View File

@@ -526,10 +526,15 @@ export const useGeminiStream = (
return currentThoughtBuffer;
}
const newThoughtBuffer = currentThoughtBuffer + thoughtText;
let newThoughtBuffer = currentThoughtBuffer + thoughtText;
const pendingType = pendingHistoryItemRef.current?.type;
const isPendingThought =
pendingType === 'gemini_thought' ||
pendingType === 'gemini_thought_content';
// If we're not already showing a thought, start a new one
if (pendingHistoryItemRef.current?.type !== 'gemini_thought') {
if (!isPendingThought) {
// If there's a pending non-thought item, finalize it first
if (pendingHistoryItemRef.current) {
addItem(pendingHistoryItemRef.current, userMessageTimestamp);
@@ -537,11 +542,37 @@ export const useGeminiStream = (
setPendingHistoryItem({ type: 'gemini_thought', text: '' });
}
// Update the existing thought message with accumulated content
setPendingHistoryItem({
type: 'gemini_thought',
text: newThoughtBuffer,
});
// Split large thought messages for better rendering performance (same rationale
// as regular content streaming). This helps avoid terminal flicker caused by
// constantly re-rendering an ever-growing "pending" block.
const splitPoint = findLastSafeSplitPoint(newThoughtBuffer);
const nextPendingType: 'gemini_thought' | 'gemini_thought_content' =
isPendingThought && pendingType === 'gemini_thought_content'
? 'gemini_thought_content'
: 'gemini_thought';
if (splitPoint === newThoughtBuffer.length) {
// Update the existing thought message with accumulated content
setPendingHistoryItem({
type: nextPendingType,
text: newThoughtBuffer,
});
} else {
const beforeText = newThoughtBuffer.substring(0, splitPoint);
const afterText = newThoughtBuffer.substring(splitPoint);
addItem(
{
type: nextPendingType,
text: beforeText,
},
userMessageTimestamp,
);
setPendingHistoryItem({
type: 'gemini_thought_content',
text: afterText,
});
newThoughtBuffer = afterText;
}
// Also update the thought state for the loading indicator
mergeThought(eventValue);
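The split keeps the pending block bounded: once the buffer grows past a safe boundary, everything before the boundary is committed to history and only the tail keeps re-rendering. A toy stand-in for the boundary search (the real `findLastSafeSplitPoint` is markdown-aware, e.g. it avoids splitting inside code fences):

```ts
// Toy stand-in: split at the last paragraph break, else keep the whole buffer.
function lastParagraphSplitPoint(buffer: string): number {
  const idx = buffer.lastIndexOf('\n\n');
  return idx === -1 ? buffer.length : idx + 2;
}
```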

View File

@@ -8,19 +8,22 @@ import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
import { renderHook, act } from '@testing-library/react';
import { useLoadingIndicator } from './useLoadingIndicator.js';
import { StreamingState } from '../types.js';
import {
WITTY_LOADING_PHRASES,
PHRASE_CHANGE_INTERVAL_MS,
} from './usePhraseCycler.js';
import { PHRASE_CHANGE_INTERVAL_MS } from './usePhraseCycler.js';
import * as i18n from '../../i18n/index.js';
const MOCK_WITTY_PHRASES = ['Phrase 1', 'Phrase 2', 'Phrase 3'];
describe('useLoadingIndicator', () => {
beforeEach(() => {
vi.useFakeTimers();
vi.spyOn(i18n, 'ta').mockReturnValue(MOCK_WITTY_PHRASES);
vi.spyOn(i18n, 't').mockImplementation((key) => key);
});
afterEach(() => {
act(() => vi.runOnlyPendingTimers()); // Flush pending timers before restoring
vi.useRealTimers(); // Restore real timers after each test
vi.restoreAllMocks();
});
it('should initialize with default values when Idle', () => {
@@ -28,9 +31,7 @@ describe('useLoadingIndicator', () => {
useLoadingIndicator(StreamingState.Idle),
);
expect(result.current.elapsedTime).toBe(0);
expect(WITTY_LOADING_PHRASES).toContain(
result.current.currentLoadingPhrase,
);
expect(MOCK_WITTY_PHRASES).toContain(result.current.currentLoadingPhrase);
});
it('should reflect values when Responding', async () => {
@@ -40,18 +41,14 @@ describe('useLoadingIndicator', () => {
// Initial state before timers advance
expect(result.current.elapsedTime).toBe(0);
expect(WITTY_LOADING_PHRASES).toContain(
result.current.currentLoadingPhrase,
);
expect(MOCK_WITTY_PHRASES).toContain(result.current.currentLoadingPhrase);
await act(async () => {
await vi.advanceTimersByTimeAsync(PHRASE_CHANGE_INTERVAL_MS + 1);
});
// Phrase should cycle if PHRASE_CHANGE_INTERVAL_MS has passed
expect(WITTY_LOADING_PHRASES).toContain(
result.current.currentLoadingPhrase,
);
expect(MOCK_WITTY_PHRASES).toContain(result.current.currentLoadingPhrase);
});
it('should show waiting phrase and retain elapsedTime when WaitingForConfirmation', async () => {
@@ -104,9 +101,7 @@ describe('useLoadingIndicator', () => {
rerender({ streamingState: StreamingState.Responding });
});
expect(result.current.elapsedTime).toBe(0); // Should reset
expect(WITTY_LOADING_PHRASES).toContain(
result.current.currentLoadingPhrase,
);
expect(MOCK_WITTY_PHRASES).toContain(result.current.currentLoadingPhrase);
await act(async () => {
await vi.advanceTimersByTimeAsync(1000);
@@ -130,9 +125,7 @@ describe('useLoadingIndicator', () => {
});
expect(result.current.elapsedTime).toBe(0);
expect(WITTY_LOADING_PHRASES).toContain(
result.current.currentLoadingPhrase,
);
expect(MOCK_WITTY_PHRASES).toContain(result.current.currentLoadingPhrase);
// Timer should not advance
await act(async () => {

View File

@@ -8,13 +8,17 @@ import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
import { renderHook, act } from '@testing-library/react';
import {
usePhraseCycler,
WITTY_LOADING_PHRASES,
PHRASE_CHANGE_INTERVAL_MS,
} from './usePhraseCycler.js';
import * as i18n from '../../i18n/index.js';
const MOCK_WITTY_PHRASES = ['Phrase 1', 'Phrase 2', 'Phrase 3'];
describe('usePhraseCycler', () => {
beforeEach(() => {
vi.useFakeTimers();
vi.spyOn(i18n, 'ta').mockReturnValue(MOCK_WITTY_PHRASES);
vi.spyOn(i18n, 't').mockImplementation((key) => key);
});
afterEach(() => {
@@ -23,7 +27,7 @@ describe('usePhraseCycler', () => {
it('should initialize with a witty phrase when not active and not waiting', () => {
const { result } = renderHook(() => usePhraseCycler(false, false));
expect(WITTY_LOADING_PHRASES).toContain(result.current);
expect(MOCK_WITTY_PHRASES).toContain(result.current);
});
it('should show "Waiting for user confirmation..." when isWaiting is true', () => {
@@ -47,35 +51,30 @@ describe('usePhraseCycler', () => {
it('should cycle through witty phrases when isActive is true and not waiting', () => {
const { result } = renderHook(() => usePhraseCycler(true, false));
// Initial phrase should be one of the witty phrases
expect(WITTY_LOADING_PHRASES).toContain(result.current);
expect(MOCK_WITTY_PHRASES).toContain(result.current);
const _initialPhrase = result.current;
act(() => {
vi.advanceTimersByTime(PHRASE_CHANGE_INTERVAL_MS);
});
// Phrase should change and be one of the witty phrases
expect(WITTY_LOADING_PHRASES).toContain(result.current);
expect(MOCK_WITTY_PHRASES).toContain(result.current);
const _secondPhrase = result.current;
act(() => {
vi.advanceTimersByTime(PHRASE_CHANGE_INTERVAL_MS);
});
expect(WITTY_LOADING_PHRASES).toContain(result.current);
expect(MOCK_WITTY_PHRASES).toContain(result.current);
});
it('should reset to a witty phrase when isActive becomes true after being false (and not waiting)', () => {
// Ensure there are at least two phrases for this test to be meaningful.
if (WITTY_LOADING_PHRASES.length < 2) {
return;
}
// Mock Math.random to make the test deterministic.
let callCount = 0;
vi.spyOn(Math, 'random').mockImplementation(() => {
// Cycle through 0, 1, 0, 1, ...
const val = callCount % 2;
callCount++;
return val / WITTY_LOADING_PHRASES.length;
return val / MOCK_WITTY_PHRASES.length;
});
const { result, rerender } = renderHook(
@@ -86,9 +85,9 @@ describe('usePhraseCycler', () => {
// Activate
rerender({ isActive: true, isWaiting: false });
const firstActivePhrase = result.current;
expect(WITTY_LOADING_PHRASES).toContain(firstActivePhrase);
expect(MOCK_WITTY_PHRASES).toContain(firstActivePhrase);
// With our mock, this should be the first phrase.
expect(firstActivePhrase).toBe(WITTY_LOADING_PHRASES[0]);
expect(firstActivePhrase).toBe(MOCK_WITTY_PHRASES[0]);
act(() => {
vi.advanceTimersByTime(PHRASE_CHANGE_INTERVAL_MS);
@@ -96,18 +95,18 @@ describe('usePhraseCycler', () => {
// Phrase should change to the second phrase.
expect(result.current).not.toBe(firstActivePhrase);
expect(result.current).toBe(WITTY_LOADING_PHRASES[1]);
expect(result.current).toBe(MOCK_WITTY_PHRASES[1]);
// Set to inactive - should reset to the default initial phrase
rerender({ isActive: false, isWaiting: false });
expect(WITTY_LOADING_PHRASES).toContain(result.current);
expect(MOCK_WITTY_PHRASES).toContain(result.current);
// Set back to active - should pick a random witty phrase (which our mock controls)
act(() => {
rerender({ isActive: true, isWaiting: false });
});
// The random mock will now return 0, so it should be the first phrase again.
expect(result.current).toBe(WITTY_LOADING_PHRASES[0]);
expect(result.current).toBe(MOCK_WITTY_PHRASES[0]);
});
it('should clear phrase interval on unmount when active', () => {
@@ -148,7 +147,7 @@ describe('usePhraseCycler', () => {
rerender({ isActive: true, isWaiting: false, customPhrases: undefined });
expect(WITTY_LOADING_PHRASES).toContain(result.current);
expect(MOCK_WITTY_PHRASES).toContain(result.current);
});
it('should fall back to witty phrases if custom phrases are an empty array', () => {
@@ -164,7 +163,7 @@ describe('usePhraseCycler', () => {
},
);
expect(WITTY_LOADING_PHRASES).toContain(result.current);
expect(MOCK_WITTY_PHRASES).toContain(result.current);
});
it('should reset to a witty phrase when transitioning from waiting to active', () => {
@@ -174,16 +173,13 @@ describe('usePhraseCycler', () => {
);
const _initialPhrase = result.current;
expect(WITTY_LOADING_PHRASES).toContain(_initialPhrase);
expect(MOCK_WITTY_PHRASES).toContain(_initialPhrase);
// Cycle to a different phrase (potentially)
act(() => {
vi.advanceTimersByTime(PHRASE_CHANGE_INTERVAL_MS);
});
if (WITTY_LOADING_PHRASES.length > 1) {
// This check is probabilistic with random selection
}
expect(WITTY_LOADING_PHRASES).toContain(result.current);
expect(MOCK_WITTY_PHRASES).toContain(result.current);
// Go to waiting state
rerender({ isActive: false, isWaiting: true });
@@ -191,6 +187,6 @@ describe('usePhraseCycler', () => {
// Go back to active cycling - should pick a random witty phrase
rerender({ isActive: true, isWaiting: false });
expect(WITTY_LOADING_PHRASES).toContain(result.current);
expect(MOCK_WITTY_PHRASES).toContain(result.current);
});
});

View File

@@ -5,139 +5,9 @@
*/
import { useState, useEffect, useRef, useMemo } from 'react';
import { t } from '../../i18n/index.js';
import { t, ta } from '../../i18n/index.js';
export const WITTY_LOADING_PHRASES = [
"I'm Feeling Lucky",
'Shipping awesomeness... ',
'Painting the serifs back on...',
'Navigating the slime mold...',
'Consulting the digital spirits...',
'Reticulating splines...',
'Warming up the AI hamsters...',
'Asking the magic conch shell...',
'Generating witty retort...',
'Polishing the algorithms...',
"Don't rush perfection (or my code)...",
'Brewing fresh bytes...',
'Counting electrons...',
'Engaging cognitive processors...',
'Checking for syntax errors in the universe...',
'One moment, optimizing humor...',
'Shuffling punchlines...',
'Untangling neural nets...',
'Compiling brilliance...',
'Loading wit.exe...',
'Summoning the cloud of wisdom...',
'Preparing a witty response...',
"Just a sec, I'm debugging reality...",
'Confuzzling the options...',
'Tuning the cosmic frequencies...',
'Crafting a response worthy of your patience...',
'Compiling the 1s and 0s...',
'Resolving dependencies... and existential crises...',
'Defragmenting memories... both RAM and personal...',
'Rebooting the humor module...',
'Caching the essentials (mostly cat memes)...',
'Optimizing for ludicrous speed',
"Swapping bits... don't tell the bytes...",
'Garbage collecting... be right back...',
'Assembling the interwebs...',
'Converting coffee into code...',
'Updating the syntax for reality...',
'Rewiring the synapses...',
'Looking for a misplaced semicolon...',
"Greasin' the cogs of the machine...",
'Pre-heating the servers...',
'Calibrating the flux capacitor...',
'Engaging the improbability drive...',
'Channeling the Force...',
'Aligning the stars for optimal response...',
'So say we all...',
'Loading the next great idea...',
"Just a moment, I'm in the zone...",
'Preparing to dazzle you with brilliance...',
"Just a tick, I'm polishing my wit...",
"Hold tight, I'm crafting a masterpiece...",
"Just a jiffy, I'm debugging the universe...",
"Just a moment, I'm aligning the pixels...",
"Just a sec, I'm optimizing the humor...",
"Just a moment, I'm tuning the algorithms...",
'Warp speed engaged...',
'Mining for more Dilithium crystals...',
"Don't panic...",
'Following the white rabbit...',
'The truth is in here... somewhere...',
'Blowing on the cartridge...',
'Loading... Do a barrel roll!',
'Waiting for the respawn...',
'Finishing the Kessel Run in less than 12 parsecs...',
"The cake is not a lie, it's just still loading...",
'Fiddling with the character creation screen...',
"Just a moment, I'm finding the right meme...",
"Pressing 'A' to continue...",
'Herding digital cats...',
'Polishing the pixels...',
'Finding a suitable loading screen pun...',
'Distracting you with this witty phrase...',
'Almost there... probably...',
'Our hamsters are working as fast as they can...',
'Giving Cloudy a pat on the head...',
'Petting the cat...',
'Rickrolling my boss...',
'Never gonna give you up, never gonna let you down...',
'Slapping the bass...',
'Tasting the snozberries...',
"I'm going the distance, I'm going for speed...",
'Is this the real life? Is this just fantasy?...',
"I've got a good feeling about this...",
'Poking the bear...',
'Doing research on the latest memes...',
'Figuring out how to make this more witty...',
'Hmmm... let me think...',
'What do you call a fish with no eyes? A fsh...',
'Why did the computer go to therapy? It had too many bytes...',
"Why don't programmers like nature? It has too many bugs...",
'Why do programmers prefer dark mode? Because light attracts bugs...',
'Why did the developer go broke? Because they used up all their cache...',
"What can you do with a broken pencil? Nothing, it's pointless...",
'Applying percussive maintenance...',
'Searching for the correct USB orientation...',
'Ensuring the magic smoke stays inside the wires...',
'Rewriting in Rust for no particular reason...',
'Trying to exit Vim...',
'Spinning up the hamster wheel...',
"That's not a bug, it's an undocumented feature...",
'Engage.',
"I'll be back... with an answer.",
'My other process is a TARDIS...',
'Communing with the machine spirit...',
'Letting the thoughts marinate...',
'Just remembered where I put my keys...',
'Pondering the orb...',
"I've seen things you people wouldn't believe... like a user who reads loading messages.",
'Initiating thoughtful gaze...',
"What's a computer's favorite snack? Microchips.",
"Why do Java developers wear glasses? Because they don't C#.",
'Charging the laser... pew pew!',
'Dividing by zero... just kidding!',
'Looking for an adult superviso... I mean, processing.',
'Making it go beep boop.',
'Buffering... because even AIs need a moment.',
'Entangling quantum particles for a faster response...',
'Polishing the chrome... on the algorithms.',
'Are you not entertained? (Working on it!)',
'Summoning the code gremlins... to help, of course.',
'Just waiting for the dial-up tone to finish...',
'Recalibrating the humor-o-meter.',
'My other loading screen is even funnier.',
"Pretty sure there's a cat walking on the keyboard somewhere...",
'Enhancing... Enhancing... Still loading.',
"It's not a bug, it's a feature... of this loading screen.",
'Have you tried turning it off and on again? (The loading screen, not me.)',
'Constructing additional pylons...',
"New line? That's Ctrl+J.",
];
export const WITTY_LOADING_PHRASES: string[] = ["I'm Feeling Lucky"];
export const PHRASE_CHANGE_INTERVAL_MS = 15000;
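The hook now sources phrases from the translation layer via `ta` and falls back to the single remaining default. The `ta` signature isn't shown in this diff; the usage below implies roughly this contract (an assumption):

```ts
// Assumed contract implied by the usage below: ta returns the translated
// string array for a key, or an empty array when no translation exists.
declare function ta(key: string): string[];

function resolvePhrases(custom?: string[]): string[] {
  if (custom && custom.length > 0) return custom;
  const translated = ta('WITTY_LOADING_PHRASES');
  return translated.length > 0 ? translated : WITTY_LOADING_PHRASES;
}
```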
@@ -152,14 +22,16 @@ export const usePhraseCycler = (
isWaiting: boolean,
customPhrases?: string[],
) => {
// Translate all phrases at once if using default phrases
const loadingPhrases = useMemo(
() =>
customPhrases && customPhrases.length > 0
? customPhrases
: WITTY_LOADING_PHRASES.map((phrase) => t(phrase)),
[customPhrases],
);
// Get phrases from translations if available
const loadingPhrases = useMemo(() => {
if (customPhrases && customPhrases.length > 0) {
return customPhrases;
}
const translatedPhrases = ta('WITTY_LOADING_PHRASES');
return translatedPhrases.length > 0
? translatedPhrases
: WITTY_LOADING_PHRASES;
}, [customPhrases]);
const [currentLoadingPhrase, setCurrentLoadingPhrase] = useState(
loadingPhrases[0],

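Read as a whole, the new hook body resolves phrases in a fixed fallback order. A minimal sketch of that order, assuming ta() returns a possibly empty string[] of translated phrases:

const translated = ta('WITTY_LOADING_PHRASES');
const phrases =
  customPhrases && customPhrases.length > 0
    ? customPhrases // explicit custom phrases always win
    : translated.length > 0
      ? translated // translated defaults, when available
      : WITTY_LOADING_PHRASES; // English fallback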
View File

@@ -60,6 +60,11 @@ export function getOpenAIAvailableModelFromEnv(): AvailableModel | null {
return id ? { id, label: id } : null;
}
export function getAnthropicAvailableModelFromEnv(): AvailableModel | null {
const id = process.env['ANTHROPIC_MODEL']?.trim();
return id ? { id, label: id } : null;
}
export function getAvailableModelsForAuthType(
authType: AuthType,
): AvailableModel[] {
@@ -70,6 +75,10 @@ export function getAvailableModelsForAuthType(
const openAIModel = getOpenAIAvailableModelFromEnv();
return openAIModel ? [openAIModel] : [];
}
case AuthType.USE_ANTHROPIC: {
const anthropicModel = getAnthropicAvailableModelFromEnv();
return anthropicModel ? [anthropicModel] : [];
}
default:
// For other auth types, return empty array for now
// This can be expanded later according to the design doc

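A sketch of the new Anthropic branch in use, with a hypothetical model id (the AvailableModel shape mirrors the OpenAI path above):

process.env['ANTHROPIC_MODEL'] = 'claude-sonnet-4'; // placeholder value
getAvailableModelsForAuthType(AuthType.USE_ANTHROPIC);
// => [{ id: 'claude-sonnet-4', label: 'claude-sonnet-4' }]
// With ANTHROPIC_MODEL unset (or blank after trim), the result is [].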
View File

@@ -20,6 +20,11 @@ const makeConfig = (tools: Record<string, AnyDeclarativeTool>) =>
getToolRegistry: () => ({
getTool: (name: string) => tools[name],
}),
getContentGenerator: () => ({
// Default to showing full thinking content during resume unless explicitly
// summarized; tests don't care about summarized thinking behavior.
useSummarizedThinking: () => false,
}),
}) as unknown as Config;
describe('resumeHistoryUtils', () => {

View File

@@ -204,7 +204,11 @@ function convertToHistoryItems(
const parts = record.message?.parts as Part[] | undefined;
// Extract thought content
const thoughtText = extractThoughtTextFromParts(parts);
const thoughtText = !config
.getContentGenerator()
.useSummarizedThinking()
? extractThoughtTextFromParts(parts)
: '';
// Extract text content (non-function-call, non-thought)
const text = extractTextFromParts(parts);

View File

@@ -35,22 +35,33 @@ import {
} from './nonInteractiveHelpers.js';
// Mock dependencies
vi.mock('../services/CommandService.js', () => ({
CommandService: {
create: vi.fn().mockResolvedValue({
getCommands: vi
.fn()
.mockReturnValue([
{ name: 'help' },
{ name: 'commit' },
{ name: 'memory' },
]),
}),
},
}));
vi.mock('../nonInteractiveCliCommands.js', () => ({
getAvailableCommands: vi
.fn()
.mockImplementation(
async (
_config: unknown,
_signal: AbortSignal,
allowedBuiltinCommandNames?: string[],
) => {
const allowedSet = new Set(allowedBuiltinCommandNames ?? []);
const allCommands = [
{ name: 'help', kind: 'built-in' },
{ name: 'commit', kind: 'file' },
{ name: 'memory', kind: 'built-in' },
{ name: 'init', kind: 'built-in' },
{ name: 'summary', kind: 'built-in' },
{ name: 'compress', kind: 'built-in' },
];
vi.mock('../services/BuiltinCommandLoader.js', () => ({
BuiltinCommandLoader: vi.fn().mockImplementation(() => ({})),
// Filter commands: always include file commands, only include allowed built-in commands
return allCommands.filter(
(cmd) =>
cmd.kind === 'file' ||
(cmd.kind === 'built-in' && allowedSet.has(cmd.name)),
);
},
),
}));
vi.mock('../ui/utils/computeStats.js', () => ({
@@ -511,10 +522,12 @@ describe('buildSystemMessage', () => {
});
it('should build system message with all fields', async () => {
const allowedBuiltinCommands = ['init', 'summary', 'compress'];
const result = await buildSystemMessage(
mockConfig,
'test-session-id',
'auto' as PermissionMode,
allowedBuiltinCommands,
);
expect(result).toEqual({
@@ -530,7 +543,7 @@ describe('buildSystemMessage', () => {
],
model: 'test-model',
permission_mode: 'auto',
slash_commands: ['commit', 'help', 'memory'],
slash_commands: ['commit', 'compress', 'init', 'summary'],
qwen_code_version: '1.0.0',
agents: [],
});
@@ -546,6 +559,7 @@ describe('buildSystemMessage', () => {
config,
'test-session-id',
'auto' as PermissionMode,
['init', 'summary'],
);
expect(result.tools).toEqual([]);
@@ -561,6 +575,7 @@ describe('buildSystemMessage', () => {
config,
'test-session-id',
'auto' as PermissionMode,
['init', 'summary'],
);
expect(result.mcp_servers).toEqual([]);
@@ -576,10 +591,37 @@ describe('buildSystemMessage', () => {
config,
'test-session-id',
'auto' as PermissionMode,
['init', 'summary'],
);
expect(result.qwen_code_version).toBe('unknown');
});
it('should only include allowed built-in commands and all file commands', async () => {
const allowedBuiltinCommands = ['init', 'summary'];
const result = await buildSystemMessage(
mockConfig,
'test-session-id',
'auto' as PermissionMode,
allowedBuiltinCommands,
);
// Should include: 'commit' (FILE), 'init' (BUILT_IN, allowed), 'summary' (BUILT_IN, allowed)
// Should NOT include: 'help', 'memory', 'compress' (BUILT_IN but not in allowed set)
expect(result.slash_commands).toEqual(['commit', 'init', 'summary']);
});
it('should include only file commands when no built-in commands are allowed', async () => {
const result = await buildSystemMessage(
mockConfig,
'test-session-id',
'auto' as PermissionMode,
[], // Empty array - no built-in commands allowed
);
// Should only include 'commit' (FILE command)
expect(result.slash_commands).toEqual(['commit']);
});
});
describe('createTaskToolProgressHandler', () => {

View File

@@ -25,10 +25,9 @@ import type {
PermissionMode,
CLISystemMessage,
} from '../nonInteractive/types.js';
import { CommandService } from '../services/CommandService.js';
import { BuiltinCommandLoader } from '../services/BuiltinCommandLoader.js';
import type { JsonOutputAdapterInterface } from '../nonInteractive/io/BaseJsonOutputAdapter.js';
import { computeSessionStats } from '../ui/utils/computeStats.js';
import { getAvailableCommands } from '../nonInteractiveCliCommands.js';
/**
* Normalizes various part list formats into a consistent Part[] array.
@@ -187,24 +186,27 @@ export function computeUsageFromMetrics(metrics: SessionMetrics): Usage {
}
/**
* Load slash command names using CommandService
* Load slash command names using getAvailableCommands
*
* @param config - Config instance
* @param allowedBuiltinCommandNames - Optional array of allowed built-in command names.
* If not provided, uses the default from getAvailableCommands.
* @returns Promise resolving to array of slash command names
*/
async function loadSlashCommandNames(config: Config): Promise<string[]> {
async function loadSlashCommandNames(
config: Config,
allowedBuiltinCommandNames?: string[],
): Promise<string[]> {
const controller = new AbortController();
try {
const service = await CommandService.create(
[new BuiltinCommandLoader(config)],
const commands = await getAvailableCommands(
config,
controller.signal,
allowedBuiltinCommandNames,
);
const names = new Set<string>();
const commands = service.getCommands();
for (const command of commands) {
names.add(command.name);
}
return Array.from(names).sort();
// Extract command names and sort
return commands.map((cmd) => cmd.name).sort();
} catch (error) {
if (config.getDebugMode()) {
console.error(
@@ -233,12 +235,15 @@ async function loadSlashCommandNames(config: Config): Promise<string[]> {
* @param config - Config instance
* @param sessionId - Session identifier
* @param permissionMode - Current permission/approval mode
* @param allowedBuiltinCommandNames - Optional array of allowed built-in command names.
* If not provided, defaults to empty array (only file commands will be included).
* @returns Promise resolving to CLISystemMessage
*/
export async function buildSystemMessage(
config: Config,
sessionId: string,
permissionMode: PermissionMode,
allowedBuiltinCommandNames?: string[],
): Promise<CLISystemMessage> {
const toolRegistry = config.getToolRegistry();
const tools = toolRegistry ? toolRegistry.getAllToolNames() : [];
@@ -251,8 +256,11 @@ export async function buildSystemMessage(
}))
: [];
// Load slash commands
const slashCommands = await loadSlashCommandNames(config);
// Load slash commands with filtering based on allowed built-in commands
const slashCommands = await loadSlashCommandNames(
config,
allowedBuiltinCommandNames,
);
// Load subagent names from config
let agentNames: string[] = [];

View File

@@ -153,7 +153,8 @@ export async function getExtendedSystemInfo(
// Get base URL if using OpenAI auth
const baseUrl =
baseInfo.selectedAuthType === AuthType.USE_OPENAI
baseInfo.selectedAuthType === AuthType.USE_OPENAI ||
baseInfo.selectedAuthType === AuthType.USE_ANTHROPIC
? context.services.config?.getContentGeneratorConfig()?.baseUrl
: undefined;

View File

@@ -19,6 +19,9 @@ describe('validateNonInterActiveAuth', () => {
let originalEnvVertexAi: string | undefined;
let originalEnvGcp: string | undefined;
let originalEnvOpenAiApiKey: string | undefined;
let originalEnvQwenOauth: string | undefined;
let originalEnvGoogleApiKey: string | undefined;
let originalEnvAnthropicApiKey: string | undefined;
let consoleErrorSpy: ReturnType<typeof vi.spyOn>;
let processExitSpy: ReturnType<typeof vi.spyOn<[code?: number], never>>;
let refreshAuthMock: ReturnType<typeof vi.fn>;
@@ -29,10 +32,16 @@ describe('validateNonInterActiveAuth', () => {
originalEnvVertexAi = process.env['GOOGLE_GENAI_USE_VERTEXAI'];
originalEnvGcp = process.env['GOOGLE_GENAI_USE_GCA'];
originalEnvOpenAiApiKey = process.env['OPENAI_API_KEY'];
originalEnvQwenOauth = process.env['QWEN_OAUTH'];
originalEnvGoogleApiKey = process.env['GOOGLE_API_KEY'];
originalEnvAnthropicApiKey = process.env['ANTHROPIC_API_KEY'];
delete process.env['GEMINI_API_KEY'];
delete process.env['GOOGLE_GENAI_USE_VERTEXAI'];
delete process.env['GOOGLE_GENAI_USE_GCA'];
delete process.env['OPENAI_API_KEY'];
delete process.env['QWEN_OAUTH'];
delete process.env['GOOGLE_API_KEY'];
delete process.env['ANTHROPIC_API_KEY'];
consoleErrorSpy = vi.spyOn(console, 'error').mockImplementation(() => {});
processExitSpy = vi.spyOn(process, 'exit').mockImplementation((code) => {
throw new Error(`process.exit(${code}) called`);
@@ -80,6 +89,21 @@ describe('validateNonInterActiveAuth', () => {
} else {
delete process.env['OPENAI_API_KEY'];
}
if (originalEnvQwenOauth !== undefined) {
process.env['QWEN_OAUTH'] = originalEnvQwenOauth;
} else {
delete process.env['QWEN_OAUTH'];
}
if (originalEnvGoogleApiKey !== undefined) {
process.env['GOOGLE_API_KEY'] = originalEnvGoogleApiKey;
} else {
delete process.env['GOOGLE_API_KEY'];
}
if (originalEnvAnthropicApiKey !== undefined) {
process.env['ANTHROPIC_API_KEY'] = originalEnvAnthropicApiKey;
} else {
delete process.env['ANTHROPIC_API_KEY'];
}
vi.restoreAllMocks();
});

View File

@@ -27,6 +27,9 @@ function getAuthTypeFromEnv(): AuthType | undefined {
if (process.env['GOOGLE_API_KEY']) {
return AuthType.USE_VERTEX_AI;
}
if (process.env['ANTHROPIC_API_KEY']) {
return AuthType.USE_ANTHROPIC;
}
return undefined;
}
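The checks run top to bottom, so the first matching variable decides the auth type. A sketch with placeholder key values:

// Only ANTHROPIC_API_KEY set:
process.env['ANTHROPIC_API_KEY'] = 'sk-ant-placeholder';
getAuthTypeFromEnv(); // AuthType.USE_ANTHROPIC
// GOOGLE_API_KEY is checked earlier, so it wins if both are set:
process.env['GOOGLE_API_KEY'] = 'google-key-placeholder';
getAuthTypeFromEnv(); // AuthType.USE_VERTEX_AI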

View File

@@ -1,6 +1,6 @@
{
"name": "@qwen-code/qwen-code-core",
"version": "0.6.0-nightly.20251225.9f65bd3b",
"version": "0.6.0-nightly.20251228.2bc80795",
"description": "Qwen Code Core",
"repository": {
"type": "git",
@@ -23,6 +23,7 @@
"scripts/postinstall.js"
],
"dependencies": {
"@anthropic-ai/sdk": "^0.36.1",
"@google/genai": "1.30.0",
"@modelcontextprotocol/sdk": "^1.25.1",
"@opentelemetry/api": "^1.9.0",

View File

@@ -16,7 +16,6 @@ import {
QwenLogger,
} from '../telemetry/index.js';
import type { ContentGeneratorConfig } from '../core/contentGenerator.js';
import { DEFAULT_DASHSCOPE_BASE_URL } from '../core/openaiContentGenerator/constants.js';
import {
AuthType,
createContentGeneratorConfig,
@@ -273,7 +272,7 @@ describe('Server Config (config.ts)', () => {
authType,
{
model: MODEL,
baseUrl: DEFAULT_DASHSCOPE_BASE_URL,
baseUrl: undefined,
},
);
// Verify that contentGeneratorConfig is updated

View File

@@ -96,7 +96,6 @@ import {
} from './constants.js';
import { DEFAULT_QWEN_EMBEDDING_MODEL, DEFAULT_QWEN_MODEL } from './models.js';
import { Storage } from './storage.js';
import { DEFAULT_DASHSCOPE_BASE_URL } from '../core/openaiContentGenerator/constants.js';
import { ChatRecordingService } from '../services/chatRecordingService.js';
import {
SessionService,
@@ -574,7 +573,7 @@ export class Config {
this._generationConfig = {
model: params.model,
...(params.generationConfig || {}),
baseUrl: params.generationConfig?.baseUrl || DEFAULT_DASHSCOPE_BASE_URL,
baseUrl: params.generationConfig?.baseUrl,
};
this.contentGeneratorConfig = this
._generationConfig as ContentGeneratorConfig;

View File

@@ -0,0 +1,500 @@
/**
* @license
* Copyright 2025 Qwen
* SPDX-License-Identifier: Apache-2.0
*/
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
import type {
CountTokensParameters,
GenerateContentParameters,
} from '@google/genai';
import { FinishReason, GenerateContentResponse } from '@google/genai';
// Mock the request tokenizer module BEFORE importing the class that uses it.
const mockTokenizer = {
calculateTokens: vi.fn(),
dispose: vi.fn(),
};
vi.mock('../../utils/request-tokenizer/index.js', () => ({
getDefaultTokenizer: vi.fn(() => mockTokenizer),
DefaultRequestTokenizer: vi.fn(() => mockTokenizer),
disposeDefaultTokenizer: vi.fn(),
}));
type AnthropicCreateArgs = [unknown, { signal?: AbortSignal }?];
const anthropicMockState: {
constructorOptions?: Record<string, unknown>;
lastCreateArgs?: AnthropicCreateArgs;
createImpl: ReturnType<typeof vi.fn>;
} = {
constructorOptions: undefined,
lastCreateArgs: undefined,
createImpl: vi.fn(),
};
vi.mock('@anthropic-ai/sdk', () => {
class AnthropicMock {
messages: { create: (...args: AnthropicCreateArgs) => unknown };
constructor(options: Record<string, unknown>) {
anthropicMockState.constructorOptions = options;
this.messages = {
create: (...args: AnthropicCreateArgs) => {
anthropicMockState.lastCreateArgs = args;
return anthropicMockState.createImpl(...args);
},
};
}
}
return {
default: AnthropicMock,
__anthropicState: anthropicMockState,
};
});
// Now import the modules that depend on the mocked modules.
import type { Config } from '../../config/config.js';
const importGenerator = async (): Promise<{
AnthropicContentGenerator: typeof import('./anthropicContentGenerator.js').AnthropicContentGenerator;
}> => import('./anthropicContentGenerator.js');
const importConverter = async (): Promise<{
AnthropicContentConverter: typeof import('./converter.js').AnthropicContentConverter;
}> => import('./converter.js');
describe('AnthropicContentGenerator', () => {
let mockConfig: Config;
let anthropicState: {
constructorOptions?: Record<string, unknown>;
lastCreateArgs?: AnthropicCreateArgs;
createImpl: ReturnType<typeof vi.fn>;
};
beforeEach(async () => {
vi.clearAllMocks();
vi.resetModules();
mockTokenizer.calculateTokens.mockResolvedValue({
totalTokens: 50,
breakdown: {
textTokens: 50,
imageTokens: 0,
audioTokens: 0,
otherTokens: 0,
},
processingTime: 1,
});
anthropicState = anthropicMockState;
anthropicState.createImpl.mockReset();
anthropicState.lastCreateArgs = undefined;
anthropicState.constructorOptions = undefined;
mockConfig = {
getCliVersion: vi.fn().mockReturnValue('1.2.3'),
} as unknown as Config;
});
afterEach(() => {
vi.restoreAllMocks();
});
it('passes a QwenCode User-Agent header to the Anthropic SDK', async () => {
const { AnthropicContentGenerator } = await importGenerator();
void new AnthropicContentGenerator(
{
model: 'claude-test',
apiKey: 'test-key',
baseUrl: 'https://example.invalid',
timeout: 10_000,
maxRetries: 2,
samplingParams: {},
schemaCompliance: 'auto',
},
mockConfig,
);
const headers = (anthropicState.constructorOptions?.['defaultHeaders'] ||
{}) as Record<string, string>;
expect(headers['User-Agent']).toContain('QwenCode/1.2.3');
expect(headers['User-Agent']).toContain(
`(${process.platform}; ${process.arch})`,
);
});
it('adds the effort beta header when reasoning.effort is set', async () => {
const { AnthropicContentGenerator } = await importGenerator();
void new AnthropicContentGenerator(
{
model: 'claude-test',
apiKey: 'test-key',
baseUrl: 'https://example.invalid',
timeout: 10_000,
maxRetries: 2,
samplingParams: {},
schemaCompliance: 'auto',
reasoning: { effort: 'medium' },
},
mockConfig,
);
const headers = (anthropicState.constructorOptions?.['defaultHeaders'] ||
{}) as Record<string, string>;
expect(headers['anthropic-beta']).toContain('effort-2025-11-24');
});
it('does not add the effort beta header when reasoning.effort is not set', async () => {
const { AnthropicContentGenerator } = await importGenerator();
void new AnthropicContentGenerator(
{
model: 'claude-test',
apiKey: 'test-key',
baseUrl: 'https://example.invalid',
timeout: 10_000,
maxRetries: 2,
samplingParams: {},
schemaCompliance: 'auto',
},
mockConfig,
);
const headers = (anthropicState.constructorOptions?.['defaultHeaders'] ||
{}) as Record<string, string>;
expect(headers['anthropic-beta']).not.toContain('effort-2025-11-24');
});
it('omits the anthropic beta header when reasoning is disabled', async () => {
const { AnthropicContentGenerator } = await importGenerator();
void new AnthropicContentGenerator(
{
model: 'claude-test',
apiKey: 'test-key',
baseUrl: 'https://example.invalid',
timeout: 10_000,
maxRetries: 2,
samplingParams: {},
schemaCompliance: 'auto',
reasoning: false,
},
mockConfig,
);
const headers = (anthropicState.constructorOptions?.['defaultHeaders'] ||
{}) as Record<string, string>;
expect(headers['anthropic-beta']).toBeUndefined();
});
describe('generateContent', () => {
it('builds request with config sampling params (config overrides request) and thinking budget', async () => {
const { AnthropicContentConverter } = await importConverter();
const { AnthropicContentGenerator } = await importGenerator();
const convertResponseSpy = vi
.spyOn(
AnthropicContentConverter.prototype,
'convertAnthropicResponseToGemini',
)
.mockReturnValue(
(() => {
const r = new GenerateContentResponse();
r.responseId = 'gemini-1';
return r;
})(),
);
anthropicState.createImpl.mockResolvedValue({
id: 'anthropic-1',
model: 'claude-test',
content: [{ type: 'text', text: 'hi' }],
});
const generator = new AnthropicContentGenerator(
{
model: 'claude-test',
apiKey: 'test-key',
baseUrl: 'https://example.invalid',
timeout: 10_000,
maxRetries: 2,
samplingParams: {
temperature: 0.7,
max_tokens: 1000,
top_p: 0.9,
top_k: 20,
},
schemaCompliance: 'auto',
reasoning: { effort: 'high', budget_tokens: 1000 },
},
mockConfig,
);
const abortController = new AbortController();
const request: GenerateContentParameters = {
model: 'models/ignored',
contents: 'Hello',
config: {
temperature: 0.1,
maxOutputTokens: 200,
topP: 0.5,
topK: 5,
abortSignal: abortController.signal,
},
};
const result = await generator.generateContent(request);
expect(result.responseId).toBe('gemini-1');
expect(anthropicState.lastCreateArgs).toBeDefined();
const [anthropicRequest, options] =
anthropicState.lastCreateArgs as AnthropicCreateArgs;
expect(options?.signal).toBe(abortController.signal);
expect(anthropicRequest).toEqual(
expect.objectContaining({
model: 'claude-test',
max_tokens: 1000,
temperature: 0.7,
top_p: 0.9,
top_k: 20,
thinking: { type: 'enabled', budget_tokens: 1000 },
output_config: { effort: 'high' },
}),
);
expect(convertResponseSpy).toHaveBeenCalledTimes(1);
});
it('omits thinking when request.config.thinkingConfig.includeThoughts is false', async () => {
const { AnthropicContentGenerator } = await importGenerator();
anthropicState.createImpl.mockResolvedValue({
id: 'anthropic-1',
model: 'claude-test',
content: [{ type: 'text', text: 'hi' }],
});
const generator = new AnthropicContentGenerator(
{
model: 'claude-test',
apiKey: 'test-key',
timeout: 10_000,
maxRetries: 2,
samplingParams: { max_tokens: 500 },
schemaCompliance: 'auto',
reasoning: { effort: 'high' },
},
mockConfig,
);
await generator.generateContent({
model: 'models/ignored',
contents: 'Hello',
config: { thinkingConfig: { includeThoughts: false } },
} as unknown as GenerateContentParameters);
const [anthropicRequest] =
anthropicState.lastCreateArgs as AnthropicCreateArgs;
expect(anthropicRequest).toEqual(
expect.not.objectContaining({ thinking: expect.anything() }),
);
});
});
describe('countTokens', () => {
it('counts tokens using the request tokenizer', async () => {
const { AnthropicContentGenerator } = await importGenerator();
const generator = new AnthropicContentGenerator(
{
model: 'claude-test',
apiKey: 'test-key',
timeout: 10_000,
maxRetries: 2,
samplingParams: {},
schemaCompliance: 'auto',
},
mockConfig,
);
const request: CountTokensParameters = {
contents: [{ role: 'user', parts: [{ text: 'Hello world' }] }],
model: 'claude-test',
};
const result = await generator.countTokens(request);
expect(mockTokenizer.calculateTokens).toHaveBeenCalledWith(request, {
textEncoding: 'cl100k_base',
});
expect(result.totalTokens).toBe(50);
});
it('falls back to character approximation when tokenizer throws', async () => {
const { AnthropicContentGenerator } = await importGenerator();
mockTokenizer.calculateTokens.mockRejectedValueOnce(new Error('boom'));
const generator = new AnthropicContentGenerator(
{
model: 'claude-test',
apiKey: 'test-key',
timeout: 10_000,
maxRetries: 2,
samplingParams: {},
schemaCompliance: 'auto',
},
mockConfig,
);
const request: CountTokensParameters = {
contents: [{ role: 'user', parts: [{ text: 'Hello' }] }],
model: 'claude-test',
};
const content = JSON.stringify(request.contents);
const expected = Math.ceil(content.length / 4);
const result = await generator.countTokens(request);
expect(result.totalTokens).toBe(expected);
});
});
describe('generateContentStream', () => {
it('requests stream=true and converts streamed events into Gemini chunks', async () => {
const { AnthropicContentGenerator } = await importGenerator();
anthropicState.createImpl.mockResolvedValue(
(async function* () {
yield {
type: 'message_start',
message: {
id: 'msg-1',
model: 'claude-test',
usage: { cache_read_input_tokens: 2, input_tokens: 3 },
},
};
yield {
type: 'content_block_start',
index: 0,
content_block: { type: 'text' },
};
yield {
type: 'content_block_delta',
index: 0,
delta: { type: 'text_delta', text: 'Hello' },
};
yield { type: 'content_block_stop', index: 0 };
yield {
type: 'content_block_start',
index: 1,
content_block: { type: 'thinking', signature: '' },
};
yield {
type: 'content_block_delta',
index: 1,
delta: { type: 'thinking_delta', thinking: 'Think' },
};
yield {
type: 'content_block_delta',
index: 1,
delta: { type: 'signature_delta', signature: 'abc' },
};
yield { type: 'content_block_stop', index: 1 };
yield {
type: 'content_block_start',
index: 2,
content_block: {
type: 'tool_use',
id: 't1',
name: 'tool',
input: {},
},
};
yield {
type: 'content_block_delta',
index: 2,
delta: { type: 'input_json_delta', partial_json: '{"x":' },
};
yield {
type: 'content_block_delta',
index: 2,
delta: { type: 'input_json_delta', partial_json: '1}' },
};
yield { type: 'content_block_stop', index: 2 };
yield {
type: 'message_delta',
delta: { stop_reason: 'end_turn' },
usage: {
output_tokens: 5,
input_tokens: 7,
cache_read_input_tokens: 2,
},
};
yield { type: 'message_stop' };
})(),
);
const generator = new AnthropicContentGenerator(
{
model: 'claude-test',
apiKey: 'test-key',
timeout: 10_000,
maxRetries: 2,
samplingParams: { max_tokens: 123 },
schemaCompliance: 'auto',
},
mockConfig,
);
const stream = await generator.generateContentStream({
model: 'models/ignored',
contents: 'Hello',
} as unknown as GenerateContentParameters);
const chunks: GenerateContentResponse[] = [];
for await (const chunk of stream) {
chunks.push(chunk);
}
const [anthropicRequest] =
anthropicState.lastCreateArgs as AnthropicCreateArgs;
expect(anthropicRequest).toEqual(
expect.objectContaining({ stream: true }),
);
// Text chunk.
expect(chunks[0]?.candidates?.[0]?.content?.parts?.[0]).toEqual({
text: 'Hello',
});
// Thinking chunk.
expect(chunks[1]?.candidates?.[0]?.content?.parts?.[0]).toEqual({
text: 'Think',
thought: true,
});
// Signature chunk.
expect(chunks[2]?.candidates?.[0]?.content?.parts?.[0]).toEqual({
thought: true,
thoughtSignature: 'abc',
});
// Tool call chunk.
expect(chunks[3]?.candidates?.[0]?.content?.parts?.[0]).toEqual({
functionCall: { id: 't1', name: 'tool', args: { x: 1 } },
});
// Usage/finish chunks exist; check the last one.
const last = chunks[chunks.length - 1]!;
expect(last.candidates?.[0]?.finishReason).toBe(FinishReason.STOP);
expect(last.usageMetadata).toEqual({
cachedContentTokenCount: 2,
promptTokenCount: 9, // cached(2) + input(7)
candidatesTokenCount: 5,
totalTokenCount: 14,
});
});
});
});

View File

@@ -0,0 +1,502 @@
/**
* @license
* Copyright 2025 Qwen
* SPDX-License-Identifier: Apache-2.0
*/
import Anthropic from '@anthropic-ai/sdk';
import type {
CountTokensParameters,
CountTokensResponse,
EmbedContentParameters,
EmbedContentResponse,
GenerateContentParameters,
GenerateContentResponseUsageMetadata,
Part,
} from '@google/genai';
import { GenerateContentResponse } from '@google/genai';
import type { Config } from '../../config/config.js';
import type {
ContentGenerator,
ContentGeneratorConfig,
} from '../contentGenerator.js';
type Message = Anthropic.Message;
type MessageCreateParamsNonStreaming =
Anthropic.MessageCreateParamsNonStreaming;
type MessageCreateParamsStreaming = Anthropic.MessageCreateParamsStreaming;
type RawMessageStreamEvent = Anthropic.RawMessageStreamEvent;
import { getDefaultTokenizer } from '../../utils/request-tokenizer/index.js';
import { safeJsonParse } from '../../utils/safeJsonParse.js';
import { AnthropicContentConverter } from './converter.js';
type StreamingBlockState = {
type: string;
id?: string;
name?: string;
inputJson: string;
signature: string;
};
type MessageCreateParamsWithThinking = MessageCreateParamsNonStreaming & {
thinking?: { type: 'enabled'; budget_tokens: number };
// Anthropic beta feature: output_config.effort (requires beta header effort-2025-11-24)
// This is not yet represented in the official SDK types we depend on.
output_config?: { effort: 'low' | 'medium' | 'high' };
};
export class AnthropicContentGenerator implements ContentGenerator {
private client: Anthropic;
private converter: AnthropicContentConverter;
constructor(
private contentGeneratorConfig: ContentGeneratorConfig,
private readonly cliConfig: Config,
) {
const defaultHeaders = this.buildHeaders();
const baseURL = contentGeneratorConfig.baseUrl;
this.client = new Anthropic({
apiKey: contentGeneratorConfig.apiKey,
baseURL,
timeout: contentGeneratorConfig.timeout,
maxRetries: contentGeneratorConfig.maxRetries,
defaultHeaders,
});
this.converter = new AnthropicContentConverter(
contentGeneratorConfig.model,
contentGeneratorConfig.schemaCompliance,
);
}
async generateContent(
request: GenerateContentParameters,
): Promise<GenerateContentResponse> {
const anthropicRequest = await this.buildRequest(request);
const response = (await this.client.messages.create(anthropicRequest, {
signal: request.config?.abortSignal,
})) as Message;
return this.converter.convertAnthropicResponseToGemini(response);
}
async generateContentStream(
request: GenerateContentParameters,
): Promise<AsyncGenerator<GenerateContentResponse>> {
const anthropicRequest = await this.buildRequest(request);
const streamingRequest: MessageCreateParamsStreaming & {
thinking?: { type: 'enabled'; budget_tokens: number };
} = {
...anthropicRequest,
stream: true,
};
const stream = (await this.client.messages.create(
streamingRequest as MessageCreateParamsStreaming,
{
signal: request.config?.abortSignal,
},
)) as AsyncIterable<RawMessageStreamEvent>;
return this.processStream(stream);
}
async countTokens(
request: CountTokensParameters,
): Promise<CountTokensResponse> {
try {
const tokenizer = getDefaultTokenizer();
const result = await tokenizer.calculateTokens(request, {
textEncoding: 'cl100k_base',
});
return {
totalTokens: result.totalTokens,
};
} catch (error) {
console.warn(
'Failed to calculate tokens with tokenizer, ' +
'falling back to simple method:',
error,
);
const content = JSON.stringify(request.contents);
const totalTokens = Math.ceil(content.length / 4);
return {
totalTokens,
};
}
}
async embedContent(
_request: EmbedContentParameters,
): Promise<EmbedContentResponse> {
throw new Error('Anthropic does not support embeddings.');
}
useSummarizedThinking(): boolean {
return false;
}
private buildHeaders(): Record<string, string> {
const version = this.cliConfig.getCliVersion() || 'unknown';
const userAgent = `QwenCode/${version} (${process.platform}; ${process.arch})`;
const betas: string[] = [];
const reasoning = this.contentGeneratorConfig.reasoning;
// Interleaved thinking is used when we send the `thinking` field.
if (reasoning !== false) {
betas.push('interleaved-thinking-2025-05-14');
}
// Effort (beta) is enabled when reasoning.effort is set.
if (reasoning !== false && reasoning?.effort !== undefined) {
betas.push('effort-2025-11-24');
}
const headers: Record<string, string> = {
'User-Agent': userAgent,
};
if (betas.length) {
headers['anthropic-beta'] = betas.join(',');
}
return headers;
}
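// Summary of the header logic above (a derived sketch, not Anthropic docs):
//   reasoning === false              -> no 'anthropic-beta' header at all
//   reasoning unset or without effort -> 'interleaved-thinking-2025-05-14'
//   reasoning with effort set        -> 'interleaved-thinking-2025-05-14,effort-2025-11-24'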
private async buildRequest(
request: GenerateContentParameters,
): Promise<MessageCreateParamsWithThinking> {
const { system, messages } =
this.converter.convertGeminiRequestToAnthropic(request);
const tools = request.config?.tools
? await this.converter.convertGeminiToolsToAnthropic(request.config.tools)
: undefined;
const sampling = this.buildSamplingParameters(request);
const thinking = this.buildThinkingConfig(request);
const outputConfig = this.buildOutputConfig();
return {
model: this.contentGeneratorConfig.model,
system,
messages,
tools,
...sampling,
...(thinking ? { thinking } : {}),
...(outputConfig ? { output_config: outputConfig } : {}),
};
}
private buildSamplingParameters(request: GenerateContentParameters): {
max_tokens: number;
temperature?: number;
top_p?: number;
top_k?: number;
} {
const configSamplingParams = this.contentGeneratorConfig.samplingParams;
const requestConfig = request.config || {};
const getParam = <T>(
configKey: keyof NonNullable<typeof configSamplingParams>,
requestKey?: keyof NonNullable<typeof requestConfig>,
): T | undefined => {
const configValue = configSamplingParams?.[configKey] as T | undefined;
const requestValue = requestKey
? (requestConfig[requestKey] as T | undefined)
: undefined;
return configValue !== undefined ? configValue : requestValue;
};
const maxTokens =
getParam<number>('max_tokens', 'maxOutputTokens') ?? 10_000;
return {
max_tokens: maxTokens,
temperature: getParam<number>('temperature', 'temperature') ?? 1,
top_p: getParam<number>('top_p', 'topP'),
top_k: getParam<number>('top_k', 'topK'),
};
}
private buildThinkingConfig(
request: GenerateContentParameters,
): { type: 'enabled'; budget_tokens: number } | undefined {
if (request.config?.thinkingConfig?.includeThoughts === false) {
return undefined;
}
const reasoning = this.contentGeneratorConfig.reasoning;
if (reasoning === false) {
return undefined;
}
if (reasoning?.budget_tokens !== undefined) {
return {
type: 'enabled',
budget_tokens: reasoning.budget_tokens,
};
}
const effort = reasoning?.effort;
// When using interleaved thinking with tools, the budget_tokens limit is the entire context window (200k tokens).
const budgetTokens =
effort === 'low' ? 16_000 : effort === 'high' ? 64_000 : 32_000;
return {
type: 'enabled',
budget_tokens: budgetTokens,
};
}
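// Thinking budget resolution, as implemented above (a summary sketch):
//   includeThoughts === false or reasoning === false -> thinking omitted
//   reasoning.budget_tokens set -> that exact budget is used
//   otherwise effort maps to a preset: 'low' -> 16_000,
//   'high' -> 64_000, anything else (incl. 'medium'/unset) -> 32_000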
private buildOutputConfig():
| { effort: 'low' | 'medium' | 'high' }
| undefined {
const reasoning = this.contentGeneratorConfig.reasoning;
if (reasoning === false || reasoning === undefined) {
return undefined;
}
if (reasoning.effort === undefined) {
return undefined;
}
return { effort: reasoning.effort };
}
private async *processStream(
stream: AsyncIterable<RawMessageStreamEvent>,
): AsyncGenerator<GenerateContentResponse> {
let messageId: string | undefined;
let model = this.contentGeneratorConfig.model;
let cachedTokens = 0;
let promptTokens = 0;
let completionTokens = 0;
let finishReason: string | undefined;
const blocks = new Map<number, StreamingBlockState>();
const collectedResponses: GenerateContentResponse[] = [];
for await (const event of stream) {
switch (event.type) {
case 'message_start': {
messageId = event.message.id ?? messageId;
model = event.message.model ?? model;
cachedTokens =
event.message.usage?.cache_read_input_tokens ?? cachedTokens;
promptTokens = event.message.usage?.input_tokens ?? promptTokens;
break;
}
case 'content_block_start': {
const index = event.index ?? 0;
const type = String(event.content_block.type || 'text');
const initialInput =
type === 'tool_use' && 'input' in event.content_block
? JSON.stringify(event.content_block.input)
: '';
blocks.set(index, {
type,
id:
'id' in event.content_block ? event.content_block.id : undefined,
name:
'name' in event.content_block
? event.content_block.name
: undefined,
inputJson: initialInput !== '{}' ? initialInput : '',
signature:
type === 'thinking' &&
'signature' in event.content_block &&
typeof event.content_block.signature === 'string'
? event.content_block.signature
: '',
});
break;
}
case 'content_block_delta': {
const index = event.index ?? 0;
const deltaType = (event.delta as { type?: string }).type || '';
const blockState = blocks.get(index);
if (deltaType === 'text_delta') {
const text = 'text' in event.delta ? event.delta.text : '';
if (text) {
const chunk = this.buildGeminiChunk({ text }, messageId, model);
collectedResponses.push(chunk);
yield chunk;
}
} else if (deltaType === 'thinking_delta') {
const thinking =
(event.delta as { thinking?: string }).thinking || '';
if (thinking) {
const chunk = this.buildGeminiChunk(
{ text: thinking, thought: true },
messageId,
model,
);
collectedResponses.push(chunk);
yield chunk;
}
} else if (deltaType === 'signature_delta' && blockState) {
const signature =
(event.delta as { signature?: string }).signature || '';
if (signature) {
blockState.signature += signature;
const chunk = this.buildGeminiChunk(
{ thought: true, thoughtSignature: signature },
messageId,
model,
);
collectedResponses.push(chunk);
yield chunk;
}
} else if (deltaType === 'input_json_delta' && blockState) {
const jsonDelta =
(event.delta as { partial_json?: string }).partial_json || '';
if (jsonDelta) {
blockState.inputJson += jsonDelta;
}
}
break;
}
case 'content_block_stop': {
const index = event.index ?? 0;
const blockState = blocks.get(index);
if (blockState?.type === 'tool_use') {
const args = safeJsonParse(blockState.inputJson || '{}', {});
const chunk = this.buildGeminiChunk(
{
functionCall: {
id: blockState.id,
name: blockState.name,
args,
},
},
messageId,
model,
);
collectedResponses.push(chunk);
yield chunk;
}
blocks.delete(index);
break;
}
case 'message_delta': {
const stopReasonValue = event.delta.stop_reason;
if (stopReasonValue) {
finishReason = stopReasonValue;
}
// Some Anthropic-compatible providers may include additional usage fields
// (e.g. `input_tokens`, `cache_read_input_tokens`) even though the official
// Anthropic SDK types only expose `output_tokens` here.
const usageUnknown = event.usage as unknown;
const usageRecord =
usageUnknown && typeof usageUnknown === 'object'
? (usageUnknown as Record<string, unknown>)
: undefined;
if (event.usage?.output_tokens !== undefined) {
completionTokens = event.usage.output_tokens;
}
if (usageRecord?.['input_tokens'] !== undefined) {
const inputTokens = usageRecord['input_tokens'];
if (typeof inputTokens === 'number') {
promptTokens = inputTokens;
}
}
if (usageRecord?.['cache_read_input_tokens'] !== undefined) {
const cacheRead = usageRecord['cache_read_input_tokens'];
if (typeof cacheRead === 'number') {
cachedTokens = cacheRead;
}
}
if (finishReason || event.usage) {
const chunk = this.buildGeminiChunk(
undefined,
messageId,
model,
finishReason,
{
cachedContentTokenCount: cachedTokens,
promptTokenCount: cachedTokens + promptTokens,
candidatesTokenCount: completionTokens,
totalTokenCount: cachedTokens + promptTokens + completionTokens,
},
);
collectedResponses.push(chunk);
yield chunk;
}
break;
}
case 'message_stop': {
if (promptTokens || completionTokens) {
const chunk = this.buildGeminiChunk(
undefined,
messageId,
model,
finishReason,
{
cachedContentTokenCount: cachedTokens,
promptTokenCount: cachedTokens + promptTokens,
candidatesTokenCount: completionTokens,
totalTokenCount: cachedTokens + promptTokens + completionTokens,
},
);
collectedResponses.push(chunk);
yield chunk;
}
break;
}
default:
break;
}
}
}
private buildGeminiChunk(
part?: {
text?: string;
thought?: boolean;
thoughtSignature?: string;
functionCall?: unknown;
},
responseId?: string,
model?: string,
finishReason?: string,
usageMetadata?: GenerateContentResponseUsageMetadata,
): GenerateContentResponse {
const response = new GenerateContentResponse();
response.responseId = responseId;
response.createTime = Date.now().toString();
response.modelVersion = model || this.contentGeneratorConfig.model;
response.promptFeedback = { safetyRatings: [] };
const candidateParts = part ? [part as unknown as Part] : [];
const mappedFinishReason =
finishReason !== undefined
? this.converter.mapAnthropicFinishReasonToGemini(finishReason)
: undefined;
response.candidates = [
{
content: {
parts: candidateParts,
role: 'model' as const,
},
index: 0,
safetyRatings: [],
...(mappedFinishReason ? { finishReason: mappedFinishReason } : {}),
},
];
if (usageMetadata) {
response.usageMetadata = usageMetadata;
}
return response;
}
}

View File

@@ -0,0 +1,377 @@
/**
* @license
* Copyright 2025 Qwen
* SPDX-License-Identifier: Apache-2.0
*/
import { beforeEach, describe, expect, it, vi } from 'vitest';
import type { CallableTool, Content, Tool } from '@google/genai';
import { FinishReason } from '@google/genai';
import type Anthropic from '@anthropic-ai/sdk';
// Mock schema conversion so we can force edge-cases (e.g. missing `type`).
vi.mock('../../utils/schemaConverter.js', () => ({
convertSchema: vi.fn((schema: unknown) => schema),
}));
import { convertSchema } from '../../utils/schemaConverter.js';
import { AnthropicContentConverter } from './converter.js';
describe('AnthropicContentConverter', () => {
let converter: AnthropicContentConverter;
beforeEach(() => {
vi.clearAllMocks();
converter = new AnthropicContentConverter('test-model', 'auto');
});
describe('convertGeminiRequestToAnthropic', () => {
it('extracts systemInstruction text from string', () => {
const { system } = converter.convertGeminiRequestToAnthropic({
model: 'models/test',
contents: 'hi',
config: { systemInstruction: 'sys' },
});
expect(system).toBe('sys');
});
it('extracts systemInstruction text from parts and joins with newlines', () => {
const { system } = converter.convertGeminiRequestToAnthropic({
model: 'models/test',
contents: 'hi',
config: {
systemInstruction: {
role: 'system',
parts: [{ text: 'a' }, { text: 'b' }],
} as unknown as Content,
},
});
expect(system).toBe('a\nb');
});
it('converts a plain string content into a user message', () => {
const { messages } = converter.convertGeminiRequestToAnthropic({
model: 'models/test',
contents: 'Hello',
});
expect(messages).toEqual([
{ role: 'user', content: [{ type: 'text', text: 'Hello' }] },
]);
});
it('converts user content parts into a user message with text blocks', () => {
const { messages } = converter.convertGeminiRequestToAnthropic({
model: 'models/test',
contents: [
{
role: 'user',
parts: [{ text: 'Hello' }, { text: 'World' }],
},
],
});
expect(messages).toEqual([
{
role: 'user',
content: [
{ type: 'text', text: 'Hello' },
{ type: 'text', text: 'World' },
],
},
]);
});
it('converts assistant thought parts into Anthropic thinking blocks', () => {
const { messages } = converter.convertGeminiRequestToAnthropic({
model: 'models/test',
contents: [
{
role: 'model',
parts: [
{ text: 'internal', thought: true, thoughtSignature: 'sig' },
{ text: 'visible' },
],
},
],
});
expect(messages).toEqual([
{
role: 'assistant',
content: [
{ type: 'thinking', thinking: 'internal', signature: 'sig' },
{ type: 'text', text: 'visible' },
],
},
]);
});
it('converts functionCall parts from model role into tool_use blocks', () => {
const { messages } = converter.convertGeminiRequestToAnthropic({
model: 'models/test',
contents: [
{
role: 'model',
parts: [
{ text: 'preface' },
{
functionCall: {
id: 'call-1',
name: 'tool_name',
args: { a: 1 },
},
},
],
},
],
});
expect(messages).toEqual([
{
role: 'assistant',
content: [
{ type: 'text', text: 'preface' },
{
type: 'tool_use',
id: 'call-1',
name: 'tool_name',
input: { a: 1 },
},
],
},
]);
});
it('converts functionResponse parts into user tool_result messages', () => {
const { messages } = converter.convertGeminiRequestToAnthropic({
model: 'models/test',
contents: [
{
role: 'user',
parts: [
{
functionResponse: {
id: 'call-1',
name: 'tool_name',
response: { output: 'ok' },
},
},
],
},
],
});
expect(messages).toEqual([
{
role: 'user',
content: [
{
type: 'tool_result',
tool_use_id: 'call-1',
content: 'ok',
},
],
},
]);
});
it('extracts function response error field when present', () => {
const { messages } = converter.convertGeminiRequestToAnthropic({
model: 'models/test',
contents: [
{
role: 'user',
parts: [
{
functionResponse: {
id: 'call-1',
name: 'tool_name',
response: { error: 'boom' },
},
},
],
},
],
});
expect(messages[0]).toEqual({
role: 'user',
content: [
{
type: 'tool_result',
tool_use_id: 'call-1',
content: 'boom',
},
],
});
});
});
describe('convertGeminiToolsToAnthropic', () => {
it('converts Tool.functionDeclarations to Anthropic tools and runs schema conversion', async () => {
const tools = [
{
functionDeclarations: [
{
name: 'get_weather',
description: 'Get weather',
parametersJsonSchema: {
type: 'object',
properties: { location: { type: 'string' } },
required: ['location'],
},
},
],
},
] as Tool[];
const result = await converter.convertGeminiToolsToAnthropic(tools);
expect(result).toHaveLength(1);
expect(result[0]).toEqual({
name: 'get_weather',
description: 'Get weather',
input_schema: {
type: 'object',
properties: { location: { type: 'string' } },
required: ['location'],
},
});
expect(vi.mocked(convertSchema)).toHaveBeenCalledTimes(1);
});
it('resolves CallableTool.tool() and converts its functionDeclarations', async () => {
const callable = [
{
tool: async () =>
({
functionDeclarations: [
{
name: 'dynamic_tool',
description: 'resolved tool',
parametersJsonSchema: { type: 'object', properties: {} },
},
],
}) as unknown as Tool,
},
] as CallableTool[];
const result = await converter.convertGeminiToolsToAnthropic(callable);
expect(result).toHaveLength(1);
expect(result[0].name).toBe('dynamic_tool');
});
it('defaults missing parameters to an empty object schema', async () => {
const tools = [
{
functionDeclarations: [
{ name: 'no_params', description: 'no params' },
],
},
] as Tool[];
const result = await converter.convertGeminiToolsToAnthropic(tools);
expect(result).toHaveLength(1);
expect(result[0]).toEqual({
name: 'no_params',
description: 'no params',
input_schema: { type: 'object', properties: {} },
});
});
it('forces input_schema.type to "object" when schema conversion yields no type', async () => {
vi.mocked(convertSchema).mockImplementationOnce(() => ({
properties: {},
}));
const tools = [
{
functionDeclarations: [
{
name: 'edge',
description: 'edge',
parametersJsonSchema: { type: 'object', properties: {} },
},
],
},
] as Tool[];
const result = await converter.convertGeminiToolsToAnthropic(tools);
expect(result[0]?.input_schema?.type).toBe('object');
});
});
describe('convertAnthropicResponseToGemini', () => {
it('converts text, tool_use, thinking, and redacted_thinking blocks', () => {
const response = converter.convertAnthropicResponseToGemini({
id: 'msg-1',
model: 'claude-test',
stop_reason: 'end_turn',
content: [
{ type: 'thinking', thinking: 'thought', signature: 'sig' },
{ type: 'text', text: 'hello' },
{ type: 'tool_use', id: 't1', name: 'tool', input: { x: 1 } },
{ type: 'redacted_thinking' },
],
usage: { input_tokens: 3, output_tokens: 5 },
} as unknown as Anthropic.Message);
expect(response.responseId).toBe('msg-1');
expect(response.modelVersion).toBe('claude-test');
expect(response.candidates?.[0]?.finishReason).toBe(FinishReason.STOP);
expect(response.usageMetadata).toEqual({
promptTokenCount: 3,
candidatesTokenCount: 5,
totalTokenCount: 8,
});
const parts = response.candidates?.[0]?.content?.parts || [];
expect(parts).toEqual([
{ text: 'thought', thought: true, thoughtSignature: 'sig' },
{ text: 'hello' },
{ functionCall: { id: 't1', name: 'tool', args: { x: 1 } } },
{ text: '', thought: true },
]);
});
it('handles tool_use input that is a JSON string', () => {
const response = converter.convertAnthropicResponseToGemini({
id: 'msg-1',
model: 'claude-test',
stop_reason: null,
content: [
{ type: 'tool_use', id: 't1', name: 'tool', input: '{"x":1}' },
],
} as unknown as Anthropic.Message);
const parts = response.candidates?.[0]?.content?.parts || [];
expect(parts).toEqual([
{ functionCall: { id: 't1', name: 'tool', args: { x: 1 } } },
]);
});
});
describe('mapAnthropicFinishReasonToGemini', () => {
it('maps known reasons', () => {
expect(converter.mapAnthropicFinishReasonToGemini('end_turn')).toBe(
FinishReason.STOP,
);
expect(converter.mapAnthropicFinishReasonToGemini('max_tokens')).toBe(
FinishReason.MAX_TOKENS,
);
expect(converter.mapAnthropicFinishReasonToGemini('content_filter')).toBe(
FinishReason.SAFETY,
);
});
it('returns undefined for null/empty', () => {
expect(converter.mapAnthropicFinishReasonToGemini(null)).toBeUndefined();
expect(converter.mapAnthropicFinishReasonToGemini('')).toBeUndefined();
});
});
});

View File

@@ -0,0 +1,448 @@
/**
* @license
* Copyright 2025 Qwen
* SPDX-License-Identifier: Apache-2.0
*/
import type {
Candidate,
CallableTool,
Content,
ContentListUnion,
ContentUnion,
FunctionCall,
FunctionResponse,
GenerateContentParameters,
Part,
PartUnion,
Tool,
ToolListUnion,
} from '@google/genai';
import { FinishReason, GenerateContentResponse } from '@google/genai';
import type Anthropic from '@anthropic-ai/sdk';
import { safeJsonParse } from '../../utils/safeJsonParse.js';
import {
convertSchema,
type SchemaComplianceMode,
} from '../../utils/schemaConverter.js';
type AnthropicMessageParam = Anthropic.MessageParam;
type AnthropicToolParam = Anthropic.Tool;
type AnthropicContentBlockParam = Anthropic.ContentBlockParam;
type ThoughtPart = { text: string; signature?: string };
interface ParsedParts {
thoughtParts: ThoughtPart[];
contentParts: string[];
functionCalls: FunctionCall[];
functionResponses: FunctionResponse[];
}
export class AnthropicContentConverter {
private model: string;
private schemaCompliance: SchemaComplianceMode;
constructor(model: string, schemaCompliance: SchemaComplianceMode = 'auto') {
this.model = model;
this.schemaCompliance = schemaCompliance;
}
convertGeminiRequestToAnthropic(request: GenerateContentParameters): {
system?: string;
messages: AnthropicMessageParam[];
} {
const messages: AnthropicMessageParam[] = [];
const system = this.extractTextFromContentUnion(
request.config?.systemInstruction,
);
this.processContents(request.contents, messages);
return {
system: system || undefined,
messages,
};
}
async convertGeminiToolsToAnthropic(
geminiTools: ToolListUnion,
): Promise<AnthropicToolParam[]> {
const tools: AnthropicToolParam[] = [];
for (const tool of geminiTools) {
let actualTool: Tool;
if ('tool' in tool) {
actualTool = await (tool as CallableTool).tool();
} else {
actualTool = tool as Tool;
}
if (!actualTool.functionDeclarations) {
continue;
}
for (const func of actualTool.functionDeclarations) {
if (!func.name) continue;
let inputSchema: Record<string, unknown> | undefined;
if (func.parametersJsonSchema) {
inputSchema = {
...(func.parametersJsonSchema as Record<string, unknown>),
};
} else if (func.parameters) {
inputSchema = func.parameters as Record<string, unknown>;
}
if (!inputSchema) {
inputSchema = { type: 'object', properties: {} };
}
inputSchema = convertSchema(inputSchema, this.schemaCompliance);
if (typeof inputSchema['type'] !== 'string') {
inputSchema['type'] = 'object';
}
tools.push({
name: func.name,
description: func.description,
input_schema: inputSchema as Anthropic.Tool.InputSchema,
});
}
}
return tools;
}
convertAnthropicResponseToGemini(
response: Anthropic.Message,
): GenerateContentResponse {
const geminiResponse = new GenerateContentResponse();
const parts: Part[] = [];
for (const block of response.content || []) {
const blockType = String((block as { type?: string })['type'] || '');
if (blockType === 'text') {
const text =
typeof (block as { text?: string }).text === 'string'
? (block as { text?: string }).text
: '';
if (text) {
parts.push({ text });
}
} else if (blockType === 'tool_use') {
const toolUse = block as {
id?: string;
name?: string;
input?: unknown;
};
parts.push({
functionCall: {
id: typeof toolUse.id === 'string' ? toolUse.id : undefined,
name: typeof toolUse.name === 'string' ? toolUse.name : undefined,
args: this.safeInputToArgs(toolUse.input),
},
});
} else if (blockType === 'thinking') {
const thinking =
typeof (block as { thinking?: string }).thinking === 'string'
? (block as { thinking?: string }).thinking
: '';
const signature =
typeof (block as { signature?: string }).signature === 'string'
? (block as { signature?: string }).signature
: '';
if (thinking || signature) {
const thoughtPart: Part = {
text: thinking,
thought: true,
thoughtSignature: signature,
};
parts.push(thoughtPart);
}
} else if (blockType === 'redacted_thinking') {
parts.push({ text: '', thought: true });
}
}
const candidate: Candidate = {
content: {
parts,
role: 'model' as const,
},
index: 0,
safetyRatings: [],
};
const finishReason = this.mapAnthropicFinishReasonToGemini(
response.stop_reason,
);
if (finishReason) {
candidate.finishReason = finishReason;
}
geminiResponse.candidates = [candidate];
geminiResponse.responseId = response.id;
geminiResponse.createTime = Date.now().toString();
geminiResponse.modelVersion = response.model || this.model;
geminiResponse.promptFeedback = { safetyRatings: [] };
if (response.usage) {
const promptTokens = response.usage.input_tokens || 0;
const completionTokens = response.usage.output_tokens || 0;
geminiResponse.usageMetadata = {
promptTokenCount: promptTokens,
candidatesTokenCount: completionTokens,
totalTokenCount: promptTokens + completionTokens,
};
}
return geminiResponse;
}
private processContents(
contents: ContentListUnion,
messages: AnthropicMessageParam[],
): void {
if (Array.isArray(contents)) {
for (const content of contents) {
this.processContent(content, messages);
}
} else if (contents) {
this.processContent(contents, messages);
}
}
private processContent(
content: ContentUnion | PartUnion,
messages: AnthropicMessageParam[],
): void {
if (typeof content === 'string') {
messages.push({
role: 'user',
content: [{ type: 'text', text: content }],
});
return;
}
if (!this.isContentObject(content)) return;
const parsed = this.parseParts(content.parts || []);
if (parsed.functionResponses.length > 0) {
for (const response of parsed.functionResponses) {
messages.push({
role: 'user',
content: [
{
type: 'tool_result',
tool_use_id: response.id || '',
content: this.extractFunctionResponseContent(response.response),
},
],
});
}
return;
}
if (content.role === 'model' && parsed.functionCalls.length > 0) {
const thinkingBlocks: AnthropicContentBlockParam[] =
parsed.thoughtParts.map((part) => {
const thinkingBlock: unknown = {
type: 'thinking',
thinking: part.text,
};
if (part.signature) {
(thinkingBlock as { signature?: string }).signature =
part.signature;
}
return thinkingBlock as AnthropicContentBlockParam;
});
const toolUses: AnthropicContentBlockParam[] = parsed.functionCalls.map(
(call, index) => ({
type: 'tool_use',
id: call.id || `tool_${index}`,
name: call.name || '',
input: (call.args as Record<string, unknown>) || {},
}),
);
const textBlocks: AnthropicContentBlockParam[] = parsed.contentParts.map(
(text) => ({
type: 'text' as const,
text,
}),
);
messages.push({
role: 'assistant',
content: [...thinkingBlocks, ...textBlocks, ...toolUses],
});
return;
}
const role = content.role === 'model' ? 'assistant' : 'user';
const thinkingBlocks: AnthropicContentBlockParam[] =
role === 'assistant'
? parsed.thoughtParts.map((part) => {
const thinkingBlock: unknown = {
type: 'thinking',
thinking: part.text,
};
if (part.signature) {
(thinkingBlock as { signature?: string }).signature =
part.signature;
}
return thinkingBlock as AnthropicContentBlockParam;
})
: [];
const textBlocks: AnthropicContentBlockParam[] = [
...thinkingBlocks,
...parsed.contentParts.map((text) => ({
type: 'text' as const,
text,
})),
];
if (textBlocks.length > 0) {
messages.push({ role, content: textBlocks });
}
}
private parseParts(parts: Part[]): ParsedParts {
const thoughtParts: ThoughtPart[] = [];
const contentParts: string[] = [];
const functionCalls: FunctionCall[] = [];
const functionResponses: FunctionResponse[] = [];
for (const part of parts) {
if (typeof part === 'string') {
contentParts.push(part);
} else if (
'text' in part &&
part.text &&
!('thought' in part && part.thought)
) {
contentParts.push(part.text);
} else if ('text' in part && 'thought' in part && part.thought) {
thoughtParts.push({
text: part.text || '',
signature:
'thoughtSignature' in part &&
typeof part.thoughtSignature === 'string'
? part.thoughtSignature
: undefined,
});
} else if ('functionCall' in part && part.functionCall) {
functionCalls.push(part.functionCall);
} else if ('functionResponse' in part && part.functionResponse) {
functionResponses.push(part.functionResponse);
}
}
return {
thoughtParts,
contentParts,
functionCalls,
functionResponses,
};
}
private extractTextFromContentUnion(contentUnion: unknown): string {
if (typeof contentUnion === 'string') {
return contentUnion;
}
if (Array.isArray(contentUnion)) {
return contentUnion
.map((item) => this.extractTextFromContentUnion(item))
.filter(Boolean)
.join('\n');
}
if (typeof contentUnion === 'object' && contentUnion !== null) {
if ('parts' in contentUnion) {
const content = contentUnion as Content;
return (
content.parts
?.map((part: Part) => {
if (typeof part === 'string') return part;
if ('text' in part) return part.text || '';
return '';
})
.filter(Boolean)
.join('\n') || ''
);
}
}
return '';
}
private extractFunctionResponseContent(response: unknown): string {
if (response === null || response === undefined) {
return '';
}
if (typeof response === 'string') {
return response;
}
if (typeof response === 'object') {
const responseObject = response as Record<string, unknown>;
const output = responseObject['output'];
if (typeof output === 'string') {
return output;
}
const error = responseObject['error'];
if (typeof error === 'string') {
return error;
}
}
try {
const serialized = JSON.stringify(response);
return serialized ?? String(response);
} catch {
return String(response);
}
}
private safeInputToArgs(input: unknown): Record<string, unknown> {
if (input && typeof input === 'object') {
return input as Record<string, unknown>;
}
if (typeof input === 'string') {
return safeJsonParse(input, {});
}
return {};
}
mapAnthropicFinishReasonToGemini(
reason?: string | null,
): FinishReason | undefined {
if (!reason) return undefined;
const mapping: Record<string, FinishReason> = {
end_turn: FinishReason.STOP,
stop_sequence: FinishReason.STOP,
tool_use: FinishReason.STOP,
max_tokens: FinishReason.MAX_TOKENS,
content_filter: FinishReason.SAFETY,
};
return mapping[reason] || FinishReason.FINISH_REASON_UNSPECIFIED;
}
private isContentObject(
content: unknown,
): content is { role: string; parts: Part[] } {
return (
typeof content === 'object' &&
content !== null &&
'role' in content &&
'parts' in content &&
Array.isArray((content as Record<string, unknown>)['parts'])
);
}
}

View File

@@ -0,0 +1,21 @@
/**
* @license
* Copyright 2025 Qwen
* SPDX-License-Identifier: Apache-2.0
*/
import type {
ContentGenerator,
ContentGeneratorConfig,
} from '../contentGenerator.js';
import type { Config } from '../../config/config.js';
import { AnthropicContentGenerator } from './anthropicContentGenerator.js';
export { AnthropicContentGenerator } from './anthropicContentGenerator.js';
export function createAnthropicContentGenerator(
contentGeneratorConfig: ContentGeneratorConfig,
cliConfig: Config,
): ContentGenerator {
return new AnthropicContentGenerator(contentGeneratorConfig, cliConfig);
}

View File

@@ -8,7 +8,7 @@ import { describe, it, expect, vi } from 'vitest';
import { createContentGenerator, AuthType } from './contentGenerator.js';
import { GoogleGenAI } from '@google/genai';
import type { Config } from '../config/config.js';
import { LoggingContentGenerator } from './geminiContentGenerator/loggingContentGenerator.js';
import { LoggingContentGenerator } from './loggingContentGenerator/index.js';
vi.mock('@google/genai');

View File

@@ -14,6 +14,7 @@ import type {
} from '@google/genai';
import { DEFAULT_QWEN_MODEL } from '../config/models.js';
import type { Config } from '../config/config.js';
import { LoggingContentGenerator } from './loggingContentGenerator/index.js';
/**
* Interface abstracting the core functionalities for generating content and counting tokens.
@@ -37,10 +38,11 @@ export interface ContentGenerator {
}
export enum AuthType {
USE_GEMINI = 'gemini-api-key',
USE_VERTEX_AI = 'vertex-ai',
USE_OPENAI = 'openai',
QWEN_OAUTH = 'qwen-oauth',
USE_GEMINI = 'gemini',
USE_VERTEX_AI = 'vertex-ai',
USE_ANTHROPIC = 'anthropic',
}
export type ContentGeneratorConfig = {
@@ -63,9 +65,12 @@ export type ContentGeneratorConfig = {
temperature?: number;
max_tokens?: number;
};
reasoning?: {
effort?: 'low' | 'medium' | 'high';
};
reasoning?:
| false
| {
effort?: 'low' | 'medium' | 'high';
budget_tokens?: number;
};
proxy?: string | undefined;
userAgent?: string;
// Schema compliance mode for tool definitions
@@ -77,7 +82,7 @@ export function createContentGeneratorConfig(
authType: AuthType | undefined,
generationConfig?: Partial<ContentGeneratorConfig>,
): ContentGeneratorConfig {
const newContentGeneratorConfig: Partial<ContentGeneratorConfig> = {
let newContentGeneratorConfig: Partial<ContentGeneratorConfig> = {
...(generationConfig || {}),
authType,
proxy: config?.getProxy(),
@@ -94,8 +99,16 @@ export function createContentGeneratorConfig(
}
if (authType === AuthType.USE_OPENAI) {
newContentGeneratorConfig = {
...newContentGeneratorConfig,
apiKey: newContentGeneratorConfig.apiKey || process.env['OPENAI_API_KEY'],
baseUrl:
newContentGeneratorConfig.baseUrl || process.env['OPENAI_BASE_URL'],
model: newContentGeneratorConfig.model || process.env['OPENAI_MODEL'],
};
if (!newContentGeneratorConfig.apiKey) {
throw new Error('OpenAI API key is required');
throw new Error('OPENAI_API_KEY environment variable not found.');
}
return {
@@ -104,10 +117,62 @@ export function createContentGeneratorConfig(
} as ContentGeneratorConfig;
}
return {
...newContentGeneratorConfig,
model: newContentGeneratorConfig?.model || DEFAULT_QWEN_MODEL,
} as ContentGeneratorConfig;
if (authType === AuthType.USE_ANTHROPIC) {
newContentGeneratorConfig = {
...newContentGeneratorConfig,
apiKey:
newContentGeneratorConfig.apiKey || process.env['ANTHROPIC_API_KEY'],
baseUrl:
newContentGeneratorConfig.baseUrl || process.env['ANTHROPIC_BASE_URL'],
model: newContentGeneratorConfig.model || process.env['ANTHROPIC_MODEL'],
};
if (!newContentGeneratorConfig.apiKey) {
throw new Error('ANTHROPIC_API_KEY environment variable not found.');
}
if (!newContentGeneratorConfig.baseUrl) {
throw new Error('ANTHROPIC_BASE_URL environment variable not found.');
}
if (!newContentGeneratorConfig.model) {
throw new Error('ANTHROPIC_MODEL environment variable not found.');
}
}
if (authType === AuthType.USE_GEMINI) {
newContentGeneratorConfig = {
...newContentGeneratorConfig,
apiKey: newContentGeneratorConfig.apiKey || process.env['GEMINI_API_KEY'],
model: newContentGeneratorConfig.model || process.env['GEMINI_MODEL'],
};
if (!newContentGeneratorConfig.apiKey) {
throw new Error('GEMINI_API_KEY environment variable not found.');
}
if (!newContentGeneratorConfig.model) {
throw new Error('GEMINI_MODEL environment variable not found.');
}
}
if (authType === AuthType.USE_VERTEX_AI) {
newContentGeneratorConfig = {
...newContentGeneratorConfig,
apiKey: newContentGeneratorConfig.apiKey || process.env['GOOGLE_API_KEY'],
model: newContentGeneratorConfig.model || process.env['GOOGLE_MODEL'],
};
if (!newContentGeneratorConfig.apiKey) {
throw new Error('GOOGLE_API_KEY environment variable not found.');
}
if (!newContentGeneratorConfig.model) {
throw new Error('GOOGLE_MODEL environment variable not found.');
}
}
return newContentGeneratorConfig as ContentGeneratorConfig;
}
export async function createContentGenerator(
@@ -115,19 +180,9 @@ export async function createContentGenerator(
gcConfig: Config,
isInitialAuth?: boolean,
): Promise<ContentGenerator> {
if (
config.authType === AuthType.USE_GEMINI ||
config.authType === AuthType.USE_VERTEX_AI
) {
const { createGeminiContentGenerator } = await import(
'./geminiContentGenerator/index.js'
);
return createGeminiContentGenerator(config, gcConfig);
}
if (config.authType === AuthType.USE_OPENAI) {
if (!config.apiKey) {
throw new Error('OpenAI API key is required');
throw new Error('OPENAI_API_KEY environment variable not found.');
}
// Import OpenAIContentGenerator dynamically to avoid circular dependencies
@@ -136,7 +191,8 @@ export async function createContentGenerator(
);
// Always use OpenAIContentGenerator, logging is controlled by enableOpenAILogging flag
return createOpenAIContentGenerator(config, gcConfig);
const generator = createOpenAIContentGenerator(config, gcConfig);
return new LoggingContentGenerator(generator, gcConfig);
}
if (config.authType === AuthType.QWEN_OAUTH) {
@@ -157,7 +213,8 @@ export async function createContentGenerator(
);
// Create the content generator with dynamic token management
return new QwenContentGenerator(qwenClient, config, gcConfig);
const generator = new QwenContentGenerator(qwenClient, config, gcConfig);
return new LoggingContentGenerator(generator, gcConfig);
} catch (error) {
throw new Error(
`${error instanceof Error ? error.message : String(error)}`,
@@ -165,6 +222,30 @@ export async function createContentGenerator(
}
}
if (config.authType === AuthType.USE_ANTHROPIC) {
if (!config.apiKey) {
throw new Error('ANTHROPIC_API_KEY environment variable not found.');
}
const { createAnthropicContentGenerator } = await import(
'./anthropicContentGenerator/index.js'
);
const generator = createAnthropicContentGenerator(config, gcConfig);
return new LoggingContentGenerator(generator, gcConfig);
}
if (
config.authType === AuthType.USE_GEMINI ||
config.authType === AuthType.USE_VERTEX_AI
) {
const { createGeminiContentGenerator } = await import(
'./geminiContentGenerator/index.js'
);
const generator = createGeminiContentGenerator(config, gcConfig);
return new LoggingContentGenerator(generator, gcConfig);
}
throw new Error(
`Error creating contentGenerator: Unsupported authType: ${config.authType}`,
);
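Taken together, each provider branch in createContentGeneratorConfig now resolves credentials from explicit config first and environment variables second, then fails fast with a provider-specific message. A sketch of the expected behavior, assuming config is the CLI Config instance and only the Anthropic variables are set (all values are placeholders):
process.env['ANTHROPIC_API_KEY'] = 'sk-ant-placeholder';
process.env['ANTHROPIC_BASE_URL'] = 'https://api.anthropic.com';
process.env['ANTHROPIC_MODEL'] = 'claude-placeholder';
const cfg = createContentGeneratorConfig(config, AuthType.USE_ANTHROPIC);
// cfg.apiKey, cfg.baseUrl, cfg.model are populated from the environment.
// With no GEMINI_API_KEY set:
createContentGeneratorConfig(config, AuthType.USE_GEMINI);
// -> throws Error('GEMINI_API_KEY environment variable not found.')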

View File

@@ -720,66 +720,6 @@ describe('GeminiChat', () => {
);
});
it('should handle summarized thinking by conditionally including thoughts in history', async () => {
// Case 1: useSummarizedThinking is true -> thoughts NOT in history
vi.mocked(mockContentGenerator.useSummarizedThinking).mockReturnValue(
true,
);
const stream1 = (async function* () {
yield {
candidates: [
{
content: {
role: 'model',
parts: [{ thought: true, text: 'T1' }, { text: 'A1' }],
},
finishReason: 'STOP',
},
],
} as unknown as GenerateContentResponse;
})();
vi.mocked(mockContentGenerator.generateContentStream).mockResolvedValue(
stream1,
);
const res1 = await chat.sendMessageStream('m1', { message: 'h1' }, 'p1');
for await (const _ of res1);
const history1 = chat.getHistory();
expect(history1[1].parts).toEqual([{ text: 'A1' }]);
// Case 2: useSummarizedThinking is false -> thoughts ARE in history
chat.clearHistory();
vi.mocked(mockContentGenerator.useSummarizedThinking).mockReturnValue(
false,
);
const stream2 = (async function* () {
yield {
candidates: [
{
content: {
role: 'model',
parts: [{ thought: true, text: 'T2' }, { text: 'A2' }],
},
finishReason: 'STOP',
},
],
} as unknown as GenerateContentResponse;
})();
vi.mocked(mockContentGenerator.generateContentStream).mockResolvedValue(
stream2,
);
const res2 = await chat.sendMessageStream('m1', { message: 'h1' }, 'p2');
for await (const _ of res2);
const history2 = chat.getHistory();
expect(history2[1].parts).toEqual([
{ text: 'T2', thought: true },
{ text: 'A2' },
]);
});
it('should keep parts with thoughtSignature when consolidating history', async () => {
const stream = (async function* () {
yield {

View File

@@ -559,14 +559,25 @@ export class GeminiChat {
yield chunk; // Yield every chunk to the UI immediately.
}
let thoughtText = '';
// Only include thoughts if not using summarized thinking.
if (!this.config.getContentGenerator().useSummarizedThinking()) {
thoughtText = allModelParts
.filter((part) => part.thought)
.map((part) => part.text)
.join('')
.trim();
let thoughtContentPart: Part | undefined;
const thoughtText = allModelParts
.filter((part) => part.thought)
.map((part) => part.text)
.join('')
.trim();
if (thoughtText !== '') {
thoughtContentPart = {
text: thoughtText,
thought: true,
};
const thoughtSignature = allModelParts.filter(
(part) => part.thoughtSignature && part.thought,
)?.[0]?.thoughtSignature;
if (thoughtContentPart && thoughtSignature) {
thoughtContentPart.thoughtSignature = thoughtSignature;
}
}
const contentParts = allModelParts.filter((part) => !part.thought);
@@ -592,11 +603,11 @@ export class GeminiChat {
.trim();
// Record assistant turn with raw Content and metadata
if (thoughtText || contentText || hasToolCall || usageMetadata) {
if (thoughtContentPart || contentText || hasToolCall || usageMetadata) {
this.chatRecordingService?.recordAssistantTurn({
model,
message: [
...(thoughtText ? [{ text: thoughtText, thought: true }] : []),
...(thoughtContentPart ? [thoughtContentPart] : []),
...(contentText ? [{ text: contentText }] : []),
...(hasToolCall
? contentParts
@@ -632,7 +643,7 @@ export class GeminiChat {
this.history.push({
role: 'model',
parts: [
...(thoughtText ? [{ text: thoughtText, thought: true }] : []),
...(thoughtContentPart ? [thoughtContentPart] : []),
...consolidatedHistoryParts,
],
});
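With this change the streamed thought text is always consolidated into a single thought part that carries the first available thoughtSignature, instead of being dropped whenever summarized thinking is enabled. A sketch of the resulting history entry, assuming one signed thought chunk and one answer chunk:
// Hypothetical parts collected from the stream:
const allModelParts: Part[] = [
  { thought: true, text: 'Reasoning...', thoughtSignature: 'sig-1' },
  { text: 'Final answer' },
];
// The pushed history entry would then be:
// {
//   role: 'model',
//   parts: [
//     { text: 'Reasoning...', thought: true, thoughtSignature: 'sig-1' },
//     { text: 'Final answer' },
//   ],
// }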

View File

@@ -39,7 +39,7 @@ export class GeminiContentGenerator implements ContentGenerator {
this.contentGeneratorConfig = contentGeneratorConfig;
}
private buildSamplingParameters(
private buildGenerateContentConfig(
request: GenerateContentParameters,
): GenerateContentConfig {
const configSamplingParams = this.contentGeneratorConfig?.samplingParams;
@@ -84,17 +84,7 @@ export class GeminiContentGenerator implements ContentGenerator {
'frequencyPenalty',
),
thinkingConfig: getParameterValue(
this.contentGeneratorConfig?.reasoning
? {
includeThoughts: true,
thinkingLevel: (this.contentGeneratorConfig.reasoning.effort ===
'low'
? 'LOW'
: this.contentGeneratorConfig.reasoning.effort === 'high'
? 'HIGH'
: 'THINKING_LEVEL_UNSPECIFIED') as ThinkingLevel,
}
: undefined,
this.buildThinkingConfig(),
'thinkingConfig',
{
includeThoughts: true,
@@ -104,13 +94,40 @@ export class GeminiContentGenerator implements ContentGenerator {
};
}
private buildThinkingConfig():
| { includeThoughts: boolean; thinkingLevel?: ThinkingLevel }
| undefined {
const reasoning = this.contentGeneratorConfig?.reasoning;
if (reasoning === false) {
return { includeThoughts: false };
}
if (reasoning) {
const thinkingLevel = (
reasoning.effort === 'low'
? 'LOW'
: reasoning.effort === 'high'
? 'HIGH'
: 'THINKING_LEVEL_UNSPECIFIED'
) as ThinkingLevel;
return {
includeThoughts: true,
thinkingLevel,
};
}
return undefined;
}
async generateContent(
request: GenerateContentParameters,
_userPromptId: string,
): Promise<GenerateContentResponse> {
const finalRequest = {
...request,
config: this.buildSamplingParameters(request),
config: this.buildGenerateContentConfig(request),
};
return this.googleGenAI.models.generateContent(finalRequest);
}
@@ -121,7 +138,7 @@ export class GeminiContentGenerator implements ContentGenerator {
): Promise<AsyncGenerator<GenerateContentResponse>> {
const finalRequest = {
...request,
config: this.buildSamplingParameters(request),
config: this.buildGenerateContentConfig(request),
};
return this.googleGenAI.models.generateContentStream(finalRequest);
}
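buildThinkingConfig translates the widened reasoning union for the Gemini backend: false disables thoughts outright, an effort string selects a ThinkingLevel, and an unset value defers to the default supplied to getParameterValue. In sketch form:
// reasoning: false              -> { includeThoughts: false }
// reasoning: { effort: 'low' }  -> { includeThoughts: true, thinkingLevel: 'LOW' }
// reasoning: { effort: 'high' } -> { includeThoughts: true, thinkingLevel: 'HIGH' }
// reasoning: {} or 'medium'     -> { includeThoughts: true, thinkingLevel: 'THINKING_LEVEL_UNSPECIFIED' }
// reasoning: undefined          -> undefined (the default thinkingConfig applies)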

View File

@@ -7,7 +7,6 @@
import { describe, it, expect, vi, beforeEach } from 'vitest';
import { createGeminiContentGenerator } from './index.js';
import { GeminiContentGenerator } from './geminiContentGenerator.js';
import { LoggingContentGenerator } from './loggingContentGenerator.js';
import type { Config } from '../../config/config.js';
import { AuthType } from '../contentGenerator.js';
@@ -15,10 +14,6 @@ vi.mock('./geminiContentGenerator.js', () => ({
GeminiContentGenerator: vi.fn().mockImplementation(() => ({})),
}));
vi.mock('./loggingContentGenerator.js', () => ({
LoggingContentGenerator: vi.fn().mockImplementation((wrapped) => wrapped),
}));
describe('createGeminiContentGenerator', () => {
let mockConfig: Config;
@@ -31,7 +26,7 @@ describe('createGeminiContentGenerator', () => {
} as unknown as Config;
});
it('should create a GeminiContentGenerator wrapped in LoggingContentGenerator', () => {
it('should create a GeminiContentGenerator', () => {
const config = {
model: 'gemini-1.5-flash',
apiKey: 'test-key',
@@ -41,7 +36,6 @@ describe('createGeminiContentGenerator', () => {
const generator = createGeminiContentGenerator(config, mockConfig);
expect(GeminiContentGenerator).toHaveBeenCalled();
expect(LoggingContentGenerator).toHaveBeenCalled();
expect(generator).toBeDefined();
});
});

View File

@@ -11,10 +11,8 @@ import type {
} from '../contentGenerator.js';
import type { Config } from '../../config/config.js';
import { InstallationManager } from '../../utils/installationManager.js';
import { LoggingContentGenerator } from './loggingContentGenerator.js';
export { GeminiContentGenerator } from './geminiContentGenerator.js';
export { LoggingContentGenerator } from './loggingContentGenerator.js';
/**
* Create a Gemini content generator.
@@ -51,5 +49,5 @@ export function createGeminiContentGenerator(
config,
);
return new LoggingContentGenerator(geminiContentGenerator, gcConfig);
return geminiContentGenerator;
}
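The factory now returns the bare generator, so logging is applied exactly once, by createContentGenerator. A sketch of the resulting call-site pattern:
const generator = createGeminiContentGenerator(config, gcConfig);
// The caller decides whether to decorate:
const logged = new LoggingContentGenerator(generator, gcConfig);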

View File

@@ -0,0 +1,7 @@
/**
* @license
* Copyright 2025 Qwen
* SPDX-License-Identifier: Apache-2.0
*/
export { LoggingContentGenerator } from './loggingContentGenerator.js';

View File

@@ -0,0 +1,371 @@
/**
* @license
* Copyright 2025 Qwen
* SPDX-License-Identifier: Apache-2.0
*/
import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
import type {
GenerateContentParameters,
GenerateContentResponseUsageMetadata,
} from '@google/genai';
import { GenerateContentResponse } from '@google/genai';
import type { Config } from '../../config/config.js';
import type { ContentGenerator } from '../contentGenerator.js';
import { LoggingContentGenerator } from './index.js';
import { OpenAIContentConverter } from '../openaiContentGenerator/converter.js';
import {
logApiRequest,
logApiResponse,
logApiError,
} from '../../telemetry/loggers.js';
import { OpenAILogger } from '../../utils/openaiLogger.js';
import type OpenAI from 'openai';
vi.mock('../../telemetry/loggers.js', () => ({
logApiRequest: vi.fn(),
logApiResponse: vi.fn(),
logApiError: vi.fn(),
}));
vi.mock('../../utils/openaiLogger.js', () => ({
OpenAILogger: vi.fn().mockImplementation(() => ({
logInteraction: vi.fn().mockResolvedValue(undefined),
})),
}));
const convertGeminiRequestToOpenAISpy = vi
.spyOn(OpenAIContentConverter.prototype, 'convertGeminiRequestToOpenAI')
.mockReturnValue([{ role: 'user', content: 'converted' }]);
const convertGeminiToolsToOpenAISpy = vi
.spyOn(OpenAIContentConverter.prototype, 'convertGeminiToolsToOpenAI')
.mockResolvedValue([{ type: 'function', function: { name: 'tool' } }]);
const convertGeminiResponseToOpenAISpy = vi
.spyOn(OpenAIContentConverter.prototype, 'convertGeminiResponseToOpenAI')
.mockReturnValue({
id: 'openai-response',
object: 'chat.completion',
created: 123456789,
model: 'test-model',
choices: [],
} as OpenAI.Chat.ChatCompletion);
const createConfig = (overrides: Record<string, unknown> = {}): Config =>
({
getContentGeneratorConfig: () => ({
authType: 'openai',
enableOpenAILogging: false,
...overrides,
}),
}) as Config;
const createWrappedGenerator = (
generateContent: ContentGenerator['generateContent'],
generateContentStream: ContentGenerator['generateContentStream'],
): ContentGenerator =>
({
generateContent,
generateContentStream,
countTokens: vi.fn(),
embedContent: vi.fn(),
useSummarizedThinking: vi.fn().mockReturnValue(false),
}) as ContentGenerator;
const createResponse = (
responseId: string,
modelVersion: string,
parts: Array<Record<string, unknown>>,
usageMetadata?: GenerateContentResponseUsageMetadata,
finishReason?: string,
): GenerateContentResponse => {
const response = new GenerateContentResponse();
response.responseId = responseId;
response.modelVersion = modelVersion;
response.usageMetadata = usageMetadata;
response.candidates = [
{
content: {
role: 'model',
parts: parts as never[],
},
finishReason: finishReason as never,
index: 0,
safetyRatings: [],
},
];
return response;
};
describe('LoggingContentGenerator', () => {
beforeEach(() => {
vi.clearAllMocks();
});
afterEach(() => {
convertGeminiRequestToOpenAISpy.mockClear();
convertGeminiToolsToOpenAISpy.mockClear();
convertGeminiResponseToOpenAISpy.mockClear();
});
it('logs request/response, normalizes thought parts, and logs OpenAI interaction', async () => {
const wrapped = createWrappedGenerator(
vi.fn().mockResolvedValue(
createResponse(
'resp-1',
'model-v2',
[{ text: 'ok' }],
{
promptTokenCount: 3,
candidatesTokenCount: 5,
totalTokenCount: 8,
},
'STOP',
),
),
vi.fn(),
);
const generator = new LoggingContentGenerator(
wrapped,
createConfig({
enableOpenAILogging: true,
openAILoggingDir: 'logs',
schemaCompliance: 'openapi_30',
}),
);
const request = {
model: 'test-model',
contents: [
{
role: 'user',
parts: [
{ text: 'Hello', thought: 'internal' },
{
functionCall: { id: 'call-1', name: 'tool', args: '{}' },
thought: 'strip-me',
},
null,
],
},
],
config: {
temperature: 0.3,
topP: 0.9,
maxOutputTokens: 256,
presencePenalty: 0.2,
frequencyPenalty: 0.1,
tools: [
{
functionDeclarations: [
{ name: 'tool', description: 'desc', parameters: {} },
],
},
],
},
} as unknown as GenerateContentParameters;
const response = await generator.generateContent(request, 'prompt-1');
expect(response.responseId).toBe('resp-1');
expect(logApiRequest).toHaveBeenCalledTimes(1);
const [, requestEvent] = vi.mocked(logApiRequest).mock.calls[0];
const loggedContents = JSON.parse(requestEvent.request_text || '[]');
expect(loggedContents[0].parts[0]).toEqual({
text: 'Hello\n[Thought: internal]',
});
expect(loggedContents[0].parts[1]).toEqual({
functionCall: { id: 'call-1', name: 'tool', args: '{}' },
});
expect(logApiResponse).toHaveBeenCalledTimes(1);
const [, responseEvent] = vi.mocked(logApiResponse).mock.calls[0];
expect(responseEvent.response_id).toBe('resp-1');
expect(responseEvent.model).toBe('model-v2');
expect(responseEvent.prompt_id).toBe('prompt-1');
expect(responseEvent.input_token_count).toBe(3);
expect(convertGeminiRequestToOpenAISpy).toHaveBeenCalledTimes(1);
expect(convertGeminiToolsToOpenAISpy).toHaveBeenCalledTimes(1);
expect(convertGeminiResponseToOpenAISpy).toHaveBeenCalledTimes(1);
const openaiLoggerInstance = vi.mocked(OpenAILogger).mock.results[0]
?.value as { logInteraction: ReturnType<typeof vi.fn> };
expect(openaiLoggerInstance.logInteraction).toHaveBeenCalledTimes(1);
const [openaiRequest, openaiResponse, openaiError] =
openaiLoggerInstance.logInteraction.mock.calls[0];
expect(openaiRequest).toEqual(
expect.objectContaining({
model: 'test-model',
messages: [{ role: 'user', content: 'converted' }],
tools: [{ type: 'function', function: { name: 'tool' } }],
temperature: 0.3,
top_p: 0.9,
max_tokens: 256,
presence_penalty: 0.2,
frequency_penalty: 0.1,
}),
);
expect(openaiResponse).toEqual({
id: 'openai-response',
object: 'chat.completion',
created: 123456789,
model: 'test-model',
choices: [],
});
expect(openaiError).toBeUndefined();
});
it('logs errors with status code and request id, then rethrows', async () => {
const error = Object.assign(new Error('boom'), {
code: 429,
request_id: 'req-99',
type: 'rate_limit',
});
const wrapped = createWrappedGenerator(
vi.fn().mockRejectedValue(error),
vi.fn(),
);
const generator = new LoggingContentGenerator(
wrapped,
createConfig({ enableOpenAILogging: true }),
);
const request = {
model: 'test-model',
contents: 'Hello',
} as unknown as GenerateContentParameters;
await expect(
generator.generateContent(request, 'prompt-2'),
).rejects.toThrow('boom');
expect(logApiError).toHaveBeenCalledTimes(1);
const [, errorEvent] = vi.mocked(logApiError).mock.calls[0];
expect(errorEvent.response_id).toBe('req-99');
expect(errorEvent.status_code).toBe(429);
expect(errorEvent.error_type).toBe('rate_limit');
expect(errorEvent.prompt_id).toBe('prompt-2');
const openaiLoggerInstance = vi.mocked(OpenAILogger).mock.results[0]
?.value as { logInteraction: ReturnType<typeof vi.fn> };
const [, , loggedError] = openaiLoggerInstance.logInteraction.mock.calls[0];
expect(loggedError).toBeInstanceOf(Error);
expect((loggedError as Error).message).toBe('boom');
});
it('logs streaming responses and consolidates tool calls', async () => {
const usage1 = {
promptTokenCount: 1,
} as GenerateContentResponseUsageMetadata;
const usage2 = {
promptTokenCount: 2,
candidatesTokenCount: 4,
totalTokenCount: 6,
} as GenerateContentResponseUsageMetadata;
const response1 = createResponse(
'resp-1',
'model-stream',
[
{ text: 'Hello' },
{ functionCall: { id: 'call-1', name: 'tool', args: '{}' } },
],
usage1,
);
const response2 = createResponse(
'resp-2',
'model-stream',
[
{ text: ' world' },
{ functionCall: { id: 'call-1', name: 'tool', args: '{"x":1}' } },
{ functionResponse: { name: 'tool', response: { output: 'ok' } } },
],
usage2,
'STOP',
);
const wrapped = createWrappedGenerator(
vi.fn(),
vi.fn().mockResolvedValue(
(async function* () {
yield response1;
yield response2;
})(),
),
);
const generator = new LoggingContentGenerator(
wrapped,
createConfig({ enableOpenAILogging: true }),
);
const request = {
model: 'test-model',
contents: 'Hello',
} as unknown as GenerateContentParameters;
const stream = await generator.generateContentStream(request, 'prompt-3');
const seen: GenerateContentResponse[] = [];
for await (const item of stream) {
seen.push(item);
}
expect(seen).toHaveLength(2);
expect(logApiResponse).toHaveBeenCalledTimes(1);
const [, responseEvent] = vi.mocked(logApiResponse).mock.calls[0];
expect(responseEvent.response_id).toBe('resp-1');
expect(responseEvent.input_token_count).toBe(2);
expect(convertGeminiResponseToOpenAISpy).toHaveBeenCalledTimes(1);
const [consolidatedResponse] =
convertGeminiResponseToOpenAISpy.mock.calls[0];
const consolidatedParts =
consolidatedResponse.candidates?.[0]?.content?.parts || [];
expect(consolidatedParts).toEqual([
{ text: 'Hello' },
{ functionCall: { id: 'call-1', name: 'tool', args: '{"x":1}' } },
{ text: ' world' },
{ functionResponse: { name: 'tool', response: { output: 'ok' } } },
]);
expect(consolidatedResponse.usageMetadata).toBe(usage2);
expect(consolidatedResponse.responseId).toBe('resp-2');
expect(consolidatedResponse.candidates?.[0]?.finishReason).toBe('STOP');
});
it('logs stream errors and skips response logging', async () => {
const response1 = createResponse('resp-1', 'model-stream', [
{ text: 'partial' },
]);
const streamError = new Error('stream-fail');
const wrapped = createWrappedGenerator(
vi.fn(),
vi.fn().mockResolvedValue(
(async function* () {
yield response1;
throw streamError;
})(),
),
);
const generator = new LoggingContentGenerator(
wrapped,
createConfig({ enableOpenAILogging: true }),
);
const request = {
model: 'test-model',
contents: 'Hello',
} as unknown as GenerateContentParameters;
const stream = await generator.generateContentStream(request, 'prompt-4');
await expect(async () => {
for await (const _item of stream) {
// Consume stream to trigger error.
}
}).rejects.toThrow('stream-fail');
expect(logApiResponse).not.toHaveBeenCalled();
expect(logApiError).toHaveBeenCalledTimes(1);
const openaiLoggerInstance = vi.mocked(OpenAILogger).mock.results[0]
?.value as { logInteraction: ReturnType<typeof vi.fn> };
expect(openaiLoggerInstance.logInteraction).toHaveBeenCalledTimes(1);
});
});

View File

@@ -4,20 +4,22 @@
* SPDX-License-Identifier: Apache-2.0
*/
import type {
Content,
CountTokensParameters,
CountTokensResponse,
EmbedContentParameters,
EmbedContentResponse,
GenerateContentParameters,
GenerateContentResponseUsageMetadata,
import {
GenerateContentResponse,
ContentListUnion,
ContentUnion,
Part,
PartUnion,
type Content,
type CountTokensParameters,
type CountTokensResponse,
type EmbedContentParameters,
type EmbedContentResponse,
type GenerateContentParameters,
type GenerateContentResponseUsageMetadata,
type ContentListUnion,
type ContentUnion,
type Part,
type PartUnion,
type FinishReason,
} from '@google/genai';
import type OpenAI from 'openai';
import {
ApiRequestEvent,
ApiResponseEvent,
@@ -31,6 +33,8 @@ import {
} from '../../telemetry/loggers.js';
import type { ContentGenerator } from '../contentGenerator.js';
import { isStructuredError } from '../../utils/quotaErrorDetection.js';
import { OpenAIContentConverter } from '../openaiContentGenerator/converter.js';
import { OpenAILogger } from '../../utils/openaiLogger.js';
interface StructuredError {
status: number;
@@ -40,10 +44,19 @@ interface StructuredError {
* A decorator that wraps a ContentGenerator to add logging to API calls.
*/
export class LoggingContentGenerator implements ContentGenerator {
private openaiLogger?: OpenAILogger;
private schemaCompliance?: 'auto' | 'openapi_30';
constructor(
private readonly wrapped: ContentGenerator,
private readonly config: Config,
) {}
) {
const generatorConfig = this.config.getContentGeneratorConfig();
if (generatorConfig?.enableOpenAILogging) {
this.openaiLogger = new OpenAILogger(generatorConfig.openAILoggingDir);
this.schemaCompliance = generatorConfig.schemaCompliance;
}
}
getWrapped(): ContentGenerator {
return this.wrapped;
@@ -91,21 +104,31 @@ export class LoggingContentGenerator implements ContentGenerator {
prompt_id: string,
): void {
const errorMessage = error instanceof Error ? error.message : String(error);
const errorType = error instanceof Error ? error.name : 'unknown';
const errorType =
(error as { type?: string })?.type ||
(error instanceof Error ? error.name : 'unknown');
const errorResponseId =
(error as { requestID?: string; request_id?: string })?.requestID ||
(error as { requestID?: string; request_id?: string })?.request_id ||
responseId;
const errorStatus =
(error as { code?: string | number; status?: number })?.code ??
(error as { status?: number })?.status ??
(isStructuredError(error)
? (error as StructuredError).status
: undefined);
logApiError(
this.config,
new ApiErrorEvent(
responseId,
errorResponseId,
model,
errorMessage,
durationMs,
prompt_id,
this.config.getContentGeneratorConfig()?.authType,
errorType,
isStructuredError(error)
? (error as StructuredError).status
: undefined,
errorStatus,
),
);
}
@@ -116,6 +139,7 @@ export class LoggingContentGenerator implements ContentGenerator {
): Promise<GenerateContentResponse> {
const startTime = Date.now();
this.logApiRequest(this.toContents(req.contents), req.model, userPromptId);
const openaiRequest = await this.buildOpenAIRequestForLogging(req);
try {
const response = await this.wrapped.generateContent(req, userPromptId);
const durationMs = Date.now() - startTime;
@@ -127,10 +151,12 @@ export class LoggingContentGenerator implements ContentGenerator {
response.usageMetadata,
JSON.stringify(response),
);
await this.logOpenAIInteraction(openaiRequest, response);
return response;
} catch (error) {
const durationMs = Date.now() - startTime;
this._logApiError(undefined, durationMs, error, req.model, userPromptId);
await this.logOpenAIInteraction(openaiRequest, undefined, error);
throw error;
}
}
@@ -141,6 +167,7 @@ export class LoggingContentGenerator implements ContentGenerator {
): Promise<AsyncGenerator<GenerateContentResponse>> {
const startTime = Date.now();
this.logApiRequest(this.toContents(req.contents), req.model, userPromptId);
const openaiRequest = await this.buildOpenAIRequestForLogging(req);
let stream: AsyncGenerator<GenerateContentResponse>;
try {
@@ -148,6 +175,7 @@ export class LoggingContentGenerator implements ContentGenerator {
} catch (error) {
const durationMs = Date.now() - startTime;
this._logApiError(undefined, durationMs, error, req.model, userPromptId);
await this.logOpenAIInteraction(openaiRequest, undefined, error);
throw error;
}
@@ -156,6 +184,7 @@ export class LoggingContentGenerator implements ContentGenerator {
startTime,
userPromptId,
req.model,
openaiRequest,
);
}
@@ -164,6 +193,7 @@ export class LoggingContentGenerator implements ContentGenerator {
startTime: number,
userPromptId: string,
model: string,
openaiRequest?: OpenAI.Chat.ChatCompletionCreateParams,
): AsyncGenerator<GenerateContentResponse> {
const responses: GenerateContentResponse[] = [];
@@ -186,6 +216,9 @@ export class LoggingContentGenerator implements ContentGenerator {
lastUsageMetadata,
JSON.stringify(responses),
);
const consolidatedResponse =
this.consolidateGeminiResponsesForLogging(responses);
await this.logOpenAIInteraction(openaiRequest, consolidatedResponse);
} catch (error) {
const durationMs = Date.now() - startTime;
this._logApiError(
@@ -195,10 +228,182 @@ export class LoggingContentGenerator implements ContentGenerator {
responses[0]?.modelVersion || model,
userPromptId,
);
await this.logOpenAIInteraction(openaiRequest, undefined, error);
throw error;
}
}
private async buildOpenAIRequestForLogging(
request: GenerateContentParameters,
): Promise<OpenAI.Chat.ChatCompletionCreateParams | undefined> {
if (!this.openaiLogger) {
return undefined;
}
const converter = new OpenAIContentConverter(
request.model,
this.schemaCompliance,
);
const messages = converter.convertGeminiRequestToOpenAI(request, {
cleanOrphanToolCalls: false,
});
const openaiRequest: OpenAI.Chat.ChatCompletionCreateParams = {
model: request.model,
messages,
};
if (request.config?.tools) {
openaiRequest.tools = await converter.convertGeminiToolsToOpenAI(
request.config.tools,
);
}
if (request.config?.temperature !== undefined) {
openaiRequest.temperature = request.config.temperature;
}
if (request.config?.topP !== undefined) {
openaiRequest.top_p = request.config.topP;
}
if (request.config?.maxOutputTokens !== undefined) {
openaiRequest.max_tokens = request.config.maxOutputTokens;
}
if (request.config?.presencePenalty !== undefined) {
openaiRequest.presence_penalty = request.config.presencePenalty;
}
if (request.config?.frequencyPenalty !== undefined) {
openaiRequest.frequency_penalty = request.config.frequencyPenalty;
}
return openaiRequest;
}
private async logOpenAIInteraction(
openaiRequest: OpenAI.Chat.ChatCompletionCreateParams | undefined,
response?: GenerateContentResponse,
error?: unknown,
): Promise<void> {
if (!this.openaiLogger || !openaiRequest) {
return;
}
const openaiResponse = response
? this.convertGeminiResponseToOpenAIForLogging(response, openaiRequest)
: undefined;
await this.openaiLogger.logInteraction(
openaiRequest,
openaiResponse,
error instanceof Error
? error
: error
? new Error(String(error))
: undefined,
);
}
private convertGeminiResponseToOpenAIForLogging(
response: GenerateContentResponse,
openaiRequest: OpenAI.Chat.ChatCompletionCreateParams,
): OpenAI.Chat.ChatCompletion {
const converter = new OpenAIContentConverter(
openaiRequest.model,
this.schemaCompliance,
);
return converter.convertGeminiResponseToOpenAI(response);
}
private consolidateGeminiResponsesForLogging(
responses: GenerateContentResponse[],
): GenerateContentResponse | undefined {
if (responses.length === 0) {
return undefined;
}
const consolidated = new GenerateContentResponse();
const combinedParts: Part[] = [];
const functionCallIndex = new Map<string, number>();
let finishReason: FinishReason | undefined;
let usageMetadata: GenerateContentResponseUsageMetadata | undefined;
for (const response of responses) {
if (response.usageMetadata) {
usageMetadata = response.usageMetadata;
}
const candidate = response.candidates?.[0];
if (candidate?.finishReason) {
finishReason = candidate.finishReason;
}
const parts = candidate?.content?.parts ?? [];
for (const part of parts as Part[]) {
if (typeof part === 'string') {
combinedParts.push({ text: part });
continue;
}
if ('text' in part) {
if (part.text) {
combinedParts.push({
text: part.text,
...(part.thought ? { thought: true } : {}),
...(part.thoughtSignature
? { thoughtSignature: part.thoughtSignature }
: {}),
});
}
continue;
}
if ('functionCall' in part && part.functionCall) {
const callKey =
part.functionCall.id || part.functionCall.name || 'tool_call';
const existingIndex = functionCallIndex.get(callKey);
const functionPart = { functionCall: part.functionCall };
if (existingIndex !== undefined) {
combinedParts[existingIndex] = functionPart;
} else {
functionCallIndex.set(callKey, combinedParts.length);
combinedParts.push(functionPart);
}
continue;
}
if ('functionResponse' in part && part.functionResponse) {
combinedParts.push({ functionResponse: part.functionResponse });
continue;
}
combinedParts.push(part);
}
}
const lastResponse = responses[responses.length - 1];
const lastCandidate = lastResponse.candidates?.[0];
consolidated.responseId = lastResponse.responseId;
consolidated.createTime = lastResponse.createTime;
consolidated.modelVersion = lastResponse.modelVersion;
consolidated.promptFeedback = lastResponse.promptFeedback;
consolidated.usageMetadata = usageMetadata;
consolidated.candidates = [
{
content: {
role: lastCandidate?.content?.role || 'model',
parts: combinedParts,
},
...(finishReason ? { finishReason } : {}),
index: 0,
safetyRatings: lastCandidate?.safetyRatings || [],
},
];
return consolidated;
}
async countTokens(req: CountTokensParameters): Promise<CountTokensResponse> {
return this.wrapped.countTokens(req);
}
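The decorator now owns OpenAI-format interaction logging end to end: each Gemini request is mirrored into ChatCompletionCreateParams, streamed chunks are consolidated into one synthetic response, and errors are logged alongside the mirrored request. A minimal wiring sketch, assuming a Config whose content-generator config enables the feature:
// Hypothetical setup; field names follow getContentGeneratorConfig() above.
const logged = new LoggingContentGenerator(innerGenerator, config);
// With enableOpenAILogging: true, every call also writes an OpenAI-shaped
// request/response (or error) pair through OpenAILogger.
const response = await logged.generateContent(request, 'prompt-id');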

View File

@@ -236,8 +236,9 @@ export class OpenAIContentConverter {
*/
convertGeminiRequestToOpenAI(
request: GenerateContentParameters,
options: { cleanOrphanToolCalls: boolean } = { cleanOrphanToolCalls: true },
): OpenAI.Chat.ChatCompletionMessageParam[] {
const messages: OpenAI.Chat.ChatCompletionMessageParam[] = [];
let messages: OpenAI.Chat.ChatCompletionMessageParam[] = [];
// Handle system instruction from config
this.addSystemInstructionMessage(request, messages);
@@ -246,11 +247,89 @@ export class OpenAIContentConverter {
this.processContents(request.contents, messages);
// Clean up orphaned tool calls and merge consecutive assistant messages
const cleanedMessages = this.cleanOrphanedToolCalls(messages);
const mergedMessages =
this.mergeConsecutiveAssistantMessages(cleanedMessages);
if (options.cleanOrphanToolCalls) {
messages = this.cleanOrphanedToolCalls(messages);
}
messages = this.mergeConsecutiveAssistantMessages(messages);
return mergedMessages;
return messages;
}
/**
* Convert Gemini response to OpenAI completion format (for logging).
*/
convertGeminiResponseToOpenAI(
response: GenerateContentResponse,
): OpenAI.Chat.ChatCompletion {
const candidate = response.candidates?.[0];
const parts = (candidate?.content?.parts || []) as Part[];
const parsedParts = this.parseParts(parts);
const message: ExtendedCompletionMessage = {
role: 'assistant',
content: parsedParts.contentParts.join('') || null,
refusal: null,
};
const reasoningContent = parsedParts.thoughtParts.join('');
if (reasoningContent) {
message.reasoning_content = reasoningContent;
}
if (parsedParts.functionCalls.length > 0) {
message.tool_calls = parsedParts.functionCalls.map((call, index) => ({
id: call.id || `call_${index}`,
type: 'function' as const,
function: {
name: call.name || '',
arguments: JSON.stringify(call.args || {}),
},
}));
}
const finishReason = this.mapGeminiFinishReasonToOpenAI(
candidate?.finishReason,
);
const usageMetadata = response.usageMetadata;
const usage: OpenAI.CompletionUsage = {
prompt_tokens: usageMetadata?.promptTokenCount || 0,
completion_tokens: usageMetadata?.candidatesTokenCount || 0,
total_tokens: usageMetadata?.totalTokenCount || 0,
};
if (usageMetadata?.cachedContentTokenCount !== undefined) {
(
usage as OpenAI.CompletionUsage & {
prompt_tokens_details?: { cached_tokens?: number };
}
).prompt_tokens_details = {
cached_tokens: usageMetadata.cachedContentTokenCount,
};
}
const createdMs = response.createTime
? Number(response.createTime)
: Date.now();
const createdSeconds = Number.isFinite(createdMs)
? Math.floor(createdMs / 1000)
: Math.floor(Date.now() / 1000);
return {
id: response.responseId || `gemini-${Date.now()}`,
object: 'chat.completion',
created: createdSeconds,
model: response.modelVersion || this.model,
choices: [
{
index: 0,
message,
finish_reason: finishReason,
logprobs: null,
},
],
usage,
};
}
/**
@@ -836,84 +915,6 @@ export class OpenAIContentConverter {
return response;
}
/**
* Convert Gemini response format to OpenAI chat completion format for logging
*/
convertGeminiResponseToOpenAI(
response: GenerateContentResponse,
): OpenAI.Chat.ChatCompletion {
const candidate = response.candidates?.[0];
const content = candidate?.content;
let messageContent: string | null = null;
const toolCalls: OpenAI.Chat.ChatCompletionMessageToolCall[] = [];
if (content?.parts) {
const textParts: string[] = [];
for (const part of content.parts) {
if ('text' in part && part.text) {
textParts.push(part.text);
} else if ('functionCall' in part && part.functionCall) {
toolCalls.push({
id: part.functionCall.id || `call_${toolCalls.length}`,
type: 'function' as const,
function: {
name: part.functionCall.name || '',
arguments: JSON.stringify(part.functionCall.args || {}),
},
});
}
}
messageContent = textParts.join('').trimEnd();
}
const choice: OpenAI.Chat.ChatCompletion.Choice = {
index: 0,
message: {
role: 'assistant',
content: messageContent,
refusal: null,
},
finish_reason: this.mapGeminiFinishReasonToOpenAI(
candidate?.finishReason,
) as OpenAI.Chat.ChatCompletion.Choice['finish_reason'],
logprobs: null,
};
if (toolCalls.length > 0) {
choice.message.tool_calls = toolCalls;
}
const openaiResponse: OpenAI.Chat.ChatCompletion = {
id: response.responseId || `chatcmpl-${Date.now()}`,
object: 'chat.completion',
created: response.createTime
? Number(response.createTime)
: Math.floor(Date.now() / 1000),
model: this.model,
choices: [choice],
};
// Add usage metadata if available
if (response.usageMetadata) {
openaiResponse.usage = {
prompt_tokens: response.usageMetadata.promptTokenCount || 0,
completion_tokens: response.usageMetadata.candidatesTokenCount || 0,
total_tokens: response.usageMetadata.totalTokenCount || 0,
};
if (response.usageMetadata.cachedContentTokenCount) {
openaiResponse.usage.prompt_tokens_details = {
cached_tokens: response.usageMetadata.cachedContentTokenCount,
};
}
}
return openaiResponse;
}
/**
* Map OpenAI finish reasons to Gemini finish reasons
*/
@@ -931,29 +932,24 @@ export class OpenAIContentConverter {
return mapping[openaiReason] || FinishReason.FINISH_REASON_UNSPECIFIED;
}
/**
* Map Gemini finish reasons to OpenAI finish reasons
*/
private mapGeminiFinishReasonToOpenAI(geminiReason?: unknown): string {
if (!geminiReason) return 'stop';
private mapGeminiFinishReasonToOpenAI(
geminiReason?: FinishReason,
): 'stop' | 'length' | 'tool_calls' | 'content_filter' | 'function_call' {
if (!geminiReason) {
return 'stop';
}
switch (geminiReason) {
case 'STOP':
case 1: // FinishReason.STOP
case FinishReason.STOP:
return 'stop';
case 'MAX_TOKENS':
case 2: // FinishReason.MAX_TOKENS
case FinishReason.MAX_TOKENS:
return 'length';
case 'SAFETY':
case 3: // FinishReason.SAFETY
case FinishReason.SAFETY:
return 'content_filter';
case 'RECITATION':
case 4: // FinishReason.RECITATION
return 'content_filter';
case 'OTHER':
case 5: // FinishReason.OTHER
return 'stop';
default:
if (geminiReason === ('RECITATION' as FinishReason)) {
return 'content_filter';
}
return 'stop';
}
}
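The narrowed signature drops the old string/number fallbacks, so callers must pass the FinishReason enum; RECITATION is matched via a cast in the default branch. In sketch form:
// FinishReason.STOP         -> 'stop'
// FinishReason.MAX_TOKENS   -> 'length'
// FinishReason.SAFETY       -> 'content_filter'
// RECITATION (via cast)     -> 'content_filter'
// anything else / undefined -> 'stop'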

View File

@@ -7,7 +7,7 @@
import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
import type { GenerateContentParameters } from '@google/genai';
import { EnhancedErrorHandler } from './errorHandler.js';
import type { RequestContext } from './telemetryService.js';
import type { RequestContext } from './errorHandler.js';
describe('EnhancedErrorHandler', () => {
let errorHandler: EnhancedErrorHandler;

View File

@@ -5,7 +5,15 @@
*/
import type { GenerateContentParameters } from '@google/genai';
import type { RequestContext } from './telemetryService.js';
export interface RequestContext {
userPromptId: string;
model: string;
authType: string;
startTime: number;
duration: number;
isStreaming: boolean;
}
export interface ErrorHandler {
handle(

View File

@@ -91,11 +91,4 @@ export function determineProvider(
return new DefaultOpenAICompatibleProvider(contentGeneratorConfig, cliConfig);
}
// Services
export {
type TelemetryService,
type RequestContext,
DefaultTelemetryService,
} from './telemetryService.js';
export { type ErrorHandler, EnhancedErrorHandler } from './errorHandler.js';

View File

@@ -11,7 +11,6 @@ import type {
} from '@google/genai';
import type { PipelineConfig } from './pipeline.js';
import { ContentGenerationPipeline } from './pipeline.js';
import { DefaultTelemetryService } from './telemetryService.js';
import { EnhancedErrorHandler } from './errorHandler.js';
import { getDefaultTokenizer } from '../../utils/request-tokenizer/index.js';
import type { ContentGeneratorConfig } from '../contentGenerator.js';
@@ -29,11 +28,6 @@ export class OpenAIContentGenerator implements ContentGenerator {
cliConfig,
provider,
contentGeneratorConfig,
telemetryService: new DefaultTelemetryService(
cliConfig,
contentGeneratorConfig.enableOpenAILogging,
contentGeneratorConfig.openAILoggingDir,
),
errorHandler: new EnhancedErrorHandler(
(error: unknown, request: GenerateContentParameters) =>
this.shouldSuppressErrorLogging(error, request),

View File

@@ -15,7 +15,6 @@ import { OpenAIContentConverter } from './converter.js';
import type { Config } from '../../config/config.js';
import type { ContentGeneratorConfig, AuthType } from '../contentGenerator.js';
import type { OpenAICompatibleProvider } from './provider/index.js';
import type { TelemetryService } from './telemetryService.js';
import type { ErrorHandler } from './errorHandler.js';
// Mock dependencies
@@ -28,7 +27,6 @@ describe('ContentGenerationPipeline', () => {
let mockProvider: OpenAICompatibleProvider;
let mockClient: OpenAI;
let mockConverter: OpenAIContentConverter;
let mockTelemetryService: TelemetryService;
let mockErrorHandler: ErrorHandler;
let mockContentGeneratorConfig: ContentGeneratorConfig;
let mockCliConfig: Config;
@@ -63,13 +61,6 @@ describe('ContentGenerationPipeline', () => {
getDefaultGenerationConfig: vi.fn().mockReturnValue({}),
};
// Mock telemetry service
mockTelemetryService = {
logSuccess: vi.fn().mockResolvedValue(undefined),
logError: vi.fn().mockResolvedValue(undefined),
logStreamingSuccess: vi.fn().mockResolvedValue(undefined),
};
// Mock error handler
mockErrorHandler = {
handle: vi.fn().mockImplementation((error: unknown) => {
@@ -99,7 +90,6 @@ describe('ContentGenerationPipeline', () => {
cliConfig: mockCliConfig,
provider: mockProvider,
contentGeneratorConfig: mockContentGeneratorConfig,
telemetryService: mockTelemetryService,
errorHandler: mockErrorHandler,
};
@@ -172,17 +162,6 @@ describe('ContentGenerationPipeline', () => {
expect(mockConverter.convertOpenAIResponseToGemini).toHaveBeenCalledWith(
mockOpenAIResponse,
);
expect(mockTelemetryService.logSuccess).toHaveBeenCalledWith(
expect.objectContaining({
userPromptId,
model: 'test-model',
authType: 'openai',
isStreaming: false,
}),
mockGeminiResponse,
expect.any(Object),
mockOpenAIResponse,
);
});
it('should handle tools in request', async () => {
@@ -268,16 +247,6 @@ describe('ContentGenerationPipeline', () => {
'API Error',
);
expect(mockTelemetryService.logError).toHaveBeenCalledWith(
expect.objectContaining({
userPromptId,
model: 'test-model',
authType: 'openai',
isStreaming: false,
}),
testError,
expect.any(Object),
);
expect(mockErrorHandler.handle).toHaveBeenCalledWith(
testError,
expect.any(Object),
@@ -376,17 +345,6 @@ describe('ContentGenerationPipeline', () => {
signal: undefined,
}),
);
expect(mockTelemetryService.logStreamingSuccess).toHaveBeenCalledWith(
expect.objectContaining({
userPromptId,
model: 'test-model',
authType: 'openai',
isStreaming: true,
}),
[mockGeminiResponse1, mockGeminiResponse2],
expect.any(Object),
[mockChunk1, mockChunk2],
);
});
it('should filter empty responses', async () => {
@@ -490,16 +448,6 @@ describe('ContentGenerationPipeline', () => {
expect(results).toHaveLength(0); // No results due to error
expect(mockConverter.resetStreamingToolCalls).toHaveBeenCalledTimes(2); // Once at start, once on error
expect(mockTelemetryService.logError).toHaveBeenCalledWith(
expect.objectContaining({
userPromptId,
model: 'test-model',
authType: 'openai',
isStreaming: true,
}),
testError,
expect.any(Object),
);
expect(mockErrorHandler.handle).toHaveBeenCalledWith(
testError,
expect.any(Object),
@@ -650,18 +598,6 @@ describe('ContentGenerationPipeline', () => {
candidatesTokenCount: 20,
totalTokenCount: 30,
});
expect(mockTelemetryService.logStreamingSuccess).toHaveBeenCalledWith(
expect.objectContaining({
userPromptId,
model: 'test-model',
authType: 'openai',
isStreaming: true,
}),
results,
expect.any(Object),
[mockChunk1, mockChunk2, mockChunk3],
);
});
it('should handle ideal case where last chunk has both finishReason and usageMetadata', async () => {
@@ -853,18 +789,6 @@ describe('ContentGenerationPipeline', () => {
candidatesTokenCount: 20,
totalTokenCount: 30,
});
expect(mockTelemetryService.logStreamingSuccess).toHaveBeenCalledWith(
expect.objectContaining({
userPromptId,
model: 'test-model',
authType: 'openai',
isStreaming: true,
}),
results,
expect.any(Object),
[mockChunk1, mockChunk2, mockChunk3],
);
});
it('should handle providers that send finishReason and valid usage in same chunk', async () => {
@@ -1118,19 +1042,6 @@ describe('ContentGenerationPipeline', () => {
await pipeline.execute(request, userPromptId);
// Assert
expect(mockTelemetryService.logSuccess).toHaveBeenCalledWith(
expect.objectContaining({
userPromptId,
model: 'test-model',
authType: 'openai',
isStreaming: false,
startTime: expect.any(Number),
duration: expect.any(Number),
}),
expect.any(Object),
expect.any(Object),
expect.any(Object),
);
});
it('should create context with correct properties for streaming request', async () => {
@@ -1173,19 +1084,6 @@ describe('ContentGenerationPipeline', () => {
}
// Assert
expect(mockTelemetryService.logStreamingSuccess).toHaveBeenCalledWith(
expect.objectContaining({
userPromptId,
model: 'test-model',
authType: 'openai',
isStreaming: true,
startTime: expect.any(Number),
duration: expect.any(Number),
}),
expect.any(Array),
expect.any(Object),
expect.any(Array),
);
});
it('should collect all OpenAI chunks for logging even when Gemini responses are filtered', async () => {
@@ -1329,22 +1227,6 @@ describe('ContentGenerationPipeline', () => {
// Should only yield the final response (empty ones are filtered)
expect(responses).toHaveLength(1);
expect(responses[0]).toBe(finalGeminiResponse);
// Verify telemetry was called with ALL OpenAI chunks, including the filtered ones
expect(mockTelemetryService.logStreamingSuccess).toHaveBeenCalledWith(
expect.objectContaining({
model: 'test-model',
duration: expect.any(Number),
userPromptId: 'test-prompt-id',
authType: 'openai',
}),
[finalGeminiResponse], // Only the non-empty Gemini response
expect.objectContaining({
model: 'test-model',
messages: [{ role: 'user', content: 'test' }],
}),
[partialToolCallChunk1, partialToolCallChunk2, finishChunk], // ALL OpenAI chunks
);
});
});
});

View File

@@ -13,14 +13,12 @@ import type { Config } from '../../config/config.js';
import type { ContentGeneratorConfig } from '../contentGenerator.js';
import type { OpenAICompatibleProvider } from './provider/index.js';
import { OpenAIContentConverter } from './converter.js';
import type { TelemetryService, RequestContext } from './telemetryService.js';
import type { ErrorHandler } from './errorHandler.js';
import type { ErrorHandler, RequestContext } from './errorHandler.js';
export interface PipelineConfig {
cliConfig: Config;
provider: OpenAICompatibleProvider;
contentGeneratorConfig: ContentGeneratorConfig;
telemetryService: TelemetryService;
errorHandler: ErrorHandler;
}
@@ -46,7 +44,7 @@ export class ContentGenerationPipeline {
request,
userPromptId,
false,
async (openaiRequest, context) => {
async (openaiRequest) => {
const openaiResponse = (await this.client.chat.completions.create(
openaiRequest,
{
@@ -57,14 +55,6 @@ export class ContentGenerationPipeline {
const geminiResponse =
this.converter.convertOpenAIResponseToGemini(openaiResponse);
// Log success
await this.config.telemetryService.logSuccess(
context,
geminiResponse,
openaiRequest,
openaiResponse,
);
return geminiResponse;
},
);
@@ -88,12 +78,7 @@ export class ContentGenerationPipeline {
)) as AsyncIterable<OpenAI.Chat.ChatCompletionChunk>;
// Stage 2: Process stream with conversion and logging
return this.processStreamWithLogging(
stream,
context,
openaiRequest,
request,
);
return this.processStreamWithLogging(stream, context, request);
},
);
}
@@ -110,11 +95,9 @@ export class ContentGenerationPipeline {
private async *processStreamWithLogging(
stream: AsyncIterable<OpenAI.Chat.ChatCompletionChunk>,
context: RequestContext,
openaiRequest: OpenAI.Chat.ChatCompletionCreateParams,
request: GenerateContentParameters,
): AsyncGenerator<GenerateContentResponse> {
const collectedGeminiResponses: GenerateContentResponse[] = [];
const collectedOpenAIChunks: OpenAI.Chat.ChatCompletionChunk[] = [];
// Reset streaming tool calls to prevent data pollution from previous streams
this.converter.resetStreamingToolCalls();
@@ -125,9 +108,6 @@ export class ContentGenerationPipeline {
try {
// Stage 2a: Convert and yield each chunk while preserving original
for await (const chunk of stream) {
// Always collect OpenAI chunks for logging, regardless of Gemini conversion result
collectedOpenAIChunks.push(chunk);
const response = this.converter.convertOpenAIChunkToGemini(chunk);
// Stage 2b: Filter empty responses to avoid downstream issues
@@ -164,15 +144,8 @@ export class ContentGenerationPipeline {
yield pendingFinishResponse;
}
// Stage 2e: Stream completed successfully - perform logging with original OpenAI chunks
// Stage 2e: Stream completed successfully
context.duration = Date.now() - context.startTime;
await this.config.telemetryService.logStreamingSuccess(
context,
collectedGeminiResponses,
openaiRequest,
collectedOpenAIChunks,
);
} catch (error) {
// Clear streaming tool calls on error to prevent data pollution
this.converter.resetStreamingToolCalls();
@@ -258,7 +231,7 @@ export class ContentGenerationPipeline {
const baseRequest: OpenAI.Chat.ChatCompletionCreateParams = {
model: this.contentGeneratorConfig.model,
messages,
...this.buildSamplingParameters(request),
...this.buildGenerateContentConfig(request),
};
// Add streaming options if present
@@ -280,7 +253,7 @@ export class ContentGenerationPipeline {
return this.config.provider.buildRequest(baseRequest, userPromptId);
}
private buildSamplingParameters(
private buildGenerateContentConfig(
request: GenerateContentParameters,
): Record<string, unknown> {
const defaultSamplingParams =
@@ -316,7 +289,7 @@ export class ContentGenerationPipeline {
return value !== undefined ? { [key]: value } : {};
};
const params = {
const params: Record<string, unknown> = {
// Parameters with request fallback but no defaults
...addParameterIfDefined('temperature', 'temperature', 'temperature'),
...addParameterIfDefined('top_p', 'top_p', 'topP'),
@@ -337,11 +310,24 @@ export class ContentGenerationPipeline {
'frequency_penalty',
'frequencyPenalty',
),
...this.buildReasoningConfig(),
};
return params;
}
private buildReasoningConfig(): Record<string, unknown> {
const reasoning = this.contentGeneratorConfig.reasoning;
if (reasoning === false) {
return {};
}
return {
reasoning_effort: reasoning?.effort ?? 'medium',
};
}
/**
* Common error handling wrapper for execute methods
*/
@@ -369,13 +355,7 @@ export class ContentGenerationPipeline {
return result;
} catch (error) {
// Use shared error handling logic
return await this.handleError(
error,
context,
request,
userPromptId,
isStreaming,
);
return await this.handleError(error, context, request);
}
}
@@ -387,37 +367,8 @@ export class ContentGenerationPipeline {
error: unknown,
context: RequestContext,
request: GenerateContentParameters,
userPromptId?: string,
isStreaming?: boolean,
): Promise<never> {
context.duration = Date.now() - context.startTime;
// Build request for logging (may fail, but we still want to log the error)
let openaiRequest: OpenAI.Chat.ChatCompletionCreateParams;
try {
if (userPromptId !== undefined && isStreaming !== undefined) {
openaiRequest = await this.buildRequest(
request,
userPromptId,
isStreaming,
);
} else {
// For processStreamWithLogging, we don't have userPromptId/isStreaming,
// so create a minimal request
openaiRequest = {
model: this.contentGeneratorConfig.model,
messages: [],
};
}
} catch (_buildError) {
// If we can't build the request, create a minimal one for logging
openaiRequest = {
model: this.contentGeneratorConfig.model,
messages: [],
};
}
await this.config.telemetryService.logError(context, error, openaiRequest);
this.config.errorHandler.handle(error, context, request);
}
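On the OpenAI-compatible path the same reasoning union becomes a reasoning_effort parameter, with an asymmetry worth noting against the Gemini mapping: an unset value defaults to 'medium' here rather than omitting the field, while false omits it entirely. In sketch form:
// reasoning: false              -> {} (no reasoning_effort sent)
// reasoning: { effort: 'high' } -> { reasoning_effort: 'high' }
// reasoning: undefined          -> { reasoning_effort: 'medium' }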

View File

@@ -39,7 +39,8 @@ export class DashScopeOpenAICompatibleProvider
return (
authType === AuthType.QWEN_OAUTH ||
baseUrl === 'https://dashscope.aliyuncs.com/compatible-mode/v1' ||
baseUrl === 'https://dashscope-intl.aliyuncs.com/compatible-mode/v1'
baseUrl === 'https://dashscope-intl.aliyuncs.com/compatible-mode/v1' ||
!baseUrl
);
}
@@ -144,9 +145,7 @@ export class DashScopeOpenAICompatibleProvider
getDefaultGenerationConfig(): GenerateContentConfig {
return {
temperature: 0.7,
topP: 0.8,
topK: 20,
temperature: 0.3,
};
}
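Two behavioral changes land here: a missing baseUrl now routes to the DashScope-compatible provider by default, and the default sampling config is reduced to a single, lower temperature. In sketch form:
// baseUrl: 'https://dashscope.aliyuncs.com/compatible-mode/v1' -> matches
// baseUrl: undefined or ''                                     -> now also matches
// getDefaultGenerationConfig(): { temperature: 0.3 }
//   (previously { temperature: 0.7, topP: 0.8, topK: 20 })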

View File

@@ -1,275 +0,0 @@
/**
* @license
* Copyright 2025 Qwen
* SPDX-License-Identifier: Apache-2.0
*/
import type { Config } from '../../config/config.js';
import { logApiError, logApiResponse } from '../../telemetry/loggers.js';
import { ApiErrorEvent, ApiResponseEvent } from '../../telemetry/types.js';
import { OpenAILogger } from '../../utils/openaiLogger.js';
import type { GenerateContentResponse } from '@google/genai';
import type OpenAI from 'openai';
import type { ExtendedCompletionChunkDelta } from './converter.js';
export interface RequestContext {
userPromptId: string;
model: string;
authType: string;
startTime: number;
duration: number;
isStreaming: boolean;
}
export interface TelemetryService {
logSuccess(
context: RequestContext,
response: GenerateContentResponse,
openaiRequest?: OpenAI.Chat.ChatCompletionCreateParams,
openaiResponse?: OpenAI.Chat.ChatCompletion,
): Promise<void>;
logError(
context: RequestContext,
error: unknown,
openaiRequest?: OpenAI.Chat.ChatCompletionCreateParams,
): Promise<void>;
logStreamingSuccess(
context: RequestContext,
responses: GenerateContentResponse[],
openaiRequest?: OpenAI.Chat.ChatCompletionCreateParams,
openaiChunks?: OpenAI.Chat.ChatCompletionChunk[],
): Promise<void>;
}
export class DefaultTelemetryService implements TelemetryService {
private logger: OpenAILogger;
constructor(
private config: Config,
private enableOpenAILogging: boolean = false,
openAILoggingDir?: string,
) {
// Always create a new logger instance to ensure correct working directory
// If no custom directory is provided, undefined will use the default path
this.logger = new OpenAILogger(openAILoggingDir);
}
async logSuccess(
context: RequestContext,
response: GenerateContentResponse,
openaiRequest?: OpenAI.Chat.ChatCompletionCreateParams,
openaiResponse?: OpenAI.Chat.ChatCompletion,
): Promise<void> {
// Log API response event for UI telemetry
const responseEvent = new ApiResponseEvent(
response.responseId || 'unknown',
context.model,
context.duration,
context.userPromptId,
context.authType,
response.usageMetadata,
);
logApiResponse(this.config, responseEvent);
// Log interaction if enabled
if (this.enableOpenAILogging && openaiRequest && openaiResponse) {
await this.logger.logInteraction(openaiRequest, openaiResponse);
}
}
async logError(
context: RequestContext,
error: unknown,
openaiRequest?: OpenAI.Chat.ChatCompletionCreateParams,
): Promise<void> {
const errorMessage = error instanceof Error ? error.message : String(error);
// Log API error event for UI telemetry
const errorEvent = new ApiErrorEvent(
// eslint-disable-next-line @typescript-eslint/no-explicit-any
(error as any)?.requestID || 'unknown',
context.model,
errorMessage,
context.duration,
context.userPromptId,
context.authType,
// eslint-disable-next-line @typescript-eslint/no-explicit-any
(error as any)?.type,
// eslint-disable-next-line @typescript-eslint/no-explicit-any
(error as any)?.code,
);
logApiError(this.config, errorEvent);
// Log error interaction if enabled
if (this.enableOpenAILogging && openaiRequest) {
await this.logger.logInteraction(
openaiRequest,
undefined,
error as Error,
);
}
}
async logStreamingSuccess(
context: RequestContext,
responses: GenerateContentResponse[],
openaiRequest?: OpenAI.Chat.ChatCompletionCreateParams,
openaiChunks?: OpenAI.Chat.ChatCompletionChunk[],
): Promise<void> {
// Get final usage metadata from the last response that has it
const finalUsageMetadata = responses
.slice()
.reverse()
.find((r) => r.usageMetadata)?.usageMetadata;
// Log API response event for UI telemetry
const responseEvent = new ApiResponseEvent(
responses[responses.length - 1]?.responseId || 'unknown',
context.model,
context.duration,
context.userPromptId,
context.authType,
finalUsageMetadata,
);
logApiResponse(this.config, responseEvent);
// Log interaction if enabled - combine chunks only when needed
if (
this.enableOpenAILogging &&
openaiRequest &&
openaiChunks &&
openaiChunks.length > 0
) {
const combinedResponse = this.combineOpenAIChunksForLogging(openaiChunks);
await this.logger.logInteraction(openaiRequest, combinedResponse);
}
}
/**
* Combine OpenAI chunks for logging purposes
* This method consolidates all OpenAI stream chunks into a single ChatCompletion response
* for telemetry and logging purposes, avoiding unnecessary format conversions
*/
private combineOpenAIChunksForLogging(
chunks: OpenAI.Chat.ChatCompletionChunk[],
): OpenAI.Chat.ChatCompletion {
if (chunks.length === 0) {
throw new Error('No chunks to combine');
}
const firstChunk = chunks[0];
// Combine all content from chunks
let combinedContent = '';
const toolCalls: OpenAI.Chat.ChatCompletionMessageToolCall[] = [];
let finishReason:
| 'stop'
| 'length'
| 'tool_calls'
| 'content_filter'
| 'function_call'
| null = null;
let combinedReasoning = '';
let usage:
| {
prompt_tokens: number;
completion_tokens: number;
total_tokens: number;
}
| undefined;
for (const chunk of chunks) {
const choice = chunk.choices?.[0];
if (choice) {
// Combine reasoning content
const reasoningContent = (choice.delta as ExtendedCompletionChunkDelta)
?.reasoning_content;
if (reasoningContent) {
combinedReasoning += reasoningContent;
}
// Combine text content
if (choice.delta?.content) {
combinedContent += choice.delta.content;
}
// Collect tool calls
if (choice.delta?.tool_calls) {
for (const toolCall of choice.delta.tool_calls) {
if (toolCall.index !== undefined) {
if (!toolCalls[toolCall.index]) {
toolCalls[toolCall.index] = {
id: toolCall.id || '',
type: toolCall.type || 'function',
function: { name: '', arguments: '' },
};
}
if (toolCall.function?.name) {
toolCalls[toolCall.index].function.name +=
toolCall.function.name;
}
if (toolCall.function?.arguments) {
toolCalls[toolCall.index].function.arguments +=
toolCall.function.arguments;
}
}
}
}
// Get finish reason from the last chunk
if (choice.finish_reason) {
finishReason = choice.finish_reason;
}
}
// Get usage from the last chunk that has it
if (chunk.usage) {
usage = chunk.usage;
}
}
// Create the combined ChatCompletion response
const message: OpenAI.Chat.ChatCompletionMessage = {
role: 'assistant',
content: combinedContent || null,
refusal: null,
};
if (combinedReasoning) {
// Attach reasoning content if any thought tokens were streamed
(message as { reasoning_content?: string }).reasoning_content =
combinedReasoning;
}
// Add tool calls if any
if (toolCalls.length > 0) {
message.tool_calls = toolCalls.filter((tc) => tc.id); // Filter out empty tool calls
}
const combinedResponse: OpenAI.Chat.ChatCompletion = {
id: firstChunk.id,
object: 'chat.completion',
created: firstChunk.created,
model: firstChunk.model,
choices: [
{
index: 0,
message,
finish_reason: finishReason || 'stop',
logprobs: null,
},
],
usage: usage || {
prompt_tokens: 0,
completion_tokens: 0,
total_tokens: 0,
},
system_fingerprint: firstChunk.system_fingerprint,
};
return combinedResponse;
}
}
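The deleted service's central technique is folding a stream of ChatCompletionChunk deltas into a single ChatCompletion for logging. A minimal sketch of that accumulation pattern, using simplified stand-in types rather than the real OpenAI SDK shapes:

// Sketch of the stream-accumulation pattern used by
// combineOpenAIChunksForLogging above. `Chunk` is a simplified stand-in,
// not the OpenAI SDK's ChatCompletionChunk.
interface Chunk {
  content?: string;
  finishReason?: string | null;
  usage?: { promptTokens: number; completionTokens: number };
}

function combineChunks(chunks: Chunk[]) {
  if (chunks.length === 0) throw new Error('No chunks to combine');
  let content = '';
  let finishReason: string | null = null;
  let usage: Chunk['usage'];
  for (const chunk of chunks) {
    if (chunk.content) content += chunk.content; // concatenate text deltas
    if (chunk.finishReason) finishReason = chunk.finishReason; // last one wins
    if (chunk.usage) usage = chunk.usage; // usually only on the final chunk
  }
  return { content, finishReason: finishReason ?? 'stop', usage };
}

// combineChunks([{ content: 'Hel' }, { content: 'lo', finishReason: 'stop' }])
// -> { content: 'Hello', finishReason: 'stop', usage: undefined }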

View File

@@ -264,7 +264,7 @@ describe('loggers', () => {
'event.timestamp': '2025-01-01T00:00:00.000Z',
prompt_length: 11,
prompt_id: 'prompt-id-9',
- auth_type: 'gemini-api-key',
+ auth_type: 'gemini',
},
});
});
@@ -333,7 +333,7 @@ describe('loggers', () => {
total_token_count: 0,
response_text: 'test-response',
prompt_id: 'prompt-id-1',
- auth_type: 'gemini-api-key',
+ auth_type: 'gemini',
},
});

View File

@@ -1,6 +1,6 @@
{
"name": "@qwen-code/sdk",
"version": "0.6.0-nightly.20251225.9f65bd3b",
"version": "0.1.0",
"description": "TypeScript SDK for programmatic access to qwen-code CLI",
"main": "./dist/index.cjs",
"module": "./dist/index.mjs",

View File

@@ -272,8 +272,6 @@ export class Query implements AsyncIterable<SDKMessage> {
// Get only successfully connected SDK servers for CLI
const sdkMcpServersForCli = this.getSdkMcpServersForCli();
const mcpServersForCli = this.getMcpServersForCli();
- logger.debug('SDK MCP servers for CLI:', sdkMcpServersForCli);
- logger.debug('External MCP servers for CLI:', mcpServersForCli);
await this.sendControlRequest(ControlRequestType.INITIALIZE, {
hooks: null,
@@ -629,6 +627,11 @@ export class Query implements AsyncIterable<SDKMessage> {
return Promise.reject(new Error('Query is closed'));
}
if (subtype !== ControlRequestType.INITIALIZE) {
// Ensure all other control requests get processed after initialization
await this.initialized;
}
const requestId = randomUUID();
const request: CLIControlRequest = {

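The added guard makes every control request other than INITIALIZE wait until initialization has finished. A minimal sketch of the gating pattern, with hypothetical names rather than the SDK's real classes:

// Sketch: queue all non-initialize requests behind a one-time barrier.
class ControlGate {
  private readonly initialized: Promise<void>;
  private markReady!: () => void;

  constructor() {
    // The executor runs synchronously, so markReady is assigned here.
    this.initialized = new Promise((resolve) => (this.markReady = resolve));
  }

  // Call once the INITIALIZE round-trip has completed.
  ready(): void {
    this.markReady();
  }

  async send(subtype: string): Promise<void> {
    if (subtype !== 'initialize') {
      await this.initialized; // everything else waits for initialization
    }
    // ...build and dispatch the request here...
  }
}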
View File

@@ -1,6 +1,6 @@
{
"name": "@qwen-code/qwen-code-test-utils",
"version": "0.6.0-nightly.20251225.9f65bd3b",
"version": "0.6.0-nightly.20251228.2bc80795",
"private": true,
"main": "src/index.ts",
"license": "Apache-2.0",

View File

@@ -2,7 +2,7 @@
"name": "qwen-code-vscode-ide-companion",
"displayName": "Qwen Code Companion",
"description": "Enable Qwen Code with direct access to your VS Code workspace.",
"version": "0.6.0-nightly.20251225.9f65bd3b",
"version": "0.6.0-nightly.20251228.2bc80795",
"publisher": "qwenlm",
"icon": "assets/icon.png",
"repository": {

View File

@@ -50,6 +50,9 @@ vi.mock('vscode', () => ({
registerTextDocumentContentProvider: vi.fn(),
onDidChangeWorkspaceFolders: vi.fn(),
onDidGrantWorkspaceTrust: vi.fn(),
registerFileSystemProvider: vi.fn(() => ({
dispose: vi.fn(),
})),
},
commands: {
registerCommand: vi.fn(),

View File

@@ -16,6 +16,7 @@ import {
} from '@qwen-code/qwen-code-core/src/ide/detect-ide.js';
import { WebViewProvider } from './webview/WebViewProvider.js';
import { registerNewCommands } from './commands/index.js';
import { ReadonlyFileSystemProvider } from './services/readonlyFileSystemProvider.js';
const CLI_IDE_COMPANION_IDENTIFIER = 'qwenlm.qwen-code-vscode-ide-companion';
const INFO_MESSAGE_SHOWN_KEY = 'qwenCodeInfoMessageShown';
@@ -110,6 +111,19 @@ export async function activate(context: vscode.ExtensionContext) {
checkForUpdates(context, log);
// Create and register readonly file system provider
// The provider registers itself as a singleton in the constructor
const readonlyProvider = new ReadonlyFileSystemProvider();
context.subscriptions.push(
vscode.workspace.registerFileSystemProvider(
ReadonlyFileSystemProvider.getScheme(),
readonlyProvider,
{ isCaseSensitive: true, isReadonly: true },
),
readonlyProvider,
);
log('Readonly file system provider registered');
const diffContentProvider = new DiffContentProvider();
const diffManager = new DiffManager(
log,

View File

@@ -38,6 +38,10 @@ vi.mock('node:os', async (importOriginal) => {
};
});
vi.mock('@qwen-code/qwen-code-core/src/ide/detect-ide.js', () => ({
detectIdeFromEnv: vi.fn(() => ({ name: 'vscode', displayName: 'VS Code' })),
}));
const vscodeMock = vi.hoisted(() => ({
workspace: {
workspaceFolders: [
@@ -58,6 +62,13 @@ const vscodeMock = vi.hoisted(() => ({
vi.mock('vscode', () => vscodeMock);
vi.mock('@qwen-code/qwen-code-core/src/ide/detect-ide.js', () => ({
detectIdeFromEnv: vi.fn(() => ({
name: 'vscode',
displayName: 'VS Code',
})),
}));
vi.mock('./open-files-manager', () => {
const OpenFilesManager = vi.fn();
OpenFilesManager.prototype.onDidChange = vi.fn(() => ({ dispose: vi.fn() }));

View File

@@ -146,6 +146,8 @@ export class AcpConnection {
console.error(
`[ACP qwen] Process exited with code: ${code}, signal: ${signal}`,
);
// Clear pending requests when process exits
this.pendingRequests.clear();
});
// Wait for process to start

View File

@@ -8,6 +8,7 @@ import type {
AcpSessionUpdate,
AcpPermissionRequest,
AuthenticateUpdateNotification,
ModelInfo,
} from '../types/acpTypes.js';
import type { ApprovalModeValue } from '../types/approvalModeValueTypes.js';
import { QwenSessionReader, type QwenSession } from './qwenSessionReader.js';
@@ -17,6 +18,7 @@ import type {
PlanEntry,
ToolCallUpdateData,
QwenAgentCallbacks,
UsageStatsPayload,
} from '../types/chatTypes.js';
import {
QwenConnectionHandler,
@@ -24,6 +26,7 @@ import {
} from '../services/qwenConnectionHandler.js';
import { QwenSessionUpdateHandler } from './qwenSessionUpdateHandler.js';
import { authMethod } from '../types/acpTypes.js';
import { extractModelInfoFromNewSessionResult } from '../utils/acpModelInfo.js';
import { isAuthenticationRequiredError } from '../utils/authErrors.js';
import { handleAuthenticateUpdate } from '../utils/authNotificationHandler.js';
@@ -195,12 +198,16 @@ export class QwenAgentManager {
options?: AgentConnectOptions,
): Promise<QwenConnectionResult> {
this.currentWorkingDir = workingDir;
- return this.connectionHandler.connect(
+ const res = await this.connectionHandler.connect(
this.connection,
workingDir,
cliEntryPath,
options,
);
if (res.modelInfo && this.callbacks.onModelInfo) {
this.callbacks.onModelInfo(res.modelInfo);
}
return res;
}
/**
@@ -1091,9 +1098,10 @@ export class QwenAgentManager {
this.sessionCreateInFlight = (async () => {
try {
let newSessionResult: unknown;
// Try to create a new ACP session. If Qwen asks for auth, let it handle authentication.
try {
- await this.connection.newSession(workingDir);
+ newSessionResult = await this.connection.newSession(workingDir);
} catch (err) {
const requiresAuth = isAuthenticationRequiredError(err);
@@ -1115,7 +1123,7 @@ export class QwenAgentManager {
);
// Add a slight delay to ensure auth state is settled
await new Promise((resolve) => setTimeout(resolve, 300));
- await this.connection.newSession(workingDir);
+ newSessionResult = await this.connection.newSession(workingDir);
} catch (reauthErr) {
console.error(
'[QwenAgentManager] Re-authentication failed:',
@@ -1127,6 +1135,13 @@ export class QwenAgentManager {
throw err;
}
}
const modelInfo =
extractModelInfoFromNewSessionResult(newSessionResult);
if (modelInfo && this.callbacks.onModelInfo) {
this.callbacks.onModelInfo(modelInfo);
}
const newSessionId = this.connection.currentSessionId;
console.log(
'[QwenAgentManager] New session created with ID:',
@@ -1257,6 +1272,22 @@ export class QwenAgentManager {
this.sessionUpdateHandler.updateCallbacks(this.callbacks);
}
/**
* Register callback for usage metadata updates
*/
onUsageUpdate(callback: (stats: UsageStatsPayload) => void): void {
this.callbacks.onUsageUpdate = callback;
this.sessionUpdateHandler.updateCallbacks(this.callbacks);
}
/**
* Register callback for model info updates
*/
onModelInfo(callback: (info: ModelInfo) => void): void {
this.callbacks.onModelInfo = callback;
this.sessionUpdateHandler.updateCallbacks(this.callbacks);
}
/**
* Disconnect
*/

View File

@@ -13,10 +13,13 @@
import type { AcpConnection } from './acpConnection.js';
import { isAuthenticationRequiredError } from '../utils/authErrors.js';
import { authMethod } from '../types/acpTypes.js';
import { extractModelInfoFromNewSessionResult } from '../utils/acpModelInfo.js';
import type { ModelInfo } from '../types/acpTypes.js';
export interface QwenConnectionResult {
sessionCreated: boolean;
requiresAuth: boolean;
modelInfo?: ModelInfo;
}
/**
@@ -44,6 +47,7 @@ export class QwenConnectionHandler {
const autoAuthenticate = options?.autoAuthenticate ?? true;
let sessionCreated = false;
let requiresAuth = false;
let modelInfo: ModelInfo | undefined;
// Build extra CLI arguments (only essential parameters)
const extraArgs: string[] = [];
@@ -66,13 +70,15 @@ export class QwenConnectionHandler {
console.log(
'[QwenAgentManager] Creating new session (letting CLI handle authentication)...',
);
- await this.newSessionWithRetry(
+ const newSessionResult = await this.newSessionWithRetry(
connection,
workingDir,
3,
authMethod,
autoAuthenticate,
);
modelInfo =
extractModelInfoFromNewSessionResult(newSessionResult) || undefined;
console.log('[QwenAgentManager] New session created successfully');
sessionCreated = true;
} catch (sessionError) {
@@ -99,7 +105,7 @@ export class QwenConnectionHandler {
console.log(`\n========================================`);
console.log(`[QwenAgentManager] ✅ CONNECT() COMPLETED SUCCESSFULLY`);
console.log(`========================================\n`);
- return { sessionCreated, requiresAuth };
+ return { sessionCreated, requiresAuth, modelInfo };
}
/**
@@ -115,15 +121,15 @@ export class QwenConnectionHandler {
maxRetries: number,
authMethod: string,
autoAuthenticate: boolean,
- ): Promise<void> {
+ ): Promise<unknown> {
for (let attempt = 1; attempt <= maxRetries; attempt++) {
try {
console.log(
`[QwenAgentManager] Creating session (attempt ${attempt}/${maxRetries})...`,
);
- await connection.newSession(workingDir);
+ const res = await connection.newSession(workingDir);
console.log('[QwenAgentManager] Session created successfully');
- return;
+ return res;
} catch (error) {
const errorMessage =
error instanceof Error ? error.message : String(error);
@@ -155,11 +161,11 @@ export class QwenConnectionHandler {
'[QwenAgentManager] newSessionWithRetry Authentication successful',
);
// Retry immediately after successful auth
- await connection.newSession(workingDir);
+ const res = await connection.newSession(workingDir);
console.log(
'[QwenAgentManager] Session created successfully after auth',
);
- return;
+ return res;
} catch (authErr) {
console.error(
'[QwenAgentManager] Re-authentication failed:',
@@ -180,5 +186,7 @@ export class QwenConnectionHandler {
await new Promise((resolve) => setTimeout(resolve, delay));
}
}
throw new Error('Session creation failed unexpectedly');
}
}
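newSessionWithRetry is a bounded-retry loop that now also propagates the session result to its caller. A generic sketch of the same shape, assuming a caller-supplied attempt function and a fixed delay (withRetry is a hypothetical helper, not part of the diff):

// Sketch: retry an async operation, returning the first successful result.
async function withRetry<T>(
  attempt: () => Promise<T>,
  maxRetries = 3,
  delayMs = 300,
): Promise<T> {
  let lastError: unknown;
  for (let i = 1; i <= maxRetries; i++) {
    try {
      return await attempt();
    } catch (err) {
      lastError = err;
      if (i < maxRetries) {
        // Wait before the next attempt, as the handler above does.
        await new Promise((resolve) => setTimeout(resolve, delayMs));
      }
    }
  }
  throw lastError instanceof Error
    ? lastError
    : new Error('Operation failed after retries');
}

// const result = await withRetry(() => connection.newSession(workingDir));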

View File

@@ -10,9 +10,12 @@
* Handles session updates from ACP and dispatches them to appropriate callbacks
*/
- import type { AcpSessionUpdate } from '../types/acpTypes.js';
+ import type { AcpSessionUpdate, SessionUpdateMeta } from '../types/acpTypes.js';
import type { ApprovalModeValue } from '../types/approvalModeValueTypes.js';
- import type { QwenAgentCallbacks } from '../types/chatTypes.js';
+ import type {
+   QwenAgentCallbacks,
+   UsageStatsPayload,
+ } from '../types/chatTypes.js';
/**
* Qwen Session Update Handler class
@@ -57,6 +60,7 @@ export class QwenSessionUpdateHandler {
if (update.content?.text && this.callbacks.onStreamChunk) {
this.callbacks.onStreamChunk(update.content.text);
}
this.emitUsageMeta(update._meta);
break;
case 'agent_thought_chunk':
@@ -71,6 +75,7 @@ export class QwenSessionUpdateHandler {
this.callbacks.onStreamChunk(update.content.text);
}
}
this.emitUsageMeta(update._meta);
break;
case 'tool_call': {
@@ -160,4 +165,17 @@ export class QwenSessionUpdateHandler {
break;
}
}
private emitUsageMeta(meta?: SessionUpdateMeta): void {
if (!meta || !this.callbacks.onUsageUpdate) {
return;
}
const payload: UsageStatsPayload = {
usage: meta.usage || undefined,
durationMs: meta.durationMs ?? undefined,
};
this.callbacks.onUsageUpdate(payload);
}
}
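For reference, a hypothetical agent_message_chunk update carrying the new _meta field; emitUsageMeta would forward its usage and durationMs to the onUsageUpdate callback (all numbers illustrative):

// Illustrative payload only; real updates arrive over the ACP connection.
const update = {
  sessionUpdate: 'agent_message_chunk',
  content: { text: 'Hello' },
  _meta: {
    usage: { promptTokens: 1200, completionTokens: 80, totalTokens: 1280 },
    durationMs: 950,
  },
};
// emitUsageMeta(update._meta) -> onUsageUpdate({ usage: {...}, durationMs: 950 })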

View File

@@ -0,0 +1,204 @@
/**
* @license
* Copyright 2025 Qwen Team
* SPDX-License-Identifier: Apache-2.0
*/
import * as vscode from 'vscode';
/**
* Readonly file system provider for temporary files
* Uses custom URI scheme to create readonly documents in VS Code
*/
export class ReadonlyFileSystemProvider
implements vscode.FileSystemProvider, vscode.Disposable
{
private static readonly scheme = 'qwen-readonly';
private static instance: ReadonlyFileSystemProvider | null = null;
private readonly files = new Map<string, Uint8Array>();
private readonly emitter = new vscode.EventEmitter<
vscode.FileChangeEvent[]
>();
private readonly disposables: vscode.Disposable[] = [];
readonly onDidChangeFile = this.emitter.event;
constructor() {
// Ensure only one instance exists
if (ReadonlyFileSystemProvider.instance !== null) {
console.warn(
'[ReadonlyFileSystemProvider] Instance already exists, replacing with new instance',
);
}
this.disposables.push(this.emitter);
// Register as global singleton
ReadonlyFileSystemProvider.instance = this;
}
static getScheme(): string {
return ReadonlyFileSystemProvider.scheme;
}
/**
* Get the global singleton instance
* Returns null if not initialized yet
*/
static getInstance(): ReadonlyFileSystemProvider | null {
return ReadonlyFileSystemProvider.instance;
}
/**
* Create a URI for a readonly temporary file (static version)
*/
static createUri(fileName: string, content: string): vscode.Uri {
// For tool-call related filenames, keep the URI stable so repeated clicks focus the same document.
// Note: toolCallId can include underscores (e.g. "call_..."), so match everything after the prefix.
const isToolCallFile =
/^(bash-input|bash-output|execute-input|execute-output)-.+$/.test(
fileName,
);
if (isToolCallFile) {
return vscode.Uri.from({
scheme: ReadonlyFileSystemProvider.scheme,
path: `/${fileName}`,
});
}
// For other cases, keep the original approach with timestamp to avoid collisions.
const timestamp = Date.now();
const hash = Buffer.from(content.substring(0, 100)).toString('base64url');
const uniqueId = `${timestamp}-${hash.substring(0, 8)}`;
return vscode.Uri.from({
scheme: ReadonlyFileSystemProvider.scheme,
path: `/${fileName}-${uniqueId}`,
});
}
/**
* Create a URI for a readonly temporary file (instance method)
*/
createUri(fileName: string, content: string): vscode.Uri {
return ReadonlyFileSystemProvider.createUri(fileName, content);
}
/**
* Set content for a URI
*/
setContent(uri: vscode.Uri, content: string): void {
const buffer = Buffer.from(content, 'utf8');
const key = uri.toString();
const existed = this.files.has(key);
this.files.set(key, buffer);
this.emitter.fire([
{
type: existed
? vscode.FileChangeType.Changed
: vscode.FileChangeType.Created,
uri,
},
]);
}
/**
* Get content for a URI
*/
getContent(uri: vscode.Uri): string | undefined {
const buffer = this.files.get(uri.toString());
return buffer ? Buffer.from(buffer).toString('utf8') : undefined;
}
// FileSystemProvider implementation
watch(): vscode.Disposable {
// No watching needed for readonly files
return new vscode.Disposable(() => {});
}
stat(uri: vscode.Uri): vscode.FileStat {
const buffer = this.files.get(uri.toString());
if (!buffer) {
throw vscode.FileSystemError.FileNotFound(uri);
}
return {
type: vscode.FileType.File,
ctime: Date.now(),
mtime: Date.now(),
size: buffer.byteLength,
};
}
readDirectory(): Array<[string, vscode.FileType]> {
// Not needed for our use case
return [];
}
createDirectory(): void {
throw vscode.FileSystemError.NoPermissions('Readonly file system');
}
readFile(uri: vscode.Uri): Uint8Array {
const buffer = this.files.get(uri.toString());
if (!buffer) {
throw vscode.FileSystemError.FileNotFound(uri);
}
return buffer;
}
writeFile(
uri: vscode.Uri,
content: Uint8Array,
options: { create: boolean; overwrite: boolean },
): void {
// Check if file exists
const exists = this.files.has(uri.toString());
// For readonly files, only allow creation, not modification
if (exists && !options.overwrite) {
throw vscode.FileSystemError.FileExists(uri);
}
if (!exists && !options.create) {
throw vscode.FileSystemError.FileNotFound(uri);
}
this.files.set(uri.toString(), content);
this.emitter.fire([
{
type: exists
? vscode.FileChangeType.Changed
: vscode.FileChangeType.Created,
uri,
},
]);
}
delete(uri: vscode.Uri): void {
if (!this.files.has(uri.toString())) {
throw vscode.FileSystemError.FileNotFound(uri);
}
this.files.delete(uri.toString());
this.emitter.fire([{ type: vscode.FileChangeType.Deleted, uri }]);
}
rename(): void {
throw vscode.FileSystemError.NoPermissions('Readonly file system');
}
/**
* Clear all cached files
*/
clear(): void {
this.files.clear();
}
dispose(): void {
this.clear();
this.disposables.forEach((d) => d.dispose());
// Clear global instance on dispose
if (ReadonlyFileSystemProvider.instance === this) {
ReadonlyFileSystemProvider.instance = null;
}
}
}
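Typical consumption might look like the sketch below, assuming the provider was registered under its scheme as in the extension.ts hunk above. showReadonly is a hypothetical helper; the vscode calls are standard API:

import * as vscode from 'vscode';
import { ReadonlyFileSystemProvider } from './services/readonlyFileSystemProvider.js';

// Sketch: surface a string as a readonly editor tab via the custom scheme.
async function showReadonly(fileName: string, content: string): Promise<void> {
  const provider = ReadonlyFileSystemProvider.getInstance();
  if (!provider) {
    return; // provider not registered yet
  }
  const uri = ReadonlyFileSystemProvider.createUri(fileName, content);
  provider.setContent(uri, content); // stat()/readFile() now resolve for uri
  const doc = await vscode.workspace.openTextDocument(uri);
  await vscode.window.showTextDocument(doc, { preview: true });
}

// showReadonly('bash-output-call_123', 'hello from bash');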

View File

@@ -48,6 +48,35 @@ export interface ContentBlock {
uri?: string;
}
export interface UsageMetadata {
promptTokens?: number | null;
completionTokens?: number | null;
thoughtsTokens?: number | null;
totalTokens?: number | null;
cachedTokens?: number | null;
}
export interface SessionUpdateMeta {
usage?: UsageMetadata | null;
durationMs?: number | null;
}
export type AcpMeta = Record<string, unknown>;
export type ModelId = string;
export interface ModelInfo {
_meta?: AcpMeta | null;
description?: string | null;
modelId: ModelId;
name: string;
}
export interface SessionModelState {
_meta?: AcpMeta | null;
availableModels: ModelInfo[];
currentModelId: ModelId;
}
export interface UserMessageChunkUpdate extends BaseSessionUpdate {
update: {
sessionUpdate: 'user_message_chunk';
@@ -59,6 +88,7 @@ export interface AgentMessageChunkUpdate extends BaseSessionUpdate {
update: {
sessionUpdate: 'agent_message_chunk';
content: ContentBlock;
_meta?: SessionUpdateMeta;
};
}
@@ -66,6 +96,7 @@ export interface AgentThoughtChunkUpdate extends BaseSessionUpdate {
update: {
sessionUpdate: 'agent_thought_chunk';
content: ContentBlock;
_meta?: SessionUpdateMeta;
};
}
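For orientation, a value matching the new SessionModelState shape as it might appear in a session/new result (the model id and contextLimit are illustrative only):

import type { SessionModelState } from '../types/acpTypes.js';

// Hypothetical `models` field of a NewSessionResponse.
const models: SessionModelState = {
  currentModelId: 'qwen3-coder-plus',
  availableModels: [
    {
      modelId: 'qwen3-coder-plus',
      name: 'Qwen3 Coder Plus',
      description: null,
      _meta: { contextLimit: 262144 },
    },
  ],
};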

View File

@@ -3,7 +3,7 @@
* Copyright 2025 Qwen Team
* SPDX-License-Identifier: Apache-2.0
*/
- import type { AcpPermissionRequest } from './acpTypes.js';
+ import type { AcpPermissionRequest, ModelInfo } from './acpTypes.js';
import type { ApprovalModeValue } from './approvalModeValueTypes.js';
export interface ChatMessage {
@@ -28,6 +28,18 @@ export interface ToolCallUpdateData {
locations?: Array<{ path: string; line?: number | null }>;
}
export interface UsageStatsPayload {
usage?: {
promptTokens?: number | null;
completionTokens?: number | null;
thoughtsTokens?: number | null;
totalTokens?: number | null;
cachedTokens?: number | null;
} | null;
durationMs?: number | null;
tokenLimit?: number | null;
}
export interface QwenAgentCallbacks {
onMessage?: (message: ChatMessage) => void;
onStreamChunk?: (chunk: string) => void;
@@ -45,6 +57,8 @@ export interface QwenAgentCallbacks {
}>;
}) => void;
onModeChanged?: (modeId: ApprovalModeValue) => void;
onUsageUpdate?: (stats: UsageStatsPayload) => void;
onModelInfo?: (info: ModelInfo) => void;
}
export interface ToolCallUpdate {

View File

@@ -0,0 +1,77 @@
/**
* @license
* Copyright 2025 Qwen Team
* SPDX-License-Identifier: Apache-2.0
*/
import { describe, expect, it } from 'vitest';
import { extractModelInfoFromNewSessionResult } from './acpModelInfo.js';
describe('extractModelInfoFromNewSessionResult', () => {
it('extracts from NewSessionResponse.models (SessionModelState)', () => {
expect(
extractModelInfoFromNewSessionResult({
sessionId: 's',
models: {
currentModelId: 'qwen3-coder-plus',
availableModels: [
{
modelId: 'qwen3-coder-plus',
name: 'Qwen3 Coder Plus',
description: null,
_meta: { contextLimit: 123 },
},
],
},
}),
).toEqual({
modelId: 'qwen3-coder-plus',
name: 'Qwen3 Coder Plus',
description: null,
_meta: { contextLimit: 123 },
});
});
it('skips invalid model entries and returns first valid one', () => {
expect(
extractModelInfoFromNewSessionResult({
models: {
currentModelId: 'ok',
availableModels: [
{ name: '', modelId: '' },
{ name: 'Ok', modelId: 'ok', _meta: { contextLimit: null } },
],
},
}),
).toEqual({ name: 'Ok', modelId: 'ok', _meta: { contextLimit: null } });
});
it('falls back to single `model` object', () => {
expect(
extractModelInfoFromNewSessionResult({
model: {
name: 'Single',
modelId: 'single',
_meta: { contextLimit: 999 },
},
}),
).toEqual({
name: 'Single',
modelId: 'single',
_meta: { contextLimit: 999 },
});
});
it('falls back to legacy `modelInfo`', () => {
expect(
extractModelInfoFromNewSessionResult({
modelInfo: { name: 'legacy' },
}),
).toEqual({ name: 'legacy', modelId: 'legacy' });
});
it('returns null when missing', () => {
expect(extractModelInfoFromNewSessionResult({})).toBeNull();
expect(extractModelInfoFromNewSessionResult(null)).toBeNull();
});
});

View File

@@ -0,0 +1,135 @@
/**
* @license
* Copyright 2025 Qwen Team
* SPDX-License-Identifier: Apache-2.0
*/
import type { AcpMeta, ModelInfo } from '../types/acpTypes.js';
const asMeta = (value: unknown): AcpMeta | null | undefined => {
if (value === null) {
return null;
}
if (value && typeof value === 'object' && !Array.isArray(value)) {
return value as AcpMeta;
}
return undefined;
};
const normalizeModelInfo = (value: unknown): ModelInfo | null => {
if (!value || typeof value !== 'object') {
return null;
}
const obj = value as Record<string, unknown>;
const nameRaw = obj['name'];
const modelIdRaw = obj['modelId'];
const descriptionRaw = obj['description'];
const name = typeof nameRaw === 'string' ? nameRaw.trim() : '';
const modelId =
typeof modelIdRaw === 'string' && modelIdRaw.trim().length > 0
? modelIdRaw.trim()
: name;
if (!modelId || modelId.trim().length === 0 || !name) {
return null;
}
const description =
typeof descriptionRaw === 'string' || descriptionRaw === null
? descriptionRaw
: undefined;
const metaFromWire = asMeta(obj['_meta']);
// Back-compat: older implementations used `contextLimit` at the top-level.
const legacyContextLimit = obj['contextLimit'];
const contextLimit =
typeof legacyContextLimit === 'number' || legacyContextLimit === null
? legacyContextLimit
: undefined;
let mergedMeta: AcpMeta | null | undefined = metaFromWire;
if (typeof contextLimit !== 'undefined') {
if (mergedMeta === null) {
mergedMeta = { contextLimit };
} else if (typeof mergedMeta === 'undefined') {
mergedMeta = { contextLimit };
} else {
mergedMeta = { ...mergedMeta, contextLimit };
}
}
return {
modelId,
name,
...(typeof description !== 'undefined' ? { description } : {}),
...(typeof mergedMeta !== 'undefined' ? { _meta: mergedMeta } : {}),
};
};
/**
* Extract model info from ACP `session/new` result.
*
* Per Agent Client Protocol draft schema, NewSessionResponse includes `models`.
* We also accept legacy shapes for compatibility.
*/
export const extractModelInfoFromNewSessionResult = (
result: unknown,
): ModelInfo | null => {
if (!result || typeof result !== 'object') {
return null;
}
const obj = result as Record<string, unknown>;
const models = obj['models'];
// ACP draft: NewSessionResponse.models is a SessionModelState object.
if (models && typeof models === 'object' && !Array.isArray(models)) {
const state = models as Record<string, unknown>;
const availableModels = state['availableModels'];
const currentModelId = state['currentModelId'];
if (Array.isArray(availableModels)) {
const normalizedModels = availableModels
.map(normalizeModelInfo)
.filter((m): m is ModelInfo => Boolean(m));
if (normalizedModels.length > 0) {
if (typeof currentModelId === 'string' && currentModelId.length > 0) {
const selected = normalizedModels.find(
(m) => m.modelId === currentModelId,
);
if (selected) {
return selected;
}
}
return normalizedModels[0];
}
}
}
// Legacy: some implementations returned `models` as a raw array.
if (Array.isArray(models)) {
for (const entry of models) {
const normalized = normalizeModelInfo(entry);
if (normalized) {
return normalized;
}
}
}
// Some implementations may return a single model object.
const model = normalizeModelInfo(obj['model']);
if (model) {
return model;
}
// Legacy: modelInfo on initialize; allow as a fallback.
const legacy = normalizeModelInfo(obj['modelInfo']);
if (legacy) {
return legacy;
}
return null;
};

View File

@@ -53,11 +53,40 @@ export function findLeftGroupOfChatWebview(): vscode.ViewColumn | undefined {
}
}
/**
* Wait for a condition to become true, driven by tab-group change events.
* Falls back to a timeout to avoid hanging forever.
*/
function waitForTabGroupsCondition(
condition: () => boolean,
timeout: number = 2000,
): Promise<boolean> {
if (condition()) {
return Promise.resolve(true);
}
return new Promise<boolean>((resolve) => {
const subscription = vscode.window.tabGroups.onDidChangeTabGroups(() => {
if (!condition()) {
return;
}
clearTimeout(timeoutHandle);
subscription.dispose();
resolve(true);
});
const timeoutHandle = setTimeout(() => {
subscription.dispose();
resolve(false);
}, timeout);
});
}
/**
* Ensure there is an editor group directly to the left of the Qwen chat webview.
* - If one exists, return its ViewColumn.
* - If none exists, focus the chat panel and create a new group on its left,
- * then return the new group's ViewColumn (which equals the chat's previous column).
+ * then return the new group's ViewColumn.
* - If the chat webview cannot be located, returns undefined.
*/
export async function ensureLeftGroupOfChatWebview(): Promise<
@@ -87,7 +116,7 @@ export async function ensureLeftGroupOfChatWebview(): Promise<
return undefined;
}
- const previousChatColumn = webviewGroup.viewColumn;
+ const initialGroupCount = vscode.window.tabGroups.all.length;
// Make the chat group active by revealing the panel
try {
@@ -104,6 +133,22 @@ export async function ensureLeftGroupOfChatWebview(): Promise<
return undefined;
}
// Wait for the new group to actually be created (check that group count increased)
const groupCreated = await waitForTabGroupsCondition(
() => vscode.window.tabGroups.all.length > initialGroupCount,
1000, // 1 second timeout
);
if (!groupCreated) {
// Fallback if group creation didn't complete in time
return vscode.ViewColumn.One;
}
// After creating a new group to the left, the new group takes ViewColumn.One
// and all existing groups shift right. So the new left group is always ViewColumn.One.
// However, to be safe, let's query for it again.
const newLeftGroup = findLeftGroupOfChatWebview();
// Restore focus to chat (optional), so we don't disturb user focus
try {
await vscode.commands.executeCommand(openChatCommand);
@@ -111,6 +156,7 @@ export async function ensureLeftGroupOfChatWebview(): Promise<
// Ignore
}
- // The new left group's column equals the chat's previous column
- return previousChatColumn;
+ // If we successfully found the new left group, return it.
+ // Otherwise, fall back to ViewColumn.One (the newly created group should be first).
+ return newLeftGroup ?? vscode.ViewColumn.One;
}
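waitForTabGroupsCondition generalizes to any VS Code event source. A sketch with the event parameterized (waitForCondition is a hypothetical name, not part of the diff):

import * as vscode from 'vscode';

// Sketch: resolve true once `condition` holds after some event fires,
// or false after a timeout; always dispose the subscription.
function waitForCondition<T>(
  event: vscode.Event<T>,
  condition: () => boolean,
  timeoutMs = 2000,
): Promise<boolean> {
  if (condition()) {
    return Promise.resolve(true);
  }
  return new Promise<boolean>((resolve) => {
    const subscription = event(() => {
      if (!condition()) {
        return;
      }
      clearTimeout(timer);
      subscription.dispose();
      resolve(true);
    });
    const timer = setTimeout(() => {
      subscription.dispose();
      resolve(false);
    }, timeoutMs);
  });
}

// await waitForCondition(
//   vscode.window.tabGroups.onDidChangeTabGroups,
//   () => vscode.window.tabGroups.all.length > initialGroupCount,
// );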

View File

@@ -27,7 +27,7 @@ import type { TextMessage } from './hooks/message/useMessageHandling.js';
import type { ToolCallData } from './components/messages/toolcalls/ToolCall.js';
import { PermissionDrawer } from './components/PermissionDrawer/PermissionDrawer.js';
import { ToolCall } from './components/messages/toolcalls/ToolCall.js';
- import { hasToolCallOutput } from './components/messages/toolcalls/shared/utils.js';
+ import { hasToolCallOutput } from './utils/utils.js';
import { EmptyState } from './components/layout/EmptyState.js';
import { Onboarding } from './components/layout/Onboarding.js';
import { type CompletionItem } from '../types/completionItemTypes.js';
@@ -45,7 +45,12 @@ import { SessionSelector } from './components/layout/SessionSelector.js';
import { FileIcon, UserIcon } from './components/icons/index.js';
import { ApprovalMode, NEXT_APPROVAL_MODE } from '../types/acpTypes.js';
import type { ApprovalModeValue } from '../types/approvalModeValueTypes.js';
- import type { PlanEntry } from '../types/chatTypes.js';
+ import type { PlanEntry, UsageStatsPayload } from '../types/chatTypes.js';
import type { ModelInfo } from '../types/acpTypes.js';
import {
DEFAULT_TOKEN_LIMIT,
tokenLimit,
} from '@qwen-code/qwen-code-core/src/core/tokenLimits.js';
export const App: React.FC = () => {
const vscode = useVSCode();
@@ -70,6 +75,8 @@ export const App: React.FC = () => {
const [planEntries, setPlanEntries] = useState<PlanEntry[]>([]);
const [isAuthenticated, setIsAuthenticated] = useState<boolean | null>(null);
const [isLoading, setIsLoading] = useState<boolean>(true); // Track if we're still initializing/loading
const [modelInfo, setModelInfo] = useState<ModelInfo | null>(null);
const [usageStats, setUsageStats] = useState<UsageStatsPayload | null>(null);
const messagesEndRef = useRef<HTMLDivElement>(
null,
) as React.RefObject<HTMLDivElement>;
@@ -160,6 +167,48 @@ export const App: React.FC = () => {
const completion = useCompletionTrigger(inputFieldRef, getCompletionItems);
const contextUsage = useMemo(() => {
if (!usageStats && !modelInfo) {
return null;
}
const modelName =
modelInfo?.modelId && typeof modelInfo.modelId === 'string'
? modelInfo.modelId
: modelInfo?.name && typeof modelInfo.name === 'string'
? modelInfo.name
: undefined;
const derivedLimit =
modelName && modelName.length > 0 ? tokenLimit(modelName) : undefined;
const metaLimitRaw = modelInfo?._meta?.['contextLimit'];
const metaLimit =
typeof metaLimitRaw === 'number' || metaLimitRaw === null
? metaLimitRaw
: undefined;
const limit =
usageStats?.tokenLimit ??
metaLimit ??
derivedLimit ??
DEFAULT_TOKEN_LIMIT;
const used = usageStats?.usage?.promptTokens ?? 0;
if (typeof limit !== 'number' || limit <= 0 || used < 0) {
return null;
}
const percentLeft = Math.max(
0,
Math.min(100, Math.round(((limit - used) / limit) * 100)),
);
return {
percentLeft,
usedTokens: used,
tokenLimit: limit,
};
}, [usageStats, modelInfo]);
// Track a lightweight signature of workspace files to detect content changes even when length is unchanged
const workspaceFilesSignature = useMemo(
() =>
@@ -248,6 +297,10 @@ export const App: React.FC = () => {
setInputText,
setEditMode,
setIsAuthenticated,
setUsageStats: (stats) => setUsageStats(stats ?? null),
setModelInfo: (info) => {
setModelInfo(info);
},
});
// Auto-scroll handling: keep the view pinned to bottom when new content arrives,
@@ -760,6 +813,7 @@ export const App: React.FC = () => {
activeFileName={fileContext.activeFileName}
activeSelection={fileContext.activeSelection}
skipAutoActiveContext={skipAutoActiveContext}
contextUsage={contextUsage}
onInputChange={setInputText}
onCompositionStart={() => setIsComposing(true)}
onCompositionEnd={() => setIsComposing(false)}

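The percentLeft arithmetic in the contextUsage memo deserves a worked example. With a hypothetical 32,768-token limit and 8,192 prompt tokens used:

// Worked example of the contextUsage math above (values hypothetical).
const limit = 32768;
const used = 8192;
const percentLeft = Math.max(
  0,
  Math.min(100, Math.round(((limit - used) / limit) * 100)),
);
// (32768 - 8192) / 32768 = 0.75, so percentLeft === 75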
View File

@@ -118,6 +118,20 @@ export class WebViewProvider {
});
});
this.agentManager.onUsageUpdate((stats) => {
this.sendMessageToWebView({
type: 'usageStats',
data: stats,
});
});
this.agentManager.onModelInfo((info) => {
this.sendMessageToWebView({
type: 'modelInfo',
data: info,
});
});
// Setup end-turn handler from ACP stopReason notifications
this.agentManager.onEndTurn((reason) => {
// Ensure WebView exits streaming state even if no explicit streamEnd was emitted elsewhere

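On the webview side, these messages would land in a window message listener. A sketch assuming the usageStats and modelInfo message shapes emitted above:

// Sketch: webview-side counterpart to sendMessageToWebView above.
window.addEventListener('message', (event: MessageEvent) => {
  const msg = event.data as { type: string; data: unknown };
  switch (msg.type) {
    case 'usageStats':
      // e.g. setUsageStats(msg.data as UsageStatsPayload)
      break;
    case 'modelInfo':
      // e.g. setModelInfo(msg.data as ModelInfo)
      break;
    default:
      break;
  }
});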
View File

@@ -0,0 +1,61 @@
/**
* @license
* Copyright 2025 Qwen Team
* SPDX-License-Identifier: Apache-2.0
*/
import type React from 'react';
interface TooltipProps {
children: React.ReactNode;
content: React.ReactNode;
position?: 'top' | 'bottom' | 'left' | 'right';
}
export const Tooltip: React.FC<TooltipProps> = ({
children,
content,
position = 'top',
}) => (
<div className="relative inline-block">
<div className="group relative">
{children}
<div
className={`
absolute z-50 px-2 py-1 text-xs rounded-md shadow-lg
bg-[var(--app-primary-background)] border border-[var(--app-input-border)]
text-[var(--app-primary-foreground)] whitespace-nowrap
opacity-0 group-hover:opacity-100 transition-opacity duration-150
-translate-x-1/2 left-1/2
${
position === 'top'
? '-translate-y-1 bottom-full mb-1'
: position === 'bottom'
? 'translate-y-1 top-full mt-1'
: position === 'left'
? '-translate-x-full left-0 translate-y-[-50%] top-1/2'
: 'translate-x-0 right-0 translate-y-[-50%] top-1/2'
}
pointer-events-none
`}
>
{content}
<div
className={`
absolute w-2 h-2 bg-[var(--app-primary-background)] border-l border-b border-[var(--app-input-border)]
-rotate-45
${
position === 'top'
? 'top-full left-1/2 -translate-x-1/2 -translate-y-1/2'
: position === 'bottom'
? 'bottom-full left-1/2 -translate-x-1/2 translate-y-1/2'
: position === 'left'
? 'right-full top-1/2 translate-x-1/2 -translate-y-1/2'
: 'left-full top-1/2 -translate-x-1/2 -translate-y-1/2'
}
`}
/>
</div>
</div>
</div>
);
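Example usage of the Tooltip wrapper, as a small hypothetical component:

import type React from 'react';
import { Tooltip } from './Tooltip.js';

// Hypothetical toolbar button wrapped with the Tooltip above; the label
// mirrors the context-usage string built in ContextIndicator below.
export const InfoButton: React.FC = () => (
  <Tooltip content="75% 8.2k / 32.8k context used" position="top">
    <button className="btn-icon-compact" aria-label="Context usage">
      i
    </button>
  </Tooltip>
);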

View File

@@ -0,0 +1,88 @@
/**
* @license
* Copyright 2025 Qwen Team
* SPDX-License-Identifier: Apache-2.0
*/
import type React from 'react';
import { Tooltip } from '../Tooltip.js';
interface ContextUsage {
percentLeft: number;
usedTokens: number;
tokenLimit: number;
}
interface ContextIndicatorProps {
contextUsage: ContextUsage | null;
}
export const ContextIndicator: React.FC<ContextIndicatorProps> = ({
contextUsage,
}) => {
if (!contextUsage) {
return null;
}
// Calculate used percentage for the progress indicator
// contextUsage.percentLeft is the percentage remaining, so 100 - percentLeft = percent used
const percentUsed = 100 - contextUsage.percentLeft;
const percentFormatted = Math.max(0, Math.min(100, Math.round(percentUsed)));
const radius = 9;
const circumference = 2 * Math.PI * radius;
// To show the used portion, we need to offset the unused portion
// If 20% is used, we want to show 20% filled, so offset the remaining 80%
const dashOffset = ((100 - percentUsed) / 100) * circumference;
const formatNumber = (value: number) => {
if (value >= 1000) {
return `${(Math.round((value / 1000) * 10) / 10).toFixed(1)}k`;
}
return Math.round(value).toLocaleString();
};
// Create tooltip content with proper formatting
const tooltipContent = (
<div className="flex flex-col gap-1">
<div className="font-medium">
{percentFormatted}% {formatNumber(contextUsage.usedTokens)} /{' '}
{formatNumber(contextUsage.tokenLimit)} context used
</div>
</div>
);
return (
<Tooltip content={tooltipContent} position="top">
<button
className="btn-icon-compact"
aria-label={`${percentFormatted}% • ${formatNumber(contextUsage.usedTokens)} / ${formatNumber(contextUsage.tokenLimit)} context used`}
>
<svg viewBox="0 0 24 24" aria-hidden="true" role="presentation">
<circle
className="context-indicator__track"
cx="12"
cy="12"
r={radius}
fill="none"
stroke="currentColor"
opacity="0.2"
/>
<circle
className="context-indicator__progress"
cx="12"
cy="12"
r={radius}
fill="none"
stroke="currentColor"
strokeWidth="2"
strokeDasharray={circumference}
strokeDashoffset={dashOffset}
style={{
transform: 'rotate(-90deg)',
transformOrigin: '50% 50%',
}}
/>
</svg>
</button>
</Tooltip>
);
};
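A quick check of the ring geometry: with radius 9 the circumference is about 56.5, and at 25% used roughly a quarter of it stays stroked (values hypothetical):

// Worked check of the SVG ring math above.
const radius = 9;
const circumference = 2 * Math.PI * radius; // ≈ 56.55
const percentUsed = 25;
const dashOffset = ((100 - percentUsed) / 100) * circumference; // ≈ 42.41
// Stroked arc length = circumference - dashOffset ≈ 14.14,
// i.e. 25% of the ring, matching percentUsed.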

View File

@@ -21,6 +21,7 @@ import { CompletionMenu } from '../layout/CompletionMenu.js';
import type { CompletionItem } from '../../../types/completionItemTypes.js';
import { getApprovalModeInfoFromString } from '../../../types/acpTypes.js';
import type { ApprovalModeValue } from '../../../types/approvalModeValueTypes.js';
import { ContextIndicator } from './ContextIndicator.js';
interface InputFormProps {
inputText: string;
@@ -36,6 +37,11 @@ interface InputFormProps {
activeSelection: { startLine: number; endLine: number } | null;
// Whether to auto-load the active editor selection/path into context
skipAutoActiveContext: boolean;
contextUsage: {
percentLeft: number;
usedTokens: number;
tokenLimit: number;
} | null;
onInputChange: (text: string) => void;
onCompositionStart: () => void;
onCompositionEnd: () => void;
@@ -96,6 +102,7 @@ export const InputForm: React.FC<InputFormProps> = ({
activeFileName,
activeSelection,
skipAutoActiveContext,
contextUsage,
onInputChange,
onCompositionStart,
onCompositionEnd,
@@ -240,6 +247,9 @@ export const InputForm: React.FC<InputFormProps> = ({
{/* Spacer */}
<div className="flex-1 min-w-0" />
{/* Context usage indicator */}
<ContextIndicator contextUsage={contextUsage} />
{/* @yiliang114. closed temporarily */}
{/* Thinking button */}
{/* <button

Some files were not shown because too many files have changed in this diff.