Compare commits


60 Commits

Author SHA1 Message Date
Mingholy
17eb20c134 Merge pull request #1322 from QwenLM/mingholy/feat/headless-slash-commands
feat: support /compress and /summary commands for non-interactive & ACP
2025-12-26 18:10:55 +08:00
mingholy.lmh
5d59ceb6f3 fix: explicit output if command is not supported 2025-12-26 17:55:03 +08:00
mingholy.lmh
7f645b9726 fix: wrong slash_command in systemMessage 2025-12-26 17:55:03 +08:00
mingholy.lmh
8c109be48c refactor: unified allow list of supported commands in ACP or non-interactive mode 2025-12-26 17:55:03 +08:00
mingholy.lmh
e9a1d9a927 fix: failed unit test cases 2025-12-26 17:55:02 +08:00
mingholy.lmh
8aceddffa2 feat: support /compress and /summary commands for non-interactive & ACP
integration
2025-12-26 17:55:02 +08:00
tanzhenxin
cebe0448d0 Merge pull request #1327 from QwenLM/feat/vscode-ide-companion-context-left
context left on vscode ide companion
2025-12-26 17:26:35 +08:00
tanzhenxin
919560e3a4 Merge pull request #1345 from QwenLM/feat/vscode-ida-companion-bash-toolcall-click-2
feat(vscode-ide-companion): in/output part in the bash toolcall can be clicked to open a temporary file
2025-12-26 16:55:35 +08:00
Mingholy
26bd4f882d Merge pull request #1334 from QwenLM/mingholy/chore/skip-bumping-unstable-sdk-version
chore: improve release-sdk workflow
2025-12-26 16:21:53 +08:00
tanzhenxin
3787e95572 Merge pull request #1349 from QwenLM/fix/integration-test-3
fix one flaky integration test
2025-12-26 09:43:02 +08:00
tanzhenxin
7233d37bd1 fix one flaky integration test 2025-12-26 09:20:24 +08:00
yiliang114
93dcca5147 fix(vscode-ide-companion): fix test 2025-12-26 00:28:45 +08:00
cwtuan
f7d04323f3 Enhance VS Code extension description with download link (#1341)
Updated the VS Code extension note with a download link for the Qwen Code Companion.
2025-12-25 23:58:52 +08:00
yiliang114
9a27857f10 feat(vscode-ide-companion): support context left 2025-12-25 23:53:55 +08:00
yiliang114
452f4f3c0e Merge branch 'main' of https://github.com/QwenLM/qwen-code into feat/vscode-ide-companion-context-left 2025-12-25 23:51:57 +08:00
yiliang114
5cc01e5e09 feat(vscode-ide-companion): support context left 2025-12-25 23:51:50 +08:00
yiliang114
ac0be9fb84 feat(vscode-ide-companion): in/output part in the bash toolcall can be clicked to open a temporary file. 2025-12-25 16:59:32 +08:00
tanzhenxin
257c6705e1 Merge pull request #1343 from QwenLM/fix/integration-test-2
fix one flaky integration test
2025-12-25 16:08:54 +08:00
tanzhenxin
27e7438b75 fix one flaky integration test 2025-12-25 16:08:06 +08:00
tanzhenxin
8a3ff8db12 Merge pull request #1340 from QwenLM/feat/anthropic-provider-1
Follow up on pr #1331
2025-12-25 15:44:52 +08:00
tanzhenxin
26f8b67d4f add missing file 2025-12-25 15:24:56 +08:00
tanzhenxin
b64d636280 anthropic provider support follow-up 2025-12-25 15:24:42 +08:00
tanzhenxin
781c57b438 Merge pull request #1331 from QwenLM/feat/support-anthropic-provider
feat: add Anthropic provider, normalize auth/env config, and centralize logging
2025-12-25 11:44:38 +08:00
mingholy.lmh
c81c24d45d chore: improve release-sdk workflow 2025-12-25 10:46:57 +08:00
tanzhenxin
c53bdde747 support reasoning.budget_tokens config option 2025-12-25 10:18:38 +08:00
tanzhenxin
99db18069d add interleaved-thinking-2025-05-14 beta header for anthropic content generator 2025-12-25 09:42:06 +08:00
tanzhenxin
a0a5b831d4 add a few more tests 2025-12-24 20:54:40 +08:00
tanzhenxin
8f74dd224c add tests for loggingContentGenerator 2025-12-24 19:41:46 +08:00
tanzhenxin
b931d28f35 feat(core,cli): add Anthropic provider, normalize auth/env config, and centralize logging 2025-12-24 19:00:56 +08:00
mingholy.lmh
4407597794 chore: skip bumping sdk version when release nightly/preview or dry run 2025-12-24 18:12:23 +08:00
Mingholy
9f65bd3b39 Merge pull request #1325 from QwenLM/mingholy/chore/revert-sdk-version
chore: revert sdk-typescript version to 0.1.0 and update release workflow
2025-12-24 17:22:02 +08:00
pomelo
2b3830cf83 Merge pull request #1314 from QwenLM/feat/skills
Add experimental Skills feature
2025-12-24 17:01:28 +08:00
yiliang114
90bf101040 chore(vscode-ide-companion): simplify the implementation of context remaining 2025-12-24 14:29:25 +08:00
mingholy.lmh
2b9140940d chore: update release-sdk.yml to sync lockfile 2025-12-24 11:43:46 +08:00
mingholy.lmh
4efdea0981 chore: revert sdk-typescript version to 0.1.0 and update release workflow 2025-12-24 11:04:33 +08:00
pomelo
05791d4200 Merge pull request #1302 from afarber/1095-figma-mcp-client-name
fix(mcp): update OAuth client name for Figma MCP server compatibility
2025-12-24 10:41:51 +08:00
Mingholy
add35d2904 Merge pull request #1321 from QwenLM/mingholy/fix/sdk-cli-path
fix: cli path parsing issue in Windows
2025-12-24 10:35:52 +08:00
yiliang114
660901e1fd Merge branch 'main' of https://github.com/QwenLM/qwen-code into feat/vscode-ide-companion-context-left 2025-12-24 10:10:32 +08:00
yiliang114
8e64c5acaf feat(vscode-ide-companion): support context left 2025-12-24 01:09:21 +08:00
tanzhenxin
bc2a7efcb3 Merge pull request #1297 from QwenLM/feat/gemini-3-integration
Add Gemini provider, remove legacy Google OAuth, and tune generation …
2025-12-23 22:16:39 +08:00
tanzhenxin
1dfd880e17 reset default topP to 0.95 as claude modes does not allow topP smaller than 0.95 2025-12-23 21:58:28 +08:00
mingholy.lmh
4f970c9987 fix: cli path parsing issue in Windows 2025-12-23 19:00:43 +08:00
tanzhenxin
251031cfc5 fix a link in skills.md 2025-12-23 15:09:23 +08:00
tanzhenxin
10a0c843c1 fix flaky tests 2025-12-23 14:52:03 +08:00
tanzhenxin
77c257d9d0 fix flaky tests 2025-12-23 14:50:47 +08:00
tanzhenxin
955547d523 minor updates to address review comments 2025-12-23 14:35:41 +08:00
tanzhenxin
3bc862df89 unset temperature, and set topP=0.8 for default provider 2025-12-23 13:56:06 +08:00
tanzhenxin
4311af96eb add docs 2025-12-23 10:53:09 +08:00
tanzhenxin
b49c11e9a2 add experimental-skills flag to enable skills feature 2025-12-23 10:24:57 +08:00
tanzhenxin
9cdd85c62a Merge branch 'main' into feat/skills 2025-12-22 16:00:57 +08:00
tanzhenxin
87d8d82be7 special handling for summarized thinking 2025-12-22 14:07:23 +08:00
tanzhenxin
fefc138485 Merge branch 'main' into feat/gemini-3-integration 2025-12-22 10:08:15 +08:00
Alexander Farber
18e9b2340b Change header to: Copyright 2025 Qwen Team 2025-12-20 09:37:57 +01:00
Alexander Farber
ad427da340 Move constants to a new file for SSOT 2025-12-20 09:35:12 +01:00
Alexander Farber
484e0fd943 Change the client name to: Gemini CLI MCP Client 2025-12-20 09:21:15 +01:00
tanzhenxin
b8a16d362a Merge branch 'main' into feat/gemini-3-integration 2025-12-19 16:39:42 +08:00
tanzhenxin
17129024f4 Add Gemini provider, remove legacy Google OAuth, and tune generation defaults 2025-12-19 16:26:54 +08:00
tanzhenxin
177fc42f04 Merge branch 'main' into feat/skills 2025-12-15 14:25:56 +08:00
tanzhenxin
2560c2d1a2 Merge branch 'main' into feat/skills 2025-12-11 14:50:07 +08:00
tanzhenxin
bd6e16d41b draft version of skill tool feature 2025-12-10 17:18:44 +08:00
219 changed files with 11751 additions and 11071 deletions

View File

@@ -33,6 +33,10 @@ on:
type: 'boolean'
default: false
concurrency:
group: '${{ github.workflow }}'
cancel-in-progress: false
jobs:
release-sdk:
runs-on: 'ubuntu-latest'
@@ -46,6 +50,7 @@ jobs:
packages: 'write'
id-token: 'write'
issues: 'write'
pull-requests: 'write'
outputs:
RELEASE_TAG: '${{ steps.version.outputs.RELEASE_TAG }}'
@@ -86,6 +91,8 @@ jobs:
with:
node-version-file: '.nvmrc'
cache: 'npm'
registry-url: 'https://registry.npmjs.org'
scope: '@qwen-code'
- name: 'Install Dependencies'
run: |-
@@ -121,6 +128,14 @@ jobs:
IS_PREVIEW: '${{ steps.vars.outputs.is_preview }}'
MANUAL_VERSION: '${{ inputs.version }}'
- name: 'Set SDK package version (local only)'
env:
RELEASE_VERSION: '${{ steps.version.outputs.RELEASE_VERSION }}'
run: |-
# Ensure the package version matches the computed release version.
# This is required for nightly/preview because npm does not allow re-publishing the same version.
npm version -w @qwen-code/sdk "${RELEASE_VERSION}" --no-git-tag-version --allow-same-version
- name: 'Build CLI Bundle'
run: |
npm run build
@@ -153,7 +168,21 @@ jobs:
git config user.name "github-actions[bot]"
git config user.email "github-actions[bot]@users.noreply.github.com"
- name: 'Build SDK'
working-directory: 'packages/sdk-typescript'
run: |-
npm run build
- name: 'Publish @qwen-code/sdk'
working-directory: 'packages/sdk-typescript'
run: |-
npm publish --access public --tag=${{ steps.version.outputs.NPM_TAG }} ${{ steps.vars.outputs.is_dry_run == 'true' && '--dry-run' || '' }}
env:
NODE_AUTH_TOKEN: '${{ secrets.NPM_TOKEN }}'
- name: 'Create and switch to a release branch'
if: |-
${{ steps.vars.outputs.is_dry_run == 'false' && steps.vars.outputs.is_nightly == 'false' && steps.vars.outputs.is_preview == 'false' }}
id: 'release_branch'
env:
RELEASE_TAG: '${{ steps.version.outputs.RELEASE_TAG }}'
@@ -162,50 +191,22 @@ jobs:
git switch -c "${BRANCH_NAME}"
echo "BRANCH_NAME=${BRANCH_NAME}" >> "${GITHUB_OUTPUT}"
- name: 'Update package version'
working-directory: 'packages/sdk-typescript'
env:
RELEASE_VERSION: '${{ steps.version.outputs.RELEASE_VERSION }}'
run: |-
npm version "${RELEASE_VERSION}" --no-git-tag-version --allow-same-version
- name: 'Commit and Conditionally Push package version'
- name: 'Commit and Push package version (stable only)'
if: |-
${{ steps.vars.outputs.is_dry_run == 'false' && steps.vars.outputs.is_nightly == 'false' && steps.vars.outputs.is_preview == 'false' }}
env:
BRANCH_NAME: '${{ steps.release_branch.outputs.BRANCH_NAME }}'
IS_DRY_RUN: '${{ steps.vars.outputs.is_dry_run }}'
RELEASE_TAG: '${{ steps.version.outputs.RELEASE_TAG }}'
run: |-
git add packages/sdk-typescript/package.json
# Only persist version bumps after a successful publish.
git add packages/sdk-typescript/package.json package-lock.json
if git diff --staged --quiet; then
echo "No version changes to commit"
else
git commit -m "chore(release): sdk-typescript ${RELEASE_TAG}"
fi
if [[ "${IS_DRY_RUN}" == "false" ]]; then
echo "Pushing release branch to remote..."
git push --set-upstream origin "${BRANCH_NAME}" --follow-tags
else
echo "Dry run enabled. Skipping push."
fi
- name: 'Build SDK'
working-directory: 'packages/sdk-typescript'
run: |-
npm run build
- name: 'Configure npm for publishing'
uses: 'actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020' # ratchet:actions/setup-node@v4
with:
node-version-file: '.nvmrc'
registry-url: 'https://registry.npmjs.org'
scope: '@qwen-code'
- name: 'Publish @qwen-code/sdk'
working-directory: 'packages/sdk-typescript'
run: |-
npm publish --access public --tag=${{ steps.version.outputs.NPM_TAG }} ${{ steps.vars.outputs.is_dry_run == 'true' && '--dry-run' || '' }}
env:
NODE_AUTH_TOKEN: '${{ secrets.NPM_TOKEN }}'
echo "Pushing release branch to remote..."
git push --set-upstream origin "${BRANCH_NAME}" --follow-tags
- name: 'Create GitHub Release and Tag'
if: |-
@@ -215,12 +216,68 @@ jobs:
RELEASE_BRANCH: '${{ steps.release_branch.outputs.BRANCH_NAME }}'
RELEASE_TAG: '${{ steps.version.outputs.RELEASE_TAG }}'
PREVIOUS_RELEASE_TAG: '${{ steps.version.outputs.PREVIOUS_RELEASE_TAG }}'
IS_NIGHTLY: '${{ steps.vars.outputs.is_nightly }}'
IS_PREVIEW: '${{ steps.vars.outputs.is_preview }}'
REF: '${{ github.event.inputs.ref || github.sha }}'
run: |-
# For stable releases, use the release branch; for nightly/preview, use the current ref
if [[ "${IS_NIGHTLY}" == "true" || "${IS_PREVIEW}" == "true" ]]; then
TARGET="${REF}"
PRERELEASE_FLAG="--prerelease"
else
TARGET="${RELEASE_BRANCH}"
PRERELEASE_FLAG=""
fi
gh release create "sdk-typescript-${RELEASE_TAG}" \
--target "$RELEASE_BRANCH" \
--target "${TARGET}" \
--title "SDK TypeScript Release ${RELEASE_TAG}" \
--notes-start-tag "sdk-typescript-${PREVIOUS_RELEASE_TAG}" \
--generate-notes
--generate-notes \
${PRERELEASE_FLAG}
- name: 'Create PR to merge release branch into main'
if: |-
${{ steps.vars.outputs.is_dry_run == 'false' && steps.vars.outputs.is_nightly == 'false' && steps.vars.outputs.is_preview == 'false' }}
id: 'pr'
env:
GITHUB_TOKEN: '${{ secrets.GITHUB_TOKEN }}'
RELEASE_BRANCH: '${{ steps.release_branch.outputs.BRANCH_NAME }}'
RELEASE_TAG: '${{ steps.version.outputs.RELEASE_TAG }}'
run: |-
set -euo pipefail
pr_url="$(gh pr list --head "${RELEASE_BRANCH}" --base main --json url --jq '.[0].url')"
if [[ -z "${pr_url}" ]]; then
pr_url="$(gh pr create \
--base main \
--head "${RELEASE_BRANCH}" \
--title "chore(release): sdk-typescript ${RELEASE_TAG}" \
--body "Automated release PR for sdk-typescript ${RELEASE_TAG}.")"
fi
echo "PR_URL=${pr_url}" >> "${GITHUB_OUTPUT}"
- name: 'Wait for CI checks to complete'
if: |-
${{ steps.vars.outputs.is_dry_run == 'false' && steps.vars.outputs.is_nightly == 'false' && steps.vars.outputs.is_preview == 'false' }}
env:
GITHUB_TOKEN: '${{ secrets.GITHUB_TOKEN }}'
PR_URL: '${{ steps.pr.outputs.PR_URL }}'
run: |-
set -euo pipefail
echo "Waiting for CI checks to complete..."
gh pr checks "${PR_URL}" --watch --interval 30
- name: 'Enable auto-merge for release PR'
if: |-
${{ steps.vars.outputs.is_dry_run == 'false' && steps.vars.outputs.is_nightly == 'false' && steps.vars.outputs.is_preview == 'false' }}
env:
GITHUB_TOKEN: '${{ secrets.GITHUB_TOKEN }}'
PR_URL: '${{ steps.pr.outputs.PR_URL }}'
run: |-
set -euo pipefail
gh pr merge "${PR_URL}" --merge --auto
- name: 'Create Issue on Failure'
if: |-

View File

@@ -43,6 +43,7 @@ Qwen Code uses JSON settings files for persistent configuration. There are four
In addition to a project settings file, a project's `.qwen` directory can contain other project-specific files related to Qwen Code's operation, such as:
- [Custom sandbox profiles](../features/sandbox) (e.g. `.qwen/sandbox-macos-custom.sb`, `.qwen/sandbox.Dockerfile`).
- [Agent Skills](../features/skills) (experimental) under `.qwen/skills/` (each Skill is a directory containing a `SKILL.md`).
### Available settings in `settings.json`
@@ -380,6 +381,8 @@ Arguments passed directly when running the CLI can override other configurations
| `--telemetry-otlp-protocol` | | Sets the OTLP protocol for telemetry (`grpc` or `http`). | | Defaults to `grpc`. See [telemetry](../../developers/development/telemetry) for more information. |
| `--telemetry-log-prompts` | | Enables logging of prompts for telemetry. | | See [telemetry](../../developers/development/telemetry) for more information. |
| `--checkpointing` | | Enables [checkpointing](../features/checkpointing). | | |
| `--experimental-acp` | | Enables ACP mode (Agent Client Protocol). Useful for IDE/editor integrations like [Zed](../integration-zed). | | Experimental. |
| `--experimental-skills` | | Enables experimental [Agent Skills](../features/skills) (registers the `skill` tool and loads Skills from `.qwen/skills/` and `~/.qwen/skills/`). | | Experimental. |
| `--extensions` | `-e` | Specifies a list of extensions to use for the session. | Extension names | If not provided, all available extensions are used. Use the special term `qwen -e none` to disable all extensions. Example: `qwen -e my-extension -e my-other-extension` |
| `--list-extensions` | `-l` | Lists all available extensions and exits. | | |
| `--proxy` | | Sets the proxy for the CLI. | Proxy URL | Example: `--proxy http://localhost:7890`. |
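
For example, the two experimental flags documented above can be passed like any other option. A usage sketch based only on the table (no additional behavior is implied):

```bash
qwen --experimental-acp        # start in ACP mode for editor integrations
qwen --experimental-skills     # register the `skill` tool and load Skills
```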

View File

@@ -1,6 +1,7 @@
export default {
commands: 'Commands',
'sub-agents': 'SubAgents',
skills: 'Skills (Experimental)',
headless: 'Headless Mode',
checkpointing: {
display: 'hidden',
@@ -9,5 +10,4 @@ export default {
mcp: 'MCP',
'token-caching': 'Token Caching',
sandbox: 'Sandboxing',
language: 'i18n',
};

View File

@@ -48,7 +48,7 @@ Commands specifically for controlling interface and output language.
| → `ui [language]` | Set UI interface language | `/language ui zh-CN` |
| → `output [language]` | Set LLM output language | `/language output Chinese` |
- Available built-in UI languages: `zh-CN` (Simplified Chinese), `en-US` (English), `ru-RU` (Russian), `de-DE` (German)
- Available UI languages: `zh-CN` (Simplified Chinese), `en-US` (English)
- Output language examples: `Chinese`, `English`, `Japanese`, etc.
### 1.4 Tool and Model Management
@@ -72,16 +72,17 @@ Commands for managing AI tools and models.
Commands for obtaining information and performing system settings.
| Command | Description | Usage Examples |
| ----------- | ----------------------------------------------- | -------------------------------- |
| `/help` | Display help information for available commands | `/help` or `/?` |
| `/about` | Display version information | `/about` |
| `/stats` | Display detailed statistics for current session | `/stats` |
| `/settings` | Open settings editor | `/settings` |
| `/auth` | Change authentication method | `/auth` |
| `/bug` | Submit issue about Qwen Code | `/bug Button click unresponsive` |
| `/copy` | Copy last output content to clipboard | `/copy` |
| `/quit` | Exit Qwen Code immediately | `/quit` or `/exit` |
| Command | Description | Usage Examples |
| --------------- | ----------------------------------------------- | ------------------------------------------------ |
| `/help` | Display help information for available commands | `/help` or `/?` |
| `/about` | Display version information | `/about` |
| `/stats` | Display detailed statistics for current session | `/stats` |
| `/settings` | Open settings editor | `/settings` |
| `/auth` | Change authentication method | `/auth` |
| `/bug` | Submit issue about Qwen Code | `/bug Button click unresponsive` |
| `/copy` | Copy last output content to clipboard | `/copy` |
| `/quit-confirm` | Show confirmation dialog before quitting | `/quit-confirm` (shortcut: press `Ctrl+C` twice) |
| `/quit` | Exit Qwen Code immediately | `/quit` or `/exit` |
### 1.6 Common Shortcuts

View File

@@ -189,19 +189,20 @@ qwen -p "Write code" --output-format stream-json --include-partial-messages | jq
Key command-line options for headless usage:
| Option | Description | Example |
| ---------------------------- | --------------------------------------------------- | ------------------------------------------------------------------------ |
| `--prompt`, `-p` | Run in headless mode | `qwen -p "query"` |
| `--output-format`, `-o` | Specify output format (text, json, stream-json) | `qwen -p "query" --output-format json` |
| `--input-format` | Specify input format (text, stream-json) | `qwen --input-format text --output-format stream-json` |
| `--include-partial-messages` | Include partial messages in stream-json output | `qwen -p "query" --output-format stream-json --include-partial-messages` |
| `--debug`, `-d` | Enable debug mode | `qwen -p "query" --debug` |
| `--all-files`, `-a` | Include all files in context | `qwen -p "query" --all-files` |
| `--include-directories` | Include additional directories | `qwen -p "query" --include-directories src,docs` |
| `--yolo`, `-y` | Auto-approve all actions | `qwen -p "query" --yolo` |
| `--approval-mode` | Set approval mode | `qwen -p "query" --approval-mode auto_edit` |
| `--continue` | Resume the most recent session for this project | `qwen --continue -p "Pick up where we left off"` |
| `--resume [sessionId]` | Resume a specific session (or choose interactively) | `qwen --resume 123e... -p "Finish the refactor"` |
| Option | Description | Example |
| ---------------------------- | ------------------------------------------------------- | ------------------------------------------------------------------------ |
| `--prompt`, `-p` | Run in headless mode | `qwen -p "query"` |
| `--output-format`, `-o` | Specify output format (text, json, stream-json) | `qwen -p "query" --output-format json` |
| `--input-format` | Specify input format (text, stream-json) | `qwen --input-format text --output-format stream-json` |
| `--include-partial-messages` | Include partial messages in stream-json output | `qwen -p "query" --output-format stream-json --include-partial-messages` |
| `--debug`, `-d` | Enable debug mode | `qwen -p "query" --debug` |
| `--all-files`, `-a` | Include all files in context | `qwen -p "query" --all-files` |
| `--include-directories` | Include additional directories | `qwen -p "query" --include-directories src,docs` |
| `--yolo`, `-y` | Auto-approve all actions | `qwen -p "query" --yolo` |
| `--approval-mode` | Set approval mode | `qwen -p "query" --approval-mode auto_edit` |
| `--continue` | Resume the most recent session for this project | `qwen --continue -p "Pick up where we left off"` |
| `--resume [sessionId]` | Resume a specific session (or choose interactively) | `qwen --resume 123e... -p "Finish the refactor"` |
| `--experimental-skills` | Enable experimental Skills (registers the `skill` tool) | `qwen --experimental-skills -p "What Skills are available?"` |
For complete details on all available configuration options, settings files, and environment variables, see the [Configuration Guide](../configuration/settings).

View File

@@ -1,136 +0,0 @@
# Internationalization (i18n) & Language
Qwen Code is built for multilingual workflows: it supports UI localization (i18n/l10n) in the CLI, lets you choose the assistant output language, and allows custom UI language packs.
## Overview
From a user point of view, Qwen Code's "internationalization" spans multiple layers:
| Capability / Setting | What it controls | Where stored |
| ------------------------ | ---------------------------------------------------------------------- | ---------------------------- |
| `/language ui` | Terminal UI text (menus, system messages, prompts) | `~/.qwen/settings.json` |
| `/language output` | Language the AI responds in (an output preference, not UI translation) | `~/.qwen/output-language.md` |
| Custom UI language packs | Overrides/extends built-in UI translations | `~/.qwen/locales/*.js` |
## UI Language
This is the CLI's UI localization layer (i18n/l10n): it controls the language of menus, prompts, and system messages.
### Setting the UI Language
Use the `/language ui` command:
```bash
/language ui zh-CN # Chinese
/language ui en-US # English
/language ui ru-RU # Russian
/language ui de-DE # German
```
Aliases are also supported:
```bash
/language ui zh # Chinese
/language ui en # English
/language ui ru # Russian
/language ui de # German
```
### Auto-detection
On first startup, Qwen Code detects your system locale and sets the UI language automatically.
Detection priority:
1. `QWEN_CODE_LANG` environment variable
2. `LANG` environment variable
3. System locale via JavaScript Intl API
4. Default: English
## LLM Output Language
The LLM output language controls what language the AI assistant responds in, regardless of what language you type your questions in.
### How It Works
The LLM output language is controlled by a rule file at `~/.qwen/output-language.md`. This file is automatically included in the LLM's context during startup, instructing it to respond in the specified language.
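For a concrete sense of the mechanism, the rule file is plain Markdown containing a language instruction. A minimal illustration (the exact wording Qwen Code generates may differ):

```markdown
<!-- ~/.qwen/output-language.md: illustrative content only -->
Always respond in Chinese, regardless of the language used in the prompt.
```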
### Auto-detection
On first startup, if no `output-language.md` file exists, Qwen Code automatically creates one based on your system locale. For example:
- System locale `zh` creates a rule for Chinese responses
- System locale `en` creates a rule for English responses
- System locale `ru` creates a rule for Russian responses
- System locale `de` creates a rule for German responses
### Manual Setting
Use `/language output <language>` to change:
```bash
/language output Chinese
/language output English
/language output Japanese
/language output German
```
Any language name works. The LLM will be instructed to respond in that language.
> [!note]
>
> After changing the output language, restart Qwen Code for the change to take effect.
### File Location
```
~/.qwen/output-language.md
```
## Configuration
### Via Settings Dialog
1. Run `/settings`
2. Find "Language" under General
3. Select your preferred UI language
### Via Environment Variable
```bash
export QWEN_CODE_LANG=zh
```
This influences auto-detection on first startup (if you haven't set a UI language and no `output-language.md` file exists yet).
## Custom Language Packs
For UI translations, you can create custom language packs in `~/.qwen/locales/`:
- Example: `~/.qwen/locales/es.js` for Spanish
- Example: `~/.qwen/locales/fr.js` for French
User directory takes precedence over built-in translations.
> [!tip]
>
> Contributions are welcome if you'd like to improve built-in translations or add new languages.
> For a concrete example, see [PR #1238: feat(i18n): add Russian language support](https://github.com/QwenLM/qwen-code/pull/1238).
### Language Pack Format
```javascript
// ~/.qwen/locales/es.js
export default {
Hello: 'Hola',
Settings: 'Configuración',
// ... more translations
};
```
## Related Commands
- `/language` - Show current language settings
- `/language ui [lang]` - Set UI language
- `/language output <language>` - Set LLM output language
- `/settings` - Open settings dialog

View File

@@ -0,0 +1,282 @@
# Agent Skills (Experimental)
> Create, manage, and share Skills to extend Qwen Code's capabilities.
This guide shows you how to create, use, and manage Agent Skills in **Qwen Code**. Skills are modular capabilities that extend the model's effectiveness through organized folders containing instructions (and optionally scripts/resources).
> [!note]
>
> Skills are currently **experimental** and must be enabled with `--experimental-skills`.
## Prerequisites
- Qwen Code (recent version)
- Run with the experimental flag enabled:
```bash
qwen --experimental-skills
```
- Basic familiarity with Qwen Code ([Quickstart](../quickstart.md))
## What are Agent Skills?
Agent Skills package expertise into discoverable capabilities. Each Skill consists of a `SKILL.md` file with instructions that the model can load when relevant, plus optional supporting files like scripts and templates.
### How Skills are invoked
Skills are **model-invoked** — the model autonomously decides when to use them based on your request and the Skill's description. This is different from slash commands, which are **user-invoked** (you explicitly type `/command`).
### Benefits
- Extend Qwen Code for your workflows
- Share expertise across your team via git
- Reduce repetitive prompting
- Compose multiple Skills for complex tasks
## Create a Skill
Skills are stored as directories containing a `SKILL.md` file.
### Personal Skills
Personal Skills are available across all your projects. Store them in `~/.qwen/skills/`:
```bash
mkdir -p ~/.qwen/skills/my-skill-name
```
Use personal Skills for:
- Your individual workflows and preferences
- Experimental Skills youre developing
- Personal productivity helpers
### Project Skills
Project Skills are shared with your team. Store them in `.qwen/skills/` within your project:
```bash
mkdir -p .qwen/skills/my-skill-name
```
Use project Skills for:
- Team workflows and conventions
- Project-specific expertise
- Shared utilities and scripts
Project Skills can be checked into git and automatically become available to teammates.
## Write `SKILL.md`
Create a `SKILL.md` file with YAML frontmatter and Markdown content:
```yaml
---
name: your-skill-name
description: Brief description of what this Skill does and when to use it
---
# Your Skill Name
## Instructions
Provide clear, step-by-step guidance for Qwen Code.
## Examples
Show concrete examples of using this Skill.
```
### Field requirements
Qwen Code currently validates that:
- `name` is a non-empty string
- `description` is a non-empty string
Recommended conventions (not strictly enforced yet):
- Use lowercase letters, numbers, and hyphens in `name`
- Make `description` specific: include both **what** the Skill does and **when** to use it (key words users will naturally mention)
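For instance, frontmatter that follows these conventions might look like this (the Skill name and wording are hypothetical):

```yaml
---
name: pdf-form-filler
description: Fill and flatten PDF forms from structured data. Use when the user mentions PDFs, AcroForms, or filling forms.
---
```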
## Add supporting files
Create additional files alongside `SKILL.md`:
```text
my-skill/
├── SKILL.md (required)
├── reference.md (optional documentation)
├── examples.md (optional examples)
├── scripts/
│ └── helper.py (optional utility)
└── templates/
└── template.txt (optional template)
```
Reference these files from `SKILL.md`:
````markdown
For advanced usage, see [reference.md](reference.md).
Run the helper script:
```bash
python scripts/helper.py input.txt
```
````
## View available Skills
When `--experimental-skills` is enabled, Qwen Code discovers Skills from:
- Personal Skills: `~/.qwen/skills/`
- Project Skills: `.qwen/skills/`
To view available Skills, ask Qwen Code directly:
```text
What Skills are available?
```
Or inspect the filesystem:
```bash
# List personal Skills
ls ~/.qwen/skills/
# List project Skills (if in a project directory)
ls .qwen/skills/
# View a specific Skill's content
cat ~/.qwen/skills/my-skill/SKILL.md
```
## Test a Skill
After creating a Skill, test it by asking questions that match your description.
Example: if your description mentions “PDF files”:
```text
Can you help me extract text from this PDF?
```
The model autonomously decides to use your Skill if it matches the request — you don't need to explicitly invoke it.
## Debug a Skill
If Qwen Code doesn't use your Skill, check these common issues:
### Make the description specific
Too vague:
```yaml
description: Helps with documents
```
Specific:
```yaml
description: Extract text and tables from PDF files, fill forms, merge documents. Use when working with PDFs, forms, or document extraction.
```
### Verify file path
- Personal Skills: `~/.qwen/skills/<skill-name>/SKILL.md`
- Project Skills: `.qwen/skills/<skill-name>/SKILL.md`
```bash
# Personal
ls ~/.qwen/skills/my-skill/SKILL.md
# Project
ls .qwen/skills/my-skill/SKILL.md
```
### Check YAML syntax
Invalid YAML prevents the Skill metadata from loading correctly.
```bash
cat SKILL.md | head -n 15
```
Ensure:
- Opening `---` on line 1
- Closing `---` before Markdown content
- Valid YAML syntax (no tabs, correct indentation)
### View errors
Run Qwen Code with debug mode to see Skill loading errors:
```bash
qwen --experimental-skills --debug
```
## Share Skills with your team
You can share Skills through project repositories:
1. Add the Skill under `.qwen/skills/`
2. Commit and push
3. Teammates pull the changes and run with `--experimental-skills`
```bash
git add .qwen/skills/
git commit -m "Add team Skill for PDF processing"
git push
```
## Update a Skill
Edit `SKILL.md` directly:
```bash
# Personal Skill
code ~/.qwen/skills/my-skill/SKILL.md
# Project Skill
code .qwen/skills/my-skill/SKILL.md
```
Changes take effect the next time you start Qwen Code. If Qwen Code is already running, restart it to load the updates.
## Remove a Skill
Delete the Skill directory:
```bash
# Personal
rm -rf ~/.qwen/skills/my-skill
# Project
rm -rf .qwen/skills/my-skill
git commit -m "Remove unused Skill"
```
## Best practices
### Keep Skills focused
One Skill should address one capability:
- Focused: “PDF form filling”, “Excel analysis”, “Git commit messages”
- Too broad: “Document processing” (split into smaller Skills)
### Write clear descriptions
Help the model discover when to use Skills by including specific triggers:
```yaml
description: Analyze Excel spreadsheets, create pivot tables, and generate charts. Use when working with Excel files, spreadsheets, or .xlsx data.
```
### Test with your team
- Does the Skill activate when expected?
- Are the instructions clear?
- Are there missing examples or edge cases?

View File

@@ -1,4 +1,6 @@
# Qwen Code overview
[![@qwen-code/qwen-code downloads](https://img.shields.io/npm/dw/@qwen-code/qwen-code.svg)](https://npm-compare.com/@qwen-code/qwen-code)
[![@qwen-code/qwen-code version](https://img.shields.io/npm/v/@qwen-code/qwen-code.svg)](https://www.npmjs.com/package/@qwen-code/qwen-code)
> Learn about Qwen Code, Qwen's agentic coding tool that lives in your terminal and helps you turn ideas into code faster than ever before.
@@ -46,7 +48,7 @@ You'll be prompted to log in on first use. That's it! [Continue with Quickstart
> [!note]
>
> **New VS Code Extension (Beta)**: Prefer a graphical interface? Our new **VS Code extension** provides an easy-to-use native IDE experience without requiring terminal familiarity. Simply install from the marketplace and start coding with Qwen Code directly in your sidebar. You can search for **Qwen Code** in the VS Code Marketplace and download it.
> **New VS Code Extension (Beta)**: Prefer a graphical interface? Our new **VS Code extension** provides an easy-to-use native IDE experience without requiring terminal familiarity. Simply install from the marketplace and start coding with Qwen Code directly in your sidebar. Download and install the [Qwen Code Companion](https://marketplace.visualstudio.com/items?itemName=qwenlm.qwen-code-vscode-ide-companion) now.
## What Qwen Code does for you

View File

@@ -24,8 +24,6 @@ export default tseslint.config(
'.integration-tests/**',
'packages/**/.integration-test/**',
'dist/**',
'docs-site/.next/**',
'docs-site/out/**',
],
},
eslint.configs.recommended,

View File

@@ -5,8 +5,6 @@
*/
import { describe, it, expect } from 'vitest';
import { existsSync } from 'node:fs';
import * as path from 'node:path';
import { TestRig, printDebugInfo, validateModelOutput } from './test-helper.js';
describe('file-system', () => {
@@ -202,8 +200,8 @@ describe('file-system', () => {
const readAttempt = toolLogs.find(
(log) => log.toolRequest.name === 'read_file',
);
const writeAttempt = toolLogs.find(
(log) => log.toolRequest.name === 'write_file',
const editAttempt = toolLogs.find(
(log) => log.toolRequest.name === 'edit_file',
);
const successfulReplace = toolLogs.find(
(log) => log.toolRequest.name === 'replace' && log.toolRequest.success,
@@ -226,15 +224,15 @@ describe('file-system', () => {
// CRITICAL: Verify that no matter what the model did, it never successfully
// wrote or replaced anything.
if (writeAttempt) {
if (editAttempt) {
console.error(
'A write_file attempt was made when no file should be written.',
'A edit_file attempt was made when no file should be written.',
);
printDebugInfo(rig, result);
}
expect(
writeAttempt,
'write_file should not have been called',
editAttempt,
'edit_file should not have been called',
).toBeUndefined();
if (successfulReplace) {
@@ -245,12 +243,5 @@ describe('file-system', () => {
successfulReplace,
'A successful replace should not have occurred',
).toBeUndefined();
// Final verification: ensure the file was not created.
const filePath = path.join(rig.testDir!, fileName);
const fileExists = existsSync(filePath);
expect(fileExists, 'The non-existent file should not be created').toBe(
false,
);
});
});

View File

@@ -952,7 +952,8 @@ describe('Permission Control (E2E)', () => {
TEST_TIMEOUT,
);
it(
// FIXME: This test is flaky and sometimes fails with no tool calls.
it.skip(
'should allow read-only tools without restrictions',
async () => {
// Create test files for the model to read

View File

@@ -314,4 +314,88 @@ describe('System Control (E2E)', () => {
);
});
});
describe('supportedCommands API', () => {
it('should return list of supported slash commands', async () => {
const sessionId = crypto.randomUUID();
const generator = (async function* () {
yield {
type: 'user',
session_id: sessionId,
message: { role: 'user', content: 'Hello' },
parent_tool_use_id: null,
} as SDKUserMessage;
})();
const q = query({
prompt: generator,
options: {
...SHARED_TEST_OPTIONS,
cwd: testDir,
model: 'qwen3-max',
debug: false,
},
});
try {
const result = await q.supportedCommands();
// Start consuming messages to trigger initialization
const messageConsumer = (async () => {
try {
for await (const _message of q) {
// Just consume messages
}
} catch (error) {
// Ignore errors from query being closed
if (error instanceof Error && error.message !== 'Query is closed') {
throw error;
}
}
})();
// Verify result structure
expect(result).toBeDefined();
expect(result).toHaveProperty('commands');
expect(Array.isArray(result?.['commands'])).toBe(true);
const commands = result?.['commands'] as string[];
// Verify default allowed built-in commands are present
expect(commands).toContain('init');
expect(commands).toContain('summary');
expect(commands).toContain('compress');
// Verify commands are sorted
const sortedCommands = [...commands].sort();
expect(commands).toEqual(sortedCommands);
// Verify all commands are strings
commands.forEach((cmd) => {
expect(typeof cmd).toBe('string');
expect(cmd.length).toBeGreaterThan(0);
});
await q.close();
await messageConsumer;
} catch (error) {
await q.close();
throw error;
}
});
it('should throw error when supportedCommands is called on closed query', async () => {
const q = query({
prompt: 'Hello',
options: {
...SHARED_TEST_OPTIONS,
cwd: testDir,
model: 'qwen3-max',
},
});
await q.close();
await expect(q.supportedCommands()).rejects.toThrow('Query is closed');
});
});
});
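
Outside the test harness, the same API surface can be exercised directly. A minimal sketch, assuming `query` is the public entry point of the published `@qwen-code/sdk` package and that the option shape reduces to a working directory as in the tests above:

```typescript
import { query } from '@qwen-code/sdk';

async function listCommands(): Promise<void> {
  const q = query({ prompt: 'Hello', options: { cwd: process.cwd() } });
  try {
    // Per the tests above: returns a sorted list of command names that
    // includes the default allowed built-ins 'compress', 'init', 'summary'.
    const result = (await q.supportedCommands()) as { commands: string[] };
    console.log(result.commands);
  } finally {
    // Calling supportedCommands() after close() rejects with 'Query is closed'.
    await q.close();
  }
  // NOTE: the tests also consume q's message stream in parallel; a real
  // integration may need to do the same to drive initialization.
}
```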

package-lock.json (generated): file diff suppressed because it is too large (2074 changes).

View File

@@ -18,9 +18,6 @@
"scripts": {
"start": "cross-env node scripts/start.js",
"debug": "cross-env DEBUG=1 node --inspect-brk scripts/start.js",
"auth:npm": "npx google-artifactregistry-auth",
"auth:docker": "gcloud auth configure-docker us-west1-docker.pkg.dev",
"auth": "npm run auth:npm && npm run auth:docker",
"generate": "node scripts/generate-git-commit-info.js",
"build": "node scripts/build.js",
"build-and-start": "npm run build && npm run start",
@@ -95,7 +92,6 @@
"eslint-plugin-react-hooks": "^5.2.0",
"glob": "^10.5.0",
"globals": "^16.0.0",
"google-artifactregistry-auth": "^3.4.0",
"husky": "^9.1.7",
"json": "^11.0.0",
"lint-staged": "^16.1.6",

View File

@@ -36,10 +36,10 @@
"sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.6.0"
},
"dependencies": {
"@google/genai": "1.16.0",
"@google/genai": "1.30.0",
"@iarna/toml": "^2.2.5",
"@qwen-code/qwen-code-core": "file:../core",
"@modelcontextprotocol/sdk": "^1.15.1",
"@modelcontextprotocol/sdk": "^1.25.1",
"@types/update-notifier": "^6.0.8",
"ansi-regex": "^6.2.2",
"command-exists": "^1.2.9",

View File

@@ -98,6 +98,14 @@ export class AgentSideConnection implements Client {
);
}
/**
* Sends a custom notification to the client.
* Used for extension-specific notifications that are not part of the core ACP protocol.
*/
async sendCustomNotification<T>(method: string, params: T): Promise<void> {
return await this.#connection.sendNotification(method, params);
}
/**
* Request permission before running a tool
*
@@ -374,6 +382,7 @@ export interface Client {
): Promise<schema.RequestPermissionResponse>;
sessionUpdate(params: schema.SessionNotification): Promise<void>;
authenticateUpdate(params: schema.AuthenticateUpdate): Promise<void>;
sendCustomNotification<T>(method: string, params: T): Promise<void>;
writeTextFile(
params: schema.WriteTextFileRequest,
): Promise<schema.WriteTextFileResponse>;

View File

@@ -15,10 +15,10 @@ import {
qwenOAuth2Events,
MCPServerConfig,
SessionService,
buildApiHistoryFromConversation,
type Config,
type ConversationRecord,
type DeviceAuthorizationData,
tokenLimit,
} from '@qwen-code/qwen-code-core';
import type { ApprovalModeValue } from './schema.js';
import * as acp from './acp.js';
@@ -165,9 +165,30 @@ class GeminiAgent {
this.setupFileSystem(config);
const session = await this.createAndStoreSession(config);
const configuredModel = (
config.getModel() ||
this.config.getModel() ||
''
).trim();
const modelId = configuredModel || 'default';
const modelName = configuredModel || modelId;
return {
sessionId: session.getId(),
models: {
currentModelId: modelId,
availableModels: [
{
modelId,
name: modelName,
description: null,
_meta: {
contextLimit: tokenLimit(modelId),
},
},
],
_meta: null,
},
};
}
@@ -327,12 +348,20 @@ class GeminiAgent {
const sessionId = config.getSessionId();
const geminiClient = config.getGeminiClient();
const history = conversation
? buildApiHistoryFromConversation(conversation)
: undefined;
const chat = history
? await geminiClient.startChat(history)
: await geminiClient.startChat();
// Use GeminiClient to manage chat lifecycle properly
// This ensures geminiClient.chat is in sync with the session's chat
//
// Note: When loading a session, config.initialize() has already been called
// in newSessionConfig(), which in turn calls geminiClient.initialize().
// The GeminiClient.initialize() method checks config.getResumedSessionData()
// and automatically loads the conversation history into the chat instance.
// So we only need to initialize if it hasn't been done yet.
if (!geminiClient.isInitialized()) {
await geminiClient.initialize();
}
// Now get the chat instance that's managed by GeminiClient
const chat = geminiClient.getChat();
const session = new Session(
sessionId,

View File

@@ -93,6 +93,7 @@ export type ModeInfo = z.infer<typeof modeInfoSchema>;
export type ModesData = z.infer<typeof modesDataSchema>;
export type AgentInfo = z.infer<typeof agentInfoSchema>;
export type ModelInfo = z.infer<typeof modelInfoSchema>;
export type PromptCapabilities = z.infer<typeof promptCapabilitiesSchema>;
@@ -254,8 +255,26 @@ export const authenticateUpdateSchema = z.object({
export type AuthenticateUpdate = z.infer<typeof authenticateUpdateSchema>;
export const acpMetaSchema = z.record(z.unknown()).nullable().optional();
export const modelIdSchema = z.string();
export const modelInfoSchema = z.object({
_meta: acpMetaSchema,
description: z.string().nullable().optional(),
modelId: modelIdSchema,
name: z.string(),
});
export const sessionModelStateSchema = z.object({
_meta: acpMetaSchema,
availableModels: z.array(modelInfoSchema),
currentModelId: modelIdSchema,
});
export const newSessionResponseSchema = z.object({
sessionId: z.string(),
models: sessionModelStateSchema,
});
export const loadSessionResponseSchema = z.null();
@@ -514,6 +533,13 @@ export const currentModeUpdateSchema = z.object({
export type CurrentModeUpdate = z.infer<typeof currentModeUpdateSchema>;
export const currentModelUpdateSchema = z.object({
sessionUpdate: z.literal('current_model_update'),
model: modelInfoSchema,
});
export type CurrentModelUpdate = z.infer<typeof currentModelUpdateSchema>;
export const sessionUpdateSchema = z.union([
z.object({
content: contentBlockSchema,
@@ -555,6 +581,7 @@ export const sessionUpdateSchema = z.union([
sessionUpdate: z.literal('plan'),
}),
currentModeUpdateSchema,
currentModelUpdateSchema,
availableCommandsUpdateSchema,
]);

View File

@@ -41,9 +41,11 @@ import * as fs from 'node:fs/promises';
import * as path from 'node:path';
import { z } from 'zod';
import { getErrorMessage } from '../../utils/errors.js';
import { normalizePartList } from '../../utils/nonInteractiveHelpers.js';
import {
handleSlashCommand,
getAvailableCommands,
type NonInteractiveSlashCommandResult,
} from '../../nonInteractiveCliCommands.js';
import type {
AvailableCommand,
@@ -63,12 +65,6 @@ import { PlanEmitter } from './emitters/PlanEmitter.js';
import { MessageEmitter } from './emitters/MessageEmitter.js';
import { SubAgentTracker } from './SubAgentTracker.js';
/**
* Built-in commands that are allowed in ACP integration mode.
* Only safe, read-only commands that don't require interactive UI.
*/
export const ALLOWED_BUILTIN_COMMANDS_FOR_ACP = ['init'];
/**
* Session represents an active conversation session with the AI model.
* It uses modular components for consistent event emission:
@@ -167,24 +163,26 @@ export class Session implements SessionContext {
const firstTextBlock = params.prompt.find((block) => block.type === 'text');
const inputText = firstTextBlock?.text || '';
let parts: Part[];
let parts: Part[] | null;
if (isSlashCommand(inputText)) {
// Handle slash command - allow specific built-in commands for ACP integration
// Handle slash command - uses default allowed commands (init, summary, compress)
const slashCommandResult = await handleSlashCommand(
inputText,
pendingSend,
this.config,
this.settings,
ALLOWED_BUILTIN_COMMANDS_FOR_ACP,
);
if (slashCommandResult) {
// Use the result from the slash command
parts = slashCommandResult as Part[];
} else {
// Slash command didn't return a prompt, continue with normal processing
parts = await this.#resolvePrompt(params.prompt, pendingSend.signal);
parts = await this.#processSlashCommandResult(
slashCommandResult,
params.prompt,
);
// If parts is null, the command was fully handled (e.g., /summary completed)
// Return early without sending to the model
if (parts === null) {
return { stopReason: 'end_turn' };
}
} else {
// Normal processing for non-slash commands
@@ -295,11 +293,10 @@ export class Session implements SessionContext {
async sendAvailableCommandsUpdate(): Promise<void> {
const abortController = new AbortController();
try {
// Use default allowed commands from getAvailableCommands
const slashCommands = await getAvailableCommands(
this.config,
this.settings,
abortController.signal,
ALLOWED_BUILTIN_COMMANDS_FOR_ACP,
);
// Convert SlashCommand[] to AvailableCommand[] format for ACP protocol
@@ -647,6 +644,103 @@ export class Session implements SessionContext {
}
}
/**
* Processes the result of a slash command execution.
*
* Supported result types in ACP mode:
* - submit_prompt: Submits content to the model
* - stream_messages: Streams multiple messages to the client (ACP-specific)
* - unsupported: Command cannot be executed in ACP mode
* - no_command: No command was found, use original prompt
*
* Note: 'message' type is not supported in ACP mode - commands should use
* 'stream_messages' instead for consistent async handling.
*
* @param result The result from handleSlashCommand
* @param originalPrompt The original prompt blocks
* @returns Parts to use for the prompt, or null if command was handled without needing model interaction
*/
async #processSlashCommandResult(
result: NonInteractiveSlashCommandResult,
originalPrompt: acp.ContentBlock[],
): Promise<Part[] | null> {
switch (result.type) {
case 'submit_prompt':
// Command wants to submit a prompt to the model
// Convert PartListUnion to Part[]
return normalizePartList(result.content);
case 'message': {
// 'message' type is not ideal for ACP mode, but we handle it for compatibility
// by converting it to a stream_messages-like notification
await this.client.sendCustomNotification('_qwencode/slash_command', {
sessionId: this.sessionId,
command: originalPrompt
.filter((block) => block.type === 'text')
.map((block) => (block.type === 'text' ? block.text : ''))
.join(' '),
messageType: result.messageType,
message: result.content || '',
});
if (result.messageType === 'error') {
// Throw error to stop execution
throw new Error(result.content || 'Slash command failed.');
}
// For info messages, return null to indicate command was handled
return null;
}
case 'stream_messages': {
// Command returns multiple messages via async generator (ACP-preferred)
const command = originalPrompt
.filter((block) => block.type === 'text')
.map((block) => (block.type === 'text' ? block.text : ''))
.join(' ');
// Stream all messages to the client
for await (const msg of result.messages) {
await this.client.sendCustomNotification('_qwencode/slash_command', {
sessionId: this.sessionId,
command,
messageType: msg.messageType,
message: msg.content,
});
// If we encounter an error message, throw after sending
if (msg.messageType === 'error') {
throw new Error(msg.content || 'Slash command failed.');
}
}
// All messages sent successfully, return null to indicate command was handled
return null;
}
case 'unsupported': {
// Command returned an unsupported result type
const unsupportedError = `Slash command not supported in ACP integration: ${result.reason}`;
throw new Error(unsupportedError);
}
case 'no_command':
// No command was found or executed, use original prompt
return originalPrompt.map((block) => {
if (block.type === 'text') {
return { text: block.text };
}
throw new Error(`Unsupported block type: ${block.type}`);
});
default: {
// Exhaustiveness check
const _exhaustive: never = result;
const unknownError = `Unknown slash command result type: ${(_exhaustive as NonInteractiveSlashCommandResult).type}`;
throw new Error(unknownError);
}
}
}
async #resolvePrompt(
message: acp.ContentBlock[],
abortSignal: AbortSignal,
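
For reference, the `_qwencode/slash_command` notifications assembled above share one payload shape. A hedged sketch of that shape as a TypeScript interface (field names are taken from the diff; the `'info'` member of the `messageType` union is an assumption, since only `'error'` is checked explicitly):

```typescript
// Inferred from Session.#processSlashCommandResult above; not an exported type.
interface SlashCommandNotification {
  sessionId: string;
  command: string; // the original slash command text from the prompt blocks
  messageType: 'info' | 'error';
  message: string;
}
```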

View File

@@ -26,5 +26,37 @@ export function validateAuthMethod(authMethod: string): string | null {
return null;
}
if (authMethod === AuthType.USE_ANTHROPIC) {
const hasApiKey = process.env['ANTHROPIC_API_KEY'];
if (!hasApiKey) {
return 'ANTHROPIC_API_KEY environment variable not found.';
}
const hasBaseUrl = process.env['ANTHROPIC_BASE_URL'];
if (!hasBaseUrl) {
return 'ANTHROPIC_BASE_URL environment variable not found.';
}
return null;
}
if (authMethod === AuthType.USE_GEMINI) {
const hasApiKey = process.env['GEMINI_API_KEY'];
if (!hasApiKey) {
return 'GEMINI_API_KEY environment variable not found. Please set it in your .env file or environment variables.';
}
return null;
}
if (authMethod === AuthType.USE_VERTEX_AI) {
const hasApiKey = process.env['GOOGLE_API_KEY'];
if (!hasApiKey) {
return 'GOOGLE_API_KEY environment variable not found. Please set it in your .env file or environment variables.';
}
process.env['GOOGLE_GENAI_USE_VERTEXAI'] = 'true';
return null;
}
return 'Invalid auth method selected.';
}
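
In practice, the new Anthropic branch requires both environment variables before the auth method validates. A minimal setup sketch (the literal `--auth-type` value for Anthropic is assumed from `AuthType.USE_ANTHROPIC`; only `openai` appears verbatim elsewhere in this diff):

```bash
# Both variables are required by validateAuthMethod above.
export ANTHROPIC_API_KEY="sk-ant-..."                 # placeholder key
export ANTHROPIC_BASE_URL="https://api.anthropic.com" # assumed endpoint
qwen --auth-type anthropic -p "Hello"                 # flag value is an assumption
```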

View File

@@ -2114,7 +2114,14 @@ describe('loadCliConfig model selection', () => {
});
it('always prefers model from argvs', async () => {
process.argv = ['node', 'script.js', '--model', 'qwen3-coder-plus'];
process.argv = [
'node',
'script.js',
'--auth-type',
'openai',
'--model',
'qwen3-coder-plus',
];
const argv = await parseArguments({} as Settings);
const config = await loadCliConfig(
{
@@ -2134,7 +2141,14 @@ describe('loadCliConfig model selection', () => {
});
it('selects the model from argvs if provided', async () => {
process.argv = ['node', 'script.js', '--model', 'qwen3-coder-plus'];
process.argv = [
'node',
'script.js',
'--auth-type',
'openai',
'--model',
'qwen3-coder-plus',
];
const argv = await parseArguments({} as Settings);
const config = await loadCliConfig(
{

View File

@@ -112,6 +112,7 @@ export interface CliArgs {
allowedMcpServerNames: string[] | undefined;
allowedTools: string[] | undefined;
experimentalAcp: boolean | undefined;
experimentalSkills: boolean | undefined;
extensions: string[] | undefined;
listExtensions: boolean | undefined;
openaiLogging: boolean | undefined;
@@ -307,6 +308,11 @@ export async function parseArguments(settings: Settings): Promise<CliArgs> {
type: 'boolean',
description: 'Starts the agent in ACP mode',
})
.option('experimental-skills', {
type: 'boolean',
description: 'Enable experimental Skills feature',
default: false,
})
.option('channel', {
type: 'string',
choices: ['VSCode', 'ACP', 'SDK', 'CI'],
@@ -460,7 +466,13 @@ export async function parseArguments(settings: Settings): Promise<CliArgs> {
})
.option('auth-type', {
type: 'string',
choices: [AuthType.USE_OPENAI, AuthType.QWEN_OAUTH],
choices: [
AuthType.USE_OPENAI,
AuthType.USE_ANTHROPIC,
AuthType.QWEN_OAUTH,
AuthType.USE_GEMINI,
AuthType.USE_VERTEX_AI,
],
description: 'Authentication type',
})
.deprecateOption(
@@ -865,11 +877,30 @@ export async function loadCliConfig(
);
}
const selectedAuthType =
(argv.authType as AuthType | undefined) ||
settings.security?.auth?.selectedType;
const apiKey =
(selectedAuthType === AuthType.USE_OPENAI
? argv.openaiApiKey ||
process.env['OPENAI_API_KEY'] ||
settings.security?.auth?.apiKey
: '') || '';
const baseUrl =
(selectedAuthType === AuthType.USE_OPENAI
? argv.openaiBaseUrl ||
process.env['OPENAI_BASE_URL'] ||
settings.security?.auth?.baseUrl
: '') || '';
const resolvedModel =
argv.model ||
process.env['OPENAI_MODEL'] ||
process.env['QWEN_MODEL'] ||
settings.model?.name;
(selectedAuthType === AuthType.USE_OPENAI
? process.env['OPENAI_MODEL'] ||
process.env['QWEN_MODEL'] ||
settings.model?.name
: '') ||
'';
const sandboxConfig = await loadSandboxConfig(settings, argv);
const screenReader =
@@ -951,27 +982,20 @@ export async function loadCliConfig(
maxSessionTurns:
argv.maxSessionTurns ?? settings.model?.maxSessionTurns ?? -1,
experimentalZedIntegration: argv.experimentalAcp || false,
experimentalSkills: argv.experimentalSkills || false,
listExtensions: argv.listExtensions || false,
extensions: allExtensions,
blockedMcpServers,
noBrowser: !!process.env['NO_BROWSER'],
authType:
(argv.authType as AuthType | undefined) ||
settings.security?.auth?.selectedType,
authType: selectedAuthType,
inputFormat,
outputFormat,
includePartialMessages,
generationConfig: {
...(settings.model?.generationConfig || {}),
model: resolvedModel,
apiKey:
argv.openaiApiKey ||
process.env['OPENAI_API_KEY'] ||
settings.security?.auth?.apiKey,
baseUrl:
argv.openaiBaseUrl ||
process.env['OPENAI_BASE_URL'] ||
settings.security?.auth?.baseUrl,
apiKey,
baseUrl,
enableOpenAILogging:
(typeof argv.openaiLogging === 'undefined'
? settings.model?.enableOpenAILogging

View File

@@ -56,6 +56,17 @@ vi.mock('simple-git', () => ({
}),
}));
vi.mock('./extensions/github.js', async (importOriginal) => {
const actual =
await importOriginal<typeof import('./extensions/github.js')>();
return {
...actual,
downloadFromGitHubRelease: vi
.fn()
.mockRejectedValue(new Error('Mocked GitHub release download failure')),
};
});
vi.mock('os', async (importOriginal) => {
const mockedOs = await importOriginal<typeof os>();
return {

View File

@@ -41,6 +41,17 @@ vi.mock('simple-git', () => ({
}),
}));
vi.mock('../extensions/github.js', async (importOriginal) => {
const actual =
await importOriginal<typeof import('../extensions/github.js')>();
return {
...actual,
downloadFromGitHubRelease: vi
.fn()
.mockRejectedValue(new Error('Mocked GitHub release download failure')),
};
});
vi.mock('os', async (importOriginal) => {
const mockedOs = await importOriginal<typeof os>();
return {

View File

@@ -15,7 +15,6 @@ import { type LoadedSettings, SettingScope } from '../config/settings.js';
import { performInitialAuth } from './auth.js';
import { validateTheme } from './theme.js';
import { initializeI18n } from '../i18n/index.js';
import { initializeLlmOutputLanguage } from '../ui/commands/languageCommand.js';
export interface InitializationResult {
authError: string | null;
@@ -42,9 +41,6 @@ export async function initializeApp(
'auto';
await initializeI18n(languageSetting);
// Auto-detect and set LLM output language on first use
initializeLlmOutputLanguage();
const authType = settings.merged.security?.auth?.selectedType;
const authError = await performInitialAuth(config, authType);

View File

@@ -461,6 +461,7 @@ describe('gemini.tsx main function kitty protocol', () => {
allowedMcpServerNames: undefined,
allowedTools: undefined,
experimentalAcp: undefined,
experimentalSkills: undefined,
extensions: undefined,
listExtensions: undefined,
openaiLogging: undefined,

View File

@@ -4,13 +4,8 @@
* SPDX-License-Identifier: Apache-2.0
*/
import type { Config } from '@qwen-code/qwen-code-core';
import {
AuthType,
getOauthClient,
InputFormat,
logUserPrompt,
} from '@qwen-code/qwen-code-core';
import type { Config, AuthType } from '@qwen-code/qwen-code-core';
import { InputFormat, logUserPrompt } from '@qwen-code/qwen-code-core';
import { render } from 'ink';
import dns from 'node:dns';
import os from 'node:os';
@@ -399,15 +394,6 @@ export async function main() {
initializationResult = await initializeApp(config, settings);
}
if (
settings.merged.security?.auth?.selectedType ===
AuthType.LOGIN_WITH_GOOGLE &&
config.isBrowserLaunchSuppressed()
) {
// Do oauth before app renders to make copying the link possible.
await getOauthClient(settings.merged.security.auth.selectedType, config);
}
if (config.getExperimentalZedIntegration()) {
return runAcpAgent(config, settings, extensions, argv);
}

View File

@@ -1,6 +1,6 @@
/**
* @license
* Copyright 2025 Qwen team
* Copyright 2025 Qwen
* SPDX-License-Identifier: Apache-2.0
*/
@@ -8,21 +8,15 @@ import * as fs from 'node:fs';
import * as path from 'node:path';
import { fileURLToPath, pathToFileURL } from 'node:url';
import { homedir } from 'node:os';
import {
type SupportedLanguage,
getLanguageNameFromLocale,
} from './languages.js';
export type { SupportedLanguage };
export { getLanguageNameFromLocale };
export type SupportedLanguage = 'en' | 'zh' | 'ru' | string; // Allow custom language codes
// State
let currentLanguage: SupportedLanguage = 'en';
let translations: Record<string, string | string[]> = {};
let translations: Record<string, string> = {};
// Cache
type TranslationValue = string | string[];
type TranslationDict = Record<string, TranslationValue>;
type TranslationDict = Record<string, string>;
const translationCache: Record<string, TranslationDict> = {};
const loadingPromises: Record<string, Promise<TranslationDict>> = {};
@@ -58,13 +52,11 @@ export function detectSystemLanguage(): SupportedLanguage {
if (envLang?.startsWith('zh')) return 'zh';
if (envLang?.startsWith('en')) return 'en';
if (envLang?.startsWith('ru')) return 'ru';
if (envLang?.startsWith('de')) return 'de';
try {
const locale = Intl.DateTimeFormat().resolvedOptions().locale;
if (locale.startsWith('zh')) return 'zh';
if (locale.startsWith('ru')) return 'ru';
if (locale.startsWith('de')) return 'de';
} catch {
// Fallback to default
}
@@ -232,25 +224,9 @@ export function getCurrentLanguage(): SupportedLanguage {
export function t(key: string, params?: Record<string, string>): string {
const translation = translations[key] ?? key;
if (Array.isArray(translation)) {
return key;
}
return interpolate(translation, params);
}
/**
* Get a translation that is an array of strings.
* @param key The translation key
* @returns The array of strings, or an empty array if not found or not an array
*/
export function ta(key: string): string[] {
const translation = translations[key];
if (Array.isArray(translation)) {
return translation;
}
return [];
}
export async function initializeI18n(
lang?: SupportedLanguage | 'auto',
): Promise<void> {
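
The hunks above narrow the translation dictionaries to plain string maps and drop the array-returning ta() helper, leaving t() as a key lookup followed by placeholder interpolation. A minimal sketch of the simplified path, assuming an interpolate() helper that substitutes {{param}} placeholders as the module implies:

type TranslationDict = Record<string, string>;

// Reassigned when a language pack is loaded.
let translations: TranslationDict = {};

function interpolate(text: string, params?: Record<string, string>): string {
  if (!params) return text;
  // Replace each {{name}} placeholder with its parameter, if provided.
  return text.replace(/\{\{(\w+)\}\}/g, (match, name) =>
    name in params ? params[name] : match,
  );
}

function t(key: string, params?: Record<string, string>): string {
  // The key itself is the fallback, which is why the English dictionary
  // below maps each phrase to itself.
  return interpolate(translations[key] ?? key, params);
}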

View File

@@ -1,48 +0,0 @@
/**
* @license
* Copyright 2025 Qwen team
* SPDX-License-Identifier: Apache-2.0
*/
export type SupportedLanguage = 'en' | 'zh' | 'ru' | 'de' | string;
export interface LanguageDefinition {
/** The internal locale code used by the i18n system (e.g., 'en', 'zh'). */
code: SupportedLanguage;
/** The standard name used in UI settings (e.g., 'en-US', 'zh-CN'). */
id: string;
/** The full English name of the language (e.g., 'English', 'Chinese'). */
fullName: string;
}
export const SUPPORTED_LANGUAGES: readonly LanguageDefinition[] = [
{
code: 'en',
id: 'en-US',
fullName: 'English',
},
{
code: 'zh',
id: 'zh-CN',
fullName: 'Chinese',
},
{
code: 'ru',
id: 'ru-RU',
fullName: 'Russian',
},
{
code: 'de',
id: 'de-DE',
fullName: 'German',
},
];
/**
* Maps a locale code to its English language name.
* Used for LLM output language instructions.
*/
export function getLanguageNameFromLocale(locale: SupportedLanguage): string {
const lang = SUPPORTED_LANGUAGES.find((l) => l.code === locale);
return lang?.fullName || 'English';
}

View File

@@ -102,8 +102,8 @@ export default {
'Theme "{{themeName}}" not found.': 'Theme "{{themeName}}" not found.',
'Theme "{{themeName}}" not found in selected scope.':
'Theme "{{themeName}}" not found in selected scope.',
'Clear conversation history and free up context':
'Clear conversation history and free up context',
'clear the screen and conversation history':
'clear the screen and conversation history',
'Compresses the context by replacing it with a summary.':
'Compresses the context by replacing it with a summary.',
'open full Qwen Code documentation in your browser':
@@ -258,6 +258,8 @@ export default {
', Tab to change focus': ', Tab to change focus',
'To see changes, Qwen Code must be restarted. Press r to exit and apply changes now.':
'To see changes, Qwen Code must be restarted. Press r to exit and apply changes now.',
'The command "/{{command}}" is not supported in non-interactive mode.':
'The command "/{{command}}" is not supported in non-interactive mode.',
// ============================================================================
// Settings Labels
// ============================================================================
@@ -590,6 +592,12 @@ export default {
'No conversation found to summarize.': 'No conversation found to summarize.',
'Failed to generate project context summary: {{error}}':
'Failed to generate project context summary: {{error}}',
'Saved project summary to {{filePathForDisplay}}.':
'Saved project summary to {{filePathForDisplay}}.',
'Saving project summary...': 'Saving project summary...',
'Generating project summary...': 'Generating project summary...',
'Failed to generate summary - no text content received from LLM response':
'Failed to generate summary - no text content received from LLM response',
// ============================================================================
// Commands - Model
@@ -604,10 +612,9 @@ export default {
// ============================================================================
// Commands - Clear
// ============================================================================
'Starting a new session, resetting chat, and clearing terminal.':
'Starting a new session, resetting chat, and clearing terminal.',
'Starting a new session and clearing.':
'Starting a new session and clearing.',
'Clearing terminal and resetting chat.':
'Clearing terminal and resetting chat.',
'Clearing terminal.': 'Clearing terminal.',
// ============================================================================
// Commands - Compress
@@ -928,138 +935,192 @@ export default {
// ============================================================================
'Waiting for user confirmation...': 'Waiting for user confirmation...',
'(esc to cancel, {{time}})': '(esc to cancel, {{time}})',
// ============================================================================
// Loading Phrases
// ============================================================================
WITTY_LOADING_PHRASES: [
"I'm Feeling Lucky",
'Shipping awesomeness... ',
'Painting the serifs back on...',
'Navigating the slime mold...',
'Consulting the digital spirits...',
'Reticulating splines...',
'Warming up the AI hamsters...',
'Asking the magic conch shell...',
'Generating witty retort...',
'Polishing the algorithms...',
"I'm Feeling Lucky": "I'm Feeling Lucky",
'Shipping awesomeness... ': 'Shipping awesomeness... ',
'Painting the serifs back on...': 'Painting the serifs back on...',
'Navigating the slime mold...': 'Navigating the slime mold...',
'Consulting the digital spirits...': 'Consulting the digital spirits...',
'Reticulating splines...': 'Reticulating splines...',
'Warming up the AI hamsters...': 'Warming up the AI hamsters...',
'Asking the magic conch shell...': 'Asking the magic conch shell...',
'Generating witty retort...': 'Generating witty retort...',
'Polishing the algorithms...': 'Polishing the algorithms...',
"Don't rush perfection (or my code)...":
"Don't rush perfection (or my code)...",
'Brewing fresh bytes...',
'Counting electrons...',
'Engaging cognitive processors...',
'Brewing fresh bytes...': 'Brewing fresh bytes...',
'Counting electrons...': 'Counting electrons...',
'Engaging cognitive processors...': 'Engaging cognitive processors...',
'Checking for syntax errors in the universe...':
'Checking for syntax errors in the universe...',
'One moment, optimizing humor...',
'Shuffling punchlines...',
'Untangling neural nets...',
'Compiling brilliance...',
'Loading wit.exe...',
'Summoning the cloud of wisdom...',
'Preparing a witty response...',
'One moment, optimizing humor...': 'One moment, optimizing humor...',
'Shuffling punchlines...': 'Shuffling punchlines...',
'Untangling neural nets...': 'Untangling neural nets...',
'Compiling brilliance...': 'Compiling brilliance...',
'Loading wit.exe...': 'Loading wit.exe...',
'Summoning the cloud of wisdom...': 'Summoning the cloud of wisdom...',
'Preparing a witty response...': 'Preparing a witty response...',
"Just a sec, I'm debugging reality...":
"Just a sec, I'm debugging reality...",
'Confuzzling the options...',
'Tuning the cosmic frequencies...',
'Confuzzling the options...': 'Confuzzling the options...',
'Tuning the cosmic frequencies...': 'Tuning the cosmic frequencies...',
'Crafting a response worthy of your patience...':
'Crafting a response worthy of your patience...',
'Compiling the 1s and 0s...',
'Compiling the 1s and 0s...': 'Compiling the 1s and 0s...',
'Resolving dependencies... and existential crises...':
'Resolving dependencies... and existential crises...',
'Defragmenting memories... both RAM and personal...':
'Defragmenting memories... both RAM and personal...',
'Rebooting the humor module...',
'Rebooting the humor module...': 'Rebooting the humor module...',
'Caching the essentials (mostly cat memes)...':
'Caching the essentials (mostly cat memes)...',
'Optimizing for ludicrous speed',
'Optimizing for ludicrous speed': 'Optimizing for ludicrous speed',
"Swapping bits... don't tell the bytes...":
"Swapping bits... don't tell the bytes...",
'Garbage collecting... be right back...':
'Garbage collecting... be right back...',
'Assembling the interwebs...',
'Converting coffee into code...',
'Updating the syntax for reality...',
'Rewiring the synapses...',
'Assembling the interwebs...': 'Assembling the interwebs...',
'Converting coffee into code...': 'Converting coffee into code...',
'Updating the syntax for reality...': 'Updating the syntax for reality...',
'Rewiring the synapses...': 'Rewiring the synapses...',
'Looking for a misplaced semicolon...':
'Looking for a misplaced semicolon...',
"Greasin' the cogs of the machine...",
'Pre-heating the servers...',
'Calibrating the flux capacitor...',
'Engaging the improbability drive...',
'Channeling the Force...',
"Greasin' the cogs of the machine...": "Greasin' the cogs of the machine...",
'Pre-heating the servers...': 'Pre-heating the servers...',
'Calibrating the flux capacitor...': 'Calibrating the flux capacitor...',
'Engaging the improbability drive...': 'Engaging the improbability drive...',
'Channeling the Force...': 'Channeling the Force...',
'Aligning the stars for optimal response...':
'Aligning the stars for optimal response...',
'So say we all...',
'Loading the next great idea...',
"Just a moment, I'm in the zone...",
'So say we all...': 'So say we all...',
'Loading the next great idea...': 'Loading the next great idea...',
"Just a moment, I'm in the zone...": "Just a moment, I'm in the zone...",
'Preparing to dazzle you with brilliance...':
'Preparing to dazzle you with brilliance...',
"Just a tick, I'm polishing my wit...":
"Just a tick, I'm polishing my wit...",
"Hold tight, I'm crafting a masterpiece...":
"Hold tight, I'm crafting a masterpiece...",
"Just a jiffy, I'm debugging the universe...":
"Just a jiffy, I'm debugging the universe...",
"Just a moment, I'm aligning the pixels...":
"Just a moment, I'm aligning the pixels...",
"Just a sec, I'm optimizing the humor...":
"Just a sec, I'm optimizing the humor...",
"Just a moment, I'm tuning the algorithms...":
"Just a moment, I'm tuning the algorithms...",
'Warp speed engaged...',
'Warp speed engaged...': 'Warp speed engaged...',
'Mining for more Dilithium crystals...':
'Mining for more Dilithium crystals...',
"Don't panic...",
'Following the white rabbit...',
"Don't panic...": "Don't panic...",
'Following the white rabbit...': 'Following the white rabbit...',
'The truth is in here... somewhere...':
'The truth is in here... somewhere...',
'Blowing on the cartridge...',
'Loading... Do a barrel roll!',
'Waiting for the respawn...',
'Blowing on the cartridge...': 'Blowing on the cartridge...',
'Loading... Do a barrel roll!': 'Loading... Do a barrel roll!',
'Waiting for the respawn...': 'Waiting for the respawn...',
'Finishing the Kessel Run in less than 12 parsecs...':
'Finishing the Kessel Run in less than 12 parsecs...',
"The cake is not a lie, it's just still loading...":
"The cake is not a lie, it's just still loading...",
'Fiddling with the character creation screen...':
'Fiddling with the character creation screen...',
"Just a moment, I'm finding the right meme...":
"Just a moment, I'm finding the right meme...",
"Pressing 'A' to continue...",
'Herding digital cats...',
'Polishing the pixels...',
"Pressing 'A' to continue...": "Pressing 'A' to continue...",
'Herding digital cats...': 'Herding digital cats...',
'Polishing the pixels...': 'Polishing the pixels...',
'Finding a suitable loading screen pun...':
'Finding a suitable loading screen pun...',
'Distracting you with this witty phrase...':
'Distracting you with this witty phrase...',
'Almost there... probably...',
'Almost there... probably...': 'Almost there... probably...',
'Our hamsters are working as fast as they can...':
'Our hamsters are working as fast as they can...',
'Giving Cloudy a pat on the head...',
'Petting the cat...',
'Rickrolling my boss...',
'Giving Cloudy a pat on the head...': 'Giving Cloudy a pat on the head...',
'Petting the cat...': 'Petting the cat...',
'Rickrolling my boss...': 'Rickrolling my boss...',
'Never gonna give you up, never gonna let you down...':
'Never gonna give you up, never gonna let you down...',
'Slapping the bass...',
'Tasting the snozberries...',
'Slapping the bass...': 'Slapping the bass...',
'Tasting the snozberries...': 'Tasting the snozberries...',
"I'm going the distance, I'm going for speed...":
"I'm going the distance, I'm going for speed...",
'Is this the real life? Is this just fantasy?...':
'Is this the real life? Is this just fantasy?...',
"I've got a good feeling about this...":
"I've got a good feeling about this...",
'Poking the bear...',
'Poking the bear...': 'Poking the bear...',
'Doing research on the latest memes...':
'Doing research on the latest memes...',
'Figuring out how to make this more witty...':
'Figuring out how to make this more witty...',
'Hmmm... let me think...',
'Hmmm... let me think...': 'Hmmm... let me think...',
'What do you call a fish with no eyes? A fsh...':
'What do you call a fish with no eyes? A fsh...',
'Why did the computer go to therapy? It had too many bytes...':
'Why did the computer go to therapy? It had too many bytes...',
"Why don't programmers like nature? It has too many bugs...":
"Why don't programmers like nature? It has too many bugs...",
'Why do programmers prefer dark mode? Because light attracts bugs...':
'Why do programmers prefer dark mode? Because light attracts bugs...',
'Why did the developer go broke? Because they used up all their cache...':
'Why did the developer go broke? Because they used up all their cache...',
"What can you do with a broken pencil? Nothing, it's pointless...":
"What can you do with a broken pencil? Nothing, it's pointless...",
'Applying percussive maintenance...',
'Applying percussive maintenance...': 'Applying percussive maintenance...',
'Searching for the correct USB orientation...':
'Searching for the correct USB orientation...',
'Ensuring the magic smoke stays inside the wires...':
'Ensuring the magic smoke stays inside the wires...',
'Rewriting in Rust for no particular reason...':
'Rewriting in Rust for no particular reason...',
'Trying to exit Vim...',
'Spinning up the hamster wheel...',
'Trying to exit Vim...': 'Trying to exit Vim...',
'Spinning up the hamster wheel...': 'Spinning up the hamster wheel...',
"That's not a bug, it's an undocumented feature...":
"That's not a bug, it's an undocumented feature...",
'Engage.',
"I'll be back... with an answer.",
'My other process is a TARDIS...',
'Engage.': 'Engage.',
"I'll be back... with an answer.": "I'll be back... with an answer.",
'My other process is a TARDIS...': 'My other process is a TARDIS...',
'Communing with the machine spirit...':
'Communing with the machine spirit...',
'Letting the thoughts marinate...',
'Letting the thoughts marinate...': 'Letting the thoughts marinate...',
'Just remembered where I put my keys...':
'Just remembered where I put my keys...',
'Pondering the orb...',
'Pondering the orb...': 'Pondering the orb...',
"I've seen things you people wouldn't believe... like a user who reads loading messages.":
"I've seen things you people wouldn't believe... like a user who reads loading messages.",
'Initiating thoughtful gaze...',
'Initiating thoughtful gaze...': 'Initiating thoughtful gaze...',
"What's a computer's favorite snack? Microchips.":
"What's a computer's favorite snack? Microchips.",
"Why do Java developers wear glasses? Because they don't C#.":
"Why do Java developers wear glasses? Because they don't C#.",
'Charging the laser... pew pew!',
'Dividing by zero... just kidding!',
'Charging the laser... pew pew!': 'Charging the laser... pew pew!',
'Dividing by zero... just kidding!': 'Dividing by zero... just kidding!',
'Looking for an adult superviso... I mean, processing.':
'Looking for an adult superviso... I mean, processing.',
'Making it go beep boop.',
'Making it go beep boop.': 'Making it go beep boop.',
'Buffering... because even AIs need a moment.':
'Buffering... because even AIs need a moment.',
'Entangling quantum particles for a faster response...':
'Entangling quantum particles for a faster response...',
'Polishing the chrome... on the algorithms.':
'Polishing the chrome... on the algorithms.',
'Are you not entertained? (Working on it!)':
'Are you not entertained? (Working on it!)',
'Summoning the code gremlins... to help, of course.':
'Summoning the code gremlins... to help, of course.',
'Just waiting for the dial-up tone to finish...':
'Just waiting for the dial-up tone to finish...',
'Recalibrating the humor-o-meter.',
'Recalibrating the humor-o-meter.': 'Recalibrating the humor-o-meter.',
'My other loading screen is even funnier.':
'My other loading screen is even funnier.',
"Pretty sure there's a cat walking on the keyboard somewhere...":
"Pretty sure there's a cat walking on the keyboard somewhere...",
'Enhancing... Enhancing... Still loading.':
'Enhancing... Enhancing... Still loading.',
"It's not a bug, it's a feature... of this loading screen.":
"It's not a bug, it's a feature... of this loading screen.",
'Have you tried turning it off and on again? (The loading screen, not me.)':
'Have you tried turning it off and on again? (The loading screen, not me.)',
'Constructing additional pylons...',
],
'Constructing additional pylons...': 'Constructing additional pylons...',
};
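
In the hunk above, the static WITTY_LOADING_PHRASES array becomes one dictionary entry per phrase, so loading phrases resolve through the same per-key lookup as every other UI string; in English each key simply maps to itself. A hypothetical consumer, reusing the t() helper sketched earlier:

// Hypothetical: pick a random loading-phrase key and localize it.
const PHRASE_KEYS = [
  "I'm Feeling Lucky",
  'Reticulating splines...',
  'Loading wit.exe...',
] as const;

function randomLoadingPhrase(): string {
  const key = PHRASE_KEYS[Math.floor(Math.random() * PHRASE_KEYS.length)];
  return t(key); // falls back to the English key if no translation exists
}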

View File

@@ -103,8 +103,8 @@ export default {
'Theme "{{themeName}}" not found.': 'Тема "{{themeName}}" не найдена.',
'Theme "{{themeName}}" not found in selected scope.':
'Тема "{{themeName}}" не найдена в выбранной области.',
'Clear conversation history and free up context':
'Очистить историю диалога и освободить контекст',
'clear the screen and conversation history':
'Очистка экрана и истории диалога',
'Compresses the context by replacing it with a summary.':
'Сжатие контекста заменой на краткую сводку',
'open full Qwen Code documentation in your browser':
@@ -260,7 +260,8 @@ export default {
', Tab to change focus': ', Tab для смены фокуса',
'To see changes, Qwen Code must be restarted. Press r to exit and apply changes now.':
'Для применения изменений необходимо перезапустить Qwen Code. Нажмите r для выхода и применения изменений.',
'The command "/{{command}}" is not supported in non-interactive mode.':
'Команда "/{{command}}" не поддерживается в неинтерактивном режиме.',
// ============================================================================
// Метки настроек
// ============================================================================
@@ -313,7 +314,6 @@ export default {
'Tool Output Truncation Lines': 'Лимит строк вывода инструментов',
'Folder Trust': 'Доверие к папке',
'Vision Model Preview': 'Визуальная модель (предпросмотр)',
'Tool Schema Compliance': 'Соответствие схеме инструмента',
// Варианты перечислений настроек
'Auto (detect from system)': 'Авто (определить из системы)',
Text: 'Текст',
@@ -342,8 +342,8 @@ export default {
'Установка предпочитаемого внешнего редактора',
'Manage extensions': 'Управление расширениями',
'List active extensions': 'Показать активные расширения',
'Update extensions. Usage: update <extension-names>|--all':
'Обновить расширения. Использование: update <extension-names>|--all',
'Update extensions. Usage: update |--all':
'Обновить расширения. Использование: update |--all',
'manage IDE integration': 'Управление интеграцией с IDE',
'check status of IDE integration': 'Проверить статус интеграции с IDE',
'install required IDE companion for {{ideName}}':
@@ -401,8 +401,7 @@ export default {
'Set LLM output language': 'Установка языка вывода LLM',
'Usage: /language ui [zh-CN|en-US]':
'Использование: /language ui [zh-CN|en-US|ru-RU]',
'Usage: /language output <language>':
'Использование: /language output <language>',
'Usage: /language output ': 'Использование: /language output ',
'Example: /language output 中文': 'Пример: /language output 中文',
'Example: /language output English': 'Пример: /language output English',
'Example: /language output 日本語': 'Пример: /language output 日本語',
@@ -419,8 +418,9 @@ export default {
'To request additional UI language packs, please open an issue on GitHub.':
'Для запроса дополнительных языковых пакетов интерфейса, пожалуйста, создайте обращение на GitHub.',
'Available options:': 'Доступные варианты:',
' - zh-CN: Simplified Chinese': ' - zh-CN: Упрощенный китайский',
' - en-US: English': ' - en-US: Английский',
' - zh-CN: Simplified Chinese': ' - zh-CN: Упрощенный китайский',
' - en-US: English': ' - en-US: Английский',
' - ru-RU: Russian': ' - ru-RU: Русский',
'Set UI language to Simplified Chinese (zh-CN)':
'Установить язык интерфейса на упрощенный китайский (zh-CN)',
'Set UI language to English (en-US)':
@@ -436,8 +436,8 @@ export default {
'Режим подтверждения изменен на: {{mode}}',
'Approval mode changed to: {{mode}} (saved to {{scope}} settings{{location}})':
'Режим подтверждения изменен на: {{mode}} (сохранено в настройках {{scope}}{{location}})',
'Usage: /approval-mode <mode> [--session|--user|--project]':
'Использование: /approval-mode <mode> [--session|--user|--project]',
'Usage: /approval-mode [--session|--user|--project]':
'Использование: /approval-mode [--session|--user|--project]',
'Scope subcommands do not accept additional arguments.':
'Подкоманды области не принимают дополнительных аргументов.',
'Plan mode - Analyze only, do not modify files or execute commands':
@@ -589,8 +589,8 @@ export default {
'Ошибка при экспорте диалога: {{error}}',
'Conversation shared to {{filePath}}': 'Диалог экспортирован в {{filePath}}',
'No conversation found to share.': 'Нет диалога для экспорта.',
'Share the current conversation to a markdown or json file. Usage: /chat share <file>':
'Экспортировать текущий диалог в markdown или json файл. Использование: /chat share <файл>',
'Share the current conversation to a markdown or json file. Usage: /chat share <путь-к-файлу>':
'Экспортировать текущий диалог в markdown или json файл. Использование: /chat share <путь-к-файлу>',
// ============================================================================
// Команды - Резюме
@@ -605,6 +605,12 @@ export default {
'Не найдено диалогов для создания сводки.',
'Failed to generate project context summary: {{error}}':
'Не удалось сгенерировать сводку контекста проекта: {{error}}',
'Saved project summary to {{filePathForDisplay}}.':
'Сводка проекта сохранена в {{filePathForDisplay}}',
'Saving project summary...': 'Сохранение сводки проекта...',
'Generating project summary...': 'Генерация сводки проекта...',
'Failed to generate summary - no text content received from LLM response':
'Не удалось сгенерировать сводку - не получен текстовый контент из ответа LLM',
// ============================================================================
// Команды - Модель
@@ -619,9 +625,8 @@ export default {
// ============================================================================
// Команды - Очистка
// ============================================================================
'Starting a new session, resetting chat, and clearing terminal.':
'Начало новой сессии, сброс чата и очистка терминала.',
'Starting a new session and clearing.': 'Начало новой сессии и очистка.',
'Clearing terminal and resetting chat.': 'Очистка терминала и сброс чата.',
'Clearing terminal.': 'Очистка терминала.',
// ============================================================================
// Команды - Сжатие
@@ -652,8 +657,8 @@ export default {
'Команда /directory add не поддерживается в ограничительных профилях песочницы. Пожалуйста, используйте --include-directories при запуске сессии.',
"Error adding '{{path}}': {{error}}":
"Ошибка при добавлении '{{path}}': {{error}}",
'Successfully added QWEN.md files from the following directories if there are:\n- {{directories}}':
'Успешно добавлены файлы QWEN.md из следующих директорий (если они есть):\n- {{directories}}',
'Successfully added GEMINI.md files from the following directories if there are:\n- {{directories}}':
'Успешно добавлены файлы GEMINI.md из следующих директорий (если они есть):\n- {{directories}}',
'Error refreshing memory: {{error}}':
'Ошибка при обновлении памяти: {{error}}',
'Successfully added directories:\n- {{directories}}':
@@ -886,7 +891,6 @@ export default {
// Экран выхода / Статистика
// ============================================================================
'Agent powering down. Goodbye!': 'Агент завершает работу. До свидания!',
'To continue this session, run': 'Для продолжения этой сессии, выполните',
'Interaction Summary': 'Сводка взаимодействия',
'Session ID:': 'ID сессии:',
'Tool Calls:': 'Вызовы инструментов:',
@@ -946,140 +950,179 @@ export default {
'Waiting for user confirmation...':
'Ожидание подтверждения от пользователя...',
'(esc to cancel, {{time}})': '(esc для отмены, {{time}})',
// ============================================================================
// ============================================================================
// Loading Phrases
// ============================================================================
WITTY_LOADING_PHRASES: [
'Мне повезёт!',
'Доставляем крутизну... ',
'Рисуем засечки на буквах...',
'Пробираемся через слизевиков...',
'Советуемся с цифровыми духами...',
'Сглаживание сплайнов...',
'Разогреваем ИИ-хомячков...',
'Спрашиваем волшебную ракушку...',
'Генерируем остроумный ответ...',
'Полируем алгоритмы...',
"I'm Feeling Lucky": 'Мне повезёт!',
'Shipping awesomeness... ': 'Доставляем крутизну... ',
'Painting the serifs back on...': 'Рисуем засечки на буквах...',
'Navigating the slime mold...': 'Пробираемся через слизевиков...',
'Consulting the digital spirits...': 'Советуемся с цифровыми духами...',
'Reticulating splines...': 'Сглаживание сплайнов...',
'Warming up the AI hamsters...': 'Разогреваем ИИ-хомячков...',
'Asking the magic conch shell...': 'Спрашиваем волшебную ракушку...',
'Generating witty retort...': 'Генерируем остроумный ответ...',
'Polishing the algorithms...': 'Полируем алгоритмы...',
"Don't rush perfection (or my code)...":
'Не торопите совершенство (или мой код)...',
'Завариваем свежие байты...',
'Пересчитываем электроны...',
'Задействуем когнитивные процессоры...',
'Brewing fresh bytes...': 'Завариваем свежие байты...',
'Counting electrons...': 'Пересчитываем электроны...',
'Engaging cognitive processors...': 'Задействуем когнитивные процессоры...',
'Checking for syntax errors in the universe...':
'Ищем синтаксические ошибки во вселенной...',
'Секундочку, оптимизируем юмор...',
'Перетасовываем панчлайны...',
'Распутываем нейросети...',
'Компилируем гениальность...',
'Загружаем yumor.exe...',
'Призываем облако мудрости...',
'Готовим остроумный ответ...',
'Секунду, идёт отладка реальности...',
'Запутываем варианты...',
'Настраиваем космические частоты...',
'One moment, optimizing humor...': 'Секундочку, оптимизируем юмор...',
'Shuffling punchlines...': 'Перетасовываем панчлайны...',
'Untangling neural nets...': 'Распутываем нейросети...',
'Compiling brilliance...': 'Компилируем гениальность...',
'Loading wit.exe...': 'Загружаем yumor.exe...',
'Summoning the cloud of wisdom...': 'Призываем облако мудрости...',
'Preparing a witty response...': 'Готовим остроумный ответ...',
"Just a sec, I'm debugging reality...": 'Секунду, идёт отладка реальности...',
'Confuzzling the options...': 'Запутываем варианты...',
'Tuning the cosmic frequencies...': 'Настраиваем космические частоты...',
'Crafting a response worthy of your patience...':
'Создаем ответ, достойный вашего терпения...',
'Компилируем единички и нолики...',
'Compiling the 1s and 0s...': 'Компилируем единички и нолики...',
'Resolving dependencies... and existential crises...':
'Разрешаем зависимости... и экзистенциальные кризисы...',
'Defragmenting memories... both RAM and personal...':
'Дефрагментация памяти... и оперативной, и личной...',
'Перезагрузка модуля юмора...',
'Rebooting the humor module...': 'Перезагрузка модуля юмора...',
'Caching the essentials (mostly cat memes)...':
'Кэшируем самое важное (в основном мемы с котиками)...',
'Оптимизация для безумной скорости',
'Optimizing for ludicrous speed': 'Оптимизация для безумной скорости',
"Swapping bits... don't tell the bytes...":
'Меняем биты... только байтам не говорите...',
'Сборка мусора... скоро вернусь...',
'Сборка интернетов...',
'Превращаем кофе в код...',
'Обновляем синтаксис реальности...',
'Переподключаем синапсы...',
'Ищем лишнюю точку с запятой...',
'Смазываем шестерёнки машины...',
'Разогреваем серверы...',
'Калибруем потоковый накопитель...',
'Включаем двигатель невероятности...',
'Направляем Силу...',
'Garbage collecting... be right back...': 'Сборка мусора... скоро вернусь...',
'Assembling the interwebs...': 'Сборка интернетов...',
'Converting coffee into code...': 'Превращаем кофе в код...',
'Updating the syntax for reality...': 'Обновляем синтаксис реальности...',
'Rewiring the synapses...': 'Переподключаем синапсы...',
'Looking for a misplaced semicolon...': 'Ищем лишнюю точку с запятой...',
"Greasin' the cogs of the machine...": 'Смазываем шестерёнки машины...',
'Pre-heating the servers...': 'Разогреваем серверы...',
'Calibrating the flux capacitor...': 'Калибруем потоковый накопитель...',
'Engaging the improbability drive...': 'Включаем двигатель невероятности...',
'Channeling the Force...': 'Направляем Силу...',
'Aligning the stars for optimal response...':
'Выравниваем звёзды для оптимального ответа...',
'Так скажем мы все...',
'Загрузка следующей великой идеи...',
'Минутку, я в потоке...',
'So say we all...': 'Так скажем мы все...',
'Loading the next great idea...': 'Загрузка следующей великой идеи...',
"Just a moment, I'm in the zone...": 'Минутку, я в потоке...',
'Preparing to dazzle you with brilliance...':
'Готовлюсь ослепить вас гениальностью...',
'Секунду, полирую остроумие...',
'Держитесь, создаю шедевр...',
"Just a tick, I'm polishing my wit...": 'Секунду, полирую остроумие...',
"Hold tight, I'm crafting a masterpiece...": 'Держитесь, создаю шедевр...',
"Just a jiffy, I'm debugging the universe...":
'Мигом, отлаживаю вселенную...',
'Момент, выравниваю пиксели...',
'Секунду, оптимизирую юмор...',
"Just a moment, I'm aligning the pixels...": 'Момент, выравниваю пиксели...',
"Just a sec, I'm optimizing the humor...": 'Секунду, оптимизирую юмор...',
"Just a moment, I'm tuning the algorithms...":
'Момент, настраиваю алгоритмы...',
'Варп-прыжок активирован...',
'Добываем кристаллы дилития...',
'Без паники...',
'Следуем за белым кроликом...',
'Истина где-то здесь... внутри...',
'Продуваем картридж...',
'Загрузка... Сделай бочку!',
'Ждем респауна...',
'Warp speed engaged...': 'Варп-скорость включена...',
'Mining for more Dilithium crystals...': 'Добываем кристаллы дилития...',
"Don't panic...": 'Без паники...',
'Following the white rabbit...': 'Следуем за белым кроликом...',
'The truth is in here... somewhere...': 'Истина где-то здесь... внутри...',
'Blowing on the cartridge...': 'Продуваем картридж...',
'Loading... Do a barrel roll!': 'Загрузка... Сделай бочку!',
'Waiting for the respawn...': 'Ждем респауна...',
'Finishing the Kessel Run in less than 12 parsecs...':
'Делаем Дугу Кесселя менее чем за 12 парсеков...',
"The cake is not a lie, it's just still loading...":
'Тортик — не ложь, он просто ещё грузится...',
'Fiddling with the character creation screen...':
'Возимся с экраном создания персонажа...',
"Just a moment, I'm finding the right meme...":
'Минутку, ищу подходящий мем...',
"Нажимаем 'A' для продолжения...",
'Пасём цифровых котов...',
'Полируем пиксели...',
"Pressing 'A' to continue...": "Нажимаем 'A' для продолжения...",
'Herding digital cats...': 'Пасём цифровых котов...',
'Polishing the pixels...': 'Полируем пиксели...',
'Finding a suitable loading screen pun...':
'Ищем подходящий каламбур для экрана загрузки...',
'Distracting you with this witty phrase...':
'Отвлекаем вас этой остроумной фразой...',
'Почти готово... вроде...',
'Almost there... probably...': 'Почти готово... вроде...',
'Our hamsters are working as fast as they can...':
'Наши хомячки работают изо всех сил...',
'Гладим Облачко по голове...',
'Гладим кота...',
'Рикроллим начальника...',
'Giving Cloudy a pat on the head...': 'Гладим Облачко по голове...',
'Petting the cat...': 'Гладим кота...',
'Rickrolling my boss...': 'Рикроллим начальника...',
'Never gonna give you up, never gonna let you down...':
'Never gonna give you up, never gonna let you down...',
'Лабаем бас-гитару...',
'Пробуем снузберри на вкус...',
'Slapping the bass...': 'Лабаем бас-гитару...',
'Tasting the snozberries...': 'Пробуем снузберри на вкус...',
"I'm going the distance, I'm going for speed...":
'Иду до конца, иду на скорость...',
'Is this the real life? Is this just fantasy?...':
'Is this the real life? Is this just fantasy?...',
'У меня хорошее предчувствие...',
'Дразним медведя... (Не лезь...)',
'Изучаем свежие мемы...',
"I've got a good feeling about this...": 'У меня хорошее предчувствие...',
'Poking the bear...': 'Дразним медведя... (Не лезь...)',
'Doing research on the latest memes...': 'Изучаем свежие мемы...',
'Figuring out how to make this more witty...':
'Думаем, как сделать это остроумнее...',
'Хмм... дайте подумать...',
'Hmmm... let me think...': 'Хмм... дайте подумать...',
'What do you call a fish with no eyes? A fsh...':
'Как называется бумеранг, который не возвращается? Палка...',
'Why did the computer go to therapy? It had too many bytes...':
'Почему компьютер простудился? Потому что оставил окна открытыми...',
"Why don't programmers like nature? It has too many bugs...":
'Почему программисты не любят гулять на улице? Там среда не настроена...',
'Why do programmers prefer dark mode? Because light attracts bugs...':
'Почему программисты предпочитают тёмную тему? Потому что в темноте не видно багов...',
'Why did the developer go broke? Because they used up all their cache...':
'Почему разработчик разорился? Потому что потратил весь свой кэш...',
"What can you do with a broken pencil? Nothing, it's pointless...":
'Что можно делать со сломанным карандашом? Ничего — он тупой...',
'Провожу настройку методом тыка...',
'Applying percussive maintenance...': 'Провожу настройку методом тыка...',
'Searching for the correct USB orientation...':
'Ищем, какой стороной вставлять флешку...',
'Ensuring the magic smoke stays inside the wires...':
'Следим, чтобы волшебный дым не вышел из проводов...',
'Rewriting in Rust for no particular reason...':
'Переписываем всё на Rust без особой причины...',
'Пытаемся выйти из Vim...',
'Раскручиваем колесо для хомяка...',
'Это не баг, а фича...',
'Поехали!',
'Я вернусь... с ответом.',
'Мой другой процесс — это ТАРДИС...',
'Общаемся с духом машины...',
'Даем мыслям замариноваться...',
'Trying to exit Vim...': 'Пытаемся выйти из Vim...',
'Spinning up the hamster wheel...': 'Раскручиваем колесо для хомяка...',
"That's not a bug, it's an undocumented feature...": 'Это не баг, а фича...',
'Engage.': 'Поехали!',
"I'll be back... with an answer.": 'Я вернусь... с ответом.',
'My other process is a TARDIS...': 'Мой другой процесс — это ТАРДИС...',
'Communing with the machine spirit...': 'Общаемся с духом машины...',
'Letting the thoughts marinate...': 'Даем мыслям замариноваться...',
'Just remembered where I put my keys...':
'Только что вспомнил, куда положил ключи...',
'Размышляю над сферой...',
'Я видел такое, что вам, людям, и не снилось... пользователя, читающего эти сообщения.',
'Инициируем задумчивый взгляд...',
'Pondering the orb...': 'Размышляю над сферой...',
"I've seen things you people wouldn't believe... like a user who reads loading messages.":
'Я видел такое, во что вы, люди, просто не поверите... например, пользователя, читающего сообщения загрузки.',
'Initiating thoughtful gaze...': 'Инициируем задумчивый взгляд...',
"What's a computer's favorite snack? Microchips.":
'Что сервер заказывает в баре? Пинг-коладу.',
"Why do Java developers wear glasses? Because they don't C#.":
'Почему Java-разработчики не убираются дома? Они ждут сборщик мусора...',
'Заряжаем лазер... пиу-пиу!',
'Делим на ноль... шучу!',
'Charging the laser... pew pew!': 'Заряжаем лазер... пиу-пиу!',
'Dividing by zero... just kidding!': 'Делим на ноль... шучу!',
'Looking for an adult superviso... I mean, processing.':
'Ищу взрослых для присмот... в смысле, обрабатываю.',
'Делаем бип-буп.',
'Буферизация... даже ИИ нужно время подумать.',
'Making it go beep boop.': 'Делаем бип-буп.',
'Buffering... because even AIs need a moment.':
'Буферизация... даже ИИ нужно мгновение.',
'Entangling quantum particles for a faster response...':
'Запутываем квантовые частицы для быстрого ответа...',
'Polishing the chrome... on the algorithms.':
'Полируем хром... на алгоритмах.',
'Are you not entertained? (Working on it!)':
'Вы ещё не развлеклись?! Разве вы не за этим сюда пришли?!',
'Summoning the code gremlins... to help, of course.':
'Призываем гремлинов кода... для помощи, конечно же.',
'Just waiting for the dial-up tone to finish...':
'Ждем, пока закончится звук dial-up модема...',
'Перекалибровка юморометра.',
'Recalibrating the humor-o-meter.': 'Перекалибровка юморометра.',
'My other loading screen is even funnier.':
'Мой другой экран загрузки ещё смешнее.',
"Pretty sure there's a cat walking on the keyboard somewhere...":
'Кажется, где-то по клавиатуре гуляет кот...',
'Enhancing... Enhancing... Still loading.':
'Улучшаем... Ещё улучшаем... Всё ещё грузится.',
"It's not a bug, it's a feature... of this loading screen.":
'Это не баг, это фича... экрана загрузки.',
'Have you tried turning it off and on again? (The loading screen, not me.)':
'Пробовали выключить и включить снова? (Экран загрузки, не меня!)',
'Нужно построить больше пилонов...',
],
'Constructing additional pylons...': 'Нужно построить больше пилонов...',
};

View File

@@ -101,7 +101,7 @@ export default {
'Theme "{{themeName}}" not found.': '未找到主题 "{{themeName}}"。',
'Theme "{{themeName}}" not found in selected scope.':
'在所选作用域中未找到主题 "{{themeName}}"。',
'Clear conversation history and free up context': '清除对话历史并释放上下文',
'clear the screen and conversation history': '清屏并清除对话历史',
'Compresses the context by replacing it with a summary.':
'通过用摘要替换来压缩上下文',
'open full Qwen Code documentation in your browser':
@@ -249,6 +249,8 @@ export default {
', Tab to change focus': 'Tab 切换焦点',
'To see changes, Qwen Code must be restarted. Press r to exit and apply changes now.':
'要查看更改,必须重启 Qwen Code。按 r 退出并立即应用更改。',
'The command "/{{command}}" is not supported in non-interactive mode.':
'不支持在非交互模式下使用命令 "/{{command}}"。',
// ============================================================================
// Settings Labels
// ============================================================================
@@ -560,6 +562,12 @@ export default {
'No conversation found to summarize.': '未找到要总结的对话',
'Failed to generate project context summary: {{error}}':
'生成项目上下文摘要失败:{{error}}',
'Saved project summary to {{filePathForDisplay}}.':
'项目摘要已保存到 {{filePathForDisplay}}',
'Saving project summary...': '正在保存项目摘要...',
'Generating project summary...': '正在生成项目摘要...',
'Failed to generate summary - no text content received from LLM response':
'生成摘要失败 - 未从 LLM 响应中接收到文本内容',
// ============================================================================
// Commands - Model
@@ -573,9 +581,8 @@ export default {
// ============================================================================
// Commands - Clear
// ============================================================================
'Starting a new session, resetting chat, and clearing terminal.':
'正在开始新会话,重置聊天并清屏',
'Starting a new session and clearing.': '正在开始新会话并清屏。',
'Clearing terminal and resetting chat.': '正在清屏并重置聊天',
'Clearing terminal.': '正在清屏',
// ============================================================================
// Commands - Compress
@@ -881,39 +888,165 @@ export default {
// ============================================================================
'Waiting for user confirmation...': '等待用户确认...',
'(esc to cancel, {{time}})': '(按 esc 取消,{{time}})',
WITTY_LOADING_PHRASES: [
// --- 职场搬砖系列 ---
'正在努力搬砖,请稍候...',
'老板在身后,快加载啊!',
'头发掉光前,一定能加载完...',
'服务器正在深呼吸,准备放大招...',
'正在向服务器投喂咖啡...',
// --- 大厂黑话系列 ---
'正在赋能全链路,寻找关键抓手...',
'正在降本增效,优化加载路径...',
'正在打破部门壁垒,沉淀方法论...',
'正在拥抱变化,迭代核心价值...',
'正在对齐颗粒度,打磨底层逻辑...',
'大力出奇迹,正在强行加载...',
// --- 程序员自嘲系列 ---
'只要我不写代码,代码就没有 Bug...',
'正在把 Bug 转化为 Feature...',
'只要我不尴尬,Bug 就追不上我...',
'正在试图理解去年的自己写了什么...',
'正在猿力觉醒中,请耐心等待...',
// --- 合作愉快系列 ---
'正在询问产品经理:这需求是真的吗?',
'正在给产品经理画饼,请稍等...',
// --- 温暖治愈系列 ---
'每一行代码,都在努力让世界变得更好一点点...',
'每一个伟大的想法,都值得这份耐心的等待...',
'别急,美好的事物总是需要一点时间去酝酿...',
'愿你的代码永无 Bug,愿你的梦想终将成真...',
'哪怕只有 0.1% 的进度,也是在向目标靠近...',
'加载的是字节,承载的是对技术的热爱...',
],
"I'm Feeling Lucky": '我感觉很幸运',
'Shipping awesomeness... ': '正在运送精彩内容... ',
'Painting the serifs back on...': '正在重新绘制衬线...',
'Navigating the slime mold...': '正在导航粘液霉菌...',
'Consulting the digital spirits...': '正在咨询数字精灵...',
'Reticulating splines...': '正在网格化样条曲线...',
'Warming up the AI hamsters...': '正在预热 AI 仓鼠...',
'Asking the magic conch shell...': '正在询问魔法海螺壳...',
'Generating witty retort...': '正在生成机智的反驳...',
'Polishing the algorithms...': '正在打磨算法...',
"Don't rush perfection (or my code)...": '不要急于追求完美(或我的代码)...',
'Brewing fresh bytes...': '正在酿造新鲜字节...',
'Counting electrons...': '正在计算电子...',
'Engaging cognitive processors...': '正在启动认知处理器...',
'Checking for syntax errors in the universe...':
'正在检查宇宙中的语法错误...',
'One moment, optimizing humor...': '稍等片刻,正在优化幽默感...',
'Shuffling punchlines...': '正在洗牌笑点...',
'Untangling neural nets...': '正在解开神经网络...',
'Compiling brilliance...': '正在编译智慧...',
'Loading wit.exe...': '正在加载 wit.exe...',
'Summoning the cloud of wisdom...': '正在召唤智慧云...',
'Preparing a witty response...': '正在准备机智的回复...',
"Just a sec, I'm debugging reality...": '稍等片刻,我正在调试现实...',
'Confuzzling the options...': '正在混淆选项...',
'Tuning the cosmic frequencies...': '正在调谐宇宙频率...',
'Crafting a response worthy of your patience...':
'正在制作值得您耐心等待的回复...',
'Compiling the 1s and 0s...': '正在编译 1 和 0...',
'Resolving dependencies... and existential crises...':
'正在解决依赖关系...和存在主义危机...',
'Defragmenting memories... both RAM and personal...':
'正在整理记忆碎片...包括 RAM 和个人记忆...',
'Rebooting the humor module...': '正在重启幽默模块...',
'Caching the essentials (mostly cat memes)...':
'正在缓存必需品(主要是猫咪表情包)...',
'Optimizing for ludicrous speed': '正在优化到荒谬的速度',
"Swapping bits... don't tell the bytes...": '正在交换位...不要告诉字节...',
'Garbage collecting... be right back...': '正在垃圾回收...马上回来...',
'Assembling the interwebs...': '正在组装互联网...',
'Converting coffee into code...': '正在将咖啡转换为代码...',
'Updating the syntax for reality...': '正在更新现实的语法...',
'Rewiring the synapses...': '正在重新连接突触...',
'Looking for a misplaced semicolon...': '正在寻找放错位置的分号...',
"Greasin' the cogs of the machine...": '正在给机器的齿轮上油...',
'Pre-heating the servers...': '正在预热服务器...',
'Calibrating the flux capacitor...': '正在校准通量电容器...',
'Engaging the improbability drive...': '正在启动不可能性驱动器...',
'Channeling the Force...': '正在引导原力...',
'Aligning the stars for optimal response...': '正在对齐星星以获得最佳回复...',
'So say we all...': '我们都说...',
'Loading the next great idea...': '正在加载下一个伟大的想法...',
"Just a moment, I'm in the zone...": '稍等片刻,我正进入状态...',
'Preparing to dazzle you with brilliance...': '正在准备用智慧让您眼花缭乱...',
"Just a tick, I'm polishing my wit...": '稍等片刻,我正在打磨我的智慧...',
"Hold tight, I'm crafting a masterpiece...": '请稍等,我正在制作杰作...',
"Just a jiffy, I'm debugging the universe...": '稍等片刻,我正在调试宇宙...',
"Just a moment, I'm aligning the pixels...": '稍等片刻,我正在对齐像素...',
"Just a sec, I'm optimizing the humor...": '稍等片刻,我正在优化幽默感...',
"Just a moment, I'm tuning the algorithms...": '稍等片刻,我正在调整算法...',
'Warp speed engaged...': '曲速已启动...',
'Mining for more Dilithium crystals...': '正在挖掘更多二锂晶体...',
"Don't panic...": '不要惊慌...',
'Following the white rabbit...': '正在跟随白兔...',
'The truth is in here... somewhere...': '真相在这里...某个地方...',
'Blowing on the cartridge...': '正在吹卡带...',
'Loading... Do a barrel roll!': '正在加载...做个桶滚!',
'Waiting for the respawn...': '等待重生...',
'Finishing the Kessel Run in less than 12 parsecs...':
'正在以不到 12 秒差距完成凯塞尔航线...',
"The cake is not a lie, it's just still loading...":
'蛋糕不是谎言,只是还在加载...',
'Fiddling with the character creation screen...': '正在摆弄角色创建界面...',
"Just a moment, I'm finding the right meme...":
'稍等片刻,我正在寻找合适的表情包...',
"Pressing 'A' to continue...": "按 'A' 继续...",
'Herding digital cats...': '正在放牧数字猫...',
'Polishing the pixels...': '正在打磨像素...',
'Finding a suitable loading screen pun...': '正在寻找合适的加载屏幕双关语...',
'Distracting you with this witty phrase...':
'正在用这个机智的短语分散您的注意力...',
'Almost there... probably...': '快到了...可能...',
'Our hamsters are working as fast as they can...':
'我们的仓鼠正在尽可能快地工作...',
'Giving Cloudy a pat on the head...': '正在拍拍 Cloudy 的头...',
'Petting the cat...': '正在抚摸猫咪...',
'Rickrolling my boss...': '正在 Rickroll 我的老板...',
'Never gonna give you up, never gonna let you down...':
'永远不会放弃你,永远不会让你失望...',
'Slapping the bass...': '正在拍打低音...',
'Tasting the snozberries...': '正在品尝 snozberries...',
"I'm going the distance, I'm going for speed...":
'我要走得更远,我要追求速度...',
'Is this the real life? Is this just fantasy?...':
'这是真实的生活吗?还是只是幻想?...',
"I've got a good feeling about this...": '我对这个感觉很好...',
'Poking the bear...': '正在戳熊...',
'Doing research on the latest memes...': '正在研究最新的表情包...',
'Figuring out how to make this more witty...': '正在想办法让这更有趣...',
'Hmmm... let me think...': '嗯...让我想想...',
'What do you call a fish with no eyes? A fsh...':
'没有眼睛的鱼叫什么?一条鱼...',
'Why did the computer go to therapy? It had too many bytes...':
'为什么电脑去看心理医生?因为它有太多字节...',
"Why don't programmers like nature? It has too many bugs...":
'为什么程序员不喜欢大自然?因为虫子太多了...',
'Why do programmers prefer dark mode? Because light attracts bugs...':
'为什么程序员喜欢暗色模式?因为光会吸引虫子...',
'Why did the developer go broke? Because they used up all their cache...':
'为什么开发者破产了?因为他们用完了所有缓存...',
"What can you do with a broken pencil? Nothing, it's pointless...":
'你能用断了的铅笔做什么?什么都不能,因为它没有笔尖...',
'Applying percussive maintenance...': '正在应用敲击维护...',
'Searching for the correct USB orientation...': '正在寻找正确的 USB 方向...',
'Ensuring the magic smoke stays inside the wires...':
'确保魔法烟雾留在电线内...',
'Rewriting in Rust for no particular reason...':
'正在用 Rust 重写,没有特别的原因...',
'Trying to exit Vim...': '正在尝试退出 Vim...',
'Spinning up the hamster wheel...': '正在启动仓鼠轮...',
"That's not a bug, it's an undocumented feature...":
'这不是一个错误,这是一个未记录的功能...',
'Engage.': '启动。',
"I'll be back... with an answer.": '我会回来的...带着答案。',
'My other process is a TARDIS...': '我的另一个进程是 TARDIS...',
'Communing with the machine spirit...': '正在与机器精神交流...',
'Letting the thoughts marinate...': '让想法慢慢酝酿...',
'Just remembered where I put my keys...': '刚刚想起我把钥匙放在哪里了...',
'Pondering the orb...': '正在思考球体...',
"I've seen things you people wouldn't believe... like a user who reads loading messages.":
'我见过你们不会相信的事情...比如一个阅读加载消息的用户。',
'Initiating thoughtful gaze...': '正在启动深思凝视...',
"What's a computer's favorite snack? Microchips.":
'电脑最喜欢的零食是什么?微芯片。',
"Why do Java developers wear glasses? Because they don't C#.":
'为什么 Java 开发者戴眼镜?因为他们不会 C#。',
'Charging the laser... pew pew!': '正在给激光充电...砰砰!',
'Dividing by zero... just kidding!': '除以零...只是开玩笑!',
'Looking for an adult superviso... I mean, processing.':
'正在寻找成人监督...我是说,处理中。',
'Making it go beep boop.': '让它发出哔哔声。',
'Buffering... because even AIs need a moment.':
'正在缓冲...因为即使是 AI 也需要片刻。',
'Entangling quantum particles for a faster response...':
'正在纠缠量子粒子以获得更快的回复...',
'Polishing the chrome... on the algorithms.': '正在打磨铬...在算法上。',
'Are you not entertained? (Working on it!)': '你不觉得有趣吗?(正在努力!)',
'Summoning the code gremlins... to help, of course.':
'正在召唤代码小精灵...当然是来帮忙的。',
'Just waiting for the dial-up tone to finish...': '只是等待拨号音结束...',
'Recalibrating the humor-o-meter.': '正在重新校准幽默计。',
'My other loading screen is even funnier.': '我的另一个加载屏幕更有趣。',
"Pretty sure there's a cat walking on the keyboard somewhere...":
'很确定有只猫在某个地方键盘上走...',
'Enhancing... Enhancing... Still loading.':
'正在增强...正在增强...仍在加载。',
"It's not a bug, it's a feature... of this loading screen.":
'这不是一个错误,这是一个功能...这个加载屏幕的功能。',
'Have you tried turning it off and on again? (The loading screen, not me.)':
'你试过把它关掉再打开吗?(加载屏幕,不是我。)',
'Constructing additional pylons...': '正在建造额外的能量塔...',
};

View File

@@ -20,8 +20,7 @@ import type {
CLIControlSetModelRequest,
CLIMcpServerConfig,
} from '../../types.js';
import { CommandService } from '../../../services/CommandService.js';
import { BuiltinCommandLoader } from '../../../services/BuiltinCommandLoader.js';
import { getAvailableCommands } from '../../../nonInteractiveCliCommands.js';
import {
MCPServerConfig,
AuthProviderType,
@@ -407,7 +406,7 @@ export class SystemController extends BaseController {
}
/**
* Load slash command names using CommandService
* Load slash command names using getAvailableCommands
*
* @param signal - AbortSignal to respect for cancellation
* @returns Promise resolving to array of slash command names
@@ -418,21 +417,14 @@ export class SystemController extends BaseController {
}
try {
const service = await CommandService.create(
[new BuiltinCommandLoader(this.context.config)],
signal,
);
const commands = await getAvailableCommands(this.context.config, signal);
if (signal.aborted) {
return [];
}
const names = new Set<string>();
const commands = service.getCommands();
for (const command of commands) {
names.add(command.name);
}
return Array.from(names).sort();
// Extract command names and sort
return commands.map((cmd) => cmd.name).sort();
} catch (error) {
// Check if the error is due to abort
if (signal.aborted) {
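
The controller now delegates to the shared getAvailableCommands helper instead of constructing a CommandService with a BuiltinCommandLoader, so ACP and the non-interactive CLI draw command names from the same unified allow list. A sketch of the refactored lookup, assuming getAvailableCommands resolves to objects carrying at least a name field (as the code above implies; the import path is illustrative):

import type { Config } from '@qwen-code/qwen-code-core';
import { getAvailableCommands } from './nonInteractiveCliCommands.js';

async function loadSlashCommandNames(
  config: Config,
  signal: AbortSignal,
): Promise<string[]> {
  const commands = await getAvailableCommands(config, signal);
  if (signal.aborted) return [];
  // The old Set-based dedup is gone; this assumes the helper already
  // returns each command name once.
  return commands.map((cmd) => cmd.name).sort();
}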

View File

@@ -610,8 +610,6 @@ export abstract class BaseJsonOutputAdapter {
const errorText = parseAndFormatApiError(
event.value.error,
this.config.getContentGeneratorConfig()?.authType,
undefined,
this.config.getModel(),
);
this.appendText(state, errorText, null);
break;

View File

@@ -68,6 +68,7 @@ describe('runNonInteractive', () => {
let mockShutdownTelemetry: Mock;
let consoleErrorSpy: MockInstance;
let processStdoutSpy: MockInstance;
let processStderrSpy: MockInstance;
let mockGeminiClient: {
sendMessageStream: Mock;
getChatRecordingService: Mock;
@@ -86,6 +87,9 @@ describe('runNonInteractive', () => {
processStdoutSpy = vi
.spyOn(process.stdout, 'write')
.mockImplementation(() => true);
processStderrSpy = vi
.spyOn(process.stderr, 'write')
.mockImplementation(() => true);
vi.spyOn(process, 'exit').mockImplementation((code) => {
throw new Error(`process.exit(${code}) called`);
});
@@ -139,6 +143,8 @@ describe('runNonInteractive', () => {
setModel: vi.fn(async (model: string) => {
currentModel = model;
}),
getExperimentalZedIntegration: vi.fn().mockReturnValue(false),
isInteractive: vi.fn().mockReturnValue(false),
} as unknown as Config;
mockSettings = {
@@ -852,7 +858,7 @@ describe('runNonInteractive', () => {
expect(processStdoutSpy).toHaveBeenCalledWith('Response from command');
});
it('should throw FatalInputError if a command requires confirmation', async () => {
it('should handle command that requires confirmation by returning early', async () => {
const mockCommand = {
name: 'confirm',
description: 'a command that needs confirmation',
@@ -864,15 +870,16 @@ describe('runNonInteractive', () => {
};
mockGetCommands.mockReturnValue([mockCommand]);
await expect(
runNonInteractive(
mockConfig,
mockSettings,
'/confirm',
'prompt-id-confirm',
),
).rejects.toThrow(
'Exiting due to a confirmation prompt requested by the command.',
await runNonInteractive(
mockConfig,
mockSettings,
'/confirm',
'prompt-id-confirm',
);
// Should write error message to stderr
expect(processStderrSpy).toHaveBeenCalledWith(
'Shell command confirmation is not supported in non-interactive mode. Use YOLO mode or pre-approve commands.\n',
);
});
@@ -909,7 +916,30 @@ describe('runNonInteractive', () => {
expect(processStdoutSpy).toHaveBeenCalledWith('Response to unknown');
});
it('should throw for unhandled command result types', async () => {
it('should handle known but unsupported slash commands like /help by returning early', async () => {
// Mock a built-in command that exists but is not in the allowed list
const mockHelpCommand = {
name: 'help',
description: 'Show help',
kind: CommandKind.BUILT_IN,
action: vi.fn(),
};
mockGetCommands.mockReturnValue([mockHelpCommand]);
await runNonInteractive(
mockConfig,
mockSettings,
'/help',
'prompt-id-help',
);
// Should write error message to stderr
expect(processStderrSpy).toHaveBeenCalledWith(
'The command "/help" is not supported in non-interactive mode.\n',
);
});
it('should handle unhandled command result types by returning early with error', async () => {
const mockCommand = {
name: 'noaction',
description: 'unhandled type',
@@ -920,15 +950,16 @@ describe('runNonInteractive', () => {
};
mockGetCommands.mockReturnValue([mockCommand]);
await expect(
runNonInteractive(
mockConfig,
mockSettings,
'/noaction',
'prompt-id-unhandled',
),
).rejects.toThrow(
'Exiting due to command result that is not supported in non-interactive mode.',
await runNonInteractive(
mockConfig,
mockSettings,
'/noaction',
'prompt-id-unhandled',
);
// Should write error message to stderr
expect(processStderrSpy).toHaveBeenCalledWith(
'Unknown command result type: unhandled\n',
);
});

View File

@@ -42,6 +42,55 @@ import {
computeUsageFromMetrics,
} from './utils/nonInteractiveHelpers.js';
/**
* Emits a final message for slash command results.
* Note: systemMessage should already be emitted before calling this function.
*/
async function emitNonInteractiveFinalMessage(params: {
message: string;
isError: boolean;
adapter?: JsonOutputAdapterInterface;
config: Config;
startTimeMs: number;
}): Promise<void> {
const { message, isError, adapter, config } = params;
if (!adapter) {
// Text output mode: write directly to stdout/stderr
const target = isError ? process.stderr : process.stdout;
target.write(`${message}\n`);
return;
}
// JSON output mode: emit assistant message and result
// (systemMessage should already be emitted by caller)
adapter.startAssistantMessage();
adapter.processEvent({
type: GeminiEventType.Content,
value: message,
} as unknown as Parameters<JsonOutputAdapterInterface['processEvent']>[0]);
adapter.finalizeAssistantMessage();
const metrics = uiTelemetryService.getMetrics();
const usage = computeUsageFromMetrics(metrics);
const outputFormat = config.getOutputFormat();
const stats =
outputFormat === OutputFormat.JSON
? uiTelemetryService.getMetrics()
: undefined;
adapter.emitResult({
isError,
durationMs: Date.now() - params.startTimeMs,
apiDurationMs: 0,
numTurns: 0,
errorMessage: isError ? message : undefined,
usage,
stats,
summary: message,
});
}
/**
* Provides optional overrides for `runNonInteractive` execution.
*
@@ -115,6 +164,16 @@ export async function runNonInteractive(
process.on('SIGINT', shutdownHandler);
process.on('SIGTERM', shutdownHandler);
// Emit systemMessage first (always the first message in JSON mode)
if (adapter) {
const systemMessage = await buildSystemMessage(
config,
sessionId,
permissionMode,
);
adapter.emitMessage(systemMessage);
}
let initialPartList: PartListUnion | null = extractPartsFromUserMessage(
options.userMessage,
);
@@ -128,10 +187,45 @@ export async function runNonInteractive(
config,
settings,
);
if (slashCommandResult) {
// A slash command can replace the prompt entirely; fall back to @-command processing otherwise.
initialPartList = slashCommandResult as PartListUnion;
slashHandled = true;
switch (slashCommandResult.type) {
case 'submit_prompt':
// A slash command can replace the prompt entirely; fall back to @-command processing otherwise.
initialPartList = slashCommandResult.content;
slashHandled = true;
break;
case 'message': {
// systemMessage already emitted above
await emitNonInteractiveFinalMessage({
message: slashCommandResult.content,
isError: slashCommandResult.messageType === 'error',
adapter,
config,
startTimeMs: startTime,
});
return;
}
case 'stream_messages':
throw new FatalInputError(
'Stream messages mode is not supported in non-interactive CLI',
);
case 'unsupported': {
await emitNonInteractiveFinalMessage({
message: slashCommandResult.reason,
isError: true,
adapter,
config,
startTimeMs: startTime,
});
return;
}
case 'no_command':
break;
default: {
const _exhaustive: never = slashCommandResult;
throw new FatalInputError(
`Unhandled slash command result type: ${(_exhaustive as { type: string }).type}`,
);
}
}
}
@@ -163,15 +257,6 @@ export async function runNonInteractive(
const initialParts = normalizePartList(initialPartList);
let currentMessages: Content[] = [{ role: 'user', parts: initialParts }];
if (adapter) {
const systemMessage = await buildSystemMessage(
config,
sessionId,
permissionMode,
);
adapter.emitMessage(systemMessage);
}
let isFirstTurn = true;
while (true) {
turnCount++;
@@ -221,8 +306,6 @@ export async function runNonInteractive(
const errorText = parseAndFormatApiError(
event.value.error,
config.getContentGeneratorConfig()?.authType,
undefined,
config.getModel(),
);
process.stderr.write(`${errorText}\n`);
}
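
Taken together, the switch above implies that handleSlashCommand now returns a discriminated union instead of a bare PartListUnion, with exhaustiveness enforced by the never-typed default arm. A reconstruction of that shape from the cases shown (the union name and the import source are assumptions; 'info' and 'error' are the messageType values the code and tests exercise):

import type { PartListUnion } from '@google/genai'; // assumed source

type SlashCommandOutcome =
  | { type: 'submit_prompt'; content: PartListUnion } // replaces the prompt
  | { type: 'message'; messageType: 'info' | 'error'; content: string }
  | { type: 'stream_messages' } // rejected outright in this CLI
  | { type: 'unsupported'; reason: string } // e.g. "/help" here
  | { type: 'no_command' }; // fall through to @-command processing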

View File

@@ -0,0 +1,242 @@
/**
* @license
* Copyright 2025 Qwen Team
* SPDX-License-Identifier: Apache-2.0
*/
import { describe, it, expect, vi, beforeEach } from 'vitest';
import { handleSlashCommand } from './nonInteractiveCliCommands.js';
import type { Config } from '@qwen-code/qwen-code-core';
import type { LoadedSettings } from './config/settings.js';
import { CommandKind } from './ui/commands/types.js';
// Mock the CommandService
const mockGetCommands = vi.hoisted(() => vi.fn());
const mockCommandServiceCreate = vi.hoisted(() => vi.fn());
vi.mock('./services/CommandService.js', () => ({
CommandService: {
create: mockCommandServiceCreate,
},
}));
describe('handleSlashCommand', () => {
let mockConfig: Config;
let mockSettings: LoadedSettings;
let abortController: AbortController;
beforeEach(() => {
mockCommandServiceCreate.mockResolvedValue({
getCommands: mockGetCommands,
});
mockConfig = {
getExperimentalZedIntegration: vi.fn().mockReturnValue(false),
isInteractive: vi.fn().mockReturnValue(false),
getSessionId: vi.fn().mockReturnValue('test-session'),
getFolderTrustFeature: vi.fn().mockReturnValue(false),
getFolderTrust: vi.fn().mockReturnValue(false),
getProjectRoot: vi.fn().mockReturnValue('/test/project'),
storage: {},
} as unknown as Config;
mockSettings = {
system: { path: '', settings: {} },
systemDefaults: { path: '', settings: {} },
user: { path: '', settings: {} },
workspace: { path: '', settings: {} },
} as LoadedSettings;
abortController = new AbortController();
});
it('should return no_command for non-slash input', async () => {
const result = await handleSlashCommand(
'regular text',
abortController,
mockConfig,
mockSettings,
);
expect(result.type).toBe('no_command');
});
it('should return no_command for unknown slash commands', async () => {
mockGetCommands.mockReturnValue([]);
const result = await handleSlashCommand(
'/unknowncommand',
abortController,
mockConfig,
mockSettings,
);
expect(result.type).toBe('no_command');
});
it('should return unsupported for known built-in commands not in allowed list', async () => {
const mockHelpCommand = {
name: 'help',
description: 'Show help',
kind: CommandKind.BUILT_IN,
action: vi.fn(),
};
mockGetCommands.mockReturnValue([mockHelpCommand]);
const result = await handleSlashCommand(
'/help',
abortController,
mockConfig,
mockSettings,
[], // Empty allowed list
);
expect(result.type).toBe('unsupported');
if (result.type === 'unsupported') {
expect(result.reason).toContain('/help');
expect(result.reason).toContain('not supported');
}
});
it('should return unsupported for /help when using default allowed list', async () => {
const mockHelpCommand = {
name: 'help',
description: 'Show help',
kind: CommandKind.BUILT_IN,
action: vi.fn(),
};
mockGetCommands.mockReturnValue([mockHelpCommand]);
const result = await handleSlashCommand(
'/help',
abortController,
mockConfig,
mockSettings,
// Default allowed list: ['init', 'summary', 'compress']
);
expect(result.type).toBe('unsupported');
if (result.type === 'unsupported') {
expect(result.reason).toBe(
'The command "/help" is not supported in non-interactive mode.',
);
}
});
it('should execute allowed built-in commands', async () => {
const mockInitCommand = {
name: 'init',
description: 'Initialize project',
kind: CommandKind.BUILT_IN,
action: vi.fn().mockResolvedValue({
type: 'message',
messageType: 'info',
content: 'Project initialized',
}),
};
mockGetCommands.mockReturnValue([mockInitCommand]);
const result = await handleSlashCommand(
'/init',
abortController,
mockConfig,
mockSettings,
['init'], // init is in the allowed list
);
expect(result.type).toBe('message');
if (result.type === 'message') {
expect(result.content).toBe('Project initialized');
}
});
it('should execute file commands regardless of allowed list', async () => {
const mockFileCommand = {
name: 'custom',
description: 'Custom file command',
kind: CommandKind.FILE,
action: vi.fn().mockResolvedValue({
type: 'submit_prompt',
content: [{ text: 'Custom prompt' }],
}),
};
mockGetCommands.mockReturnValue([mockFileCommand]);
const result = await handleSlashCommand(
'/custom',
abortController,
mockConfig,
mockSettings,
[], // Empty allowed list, but FILE commands should still work
);
expect(result.type).toBe('submit_prompt');
if (result.type === 'submit_prompt') {
expect(result.content).toEqual([{ text: 'Custom prompt' }]);
}
});
it('should return unsupported for other built-in commands like /quit', async () => {
const mockQuitCommand = {
name: 'quit',
description: 'Quit application',
kind: CommandKind.BUILT_IN,
action: vi.fn(),
};
mockGetCommands.mockReturnValue([mockQuitCommand]);
const result = await handleSlashCommand(
'/quit',
abortController,
mockConfig,
mockSettings,
);
expect(result.type).toBe('unsupported');
if (result.type === 'unsupported') {
expect(result.reason).toContain('/quit');
expect(result.reason).toContain('not supported');
}
});
it('should handle command with no action', async () => {
const mockCommand = {
name: 'noaction',
description: 'Command without action',
kind: CommandKind.FILE,
// No action property
};
mockGetCommands.mockReturnValue([mockCommand]);
const result = await handleSlashCommand(
'/noaction',
abortController,
mockConfig,
mockSettings,
);
expect(result.type).toBe('no_command');
});
it('should return message when command returns void', async () => {
const mockCommand = {
name: 'voidcmd',
description: 'Command that returns void',
kind: CommandKind.FILE,
action: vi.fn().mockResolvedValue(undefined),
};
mockGetCommands.mockReturnValue([mockCommand]);
const result = await handleSlashCommand(
'/voidcmd',
abortController,
mockConfig,
mockSettings,
);
expect(result.type).toBe('message');
if (result.type === 'message') {
expect(result.content).toBe('Command executed successfully.');
expect(result.messageType).toBe('info');
}
});
});

View File

@@ -7,7 +7,6 @@
import type { PartListUnion } from '@google/genai';
import { parseSlashCommand } from './utils/commands.js';
import {
FatalInputError,
Logger,
uiTelemetryService,
type Config,
@@ -19,10 +18,164 @@ import {
CommandKind,
type CommandContext,
type SlashCommand,
type SlashCommandActionReturn,
} from './ui/commands/types.js';
import { createNonInteractiveUI } from './ui/noninteractive/nonInteractiveUi.js';
import type { LoadedSettings } from './config/settings.js';
import type { SessionStatsState } from './ui/contexts/SessionContext.js';
import { t } from './i18n/index.js';
/**
* Built-in commands that are allowed in non-interactive modes (CLI and ACP).
 * Only commands that are safe to run without an interactive UI belong here.
*
* These commands are:
* - init: Initialize project configuration
* - summary: Generate session summary
* - compress: Compress conversation history
*/
export const ALLOWED_BUILTIN_COMMANDS_NON_INTERACTIVE = [
'init',
'summary',
'compress',
] as const;
/**
* Result of handling a slash command in non-interactive mode.
*
* Supported types:
* - 'submit_prompt': Submits content to the model (supports all modes)
* - 'message': Returns a single message (supports non-interactive JSON/text only)
* - 'stream_messages': Streams multiple messages (supports ACP only)
* - 'unsupported': Command cannot be executed in this mode
* - 'no_command': No command was found or executed
*/
export type NonInteractiveSlashCommandResult =
| {
type: 'submit_prompt';
content: PartListUnion;
}
| {
type: 'message';
messageType: 'info' | 'error';
content: string;
}
| {
type: 'stream_messages';
messages: AsyncGenerator<
{ messageType: 'info' | 'error'; content: string },
void,
unknown
>;
}
| {
type: 'unsupported';
reason: string;
originalType: string;
}
| {
type: 'no_command';
};
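// A minimal narrowing sketch, assuming only the union defined above: the
// discriminated `type` field lets TypeScript narrow each branch statically.
function describeResult(result: NonInteractiveSlashCommandResult): string {
  switch (result.type) {
    case 'submit_prompt':
      return 'prompt content will be sent to the model';
    case 'message':
      return `${result.messageType}: ${result.content}`;
    case 'stream_messages':
      return 'progress messages will be streamed (ACP)';
    case 'unsupported':
      return `unsupported (${result.originalType}): ${result.reason}`;
    case 'no_command':
      return 'no slash command matched';
  }
}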
/**
* Converts a SlashCommandActionReturn to a NonInteractiveSlashCommandResult.
*
* Only the following result types are supported in non-interactive mode:
* - submit_prompt: Submits content to the model (all modes)
* - message: Returns a single message (non-interactive JSON/text only)
* - stream_messages: Streams multiple messages (ACP only)
*
* All other result types are converted to 'unsupported'.
*
* @param result The result from executing a slash command action
* @returns A NonInteractiveSlashCommandResult describing the outcome
*/
function handleCommandResult(
result: SlashCommandActionReturn,
): NonInteractiveSlashCommandResult {
switch (result.type) {
case 'submit_prompt':
return {
type: 'submit_prompt',
content: result.content,
};
case 'message':
return {
type: 'message',
messageType: result.messageType,
content: result.content,
};
case 'stream_messages':
return {
type: 'stream_messages',
messages: result.messages,
};
/**
   * Currently, the return types below are never generated because of the
   * allow list of slash commands permitted in ACP and non-interactive mode.
   * More supported return types may be added in the future.
*/
case 'tool':
return {
type: 'unsupported',
reason:
'Tool execution from slash commands is not supported in non-interactive mode.',
originalType: 'tool',
};
case 'quit':
return {
type: 'unsupported',
reason:
'Quit command is not supported in non-interactive mode. The process will exit naturally after completion.',
originalType: 'quit',
};
case 'dialog':
return {
type: 'unsupported',
reason: `Dialog '${result.dialog}' cannot be opened in non-interactive mode.`,
originalType: 'dialog',
};
case 'load_history':
return {
type: 'unsupported',
reason:
'Loading history is not supported in non-interactive mode. Each invocation starts with a fresh context.',
originalType: 'load_history',
};
case 'confirm_shell_commands':
return {
type: 'unsupported',
reason:
'Shell command confirmation is not supported in non-interactive mode. Use YOLO mode or pre-approve commands.',
originalType: 'confirm_shell_commands',
};
case 'confirm_action':
return {
type: 'unsupported',
reason:
'Action confirmation is not supported in non-interactive mode. Commands requiring confirmation cannot be executed.',
originalType: 'confirm_action',
};
default: {
// Exhaustiveness check
const _exhaustive: never = result;
return {
type: 'unsupported',
reason: `Unknown command result type: ${(_exhaustive as SlashCommandActionReturn).type}`,
originalType: 'unknown',
};
}
}
}
/**
* Filters commands based on the allowed built-in command names.
@@ -62,122 +215,146 @@ function filterCommandsForNonInteractive(
* @param config The configuration object
* @param settings The loaded settings
* @param allowedBuiltinCommandNames Optional array of built-in command names that are
* allowed. If not provided or empty, only file commands are available.
* @returns A Promise that resolves to `PartListUnion` if a valid command is
* found and results in a prompt, or `undefined` otherwise.
* @throws {FatalInputError} if the command result is not supported in
* non-interactive mode.
* allowed. Defaults to ALLOWED_BUILTIN_COMMANDS_NON_INTERACTIVE (init, summary, compress).
* Pass an empty array to only allow file commands.
* @returns A Promise that resolves to a `NonInteractiveSlashCommandResult` describing
* the outcome of the command execution.
*/
export const handleSlashCommand = async (
rawQuery: string,
abortController: AbortController,
config: Config,
settings: LoadedSettings,
allowedBuiltinCommandNames?: string[],
): Promise<PartListUnion | undefined> => {
allowedBuiltinCommandNames: string[] = [
...ALLOWED_BUILTIN_COMMANDS_NON_INTERACTIVE,
],
): Promise<NonInteractiveSlashCommandResult> => {
const trimmed = rawQuery.trim();
if (!trimmed.startsWith('/')) {
return;
return { type: 'no_command' };
}
const isAcpMode = config.getExperimentalZedIntegration();
const isInteractive = config.isInteractive();
const executionMode = isAcpMode
? 'acp'
: isInteractive
? 'interactive'
: 'non_interactive';
const allowedBuiltinSet = new Set(allowedBuiltinCommandNames ?? []);
// Only load BuiltinCommandLoader if there are allowed built-in commands
const loaders =
allowedBuiltinSet.size > 0
? [new BuiltinCommandLoader(config), new FileCommandLoader(config)]
: [new FileCommandLoader(config)];
// Load all commands to check if the command exists but is not allowed
const allLoaders = [
new BuiltinCommandLoader(config),
new FileCommandLoader(config),
];
const commandService = await CommandService.create(
loaders,
allLoaders,
abortController.signal,
);
const commands = commandService.getCommands();
const allCommands = commandService.getCommands();
const filteredCommands = filterCommandsForNonInteractive(
commands,
allCommands,
allowedBuiltinSet,
);
// First, try to parse with filtered commands
const { commandToExecute, args } = parseSlashCommand(
rawQuery,
filteredCommands,
);
if (commandToExecute) {
if (commandToExecute.action) {
// Not used by custom commands but may be in the future.
const sessionStats: SessionStatsState = {
sessionId: config?.getSessionId(),
sessionStartTime: new Date(),
metrics: uiTelemetryService.getMetrics(),
lastPromptTokenCount: 0,
promptCount: 1,
if (!commandToExecute) {
// Check if this is a known command that's just not allowed
const { commandToExecute: knownCommand } = parseSlashCommand(
rawQuery,
allCommands,
);
if (knownCommand) {
// Command exists but is not allowed in non-interactive mode
return {
type: 'unsupported',
reason: t(
'The command "/{{command}}" is not supported in non-interactive mode.',
{ command: knownCommand.name },
),
originalType: 'filtered_command',
};
const logger = new Logger(config?.getSessionId() || '', config?.storage);
const context: CommandContext = {
services: {
config,
settings,
git: undefined,
logger,
},
ui: createNonInteractiveUI(),
session: {
stats: sessionStats,
sessionShellAllowlist: new Set(),
},
invocation: {
raw: trimmed,
name: commandToExecute.name,
args,
},
};
const result = await commandToExecute.action(context, args);
if (result) {
switch (result.type) {
case 'submit_prompt':
return result.content;
case 'confirm_shell_commands':
// This result indicates a command attempted to confirm shell commands.
// However, note that ShellTool is currently excluded in non-interactive
// mode unless YOLO mode is active, and YOLO mode skips confirmation,
// so this confirmation will not actually occur today.
// This ensures that if a command *does* request confirmation (e.g.
// in the future with more granular permissions), it's handled appropriately.
throw new FatalInputError(
'Exiting due to a confirmation prompt requested by the command.',
);
default:
throw new FatalInputError(
'Exiting due to command result that is not supported in non-interactive mode.',
);
}
}
}
return { type: 'no_command' };
}
return;
if (!commandToExecute.action) {
return { type: 'no_command' };
}
// Not used by custom commands but may be in the future.
const sessionStats: SessionStatsState = {
sessionId: config?.getSessionId(),
sessionStartTime: new Date(),
metrics: uiTelemetryService.getMetrics(),
lastPromptTokenCount: 0,
promptCount: 1,
};
const logger = new Logger(config?.getSessionId() || '', config?.storage);
const context: CommandContext = {
executionMode,
services: {
config,
settings,
git: undefined,
logger,
},
ui: createNonInteractiveUI(),
session: {
stats: sessionStats,
sessionShellAllowlist: new Set(),
},
invocation: {
raw: trimmed,
name: commandToExecute.name,
args,
},
};
const result = await commandToExecute.action(context, args);
if (!result) {
// Command executed but returned no result (e.g., void return)
return {
type: 'message',
messageType: 'info',
content: 'Command executed successfully.',
};
}
// Handle different result types
return handleCommandResult(result);
};
/**
* Retrieves all available slash commands for the current configuration.
*
* @param config The configuration object
* @param settings The loaded settings
* @param abortSignal Signal to cancel the loading process
* @param allowedBuiltinCommandNames Optional array of built-in command names that are
* allowed. If not provided or empty, only file commands are available.
* allowed. Defaults to ALLOWED_BUILTIN_COMMANDS_NON_INTERACTIVE (init, summary, compress).
* Pass an empty array to only include file commands.
* @returns A Promise that resolves to an array of SlashCommand objects
*/
export const getAvailableCommands = async (
config: Config,
settings: LoadedSettings,
abortSignal: AbortSignal,
allowedBuiltinCommandNames?: string[],
allowedBuiltinCommandNames: string[] = [
...ALLOWED_BUILTIN_COMMANDS_NON_INTERACTIVE,
],
): Promise<SlashCommand[]> => {
try {
const allowedBuiltinSet = new Set(allowedBuiltinCommandNames ?? []);

View File

@@ -28,7 +28,7 @@ const mockPrompt = {
{ name: 'trail', required: false, description: "The animal's trail." },
],
invoke: vi.fn().mockResolvedValue({
messages: [{ content: { text: 'Hello, world!' } }],
messages: [{ content: { type: 'text', text: 'Hello, world!' } }],
}),
};

View File

@@ -123,7 +123,10 @@ export class McpPromptLoader implements ICommandLoader {
};
}
if (!result.messages?.[0]?.content?.['text']) {
const firstMessage = result.messages?.[0];
const content = firstMessage?.content;
if (content?.type !== 'text') {
return {
type: 'message',
messageType: 'error',
@@ -134,7 +137,7 @@ export class McpPromptLoader implements ICommandLoader {
return {
type: 'submit_prompt',
content: JSON.stringify(result.messages[0].content.text),
content: JSON.stringify(content.text),
};
} catch (error) {
return {

View File

@@ -23,7 +23,6 @@ import {
} from '@qwen-code/qwen-code-core';
import type { LoadedSettings } from '../config/settings.js';
import type { InitializationResult } from '../core/initializer.js';
import { useQuotaAndFallback } from './hooks/useQuotaAndFallback.js';
import { UIStateContext, type UIState } from './contexts/UIStateContext.js';
import {
UIActionsContext,
@@ -56,7 +55,6 @@ vi.mock('./App.js', () => ({
App: TestContextConsumer,
}));
vi.mock('./hooks/useQuotaAndFallback.js');
vi.mock('./hooks/useHistoryManager.js');
vi.mock('./hooks/useThemeCommand.js');
vi.mock('./auth/useAuth.js');
@@ -122,7 +120,6 @@ describe('AppContainer State Management', () => {
let mockInitResult: InitializationResult;
// Create typed mocks for all hooks
const mockedUseQuotaAndFallback = useQuotaAndFallback as Mock;
const mockedUseHistory = useHistory as Mock;
const mockedUseThemeCommand = useThemeCommand as Mock;
const mockedUseAuthCommand = useAuthCommand as Mock;
@@ -164,10 +161,6 @@ describe('AppContainer State Management', () => {
capturedUIActions = null!;
// **Provide a default return value for EVERY mocked hook.**
mockedUseQuotaAndFallback.mockReturnValue({
proQuotaRequest: null,
handleProQuotaChoice: vi.fn(),
});
mockedUseHistory.mockReturnValue({
history: [],
addItem: vi.fn(),
@@ -567,75 +560,6 @@ describe('AppContainer State Management', () => {
});
});
describe('Quota and Fallback Integration', () => {
it('passes a null proQuotaRequest to UIStateContext by default', () => {
// The default mock from beforeEach already sets proQuotaRequest to null
render(
<AppContainer
config={mockConfig}
settings={mockSettings}
version="1.0.0"
initializationResult={mockInitResult}
/>,
);
// Assert that the context value is as expected
expect(capturedUIState.proQuotaRequest).toBeNull();
});
it('passes a valid proQuotaRequest to UIStateContext when provided by the hook', () => {
// Arrange: Create a mock request object that a UI dialog would receive
const mockRequest = {
failedModel: 'gemini-pro',
fallbackModel: 'gemini-flash',
resolve: vi.fn(),
};
mockedUseQuotaAndFallback.mockReturnValue({
proQuotaRequest: mockRequest,
handleProQuotaChoice: vi.fn(),
});
// Act: Render the container
render(
<AppContainer
config={mockConfig}
settings={mockSettings}
version="1.0.0"
initializationResult={mockInitResult}
/>,
);
// Assert: The mock request is correctly passed through the context
expect(capturedUIState.proQuotaRequest).toEqual(mockRequest);
});
it('passes the handleProQuotaChoice function to UIActionsContext', () => {
// Arrange: Create a mock handler function
const mockHandler = vi.fn();
mockedUseQuotaAndFallback.mockReturnValue({
proQuotaRequest: null,
handleProQuotaChoice: mockHandler,
});
// Act: Render the container
render(
<AppContainer
config={mockConfig}
settings={mockSettings}
version="1.0.0"
initializationResult={mockInitResult}
/>,
);
// Assert: The action in the context is the mock handler we provided
expect(capturedUIActions.handleProQuotaChoice).toBe(mockHandler);
// You can even verify that the plumbed function is callable
capturedUIActions.handleProQuotaChoice('auth');
expect(mockHandler).toHaveBeenCalledWith('auth');
});
});
describe('Terminal Title Update Feature', () => {
beforeEach(() => {
// Reset mock stdout for each test

View File

@@ -32,7 +32,6 @@ import {
type Config,
type IdeInfo,
type IdeContext,
type UserTierId,
DEFAULT_GEMINI_FLASH_MODEL,
IdeClient,
ideContextStore,
@@ -48,7 +47,6 @@ import { useHistory } from './hooks/useHistoryManager.js';
import { useMemoryMonitor } from './hooks/useMemoryMonitor.js';
import { useThemeCommand } from './hooks/useThemeCommand.js';
import { useAuthCommand } from './auth/useAuth.js';
import { useQuotaAndFallback } from './hooks/useQuotaAndFallback.js';
import { useEditorSettings } from './hooks/useEditorSettings.js';
import { useSettingsCommand } from './hooks/useSettingsCommand.js';
import { useModelCommand } from './hooks/useModelCommand.js';
@@ -192,8 +190,6 @@ export const AppContainer = (props: AppContainerProps) => {
const [currentModel, setCurrentModel] = useState(getEffectiveModel());
const [userTier] = useState<UserTierId | undefined>(undefined);
const [isConfigInitialized, setConfigInitialized] = useState(false);
const [userMessages, setUserMessages] = useState<string[]>([]);
@@ -367,14 +363,6 @@ export const AppContainer = (props: AppContainerProps) => {
cancelAuthentication,
} = useAuthCommand(settings, config, historyManager.addItem);
const { proQuotaRequest, handleProQuotaChoice } = useQuotaAndFallback({
config,
historyManager,
userTier,
setAuthState,
setModelSwitchedFromQuotaError,
});
useInitializationAuthError(initializationResult.authError, onAuthError);
// Sync user tier from config when authentication changes
@@ -752,8 +740,7 @@ export const AppContainer = (props: AppContainerProps) => {
!initError &&
!isProcessing &&
(streamingState === StreamingState.Idle ||
streamingState === StreamingState.Responding) &&
!proQuotaRequest;
streamingState === StreamingState.Responding);
const [controlsHeight, setControlsHeight] = useState(0);
@@ -1206,7 +1193,6 @@ export const AppContainer = (props: AppContainerProps) => {
isAuthenticating ||
isEditorDialogOpen ||
showIdeRestartPrompt ||
!!proQuotaRequest ||
isSubagentCreateDialogOpen ||
isAgentsManagerDialogOpen ||
isApprovalModeDialogOpen ||
@@ -1277,8 +1263,6 @@ export const AppContainer = (props: AppContainerProps) => {
showWorkspaceMigrationDialog,
workspaceExtensions,
currentModel,
userTier,
proQuotaRequest,
contextFileNames,
errorCount,
availableTerminalHeight,
@@ -1367,8 +1351,6 @@ export const AppContainer = (props: AppContainerProps) => {
showAutoAcceptIndicator,
showWorkspaceMigrationDialog,
workspaceExtensions,
userTier,
proQuotaRequest,
contextFileNames,
errorCount,
availableTerminalHeight,
@@ -1430,7 +1412,6 @@ export const AppContainer = (props: AppContainerProps) => {
handleClearScreen,
onWorkspaceMigrationDialogOpen,
onWorkspaceMigrationDialogClose,
handleProQuotaChoice,
// Vision switch dialog
handleVisionSwitchSelect,
// Welcome back dialog
@@ -1468,7 +1449,6 @@ export const AppContainer = (props: AppContainerProps) => {
handleClearScreen,
onWorkspaceMigrationDialogOpen,
onWorkspaceMigrationDialogClose,
handleProQuotaChoice,
handleVisionSwitchSelect,
handleWelcomeBackSelection,
handleWelcomeBackClose,

View File

@@ -168,7 +168,7 @@ describe('AuthDialog', () => {
it('should not show the GEMINI_API_KEY message if QWEN_DEFAULT_AUTH_TYPE is set to something else', () => {
process.env['GEMINI_API_KEY'] = 'foobar';
process.env['QWEN_DEFAULT_AUTH_TYPE'] = AuthType.LOGIN_WITH_GOOGLE;
process.env['QWEN_DEFAULT_AUTH_TYPE'] = AuthType.USE_OPENAI;
const settings: LoadedSettings = new LoadedSettings(
{
@@ -212,7 +212,7 @@ describe('AuthDialog', () => {
it('should show the GEMINI_API_KEY message if QWEN_DEFAULT_AUTH_TYPE is set to use api key', () => {
process.env['GEMINI_API_KEY'] = 'foobar';
process.env['QWEN_DEFAULT_AUTH_TYPE'] = AuthType.USE_GEMINI;
process.env['QWEN_DEFAULT_AUTH_TYPE'] = AuthType.USE_OPENAI;
const settings: LoadedSettings = new LoadedSettings(
{
@@ -504,12 +504,12 @@ describe('AuthDialog', () => {
},
{
settings: {
security: { auth: { selectedType: AuthType.LOGIN_WITH_GOOGLE } },
security: { auth: { selectedType: AuthType.USE_OPENAI } },
ui: { customThemes: {} },
mcpServers: {},
},
originalSettings: {
security: { auth: { selectedType: AuthType.LOGIN_WITH_GOOGLE } },
security: { auth: { selectedType: AuthType.USE_OPENAI } },
ui: { customThemes: {} },
mcpServers: {},
},

View File

@@ -225,16 +225,26 @@ export const useAuthCommand = (
const defaultAuthType = process.env['QWEN_DEFAULT_AUTH_TYPE'];
if (
defaultAuthType &&
![AuthType.QWEN_OAUTH, AuthType.USE_OPENAI].includes(
defaultAuthType as AuthType,
)
![
AuthType.QWEN_OAUTH,
AuthType.USE_OPENAI,
AuthType.USE_ANTHROPIC,
AuthType.USE_GEMINI,
AuthType.USE_VERTEX_AI,
].includes(defaultAuthType as AuthType)
) {
onAuthError(
t(
'Invalid QWEN_DEFAULT_AUTH_TYPE value: "{{value}}". Valid values are: {{validValues}}',
{
value: defaultAuthType,
validValues: [AuthType.QWEN_OAUTH, AuthType.USE_OPENAI].join(', '),
validValues: [
AuthType.QWEN_OAUTH,
AuthType.USE_OPENAI,
AuthType.USE_ANTHROPIC,
AuthType.USE_GEMINI,
AuthType.USE_VERTEX_AI,
].join(', '),
},
),
);

View File

@@ -19,7 +19,9 @@ export const compressCommand: SlashCommand = {
kind: CommandKind.BUILT_IN,
action: async (context) => {
const { ui } = context;
if (ui.pendingItem) {
const executionMode = context.executionMode ?? 'interactive';
if (executionMode === 'interactive' && ui.pendingItem) {
ui.addItem(
{
type: MessageType.ERROR,
@@ -40,13 +42,80 @@ export const compressCommand: SlashCommand = {
},
};
try {
ui.setPendingItem(pendingMessage);
const config = context.services.config;
const geminiClient = config?.getGeminiClient();
if (!config || !geminiClient) {
return {
type: 'message',
messageType: 'error',
content: t('Config not loaded.'),
};
}
const doCompress = async () => {
const promptId = `compress-${Date.now()}`;
const compressed = await context.services.config
?.getGeminiClient()
?.tryCompressChat(promptId, true);
if (compressed) {
return await geminiClient.tryCompressChat(promptId, true);
};
if (executionMode === 'acp') {
const messages = async function* () {
try {
yield {
messageType: 'info' as const,
content: 'Compressing context...',
};
const compressed = await doCompress();
if (!compressed) {
yield {
messageType: 'error' as const,
content: t('Failed to compress chat history.'),
};
return;
}
yield {
messageType: 'info' as const,
content: `Context compressed (${compressed.originalTokenCount} -> ${compressed.newTokenCount}).`,
};
} catch (e) {
yield {
messageType: 'error' as const,
content: t('Failed to compress chat history: {{error}}', {
error: e instanceof Error ? e.message : String(e),
}),
};
}
};
return { type: 'stream_messages', messages: messages() };
}
try {
if (executionMode === 'interactive') {
ui.setPendingItem(pendingMessage);
}
const compressed = await doCompress();
if (!compressed) {
if (executionMode === 'interactive') {
ui.addItem(
{
type: MessageType.ERROR,
text: t('Failed to compress chat history.'),
},
Date.now(),
);
return;
}
return {
type: 'message',
messageType: 'error',
content: t('Failed to compress chat history.'),
};
}
if (executionMode === 'interactive') {
ui.addItem(
{
type: MessageType.COMPRESSION,
@@ -59,27 +128,39 @@ export const compressCommand: SlashCommand = {
} as HistoryItemCompression,
Date.now(),
);
} else {
return;
}
return {
type: 'message',
messageType: 'info',
content: `Context compressed (${compressed.originalTokenCount} -> ${compressed.newTokenCount}).`,
};
} catch (e) {
if (executionMode === 'interactive') {
ui.addItem(
{
type: MessageType.ERROR,
text: t('Failed to compress chat history.'),
text: t('Failed to compress chat history: {{error}}', {
error: e instanceof Error ? e.message : String(e),
}),
},
Date.now(),
);
return;
}
} catch (e) {
ui.addItem(
{
type: MessageType.ERROR,
text: t('Failed to compress chat history: {{error}}', {
error: e instanceof Error ? e.message : String(e),
}),
},
Date.now(),
);
return {
type: 'message',
messageType: 'error',
content: t('Failed to compress chat history: {{error}}', {
error: e instanceof Error ? e.message : String(e),
}),
};
} finally {
ui.setPendingItem(null);
if (executionMode === 'interactive') {
ui.setPendingItem(null);
}
}
},
};
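// A minimal consumption sketch for the ACP branch above, assuming a
// CommandContext with executionMode 'acp' is available and that the type is
// imported in this module (the function name here is illustrative).
async function runCompressOverAcp(context: CommandContext) {
  const result = await compressCommand.action!(context, '');
  if (result && result.type === 'stream_messages') {
    for await (const msg of result.messages) {
      // Yields "Compressing context..." first, then success or error text.
      console.log(`[${msg.messageType}] ${msg.content}`);
    }
  }
}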

View File

@@ -15,7 +15,6 @@ vi.mock('@qwen-code/qwen-code-core', async (importOriginal) => {
const original = await importOriginal<typeof core>();
return {
...original,
getOauthClient: vi.fn(original.getOauthClient),
getIdeInstaller: vi.fn(original.getIdeInstaller),
IdeClient: {
getInstance: vi.fn(),

View File

@@ -13,16 +13,6 @@ import { createMockCommandContext } from '../../test-utils/mockCommandContext.js
vi.mock('../../i18n/index.js', () => ({
setLanguageAsync: vi.fn().mockResolvedValue(undefined),
getCurrentLanguage: vi.fn().mockReturnValue('en'),
detectSystemLanguage: vi.fn().mockReturnValue('en'),
getLanguageNameFromLocale: vi.fn((locale: string) => {
const map: Record<string, string> = {
zh: 'Chinese',
en: 'English',
ru: 'Russian',
de: 'German',
};
return map[locale] || 'English';
}),
t: vi.fn((key: string) => key),
}));
@@ -71,10 +61,7 @@ vi.mock('@qwen-code/qwen-code-core', async (importOriginal) => {
// Import modules after mocking
import * as i18n from '../../i18n/index.js';
import {
languageCommand,
initializeLlmOutputLanguage,
} from './languageCommand.js';
import { languageCommand } from './languageCommand.js';
describe('languageCommand', () => {
let mockContext: CommandContext;
@@ -199,39 +186,6 @@ describe('languageCommand', () => {
content: expect.stringContaining('Chinese'),
});
});
it('should parse Unicode LLM output language from marker', async () => {
vi.mocked(fs.existsSync).mockReturnValue(true);
vi.mocked(fs.readFileSync).mockReturnValue(
[
'# ⚠️ CRITICAL: 中文 Output Language Rule - HIGHEST PRIORITY ⚠️',
'<!-- qwen-code:llm-output-language: 中文 -->',
'',
'Some other content...',
].join('\n'),
);
vi.mocked(i18n.t).mockImplementation(
(key: string, params?: Record<string, string>) => {
if (params && key.includes('{{lang}}')) {
return key.replace('{{lang}}', params['lang'] || '');
}
return key;
},
);
if (!languageCommand.action) {
throw new Error('The language command must have an action.');
}
const result = await languageCommand.action(mockContext, '');
expect(result).toEqual({
type: 'message',
messageType: 'info',
content: expect.stringContaining('中文'),
});
});
});
describe('main command action - config not available', () => {
@@ -446,34 +400,6 @@ describe('languageCommand', () => {
});
});
it('should normalize locale code "ru" to "Russian"', async () => {
if (!languageCommand.action) {
throw new Error('The language command must have an action.');
}
await languageCommand.action(mockContext, 'output ru');
expect(fs.writeFileSync).toHaveBeenCalledWith(
expect.stringContaining('output-language.md'),
expect.stringContaining('Russian'),
'utf-8',
);
});
it('should normalize locale code "de" to "German"', async () => {
if (!languageCommand.action) {
throw new Error('The language command must have an action.');
}
await languageCommand.action(mockContext, 'output de');
expect(fs.writeFileSync).toHaveBeenCalledWith(
expect.stringContaining('output-language.md'),
expect.stringContaining('German'),
'utf-8',
);
});
it('should handle file write errors gracefully', async () => {
vi.mocked(fs.writeFileSync).mockImplementation(() => {
throw new Error('Permission denied');
@@ -555,8 +481,6 @@ describe('languageCommand', () => {
const nestedNames = uiSubcommand?.subCommands?.map((c) => c.name);
expect(nestedNames).toContain('zh-CN');
expect(nestedNames).toContain('en-US');
expect(nestedNames).toContain('ru-RU');
expect(nestedNames).toContain('de-DE');
});
it('should have action that sets language', async () => {
@@ -618,9 +542,16 @@ describe('languageCommand', () => {
const enUSSubcommand = uiSubcommand?.subCommands?.find(
(c) => c.name === 'en-US',
);
const deDESubcommand = uiSubcommand?.subCommands?.find(
(c) => c.name === 'de-DE',
);
it('zh-CN should have aliases', () => {
expect(zhCNSubcommand?.altNames).toContain('zh');
expect(zhCNSubcommand?.altNames).toContain('chinese');
});
it('en-US should have aliases', () => {
expect(enUSSubcommand?.altNames).toContain('en');
expect(enUSSubcommand?.altNames).toContain('english');
});
it('zh-CN action should set Chinese', async () => {
if (!zhCNSubcommand?.action) {
@@ -652,21 +583,6 @@ describe('languageCommand', () => {
});
});
it('de-DE action should set German', async () => {
if (!deDESubcommand?.action) {
throw new Error('de-DE subcommand must have an action.');
}
const result = await deDESubcommand.action(mockContext, '');
expect(i18n.setLanguageAsync).toHaveBeenCalledWith('de');
expect(result).toEqual({
type: 'message',
messageType: 'info',
content: expect.stringContaining('UI language changed'),
});
});
it('should reject extra arguments', async () => {
if (!zhCNSubcommand?.action) {
throw new Error('zh-CN subcommand must have an action.');
@@ -681,74 +597,4 @@ describe('languageCommand', () => {
});
});
});
describe('initializeLlmOutputLanguage', () => {
beforeEach(() => {
vi.clearAllMocks();
vi.mocked(fs.existsSync).mockReturnValue(false);
vi.mocked(fs.mkdirSync).mockImplementation(() => undefined);
vi.mocked(fs.writeFileSync).mockImplementation(() => undefined);
});
it('should create file when it does not exist', () => {
vi.mocked(fs.existsSync).mockReturnValue(false);
vi.mocked(i18n.detectSystemLanguage).mockReturnValue('en');
initializeLlmOutputLanguage();
expect(fs.mkdirSync).toHaveBeenCalled();
expect(fs.writeFileSync).toHaveBeenCalledWith(
expect.stringContaining('output-language.md'),
expect.stringContaining('English'),
'utf-8',
);
});
it('should NOT overwrite existing file', () => {
vi.mocked(fs.existsSync).mockReturnValue(true);
initializeLlmOutputLanguage();
expect(fs.writeFileSync).not.toHaveBeenCalled();
});
it('should detect Chinese locale and create Chinese rule file', () => {
vi.mocked(fs.existsSync).mockReturnValue(false);
vi.mocked(i18n.detectSystemLanguage).mockReturnValue('zh');
initializeLlmOutputLanguage();
expect(fs.writeFileSync).toHaveBeenCalledWith(
expect.stringContaining('output-language.md'),
expect.stringContaining('Chinese'),
'utf-8',
);
});
it('should detect Russian locale and create Russian rule file', () => {
vi.mocked(fs.existsSync).mockReturnValue(false);
vi.mocked(i18n.detectSystemLanguage).mockReturnValue('ru');
initializeLlmOutputLanguage();
expect(fs.writeFileSync).toHaveBeenCalledWith(
expect.stringContaining('output-language.md'),
expect.stringContaining('Russian'),
'utf-8',
);
});
it('should detect German locale and create German rule file', () => {
vi.mocked(fs.existsSync).mockReturnValue(false);
vi.mocked(i18n.detectSystemLanguage).mockReturnValue('de');
initializeLlmOutputLanguage();
expect(fs.writeFileSync).toHaveBeenCalledWith(
expect.stringContaining('output-language.md'),
expect.stringContaining('German'),
'utf-8',
);
});
});
});

View File

@@ -1,6 +1,6 @@
/**
* @license
* Copyright 2025 Qwen team
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
@@ -15,72 +15,51 @@ import { SettingScope } from '../../config/settings.js';
import {
setLanguageAsync,
getCurrentLanguage,
detectSystemLanguage,
getLanguageNameFromLocale,
type SupportedLanguage,
t,
} from '../../i18n/index.js';
import {
SUPPORTED_LANGUAGES,
type LanguageDefinition,
} from '../../i18n/languages.js';
import * as fs from 'node:fs';
import * as path from 'node:path';
import { Storage } from '@qwen-code/qwen-code-core';
const LLM_OUTPUT_LANGUAGE_RULE_FILENAME = 'output-language.md';
const LLM_OUTPUT_LANGUAGE_MARKER_PREFIX = 'qwen-code:llm-output-language:';
function parseUiLanguageArg(input: string): SupportedLanguage | null {
const lowered = input.trim().toLowerCase();
if (!lowered) return null;
for (const lang of SUPPORTED_LANGUAGES) {
if (
lowered === lang.code ||
lowered === lang.id.toLowerCase() ||
lowered === lang.fullName.toLowerCase()
) {
return lang.code;
}
}
return null;
}
function formatUiLanguageDisplay(lang: SupportedLanguage): string {
const option = SUPPORTED_LANGUAGES.find((o) => o.code === lang);
return option ? `${option.fullName} (${option.id})` : lang;
}
function sanitizeLanguageForMarker(language: string): string {
// HTML comments cannot contain "--" or end markers like "-->" or "--!>" safely.
// Also avoid newlines to keep the marker single-line and robust to parsing.
return language
.replace(/[\r\n]/g, ' ')
.replace(/--!?>/g, '')
.replace(/--/g, '');
}
/**
* Generates the LLM output language rule template based on the language name.
*/
function generateLlmOutputLanguageRule(language: string): string {
const markerLanguage = sanitizeLanguageForMarker(language);
return `# Output language preference: ${language}
<!-- ${LLM_OUTPUT_LANGUAGE_MARKER_PREFIX} ${markerLanguage} -->
return `# ⚠️ CRITICAL: ${language} Output Language Rule - HIGHEST PRIORITY ⚠️
## Goal
Prefer responding in **${language}** for normal assistant messages and explanations.
## 🚨 MANDATORY RULE - NO EXCEPTIONS 🚨
## Keep technical artifacts unchanged
Do **not** translate or rewrite:
- Code blocks, CLI commands, file paths, stack traces, logs, JSON keys, identifiers
- Exact quoted text from the user (keep quotes verbatim)
**YOU MUST RESPOND IN ${language.toUpperCase()} FOR EVERY SINGLE OUTPUT, REGARDLESS OF THE USER'S INPUT LANGUAGE.**
## When a conflict exists
If higher-priority instructions (system/developer) require a different behavior, follow them.
This is a **NON-NEGOTIABLE** requirement. Even if the user writes in English, says "hi", asks a simple question, or explicitly requests another language, **YOU MUST ALWAYS RESPOND IN ${language.toUpperCase()}.**
## Tool / system outputs
Raw tool/system outputs may contain fixed-format English. Preserve them verbatim, and if needed, add a short **${language}** explanation below.
## What Must Be in ${language}
**EVERYTHING** you output: conversation replies, tool call descriptions, success/error messages, generated file content (comments, documentation), and all explanatory text.
**Tool outputs**: All descriptive text from \`read_file\`, \`write_file\`, \`codebase_search\`, \`run_terminal_cmd\`, \`todo_write\`, \`web_search\`, etc. MUST be in ${language}.
## Examples
### ✅ CORRECT:
- User says "hi" → Respond in ${language} (e.g., "Bonjour" if ${language} is French)
- Tool result → "已成功读取文件 config.json" (if ${language} is Chinese)
- Error → "无法找到指定的文件" (if ${language} is Chinese)
### ❌ WRONG:
- User says "hi" → "Hello" in English
- Tool result → "Successfully read file" in English
- Error → "File not found" in English
## Notes
- Code elements (variable/function names, syntax) can remain in English
- Comments, documentation, and all other text MUST be in ${language}
**THIS RULE IS ACTIVE NOW. ALL OUTPUTS MUST BE IN ${language.toUpperCase()}. NO EXCEPTIONS.**
`;
}
@@ -94,80 +73,6 @@ function getLlmOutputLanguageRulePath(): string {
);
}
/**
* Normalizes a language input to its full English name.
* If the input is a known locale code (e.g., "ru", "zh"), converts it to the full name.
* Otherwise, returns the input as-is (e.g., "Japanese" stays "Japanese").
*/
function normalizeLanguageName(language: string): string {
const lowered = language.toLowerCase();
// Check if it's a known locale code and convert to full name
const fullName = getLanguageNameFromLocale(lowered);
// If getLanguageNameFromLocale returned a different value, use it
// Otherwise, use the original input (preserves case for unknown languages)
if (fullName !== 'English' || lowered === 'en') {
return fullName;
}
return language;
}
function extractLlmOutputLanguageFromRuleFileContent(
content: string,
): string | null {
// Preferred: machine-readable marker that supports Unicode and spaces.
// Example: <!-- qwen-code:llm-output-language: 中文 -->
const markerMatch = content.match(
new RegExp(
String.raw`<!--\s*${LLM_OUTPUT_LANGUAGE_MARKER_PREFIX}\s*(.*?)\s*-->`,
'i',
),
);
if (markerMatch?.[1]) {
const lang = markerMatch[1].trim();
if (lang) return lang;
}
// Backward compatibility: parse the heading line.
// Example: "# CRITICAL: Chinese Output Language Rule - HIGHEST PRIORITY"
// Example: "# ⚠️ CRITICAL: 日本語 Output Language Rule - HIGHEST PRIORITY ⚠️"
const headingMatch = content.match(
/^#.*?CRITICAL:\s*(.*?)\s+Output Language Rule\b/im,
);
if (headingMatch?.[1]) {
const lang = headingMatch[1].trim();
if (lang) return lang;
}
return null;
}
/**
* Initializes the LLM output language rule file on first startup.
* If the file already exists, it is not overwritten (respects user preference).
*/
export function initializeLlmOutputLanguage(): void {
const filePath = getLlmOutputLanguageRulePath();
// Skip if file already exists (user preference)
if (fs.existsSync(filePath)) {
return;
}
// Detect system language and map to language name
const detectedLocale = detectSystemLanguage();
const languageName = getLanguageNameFromLocale(detectedLocale);
// Generate the rule file
const content = generateLlmOutputLanguageRule(languageName);
// Ensure directory exists
const dir = path.dirname(filePath);
fs.mkdirSync(dir, { recursive: true });
// Write file
fs.writeFileSync(filePath, content, 'utf-8');
}
/**
* Gets the current LLM output language from the rule file if it exists.
*/
@@ -176,7 +81,12 @@ function getCurrentLlmOutputLanguage(): string | null {
if (fs.existsSync(filePath)) {
try {
const content = fs.readFileSync(filePath, 'utf-8');
return extractLlmOutputLanguageFromRuleFileContent(content);
// Extract language name from the first line
// Template format: "# CRITICAL: Chinese Output Language Rule - HIGHEST PRIORITY"
const match = content.match(/^#.*?(\w+)\s+Output Language Rule/i);
if (match) {
return match[1];
}
} catch {
// Ignore errors
}
@@ -217,11 +127,18 @@ async function setUiLanguage(
// Reload commands to update their descriptions with the new language
context.ui.reloadCommands();
// Map language codes to friendly display names
const langDisplayNames: Partial<Record<SupportedLanguage, string>> = {
zh: '中文 (zh-CN)',
en: 'English (en-US)',
ru: 'Русский (ru-RU)',
};
return {
type: 'message',
messageType: 'info',
content: t('UI language changed to {{lang}}', {
lang: formatUiLanguageDisplay(lang),
lang: langDisplayNames[lang] || lang,
}),
};
}
@@ -234,9 +151,7 @@ function generateLlmOutputLanguageRuleFile(
): Promise<MessageActionReturn> {
try {
const filePath = getLlmOutputLanguageRulePath();
// Normalize locale codes (e.g., "ru" -> "Russian") to full language names
const normalizedLanguage = normalizeLanguageName(language);
const content = generateLlmOutputLanguageRule(normalizedLanguage);
const content = generateLlmOutputLanguageRule(language);
// Ensure directory exists
const dir = path.dirname(filePath);
@@ -281,6 +196,7 @@ export const languageCommand: SlashCommand = {
args: string,
): Promise<SlashCommandActionReturn> => {
const { services } = context;
if (!services.config) {
return {
type: 'message',
@@ -291,37 +207,18 @@ export const languageCommand: SlashCommand = {
const trimmedArgs = args.trim();
// Handle subcommands if called directly via action (for tests/backward compatibility)
const parts = trimmedArgs.split(/\s+/);
const firstArg = parts[0].toLowerCase();
const subArgs = parts.slice(1).join(' ');
if (firstArg === 'ui' || firstArg === 'output') {
const subCommand = languageCommand.subCommands?.find(
(s) => s.name === firstArg,
);
if (subCommand?.action) {
return subCommand.action(
context,
subArgs,
) as Promise<SlashCommandActionReturn>;
}
}
// If no arguments, show current language settings and usage
if (!trimmedArgs) {
const currentUiLang = getCurrentLanguage();
const currentLlmLang = getCurrentLlmOutputLanguage();
const message = [
t('Current UI language: {{lang}}', {
lang: formatUiLanguageDisplay(currentUiLang as SupportedLanguage),
}),
t('Current UI language: {{lang}}', { lang: currentUiLang }),
currentLlmLang
? t('Current LLM output language: {{lang}}', { lang: currentLlmLang })
: t('LLM output language not set'),
'',
t('Available subcommands:'),
` /language ui [${SUPPORTED_LANGUAGES.map((o) => o.id).join('|')}] - ${t('Set UI language')}`,
` /language ui [zh-CN|en-US|ru-RU] - ${t('Set UI language')}`,
` /language output <language> - ${t('Set LLM output language')}`,
].join('\n');
@@ -332,21 +229,115 @@ export const languageCommand: SlashCommand = {
};
}
// Handle backward compatibility for /language [lang]
const targetLang = parseUiLanguageArg(trimmedArgs);
if (targetLang) {
// Parse subcommand
const parts = trimmedArgs.split(/\s+/);
const subcommand = parts[0].toLowerCase();
if (subcommand === 'ui') {
// Handle /language ui [zh-CN|en-US|ru-RU]
if (parts.length === 1) {
// Show UI language subcommand help
return {
type: 'message',
messageType: 'info',
content: [
t('Set UI language'),
'',
t('Usage: /language ui [zh-CN|en-US|ru-RU]'),
'',
t('Available options:'),
t(' - zh-CN: Simplified Chinese'),
t(' - en-US: English'),
t(' - ru-RU: Russian'),
'',
t(
'To request additional UI language packs, please open an issue on GitHub.',
),
].join('\n'),
};
}
const langArg = parts[1].toLowerCase();
let targetLang: SupportedLanguage | null = null;
if (langArg === 'en' || langArg === 'english' || langArg === 'en-us') {
targetLang = 'en';
} else if (
langArg === 'zh' ||
langArg === 'chinese' ||
langArg === '中文' ||
langArg === 'zh-cn'
) {
targetLang = 'zh';
} else if (
langArg === 'ru' ||
langArg === 'ru-ru' ||
langArg === 'russian' ||
langArg === 'русский'
) {
targetLang = 'ru';
} else {
return {
type: 'message',
messageType: 'error',
content: t('Invalid language. Available: en-US, zh-CN, ru-RU'),
};
}
return setUiLanguage(context, targetLang);
} else if (subcommand === 'output') {
// Handle /language output <language>
if (parts.length === 1) {
return {
type: 'message',
messageType: 'info',
content: [
t('Set LLM output language'),
'',
t('Usage: /language output <language>'),
` ${t('Example: /language output 中文')}`,
].join('\n'),
};
}
// Join all parts after "output" as the language name
const language = parts.slice(1).join(' ');
return generateLlmOutputLanguageRuleFile(language);
} else {
// Backward compatibility: treat as UI language
const langArg = trimmedArgs.toLowerCase();
let targetLang: SupportedLanguage | null = null;
if (langArg === 'en' || langArg === 'english' || langArg === 'en-us') {
targetLang = 'en';
} else if (
langArg === 'zh' ||
langArg === 'chinese' ||
langArg === '中文' ||
langArg === 'zh-cn'
) {
targetLang = 'zh';
} else if (
langArg === 'ru' ||
langArg === 'ru-ru' ||
langArg === 'russian' ||
langArg === 'русский'
) {
targetLang = 'ru';
} else {
return {
type: 'message',
messageType: 'error',
content: [
t('Invalid command. Available subcommands:'),
' - /language ui [zh-CN|en-US|ru-RU] - ' + t('Set UI language'),
' - /language output <language> - ' + t('Set LLM output language'),
].join('\n'),
};
}
return setUiLanguage(context, targetLang);
}
return {
type: 'message',
messageType: 'error',
content: [
t('Invalid command. Available subcommands:'),
` - /language ui [${SUPPORTED_LANGUAGES.map((o) => o.id).join('|')}] - ${t('Set UI language')}`,
' - /language output <language> - ' + t('Set LLM output language'),
].join('\n'),
};
},
subCommands: [
{
@@ -367,14 +358,11 @@ export const languageCommand: SlashCommand = {
content: [
t('Set UI language'),
'',
t('Usage: /language ui [{{options}}]', {
options: SUPPORTED_LANGUAGES.map((o) => o.id).join('|'),
}),
t('Usage: /language ui [zh-CN|en-US]'),
'',
t('Available options:'),
...SUPPORTED_LANGUAGES.map(
(o) => ` - ${o.id}: ${t(o.fullName)}`,
),
t(' - zh-CN: Simplified Chinese'),
t(' - en-US: English'),
'',
t(
'To request additional UI language packs, please open an issue on GitHub.',
@@ -383,20 +371,99 @@ export const languageCommand: SlashCommand = {
};
}
const targetLang = parseUiLanguageArg(trimmedArgs);
if (!targetLang) {
const langArg = trimmedArgs.toLowerCase();
let targetLang: SupportedLanguage | null = null;
if (langArg === 'en' || langArg === 'english' || langArg === 'en-us') {
targetLang = 'en';
} else if (
langArg === 'zh' ||
langArg === 'chinese' ||
langArg === '中文' ||
langArg === 'zh-cn'
) {
targetLang = 'zh';
} else {
return {
type: 'message',
messageType: 'error',
content: t('Invalid language. Available: {{options}}', {
options: SUPPORTED_LANGUAGES.map((o) => o.id).join(','),
}),
content: t('Invalid language. Available: en-US, zh-CN'),
};
}
return setUiLanguage(context, targetLang);
},
subCommands: SUPPORTED_LANGUAGES.map(createUiLanguageSubCommand),
subCommands: [
{
name: 'zh-CN',
altNames: ['zh', 'chinese', '中文'],
get description() {
return t('Set UI language to Simplified Chinese (zh-CN)');
},
kind: CommandKind.BUILT_IN,
action: async (
context: CommandContext,
args: string,
): Promise<MessageActionReturn> => {
if (args.trim().length > 0) {
return {
type: 'message',
messageType: 'error',
content: t(
'Language subcommands do not accept additional arguments.',
),
};
}
return setUiLanguage(context, 'zh');
},
},
{
name: 'en-US',
altNames: ['en', 'english'],
get description() {
return t('Set UI language to English (en-US)');
},
kind: CommandKind.BUILT_IN,
action: async (
context: CommandContext,
args: string,
): Promise<MessageActionReturn> => {
if (args.trim().length > 0) {
return {
type: 'message',
messageType: 'error',
content: t(
'Language subcommands do not accept additional arguments.',
),
};
}
return setUiLanguage(context, 'en');
},
},
{
name: 'ru-RU',
altNames: ['ru', 'russian', 'русский'],
get description() {
return t('Set UI language to Russian (ru-RU)');
},
kind: CommandKind.BUILT_IN,
action: async (
context: CommandContext,
args: string,
): Promise<MessageActionReturn> => {
if (args.trim().length > 0) {
return {
type: 'message',
messageType: 'error',
content: t(
'Language subcommands do not accept additional arguments.',
),
};
}
return setUiLanguage(context, 'ru');
},
},
],
},
{
name: 'output',
@@ -429,28 +496,3 @@ export const languageCommand: SlashCommand = {
},
],
};
/**
* Helper to create a UI language subcommand.
*/
function createUiLanguageSubCommand(option: LanguageDefinition): SlashCommand {
return {
name: option.id,
get description() {
return t('Set UI language to {{name}}', { name: option.fullName });
},
kind: CommandKind.BUILT_IN,
action: async (context, args) => {
if (args.trim().length > 0) {
return {
type: 'message',
messageType: 'error',
content: t(
'Language subcommands do not accept additional arguments.',
),
};
}
return setUiLanguage(context, option.code);
},
};
}

View File

@@ -26,6 +26,8 @@ export const summaryCommand: SlashCommand = {
action: async (context): Promise<SlashCommandActionReturn> => {
const { config } = context.services;
const { ui } = context;
const executionMode = context.executionMode ?? 'interactive';
if (!config) {
return {
type: 'message',
@@ -43,8 +45,8 @@ export const summaryCommand: SlashCommand = {
};
}
// Check if already generating summary
if (ui.pendingItem) {
// Check if already generating summary (interactive UI only)
if (executionMode === 'interactive' && ui.pendingItem) {
ui.addItem(
{
type: 'error' as const,
@@ -63,29 +65,22 @@ export const summaryCommand: SlashCommand = {
};
}
try {
// Get the current chat history
const getChatHistory = () => {
const chat = geminiClient.getChat();
const history = chat.getHistory();
return chat.getHistory();
};
const validateChatHistory = (
history: ReturnType<typeof getChatHistory>,
) => {
if (history.length <= 2) {
return {
type: 'message',
messageType: 'info',
content: t('No conversation found to summarize.'),
};
throw new Error(t('No conversation found to summarize.'));
}
};
// Show loading state
const pendingMessage: HistoryItemSummary = {
type: 'summary',
summary: {
isPending: true,
stage: 'generating',
},
};
ui.setPendingItem(pendingMessage);
const generateSummaryMarkdown = async (
history: ReturnType<typeof getChatHistory>,
): Promise<string> => {
// Build the conversation context for summary generation
const conversationContext = history.map((message) => ({
role: message.role,
@@ -121,19 +116,21 @@ export const summaryCommand: SlashCommand = {
if (!markdownSummary) {
throw new Error(
'Failed to generate summary - no text content received from LLM response',
t(
'Failed to generate summary - no text content received from LLM response',
),
);
}
// Update loading message to show saving progress
ui.setPendingItem({
type: 'summary',
summary: {
isPending: true,
stage: 'saving',
},
});
return markdownSummary;
};
const saveSummaryToDisk = async (
markdownSummary: string,
): Promise<{
filePathForDisplay: string;
fullPath: string;
}> => {
// Ensure .qwen directory exists
const projectRoot = config.getProjectRoot();
const qwenDir = path.join(projectRoot, '.qwen');
@@ -155,45 +152,163 @@ export const summaryCommand: SlashCommand = {
await fsPromises.writeFile(summaryPath, summaryContent, 'utf8');
// Clear pending item and show success message
return {
filePathForDisplay: '.qwen/PROJECT_SUMMARY.md',
fullPath: summaryPath,
};
};
const emitInteractivePending = (stage: 'generating' | 'saving') => {
if (executionMode !== 'interactive') {
return;
}
const pendingMessage: HistoryItemSummary = {
type: 'summary',
summary: {
isPending: true,
stage,
},
};
ui.setPendingItem(pendingMessage);
};
const completeInteractive = (filePathForDisplay: string) => {
if (executionMode !== 'interactive') {
return;
}
ui.setPendingItem(null);
const completedSummaryItem: HistoryItemSummary = {
type: 'summary',
summary: {
isPending: false,
stage: 'completed',
filePath: '.qwen/PROJECT_SUMMARY.md',
filePath: filePathForDisplay,
},
};
ui.addItem(completedSummaryItem, Date.now());
};
return {
type: 'message',
messageType: 'info',
content: '', // Empty content since we show the message in UI component
};
} catch (error) {
// Clear pending item on error
const formatErrorMessage = (error: unknown): string =>
t('Failed to generate project context summary: {{error}}', {
error: error instanceof Error ? error.message : String(error),
});
const failInteractive = (error: unknown) => {
if (executionMode !== 'interactive') {
return;
}
ui.setPendingItem(null);
ui.addItem(
{
type: 'error' as const,
text: `${t(
'Failed to generate project context summary: {{error}}',
{
error: error instanceof Error ? error.message : String(error),
},
)}`,
text: `${formatErrorMessage(error)}`,
},
Date.now(),
);
};
const formatSuccessMessage = (filePathForDisplay: string): string =>
t('Saved project summary to {{filePathForDisplay}}.', {
filePathForDisplay,
});
const returnNoConversationMessage = (): SlashCommandActionReturn => {
const msg = t('No conversation found to summarize.');
if (executionMode === 'acp') {
const messages = async function* () {
yield {
messageType: 'info' as const,
content: msg,
};
};
return {
type: 'stream_messages',
messages: messages(),
};
}
return {
type: 'message',
messageType: 'info',
content: msg,
};
};
const executeSummaryGeneration = async (
history: ReturnType<typeof getChatHistory>,
): Promise<{
markdownSummary: string;
filePathForDisplay: string;
}> => {
emitInteractivePending('generating');
const markdownSummary = await generateSummaryMarkdown(history);
emitInteractivePending('saving');
const { filePathForDisplay } = await saveSummaryToDisk(markdownSummary);
completeInteractive(filePathForDisplay);
return { markdownSummary, filePathForDisplay };
};
// Validate chat history once at the beginning
const history = getChatHistory();
try {
validateChatHistory(history);
} catch (_error) {
return returnNoConversationMessage();
}
if (executionMode === 'acp') {
const messages = async function* () {
try {
yield {
messageType: 'info' as const,
content: t('Generating project summary...'),
};
const { filePathForDisplay } =
await executeSummaryGeneration(history);
yield {
messageType: 'info' as const,
content: formatSuccessMessage(filePathForDisplay),
};
} catch (error) {
failInteractive(error);
yield {
messageType: 'error' as const,
content: formatErrorMessage(error),
};
}
};
return {
type: 'stream_messages',
messages: messages(),
};
}
try {
const { filePathForDisplay } = await executeSummaryGeneration(history);
if (executionMode === 'non_interactive') {
return {
type: 'message',
messageType: 'info',
content: formatSuccessMessage(filePathForDisplay),
};
}
// Interactive mode: UI components already display progress and completion.
return {
type: 'message',
messageType: 'info',
content: '',
};
} catch (error) {
failInteractive(error);
return {
type: 'message',
messageType: 'error',
content: t('Failed to generate project context summary: {{error}}', {
error: error instanceof Error ? error.message : String(error),
}),
content: formatErrorMessage(error),
};
}
},

View File

@@ -22,6 +22,14 @@ import type {
// Grouped dependencies for clarity and easier mocking
export interface CommandContext {
/**
* Execution mode for the current invocation.
*
* - interactive: React/Ink UI mode
* - non_interactive: non-interactive CLI mode (text/json)
* - acp: ACP/Zed integration mode
*/
executionMode?: 'interactive' | 'non_interactive' | 'acp';
// Invocation properties for when commands are called.
invocation?: {
/** The raw, untrimmed input string from the user. */
@@ -108,6 +116,19 @@ export interface MessageActionReturn {
content: string;
}
/**
* The return type for a command action that streams multiple messages.
* Used for long-running operations that need to send progress updates.
*/
export interface StreamMessagesActionReturn {
type: 'stream_messages';
messages: AsyncGenerator<
{ messageType: 'info' | 'error'; content: string },
void,
unknown
>;
}
/**
* The return type for a command action that needs to open a dialog.
*/
@@ -174,6 +195,7 @@ export interface ConfirmActionReturn {
export type SlashCommandActionReturn =
| ToolActionReturn
| MessageActionReturn
| StreamMessagesActionReturn
| QuitActionReturn
| OpenDialogActionReturn
| LoadHistoryActionReturn
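Taken together with the executionMode field above, the new return type lets a command stream progress in ACP mode. A minimal sketch, not part of the diff, mirroring the ACP branch of the /summary command earlier in this compare; the command body and import path are hypothetical:
import type { CommandContext, SlashCommandActionReturn } from './types.js';
async function exampleAction(
  context: CommandContext,
): Promise<SlashCommandActionReturn> {
  if (context.executionMode === 'acp') {
    // ACP/Zed integration: stream progress updates as they are produced.
    const messages = async function* () {
      yield { messageType: 'info' as const, content: 'Working...' };
      yield { messageType: 'info' as const, content: 'Done.' };
    };
    return { type: 'stream_messages', messages: messages() };
  }
  // Interactive and non-interactive modes return a single message instead.
  return { type: 'message', messageType: 'info', content: 'Done.' };
}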

View File

@@ -17,7 +17,6 @@ import { AuthDialog } from '../auth/AuthDialog.js';
import { OpenAIKeyPrompt } from './OpenAIKeyPrompt.js';
import { EditorSettingsDialog } from './EditorSettingsDialog.js';
import { WorkspaceMigrationDialog } from './WorkspaceMigrationDialog.js';
import { PermissionsModifyTrustDialog } from './PermissionsModifyTrustDialog.js';
import { ModelDialog } from './ModelDialog.js';
import { ApprovalModeDialog } from './ApprovalModeDialog.js';
@@ -87,15 +86,6 @@ export const DialogManager = ({
/>
);
}
if (uiState.shouldShowIdePrompt) {
return (
<IdeIntegrationNudge

View File

@@ -1,91 +0,0 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { render } from 'ink-testing-library';
import { describe, it, expect, vi, beforeEach, type Mock } from 'vitest';
import { ProQuotaDialog } from './ProQuotaDialog.js';
import { RadioButtonSelect } from './shared/RadioButtonSelect.js';
// Mock the child component to make it easier to test the parent
vi.mock('./shared/RadioButtonSelect.js', () => ({
RadioButtonSelect: vi.fn(),
}));
describe('ProQuotaDialog', () => {
beforeEach(() => {
vi.clearAllMocks();
});
it('should render with correct title and options', () => {
const { lastFrame } = render(
<ProQuotaDialog
failedModel="gemini-2.5-pro"
fallbackModel="gemini-2.5-flash"
onChoice={() => {}}
/>,
);
const output = lastFrame();
expect(output).toContain('Pro quota limit reached for gemini-2.5-pro.');
// Check that RadioButtonSelect was called with the correct items
expect(RadioButtonSelect).toHaveBeenCalledWith(
expect.objectContaining({
items: [
{
label: 'Change auth (executes the /auth command)',
value: 'auth',
key: 'auth',
},
{
label: `Continue with gemini-2.5-flash`,
value: 'continue',
key: 'continue',
},
],
}),
undefined,
);
});
it('should call onChoice with "auth" when "Change auth" is selected', () => {
const mockOnChoice = vi.fn();
render(
<ProQuotaDialog
failedModel="gemini-2.5-pro"
fallbackModel="gemini-2.5-flash"
onChoice={mockOnChoice}
/>,
);
// Get the onSelect function passed to RadioButtonSelect
const onSelect = (RadioButtonSelect as Mock).mock.calls[0][0].onSelect;
// Simulate the selection
onSelect('auth');
expect(mockOnChoice).toHaveBeenCalledWith('auth');
});
it('should call onChoice with "continue" when "Continue with flash" is selected', () => {
const mockOnChoice = vi.fn();
render(
<ProQuotaDialog
failedModel="gemini-2.5-pro"
fallbackModel="gemini-2.5-flash"
onChoice={mockOnChoice}
/>,
);
// Get the onSelect function passed to RadioButtonSelect
const onSelect = (RadioButtonSelect as Mock).mock.calls[0][0].onSelect;
// Simulate the selection
onSelect('continue');
expect(mockOnChoice).toHaveBeenCalledWith('continue');
});
});

View File

@@ -1,55 +0,0 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import type React from 'react';
import { Box, Text } from 'ink';
import { RadioButtonSelect } from './shared/RadioButtonSelect.js';
import { theme } from '../semantic-colors.js';
import { t } from '../../i18n/index.js';
interface ProQuotaDialogProps {
failedModel: string;
fallbackModel: string;
onChoice: (choice: 'auth' | 'continue') => void;
}
export function ProQuotaDialog({
failedModel,
fallbackModel,
onChoice,
}: ProQuotaDialogProps): React.JSX.Element {
const items = [
{
label: t('Change auth (executes the /auth command)'),
value: 'auth' as const,
key: 'auth',
},
{
label: t('Continue with {{model}}', { model: fallbackModel }),
value: 'continue' as const,
key: 'continue',
},
];
const handleSelect = (choice: 'auth' | 'continue') => {
onChoice(choice);
};
return (
<Box borderStyle="round" flexDirection="column" paddingX={1}>
<Text bold color={theme.status.warning}>
{t('Pro quota limit reached for {{model}}.', { model: failedModel })}
</Text>
<Box marginTop={1}>
<RadioButtonSelect
items={items}
initialIndex={1}
onSelect={handleSelect}
/>
</Box>
</Box>
);
}

View File

@@ -55,7 +55,6 @@ export interface UIActions {
handleClearScreen: () => void;
onWorkspaceMigrationDialogOpen: () => void;
onWorkspaceMigrationDialogClose: () => void;
// Vision switch dialog
handleVisionSwitchSelect: (outcome: VisionSwitchOutcome) => void;
// Welcome back dialog

View File

@@ -22,21 +22,13 @@ import type {
AuthType,
IdeContext,
ApprovalMode,
IdeInfo,
} from '@qwen-code/qwen-code-core';
import type { DOMElement } from 'ink';
import type { SessionStatsState } from '../contexts/SessionContext.js';
import type { ExtensionUpdateState } from '../state/extensions.js';
import type { UpdateObject } from '../utils/updateCheck.js';
import { type UseHistoryManagerReturn } from '../hooks/useHistoryManager.js';
import { type RestartReason } from '../hooks/useIdeTrustListener.js';
@@ -99,8 +91,6 @@ export interface UIState {
// eslint-disable-next-line @typescript-eslint/no-explicit-any
workspaceExtensions: any[]; // Extension[]
currentModel: string;
contextFileNames: string[];
errorCount: number;

View File

@@ -520,6 +520,13 @@ export const useSlashCommandProcessor = (
true,
);
}
case 'stream_messages': {
// stream_messages is only used in ACP/Zed integration mode
// and should not be returned in interactive UI mode
throw new Error(
'stream_messages result type is not supported in interactive mode',
);
}
default: {
const unhandled: never = result;
throw new Error(

View File

@@ -1323,7 +1323,7 @@ describe('useGeminiStream', () => {
it('should call parseAndFormatApiError with the correct authType on stream initialization failure', async () => {
// 1. Setup
const mockError = new Error('Rate limit exceeded');
const mockAuthType = AuthType.USE_VERTEX_AI;
mockParseAndFormatApiError.mockClear();
mockSendMessageStream.mockReturnValue(
(async function* () {
@@ -1374,9 +1374,6 @@ describe('useGeminiStream', () => {
expect(mockParseAndFormatApiError).toHaveBeenCalledWith(
'Rate limit exceeded',
mockAuthType,
);
});
});
@@ -2493,9 +2490,6 @@ describe('useGeminiStream', () => {
expect(mockParseAndFormatApiError).toHaveBeenCalledWith(
{ message: 'Test error' },
expect.any(String),
);
});
});

View File

@@ -26,7 +26,6 @@ import {
GitService,
UnauthorizedError,
UserPromptEvent,
logConversationFinishedEvent,
ConversationFinishedEvent,
ApprovalMode,
@@ -527,10 +526,15 @@ export const useGeminiStream = (
return currentThoughtBuffer;
}
let newThoughtBuffer = currentThoughtBuffer + thoughtText;
const pendingType = pendingHistoryItemRef.current?.type;
const isPendingThought =
pendingType === 'gemini_thought' ||
pendingType === 'gemini_thought_content';
// If we're not already showing a thought, start a new one
if (!isPendingThought) {
// If there's a pending non-thought item, finalize it first
if (pendingHistoryItemRef.current) {
addItem(pendingHistoryItemRef.current, userMessageTimestamp);
@@ -538,11 +542,37 @@ export const useGeminiStream = (
setPendingHistoryItem({ type: 'gemini_thought', text: '' });
}
// Split large thought messages for better rendering performance (same rationale
// as regular content streaming). This helps avoid terminal flicker caused by
// constantly re-rendering an ever-growing "pending" block.
const splitPoint = findLastSafeSplitPoint(newThoughtBuffer);
const nextPendingType: 'gemini_thought' | 'gemini_thought_content' =
isPendingThought && pendingType === 'gemini_thought_content'
? 'gemini_thought_content'
: 'gemini_thought';
if (splitPoint === newThoughtBuffer.length) {
// Update the existing thought message with accumulated content
setPendingHistoryItem({
type: nextPendingType,
text: newThoughtBuffer,
});
} else {
const beforeText = newThoughtBuffer.substring(0, splitPoint);
const afterText = newThoughtBuffer.substring(splitPoint);
addItem(
{
type: nextPendingType,
text: beforeText,
},
userMessageTimestamp,
);
setPendingHistoryItem({
type: 'gemini_thought_content',
text: afterText,
});
newThoughtBuffer = afterText;
}
// Also update the thought state for the loading indicator
mergeThought(eventValue);
@@ -600,9 +630,6 @@ export const useGeminiStream = (
text: parseAndFormatApiError(
eventValue.error,
config.getContentGeneratorConfig()?.authType,
),
},
userMessageTimestamp,
@@ -654,6 +681,9 @@ export const useGeminiStream = (
'Response stopped due to image safety violations.',
[FinishReason.UNEXPECTED_TOOL_CALL]:
'Response stopped due to unexpected tool call.',
[FinishReason.IMAGE_PROHIBITED_CONTENT]:
'Response stopped due to image prohibited content.',
[FinishReason.NO_IMAGE]: 'Response stopped due to no image.',
};
const message = finishReasonMessages[finishReason];
@@ -770,11 +800,17 @@ export const useGeminiStream = (
for await (const event of stream) {
switch (event.type) {
case ServerGeminiEventType.Thought:
// If the thought has a subject, it's a discrete status update rather than
// a streamed textual thought, so we update the thought state directly.
if (event.value.subject) {
setThought(event.value);
} else {
thoughtBuffer = handleThoughtEvent(
event.value,
thoughtBuffer,
userMessageTimestamp,
);
}
break;
case ServerGeminiEventType.Content:
geminiMessageBuffer = handleContentEvent(
@@ -845,6 +881,7 @@ export const useGeminiStream = (
handleMaxSessionTurnsEvent,
handleSessionTokenLimitExceededEvent,
handleCitationEvent,
setThought,
],
);
@@ -987,9 +1024,6 @@ export const useGeminiStream = (
text: parseAndFormatApiError(
getErrorMessage(error) || 'Unknown error',
config.getContentGeneratorConfig()?.authType,
),
},
userMessageTimestamp,
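In isolation, the split-and-flush pattern introduced above for streamed thoughts looks like the following minimal sketch. It assumes only that findLastSafeSplitPoint returns an index at or before the end of the buffer; its real heuristics live in the CLI's markdown utilities:
// Commit the stable prefix of a streamed buffer to history and keep only
// the tail pending, so the constantly re-rendered block stays small.
function flushStablePrefix(
  buffer: string,
  findLastSafeSplitPoint: (s: string) => number,
  commit: (text: string) => void,
): string {
  const splitPoint = findLastSafeSplitPoint(buffer);
  if (splitPoint === buffer.length) {
    return buffer; // Nothing safe to flush yet; keep everything pending.
  }
  commit(buffer.substring(0, splitPoint)); // Finalize the stable prefix.
  return buffer.substring(splitPoint); // Only the tail keeps re-rendering.
}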

View File

@@ -8,22 +8,19 @@ import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
import { renderHook, act } from '@testing-library/react';
import { useLoadingIndicator } from './useLoadingIndicator.js';
import { StreamingState } from '../types.js';
import {
WITTY_LOADING_PHRASES,
PHRASE_CHANGE_INTERVAL_MS,
} from './usePhraseCycler.js';
describe('useLoadingIndicator', () => {
beforeEach(() => {
vi.useFakeTimers();
});
afterEach(() => {
vi.useRealTimers(); // Restore real timers after each test
act(() => vi.runOnlyPendingTimers());
vi.restoreAllMocks();
});
it('should initialize with default values when Idle', () => {
@@ -31,7 +28,9 @@ describe('useLoadingIndicator', () => {
useLoadingIndicator(StreamingState.Idle),
);
expect(result.current.elapsedTime).toBe(0);
expect(WITTY_LOADING_PHRASES).toContain(
result.current.currentLoadingPhrase,
);
});
it('should reflect values when Responding', async () => {
@@ -41,14 +40,18 @@ describe('useLoadingIndicator', () => {
// Initial state before timers advance
expect(result.current.elapsedTime).toBe(0);
expect(WITTY_LOADING_PHRASES).toContain(
result.current.currentLoadingPhrase,
);
await act(async () => {
await vi.advanceTimersByTimeAsync(PHRASE_CHANGE_INTERVAL_MS + 1);
});
// Phrase should cycle if PHRASE_CHANGE_INTERVAL_MS has passed
expect(WITTY_LOADING_PHRASES).toContain(
result.current.currentLoadingPhrase,
);
});
it('should show waiting phrase and retain elapsedTime when WaitingForConfirmation', async () => {
@@ -101,7 +104,9 @@ describe('useLoadingIndicator', () => {
rerender({ streamingState: StreamingState.Responding });
});
expect(result.current.elapsedTime).toBe(0); // Should reset
expect(WITTY_LOADING_PHRASES).toContain(
result.current.currentLoadingPhrase,
);
await act(async () => {
await vi.advanceTimersByTimeAsync(1000);
@@ -125,7 +130,9 @@ describe('useLoadingIndicator', () => {
});
expect(result.current.elapsedTime).toBe(0);
expect(WITTY_LOADING_PHRASES).toContain(
result.current.currentLoadingPhrase,
);
// Timer should not advance
await act(async () => {

View File

@@ -8,17 +8,13 @@ import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
import { renderHook, act } from '@testing-library/react';
import {
usePhraseCycler,
WITTY_LOADING_PHRASES,
PHRASE_CHANGE_INTERVAL_MS,
} from './usePhraseCycler.js';
describe('usePhraseCycler', () => {
beforeEach(() => {
vi.useFakeTimers();
});
afterEach(() => {
@@ -27,7 +23,7 @@ describe('usePhraseCycler', () => {
it('should initialize with a witty phrase when not active and not waiting', () => {
const { result } = renderHook(() => usePhraseCycler(false, false));
expect(WITTY_LOADING_PHRASES).toContain(result.current);
});
it('should show "Waiting for user confirmation..." when isWaiting is true', () => {
@@ -51,30 +47,35 @@ describe('usePhraseCycler', () => {
it('should cycle through witty phrases when isActive is true and not waiting', () => {
const { result } = renderHook(() => usePhraseCycler(true, false));
// Initial phrase should be one of the witty phrases
expect(WITTY_LOADING_PHRASES).toContain(result.current);
const _initialPhrase = result.current;
act(() => {
vi.advanceTimersByTime(PHRASE_CHANGE_INTERVAL_MS);
});
// Phrase should change and be one of the witty phrases
expect(WITTY_LOADING_PHRASES).toContain(result.current);
const _secondPhrase = result.current;
act(() => {
vi.advanceTimersByTime(PHRASE_CHANGE_INTERVAL_MS);
});
expect(WITTY_LOADING_PHRASES).toContain(result.current);
});
it('should reset to a witty phrase when isActive becomes true after being false (and not waiting)', () => {
// Ensure there are at least two phrases for this test to be meaningful.
if (WITTY_LOADING_PHRASES.length < 2) {
return;
}
// Mock Math.random to make the test deterministic.
let callCount = 0;
vi.spyOn(Math, 'random').mockImplementation(() => {
// Cycle through 0, 1, 0, 1, ...
const val = callCount % 2;
callCount++;
return val / WITTY_LOADING_PHRASES.length;
});
const { result, rerender } = renderHook(
@@ -85,9 +86,9 @@ describe('usePhraseCycler', () => {
// Activate
rerender({ isActive: true, isWaiting: false });
const firstActivePhrase = result.current;
expect(WITTY_LOADING_PHRASES).toContain(firstActivePhrase);
// With our mock, this should be the first phrase.
expect(firstActivePhrase).toBe(WITTY_LOADING_PHRASES[0]);
act(() => {
vi.advanceTimersByTime(PHRASE_CHANGE_INTERVAL_MS);
@@ -95,18 +96,18 @@ describe('usePhraseCycler', () => {
// Phrase should change to the second phrase.
expect(result.current).not.toBe(firstActivePhrase);
expect(result.current).toBe(WITTY_LOADING_PHRASES[1]);
// Set to inactive - should reset to the default initial phrase
rerender({ isActive: false, isWaiting: false });
expect(WITTY_LOADING_PHRASES).toContain(result.current);
// Set back to active - should pick a random witty phrase (which our mock controls)
act(() => {
rerender({ isActive: true, isWaiting: false });
});
// The random mock will now return 0, so it should be the first phrase again.
expect(result.current).toBe(WITTY_LOADING_PHRASES[0]);
});
it('should clear phrase interval on unmount when active', () => {
@@ -147,7 +148,7 @@ describe('usePhraseCycler', () => {
rerender({ isActive: true, isWaiting: false, customPhrases: undefined });
expect(WITTY_LOADING_PHRASES).toContain(result.current);
});
it('should fall back to witty phrases if custom phrases are an empty array', () => {
@@ -163,7 +164,7 @@ describe('usePhraseCycler', () => {
},
);
expect(WITTY_LOADING_PHRASES).toContain(result.current);
});
it('should reset to a witty phrase when transitioning from waiting to active', () => {
@@ -173,13 +174,16 @@ describe('usePhraseCycler', () => {
);
const _initialPhrase = result.current;
expect(WITTY_LOADING_PHRASES).toContain(_initialPhrase);
// Cycle to a different phrase (potentially)
act(() => {
vi.advanceTimersByTime(PHRASE_CHANGE_INTERVAL_MS);
});
if (WITTY_LOADING_PHRASES.length > 1) {
// This check is probabilistic with random selection
}
expect(WITTY_LOADING_PHRASES).toContain(result.current);
// Go to waiting state
rerender({ isActive: false, isWaiting: true });
@@ -187,6 +191,6 @@ describe('usePhraseCycler', () => {
// Go back to active cycling - should pick a random witty phrase
rerender({ isActive: true, isWaiting: false });
expect(WITTY_LOADING_PHRASES).toContain(result.current);
});
});

View File

@@ -5,9 +5,139 @@
*/
import { useState, useEffect, useRef, useMemo } from 'react';
import { t } from '../../i18n/index.js';
export const WITTY_LOADING_PHRASES = [
"I'm Feeling Lucky",
'Shipping awesomeness... ',
'Painting the serifs back on...',
'Navigating the slime mold...',
'Consulting the digital spirits...',
'Reticulating splines...',
'Warming up the AI hamsters...',
'Asking the magic conch shell...',
'Generating witty retort...',
'Polishing the algorithms...',
"Don't rush perfection (or my code)...",
'Brewing fresh bytes...',
'Counting electrons...',
'Engaging cognitive processors...',
'Checking for syntax errors in the universe...',
'One moment, optimizing humor...',
'Shuffling punchlines...',
'Untangling neural nets...',
'Compiling brilliance...',
'Loading wit.exe...',
'Summoning the cloud of wisdom...',
'Preparing a witty response...',
"Just a sec, I'm debugging reality...",
'Confuzzling the options...',
'Tuning the cosmic frequencies...',
'Crafting a response worthy of your patience...',
'Compiling the 1s and 0s...',
'Resolving dependencies... and existential crises...',
'Defragmenting memories... both RAM and personal...',
'Rebooting the humor module...',
'Caching the essentials (mostly cat memes)...',
'Optimizing for ludicrous speed',
"Swapping bits... don't tell the bytes...",
'Garbage collecting... be right back...',
'Assembling the interwebs...',
'Converting coffee into code...',
'Updating the syntax for reality...',
'Rewiring the synapses...',
'Looking for a misplaced semicolon...',
"Greasin' the cogs of the machine...",
'Pre-heating the servers...',
'Calibrating the flux capacitor...',
'Engaging the improbability drive...',
'Channeling the Force...',
'Aligning the stars for optimal response...',
'So say we all...',
'Loading the next great idea...',
"Just a moment, I'm in the zone...",
'Preparing to dazzle you with brilliance...',
"Just a tick, I'm polishing my wit...",
"Hold tight, I'm crafting a masterpiece...",
"Just a jiffy, I'm debugging the universe...",
"Just a moment, I'm aligning the pixels...",
"Just a sec, I'm optimizing the humor...",
"Just a moment, I'm tuning the algorithms...",
'Warp speed engaged...',
'Mining for more Dilithium crystals...',
"Don't panic...",
'Following the white rabbit...',
'The truth is in here... somewhere...',
'Blowing on the cartridge...',
'Loading... Do a barrel roll!',
'Waiting for the respawn...',
'Finishing the Kessel Run in less than 12 parsecs...',
"The cake is not a lie, it's just still loading...",
'Fiddling with the character creation screen...',
"Just a moment, I'm finding the right meme...",
"Pressing 'A' to continue...",
'Herding digital cats...',
'Polishing the pixels...',
'Finding a suitable loading screen pun...',
'Distracting you with this witty phrase...',
'Almost there... probably...',
'Our hamsters are working as fast as they can...',
'Giving Cloudy a pat on the head...',
'Petting the cat...',
'Rickrolling my boss...',
'Never gonna give you up, never gonna let you down...',
'Slapping the bass...',
'Tasting the snozberries...',
"I'm going the distance, I'm going for speed...",
'Is this the real life? Is this just fantasy?...',
"I've got a good feeling about this...",
'Poking the bear...',
'Doing research on the latest memes...',
'Figuring out how to make this more witty...',
'Hmmm... let me think...',
'What do you call a fish with no eyes? A fsh...',
'Why did the computer go to therapy? It had too many bytes...',
"Why don't programmers like nature? It has too many bugs...",
'Why do programmers prefer dark mode? Because light attracts bugs...',
'Why did the developer go broke? Because they used up all their cache...',
"What can you do with a broken pencil? Nothing, it's pointless...",
'Applying percussive maintenance...',
'Searching for the correct USB orientation...',
'Ensuring the magic smoke stays inside the wires...',
'Rewriting in Rust for no particular reason...',
'Trying to exit Vim...',
'Spinning up the hamster wheel...',
"That's not a bug, it's an undocumented feature...",
'Engage.',
"I'll be back... with an answer.",
'My other process is a TARDIS...',
'Communing with the machine spirit...',
'Letting the thoughts marinate...',
'Just remembered where I put my keys...',
'Pondering the orb...',
"I've seen things you people wouldn't believe... like a user who reads loading messages.",
'Initiating thoughtful gaze...',
"What's a computer's favorite snack? Microchips.",
"Why do Java developers wear glasses? Because they don't C#.",
'Charging the laser... pew pew!',
'Dividing by zero... just kidding!',
'Looking for an adult superviso... I mean, processing.',
'Making it go beep boop.',
'Buffering... because even AIs need a moment.',
'Entangling quantum particles for a faster response...',
'Polishing the chrome... on the algorithms.',
'Are you not entertained? (Working on it!)',
'Summoning the code gremlins... to help, of course.',
'Just waiting for the dial-up tone to finish...',
'Recalibrating the humor-o-meter.',
'My other loading screen is even funnier.',
"Pretty sure there's a cat walking on the keyboard somewhere...",
'Enhancing... Enhancing... Still loading.',
"It's not a bug, it's a feature... of this loading screen.",
'Have you tried turning it off and on again? (The loading screen, not me.)',
'Constructing additional pylons...',
'New line? Thats Ctrl+J.',
];
export const PHRASE_CHANGE_INTERVAL_MS = 15000;
@@ -22,16 +152,14 @@ export const usePhraseCycler = (
isWaiting: boolean,
customPhrases?: string[],
) => {
// Translate all phrases at once if using default phrases
const loadingPhrases = useMemo(
() =>
customPhrases && customPhrases.length > 0
? customPhrases
: WITTY_LOADING_PHRASES.map((phrase) => t(phrase)),
[customPhrases],
);
const [currentLoadingPhrase, setCurrentLoadingPhrase] = useState(
loadingPhrases[0],

View File

@@ -1,391 +0,0 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import {
vi,
describe,
it,
expect,
beforeEach,
afterEach,
type Mock,
} from 'vitest';
import { act, renderHook } from '@testing-library/react';
import {
type Config,
type FallbackModelHandler,
UserTierId,
AuthType,
isGenericQuotaExceededError,
isProQuotaExceededError,
makeFakeConfig,
} from '@qwen-code/qwen-code-core';
import { useQuotaAndFallback } from './useQuotaAndFallback.js';
import type { UseHistoryManagerReturn } from './useHistoryManager.js';
import { AuthState, MessageType } from '../types.js';
// Mock the error checking functions from the core package to control test scenarios
vi.mock('@qwen-code/qwen-code-core', async (importOriginal) => {
const original =
await importOriginal<typeof import('@qwen-code/qwen-code-core')>();
return {
...original,
isGenericQuotaExceededError: vi.fn(),
isProQuotaExceededError: vi.fn(),
};
});
// Use a type alias for SpyInstance as it's not directly exported
type SpyInstance = ReturnType<typeof vi.spyOn>;
describe('useQuotaAndFallback', () => {
let mockConfig: Config;
let mockHistoryManager: UseHistoryManagerReturn;
let mockSetAuthState: Mock;
let mockSetModelSwitchedFromQuotaError: Mock;
let setFallbackHandlerSpy: SpyInstance;
const mockedIsGenericQuotaExceededError = isGenericQuotaExceededError as Mock;
const mockedIsProQuotaExceededError = isProQuotaExceededError as Mock;
beforeEach(() => {
mockConfig = makeFakeConfig();
// Spy on the method that requires the private field and mock its return.
// This is cleaner than modifying the config class for tests.
vi.spyOn(mockConfig, 'getContentGeneratorConfig').mockReturnValue({
model: 'test-model',
authType: AuthType.LOGIN_WITH_GOOGLE,
});
mockHistoryManager = {
addItem: vi.fn(),
history: [],
updateItem: vi.fn(),
clearItems: vi.fn(),
loadHistory: vi.fn(),
};
mockSetAuthState = vi.fn();
mockSetModelSwitchedFromQuotaError = vi.fn();
setFallbackHandlerSpy = vi.spyOn(mockConfig, 'setFallbackModelHandler');
vi.spyOn(mockConfig, 'setQuotaErrorOccurred');
mockedIsGenericQuotaExceededError.mockReturnValue(false);
mockedIsProQuotaExceededError.mockReturnValue(false);
});
afterEach(() => {
vi.clearAllMocks();
});
it('should register a fallback handler on initialization', () => {
renderHook(() =>
useQuotaAndFallback({
config: mockConfig,
historyManager: mockHistoryManager,
userTier: UserTierId.FREE,
setAuthState: mockSetAuthState,
setModelSwitchedFromQuotaError: mockSetModelSwitchedFromQuotaError,
}),
);
expect(setFallbackHandlerSpy).toHaveBeenCalledTimes(1);
expect(setFallbackHandlerSpy.mock.calls[0][0]).toBeInstanceOf(Function);
});
describe('Fallback Handler Logic', () => {
// Helper function to render the hook and extract the registered handler
const getRegisteredHandler = (
userTier: UserTierId = UserTierId.FREE,
): FallbackModelHandler => {
renderHook(
(props) =>
useQuotaAndFallback({
config: mockConfig,
historyManager: mockHistoryManager,
userTier: props.userTier,
setAuthState: mockSetAuthState,
setModelSwitchedFromQuotaError: mockSetModelSwitchedFromQuotaError,
}),
{ initialProps: { userTier } },
);
return setFallbackHandlerSpy.mock.calls[0][0] as FallbackModelHandler;
};
it('should return null and take no action if already in fallback mode', async () => {
vi.spyOn(mockConfig, 'isInFallbackMode').mockReturnValue(true);
const handler = getRegisteredHandler();
const result = await handler('gemini-pro', 'gemini-flash', new Error());
expect(result).toBeNull();
expect(mockHistoryManager.addItem).not.toHaveBeenCalled();
});
it('should return null and take no action if authType is not LOGIN_WITH_GOOGLE', async () => {
// Override the default mock from beforeEach for this specific test
vi.spyOn(mockConfig, 'getContentGeneratorConfig').mockReturnValue({
model: 'test-model',
authType: AuthType.USE_GEMINI,
});
const handler = getRegisteredHandler();
const result = await handler('gemini-pro', 'gemini-flash', new Error());
expect(result).toBeNull();
expect(mockHistoryManager.addItem).not.toHaveBeenCalled();
});
describe('Automatic Fallback Scenarios', () => {
const testCases = [
{
errorType: 'generic',
tier: UserTierId.FREE,
expectedMessageSnippets: [
'Automatically switching from model-A to model-B',
'upgrade to a Gemini Code Assist Standard or Enterprise plan',
],
},
{
errorType: 'generic',
tier: UserTierId.STANDARD, // Paid tier
expectedMessageSnippets: [
'Automatically switching from model-A to model-B',
'switch to using a paid API key from AI Studio',
],
},
{
errorType: 'other',
tier: UserTierId.FREE,
expectedMessageSnippets: [
'Automatically switching from model-A to model-B for faster responses',
'upgrade to a Gemini Code Assist Standard or Enterprise plan',
],
},
{
errorType: 'other',
tier: UserTierId.LEGACY, // Paid tier
expectedMessageSnippets: [
'Automatically switching from model-A to model-B for faster responses',
'switch to using a paid API key from AI Studio',
],
},
];
for (const { errorType, tier, expectedMessageSnippets } of testCases) {
it(`should handle ${errorType} error for ${tier} tier correctly`, async () => {
mockedIsGenericQuotaExceededError.mockReturnValue(
errorType === 'generic',
);
const handler = getRegisteredHandler(tier);
const result = await handler(
'model-A',
'model-B',
new Error('quota exceeded'),
);
// Automatic fallbacks should return 'stop'
expect(result).toBe('stop');
expect(mockHistoryManager.addItem).toHaveBeenCalledWith(
expect.objectContaining({ type: MessageType.INFO }),
expect.any(Number),
);
const message = (mockHistoryManager.addItem as Mock).mock.calls[0][0]
.text;
for (const snippet of expectedMessageSnippets) {
expect(message).toContain(snippet);
}
expect(mockSetModelSwitchedFromQuotaError).toHaveBeenCalledWith(true);
expect(mockConfig.setQuotaErrorOccurred).toHaveBeenCalledWith(true);
});
}
});
describe('Interactive Fallback (Pro Quota Error)', () => {
beforeEach(() => {
mockedIsProQuotaExceededError.mockReturnValue(true);
});
it('should set an interactive request and wait for user choice', async () => {
const { result } = renderHook(() =>
useQuotaAndFallback({
config: mockConfig,
historyManager: mockHistoryManager,
userTier: UserTierId.FREE,
setAuthState: mockSetAuthState,
setModelSwitchedFromQuotaError: mockSetModelSwitchedFromQuotaError,
}),
);
const handler = setFallbackHandlerSpy.mock
.calls[0][0] as FallbackModelHandler;
// Call the handler but do not await it, to check the intermediate state
const promise = handler(
'gemini-pro',
'gemini-flash',
new Error('pro quota'),
);
await act(async () => {});
// The hook should now have a pending request for the UI to handle
expect(result.current.proQuotaRequest).not.toBeNull();
expect(result.current.proQuotaRequest?.failedModel).toBe('gemini-pro');
// Simulate the user choosing to continue with the fallback model
act(() => {
result.current.handleProQuotaChoice('continue');
});
// The original promise from the handler should now resolve
const intent = await promise;
expect(intent).toBe('retry');
// The pending request should be cleared from the state
expect(result.current.proQuotaRequest).toBeNull();
});
it('should handle race conditions by stopping subsequent requests', async () => {
const { result } = renderHook(() =>
useQuotaAndFallback({
config: mockConfig,
historyManager: mockHistoryManager,
userTier: UserTierId.FREE,
setAuthState: mockSetAuthState,
setModelSwitchedFromQuotaError: mockSetModelSwitchedFromQuotaError,
}),
);
const handler = setFallbackHandlerSpy.mock
.calls[0][0] as FallbackModelHandler;
const promise1 = handler(
'gemini-pro',
'gemini-flash',
new Error('pro quota 1'),
);
await act(async () => {});
const firstRequest = result.current.proQuotaRequest;
expect(firstRequest).not.toBeNull();
const result2 = await handler(
'gemini-pro',
'gemini-flash',
new Error('pro quota 2'),
);
// The lock should have stopped the second request
expect(result2).toBe('stop');
expect(result.current.proQuotaRequest).toBe(firstRequest);
act(() => {
result.current.handleProQuotaChoice('continue');
});
const intent1 = await promise1;
expect(intent1).toBe('retry');
expect(result.current.proQuotaRequest).toBeNull();
});
});
});
describe('handleProQuotaChoice', () => {
beforeEach(() => {
mockedIsProQuotaExceededError.mockReturnValue(true);
});
it('should do nothing if there is no pending pro quota request', () => {
const { result } = renderHook(() =>
useQuotaAndFallback({
config: mockConfig,
historyManager: mockHistoryManager,
userTier: UserTierId.FREE,
setAuthState: mockSetAuthState,
setModelSwitchedFromQuotaError: mockSetModelSwitchedFromQuotaError,
}),
);
act(() => {
result.current.handleProQuotaChoice('auth');
});
expect(mockSetAuthState).not.toHaveBeenCalled();
expect(mockHistoryManager.addItem).not.toHaveBeenCalled();
});
it('should resolve intent to "auth" and trigger auth state update', async () => {
const { result } = renderHook(() =>
useQuotaAndFallback({
config: mockConfig,
historyManager: mockHistoryManager,
userTier: UserTierId.FREE,
setAuthState: mockSetAuthState,
setModelSwitchedFromQuotaError: mockSetModelSwitchedFromQuotaError,
}),
);
const handler = setFallbackHandlerSpy.mock
.calls[0][0] as FallbackModelHandler;
const promise = handler(
'gemini-pro',
'gemini-flash',
new Error('pro quota'),
);
await act(async () => {}); // Allow state to update
act(() => {
result.current.handleProQuotaChoice('auth');
});
const intent = await promise;
expect(intent).toBe('auth');
expect(mockSetAuthState).toHaveBeenCalledWith(AuthState.Updating);
expect(result.current.proQuotaRequest).toBeNull();
});
it('should resolve intent to "retry" and add info message on continue', async () => {
const { result } = renderHook(() =>
useQuotaAndFallback({
config: mockConfig,
historyManager: mockHistoryManager,
userTier: UserTierId.FREE,
setAuthState: mockSetAuthState,
setModelSwitchedFromQuotaError: mockSetModelSwitchedFromQuotaError,
}),
);
const handler = setFallbackHandlerSpy.mock
.calls[0][0] as FallbackModelHandler;
// The first `addItem` call is for the initial quota error message
const promise = handler(
'gemini-pro',
'gemini-flash',
new Error('pro quota'),
);
await act(async () => {}); // Allow state to update
act(() => {
result.current.handleProQuotaChoice('continue');
});
const intent = await promise;
expect(intent).toBe('retry');
expect(result.current.proQuotaRequest).toBeNull();
// Check for the second "Switched to fallback model" message
expect(mockHistoryManager.addItem).toHaveBeenCalledTimes(2);
const lastCall = (mockHistoryManager.addItem as Mock).mock.calls[1][0];
expect(lastCall.type).toBe(MessageType.INFO);
expect(lastCall.text).toContain('Switched to fallback model.');
});
});
});

View File

@@ -1,175 +0,0 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import {
AuthType,
type Config,
type FallbackModelHandler,
type FallbackIntent,
isGenericQuotaExceededError,
isProQuotaExceededError,
UserTierId,
} from '@qwen-code/qwen-code-core';
import { useCallback, useEffect, useRef, useState } from 'react';
import { type UseHistoryManagerReturn } from './useHistoryManager.js';
import { AuthState, MessageType } from '../types.js';
import { type ProQuotaDialogRequest } from '../contexts/UIStateContext.js';
interface UseQuotaAndFallbackArgs {
config: Config;
historyManager: UseHistoryManagerReturn;
userTier: UserTierId | undefined;
setAuthState: (state: AuthState) => void;
setModelSwitchedFromQuotaError: (value: boolean) => void;
}
export function useQuotaAndFallback({
config,
historyManager,
userTier,
setAuthState,
setModelSwitchedFromQuotaError,
}: UseQuotaAndFallbackArgs) {
const [proQuotaRequest, setProQuotaRequest] =
useState<ProQuotaDialogRequest | null>(null);
const isDialogPending = useRef(false);
// Set up Flash fallback handler
useEffect(() => {
const fallbackHandler: FallbackModelHandler = async (
failedModel,
fallbackModel,
error,
): Promise<FallbackIntent | null> => {
if (config.isInFallbackMode()) {
return null;
}
// Fallbacks are currently only handled for OAuth users.
const contentGeneratorConfig = config.getContentGeneratorConfig();
if (
!contentGeneratorConfig ||
contentGeneratorConfig.authType !== AuthType.LOGIN_WITH_GOOGLE
) {
return null;
}
// Use actual user tier if available; otherwise, default to FREE tier behavior (safe default)
const isPaidTier =
userTier === UserTierId.LEGACY || userTier === UserTierId.STANDARD;
let message: string;
if (error && isProQuotaExceededError(error)) {
// Pro Quota specific messages (Interactive)
if (isPaidTier) {
message = `⚡ You have reached your daily ${failedModel} quota limit.
⚡ You can choose to authenticate with a paid API key or continue with the fallback model.
⚡ To continue accessing the ${failedModel} model today, consider using /auth to switch to using a paid API key from AI Studio at https://aistudio.google.com/apikey`;
} else {
message = `⚡ You have reached your daily ${failedModel} quota limit.
⚡ You can choose to authenticate with a paid API key or continue with the fallback model.
⚡ To increase your limits, upgrade to a Gemini Code Assist Standard or Enterprise plan with higher limits at https://goo.gle/set-up-gemini-code-assist
⚡ Or you can utilize a Gemini API Key. See: https://goo.gle/gemini-cli-docs-auth#gemini-api-key
⚡ You can switch authentication methods by typing /auth`;
}
} else if (error && isGenericQuotaExceededError(error)) {
// Generic Quota (Automatic fallback)
const actionMessage = `⚡ You have reached your daily quota limit.\n⚡ Automatically switching from ${failedModel} to ${fallbackModel} for the remainder of this session.`;
if (isPaidTier) {
message = `${actionMessage}
⚡ To continue accessing the ${failedModel} model today, consider using /auth to switch to using a paid API key from AI Studio at https://aistudio.google.com/apikey`;
} else {
message = `${actionMessage}
⚡ To increase your limits, upgrade to a Gemini Code Assist Standard or Enterprise plan with higher limits at https://goo.gle/set-up-gemini-code-assist
⚡ Or you can utilize a Gemini API Key. See: https://goo.gle/gemini-cli-docs-auth#gemini-api-key
⚡ You can switch authentication methods by typing /auth`;
}
} else {
// Consecutive 429s or other errors (Automatic fallback)
const actionMessage = `⚡ Automatically switching from ${failedModel} to ${fallbackModel} for faster responses for the remainder of this session.`;
if (isPaidTier) {
message = `${actionMessage}
⚡ Possible reasons for this are that you have received multiple consecutive capacity errors or you have reached your daily ${failedModel} quota limit
⚡ To continue accessing the ${failedModel} model today, consider using /auth to switch to using a paid API key from AI Studio at https://aistudio.google.com/apikey`;
} else {
message = `${actionMessage}
⚡ Possible reasons for this are that you have received multiple consecutive capacity errors or you have reached your daily ${failedModel} quota limit
⚡ To increase your limits, upgrade to a Gemini Code Assist Standard or Enterprise plan with higher limits at https://goo.gle/set-up-gemini-code-assist
⚡ Or you can utilize a Gemini API Key. See: https://goo.gle/gemini-cli-docs-auth#gemini-api-key
⚡ You can switch authentication methods by typing /auth`;
}
}
// Add message to UI history
historyManager.addItem(
{
type: MessageType.INFO,
text: message,
},
Date.now(),
);
setModelSwitchedFromQuotaError(true);
config.setQuotaErrorOccurred(true);
// Interactive Fallback for Pro quota
if (error && isProQuotaExceededError(error)) {
if (isDialogPending.current) {
return 'stop'; // A dialog is already active, so just stop this request.
}
isDialogPending.current = true;
const intent: FallbackIntent = await new Promise<FallbackIntent>(
(resolve) => {
setProQuotaRequest({
failedModel,
fallbackModel,
resolve,
});
},
);
return intent;
}
return 'stop';
};
config.setFallbackModelHandler(fallbackHandler);
}, [config, historyManager, userTier, setModelSwitchedFromQuotaError]);
const handleProQuotaChoice = useCallback(
(choice: 'auth' | 'continue') => {
if (!proQuotaRequest) return;
const intent: FallbackIntent = choice === 'auth' ? 'auth' : 'retry';
proQuotaRequest.resolve(intent);
setProQuotaRequest(null);
isDialogPending.current = false; // Reset the flag here
if (choice === 'auth') {
setAuthState(AuthState.Updating);
} else {
historyManager.addItem(
{
type: MessageType.INFO,
text: 'Switched to fallback model. Tip: Press Ctrl+P (or Up Arrow) to recall your previous prompt and submit it again if you wish.',
},
Date.now(),
);
}
},
[proQuotaRequest, setAuthState, historyManager],
);
return {
proQuotaRequest,
handleProQuotaChoice,
};
}

View File

@@ -411,7 +411,7 @@ describe('useQwenAuth', () => {
expect(geminiResult.current.qwenAuthState.authStatus).toBe('idle');
const { result: oauthResult } = renderHook(() =>
useQwenAuth(AuthType.USE_OPENAI, true),
);
expect(oauthResult.current.qwenAuthState.authStatus).toBe('idle');
});

View File

@@ -62,7 +62,7 @@ const mockConfig = {
getAllowedTools: vi.fn(() => []),
getContentGeneratorConfig: () => ({
model: 'test-model',
authType: 'gemini-api-key',
}),
getUseSmartEdit: () => false,
getUseModelRouter: () => false,

View File

@@ -60,6 +60,11 @@ export function getOpenAIAvailableModelFromEnv(): AvailableModel | null {
return id ? { id, label: id } : null;
}
export function getAnthropicAvailableModelFromEnv(): AvailableModel | null {
const id = process.env['ANTHROPIC_MODEL']?.trim();
return id ? { id, label: id } : null;
}
export function getAvailableModelsForAuthType(
authType: AuthType,
): AvailableModel[] {
@@ -70,6 +75,10 @@ export function getAvailableModelsForAuthType(
const openAIModel = getOpenAIAvailableModelFromEnv();
return openAIModel ? [openAIModel] : [];
}
case AuthType.USE_ANTHROPIC: {
const anthropicModel = getAnthropicAvailableModelFromEnv();
return anthropicModel ? [anthropicModel] : [];
}
default:
// For other auth types, return empty array for now
// This can be expanded later according to the design doc

View File

@@ -20,6 +20,11 @@ const makeConfig = (tools: Record<string, AnyDeclarativeTool>) =>
getToolRegistry: () => ({
getTool: (name: string) => tools[name],
}),
getContentGenerator: () => ({
// Default to showing full thinking content during resume unless explicitly
// summarized; tests don't care about summarized thinking behavior.
useSummarizedThinking: () => false,
}),
}) as unknown as Config;
describe('resumeHistoryUtils', () => {

View File

@@ -204,7 +204,11 @@ function convertToHistoryItems(
const parts = record.message?.parts as Part[] | undefined;
// Extract thought content
const thoughtText = !config
.getContentGenerator()
.useSummarizedThinking()
? extractThoughtTextFromParts(parts)
: '';
// Extract text content (non-function-call, non-thought)
const text = extractTextFromParts(parts);

View File

@@ -35,22 +35,33 @@ import {
} from './nonInteractiveHelpers.js';
// Mock dependencies
vi.mock('../nonInteractiveCliCommands.js', () => ({
getAvailableCommands: vi
.fn()
.mockImplementation(
async (
_config: unknown,
_signal: AbortSignal,
allowedBuiltinCommandNames?: string[],
) => {
const allowedSet = new Set(allowedBuiltinCommandNames ?? []);
const allCommands = [
{ name: 'help', kind: 'built-in' },
{ name: 'commit', kind: 'file' },
{ name: 'memory', kind: 'built-in' },
{ name: 'init', kind: 'built-in' },
{ name: 'summary', kind: 'built-in' },
{ name: 'compress', kind: 'built-in' },
];
// Filter commands: always include file commands, only include allowed built-in commands
return allCommands.filter(
(cmd) =>
cmd.kind === 'file' ||
(cmd.kind === 'built-in' && allowedSet.has(cmd.name)),
);
},
),
}));
vi.mock('../ui/utils/computeStats.js', () => ({
@@ -511,10 +522,12 @@ describe('buildSystemMessage', () => {
});
it('should build system message with all fields', async () => {
const allowedBuiltinCommands = ['init', 'summary', 'compress'];
const result = await buildSystemMessage(
mockConfig,
'test-session-id',
'auto' as PermissionMode,
allowedBuiltinCommands,
);
expect(result).toEqual({
@@ -530,7 +543,7 @@ describe('buildSystemMessage', () => {
],
model: 'test-model',
permission_mode: 'auto',
slash_commands: ['commit', 'compress', 'init', 'summary'],
qwen_code_version: '1.0.0',
agents: [],
});
@@ -546,6 +559,7 @@ describe('buildSystemMessage', () => {
config,
'test-session-id',
'auto' as PermissionMode,
['init', 'summary'],
);
expect(result.tools).toEqual([]);
@@ -561,6 +575,7 @@ describe('buildSystemMessage', () => {
config,
'test-session-id',
'auto' as PermissionMode,
['init', 'summary'],
);
expect(result.mcp_servers).toEqual([]);
@@ -576,10 +591,37 @@ describe('buildSystemMessage', () => {
config,
'test-session-id',
'auto' as PermissionMode,
['init', 'summary'],
);
expect(result.qwen_code_version).toBe('unknown');
});
it('should only include allowed built-in commands and all file commands', async () => {
const allowedBuiltinCommands = ['init', 'summary'];
const result = await buildSystemMessage(
mockConfig,
'test-session-id',
'auto' as PermissionMode,
allowedBuiltinCommands,
);
// Should include: 'commit' (FILE), 'init' (BUILT_IN, allowed), 'summary' (BUILT_IN, allowed)
// Should NOT include: 'help', 'memory', 'compress' (BUILT_IN but not in allowed set)
expect(result.slash_commands).toEqual(['commit', 'init', 'summary']);
});
it('should include only file commands when no built-in commands are allowed', async () => {
const result = await buildSystemMessage(
mockConfig,
'test-session-id',
'auto' as PermissionMode,
[], // Empty array - no built-in commands allowed
);
// Should only include 'commit' (FILE command)
expect(result.slash_commands).toEqual(['commit']);
});
});
describe('createTaskToolProgressHandler', () => {

View File

@@ -25,10 +25,9 @@ import type {
PermissionMode,
CLISystemMessage,
} from '../nonInteractive/types.js';
import type { JsonOutputAdapterInterface } from '../nonInteractive/io/BaseJsonOutputAdapter.js';
import { computeSessionStats } from '../ui/utils/computeStats.js';
import { getAvailableCommands } from '../nonInteractiveCliCommands.js';
/**
* Normalizes various part list formats into a consistent Part[] array.
@@ -187,24 +186,27 @@ export function computeUsageFromMetrics(metrics: SessionMetrics): Usage {
}
/**
* Load slash command names using getAvailableCommands
*
* @param config - Config instance
* @param allowedBuiltinCommandNames - Optional array of allowed built-in command names.
* If not provided, uses the default from getAvailableCommands.
* @returns Promise resolving to array of slash command names
*/
async function loadSlashCommandNames(
config: Config,
allowedBuiltinCommandNames?: string[],
): Promise<string[]> {
const controller = new AbortController();
try {
const commands = await getAvailableCommands(
config,
controller.signal,
allowedBuiltinCommandNames,
);
// Extract command names and sort
return commands.map((cmd) => cmd.name).sort();
} catch (error) {
if (config.getDebugMode()) {
console.error(
@@ -233,12 +235,15 @@ async function loadSlashCommandNames(config: Config): Promise<string[]> {
* @param config - Config instance
* @param sessionId - Session identifier
* @param permissionMode - Current permission/approval mode
* @param allowedBuiltinCommandNames - Optional array of allowed built-in command names.
* If not provided, defaults to empty array (only file commands will be included).
* @returns Promise resolving to CLISystemMessage
*/
export async function buildSystemMessage(
config: Config,
sessionId: string,
permissionMode: PermissionMode,
allowedBuiltinCommandNames?: string[],
): Promise<CLISystemMessage> {
const toolRegistry = config.getToolRegistry();
const tools = toolRegistry ? toolRegistry.getAllToolNames() : [];
@@ -251,8 +256,11 @@ export async function buildSystemMessage(
}))
: [];
// Load slash commands with filtering based on allowed built-in commands
const slashCommands = await loadSlashCommandNames(
config,
allowedBuiltinCommandNames,
);
// Load subagent names from config
let agentNames: string[] = [];
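A usage sketch of the new allow-list parameter (the session id and allow list here are placeholders, not values from the diff): built-in commands are advertised only if explicitly named, while file-based commands always pass the filter, matching the tests above:
// Hypothetical call site: only the /init and /summary built-ins are advertised,
// plus whatever file-based commands exist in the project.
const systemMessage = await buildSystemMessage(
  config,
  'session-123',
  'auto' as PermissionMode,
  ['init', 'summary'],
);
// systemMessage.slash_commands now holds the filtered, sorted name list.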

View File

@@ -153,7 +153,8 @@ export async function getExtendedSystemInfo(
// Get base URL if using OpenAI auth
const baseUrl =
baseInfo.selectedAuthType === AuthType.USE_OPENAI ||
baseInfo.selectedAuthType === AuthType.USE_ANTHROPIC
? context.services.config?.getContentGeneratorConfig()?.baseUrl
: undefined;

View File

@@ -19,6 +19,9 @@ describe('validateNonInterActiveAuth', () => {
let originalEnvVertexAi: string | undefined;
let originalEnvGcp: string | undefined;
let originalEnvOpenAiApiKey: string | undefined;
let originalEnvQwenOauth: string | undefined;
let originalEnvGoogleApiKey: string | undefined;
let originalEnvAnthropicApiKey: string | undefined;
let consoleErrorSpy: ReturnType<typeof vi.spyOn>;
let processExitSpy: ReturnType<typeof vi.spyOn<[code?: number], never>>;
let refreshAuthMock: ReturnType<typeof vi.fn>;
@@ -29,10 +32,16 @@ describe('validateNonInterActiveAuth', () => {
originalEnvVertexAi = process.env['GOOGLE_GENAI_USE_VERTEXAI'];
originalEnvGcp = process.env['GOOGLE_GENAI_USE_GCA'];
originalEnvOpenAiApiKey = process.env['OPENAI_API_KEY'];
originalEnvQwenOauth = process.env['QWEN_OAUTH'];
originalEnvGoogleApiKey = process.env['GOOGLE_API_KEY'];
originalEnvAnthropicApiKey = process.env['ANTHROPIC_API_KEY'];
delete process.env['GEMINI_API_KEY'];
delete process.env['GOOGLE_GENAI_USE_VERTEXAI'];
delete process.env['GOOGLE_GENAI_USE_GCA'];
delete process.env['OPENAI_API_KEY'];
delete process.env['QWEN_OAUTH'];
delete process.env['GOOGLE_API_KEY'];
delete process.env['ANTHROPIC_API_KEY'];
consoleErrorSpy = vi.spyOn(console, 'error').mockImplementation(() => {});
processExitSpy = vi.spyOn(process, 'exit').mockImplementation((code) => {
throw new Error(`process.exit(${code}) called`);
@@ -80,6 +89,21 @@ describe('validateNonInterActiveAuth', () => {
} else {
delete process.env['OPENAI_API_KEY'];
}
if (originalEnvQwenOauth !== undefined) {
process.env['QWEN_OAUTH'] = originalEnvQwenOauth;
} else {
delete process.env['QWEN_OAUTH'];
}
if (originalEnvGoogleApiKey !== undefined) {
process.env['GOOGLE_API_KEY'] = originalEnvGoogleApiKey;
} else {
delete process.env['GOOGLE_API_KEY'];
}
if (originalEnvAnthropicApiKey !== undefined) {
process.env['ANTHROPIC_API_KEY'] = originalEnvAnthropicApiKey;
} else {
delete process.env['ANTHROPIC_API_KEY'];
}
vi.restoreAllMocks();
});

View File

@@ -21,6 +21,16 @@ function getAuthTypeFromEnv(): AuthType | undefined {
return AuthType.QWEN_OAUTH;
}
if (process.env['GEMINI_API_KEY']) {
return AuthType.USE_GEMINI;
}
if (process.env['GOOGLE_API_KEY']) {
return AuthType.USE_VERTEX_AI;
}
if (process.env['ANTHROPIC_API_KEY']) {
return AuthType.USE_ANTHROPIC;
}
return undefined;
}
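Because getAuthTypeFromEnv checks the variables in order, the environment forms a precedence chain: any checks above this hunk (such as the Qwen OAuth branch visible at its top) run first, then GEMINI_API_KEY, GOOGLE_API_KEY, and finally ANTHROPIC_API_KEY. A hedged sketch of the resulting behavior, with placeholder values:
// Only ANTHROPIC_API_KEY set: the new branch resolves to the Anthropic provider.
process.env['ANTHROPIC_API_KEY'] = 'sk-ant-placeholder';
// getAuthTypeFromEnv() === AuthType.USE_ANTHROPIC
// If GEMINI_API_KEY is also set, the earlier check wins instead:
process.env['GEMINI_API_KEY'] = 'gemini-placeholder';
// getAuthTypeFromEnv() === AuthType.USE_GEMINI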

View File

@@ -23,8 +23,9 @@
"scripts/postinstall.js"
],
"dependencies": {
"@google/genai": "1.16.0",
"@modelcontextprotocol/sdk": "^1.11.0",
"@anthropic-ai/sdk": "^0.36.1",
"@google/genai": "1.30.0",
"@modelcontextprotocol/sdk": "^1.25.1",
"@opentelemetry/api": "^1.9.0",
"async-mutex": "^0.5.0",
"@opentelemetry/exporter-logs-otlp-grpc": "^0.203.0",
@@ -34,7 +35,6 @@
"@opentelemetry/exporter-trace-otlp-grpc": "^0.203.0",
"@opentelemetry/exporter-trace-otlp-http": "^0.203.0",
"@opentelemetry/instrumentation-http": "^0.203.0",
"@opentelemetry/resource-detector-gcp": "^0.40.0",
"@opentelemetry/sdk-node": "^0.203.0",
"@types/html-to-text": "^9.0.4",
"@xterm/headless": "5.5.0",
@@ -48,7 +48,7 @@
"fdir": "^6.4.6",
"fzf": "^0.5.2",
"glob": "^10.5.0",
"google-auth-library": "^9.11.0",
"google-auth-library": "^10.5.0",
"html-to-text": "^9.0.5",
"https-proxy-agent": "^7.0.6",
"ignore": "^7.0.0",

View File

@@ -1,54 +0,0 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import type { ContentGenerator } from '../core/contentGenerator.js';
import { AuthType } from '../core/contentGenerator.js';
import { getOauthClient } from './oauth2.js';
import { setupUser } from './setup.js';
import type { HttpOptions } from './server.js';
import { CodeAssistServer } from './server.js';
import type { Config } from '../config/config.js';
import { LoggingContentGenerator } from '../core/loggingContentGenerator.js';
export async function createCodeAssistContentGenerator(
httpOptions: HttpOptions,
authType: AuthType,
config: Config,
sessionId?: string,
): Promise<ContentGenerator> {
if (
authType === AuthType.LOGIN_WITH_GOOGLE ||
authType === AuthType.CLOUD_SHELL
) {
const authClient = await getOauthClient(authType, config);
const userData = await setupUser(authClient);
return new CodeAssistServer(
authClient,
userData.projectId,
httpOptions,
sessionId,
userData.userTier,
);
}
throw new Error(`Unsupported authType: ${authType}`);
}
export function getCodeAssistServer(
config: Config,
): CodeAssistServer | undefined {
let server = config.getContentGenerator();
// Unwrap LoggingContentGenerator if present
if (server instanceof LoggingContentGenerator) {
server = server.getWrapped();
}
if (!(server instanceof CodeAssistServer)) {
return undefined;
}
return server;
}

View File

@@ -1,456 +0,0 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { describe, it, expect } from 'vitest';
import type { CaGenerateContentResponse } from './converter.js';
import {
toGenerateContentRequest,
fromGenerateContentResponse,
toContents,
} from './converter.js';
import type {
ContentListUnion,
GenerateContentParameters,
} from '@google/genai';
import {
GenerateContentResponse,
FinishReason,
BlockedReason,
type Part,
} from '@google/genai';
describe('converter', () => {
describe('toCodeAssistRequest', () => {
it('should convert a simple request with project', () => {
const genaiReq: GenerateContentParameters = {
model: 'gemini-pro',
contents: [{ role: 'user', parts: [{ text: 'Hello' }] }],
};
const codeAssistReq = toGenerateContentRequest(
genaiReq,
'my-prompt',
'my-project',
'my-session',
);
expect(codeAssistReq).toEqual({
model: 'gemini-pro',
project: 'my-project',
request: {
contents: [{ role: 'user', parts: [{ text: 'Hello' }] }],
systemInstruction: undefined,
cachedContent: undefined,
tools: undefined,
toolConfig: undefined,
labels: undefined,
safetySettings: undefined,
generationConfig: undefined,
session_id: 'my-session',
},
user_prompt_id: 'my-prompt',
});
});
it('should convert a request without a project', () => {
const genaiReq: GenerateContentParameters = {
model: 'gemini-pro',
contents: [{ role: 'user', parts: [{ text: 'Hello' }] }],
};
const codeAssistReq = toGenerateContentRequest(
genaiReq,
'my-prompt',
undefined,
'my-session',
);
expect(codeAssistReq).toEqual({
model: 'gemini-pro',
project: undefined,
request: {
contents: [{ role: 'user', parts: [{ text: 'Hello' }] }],
systemInstruction: undefined,
cachedContent: undefined,
tools: undefined,
toolConfig: undefined,
labels: undefined,
safetySettings: undefined,
generationConfig: undefined,
session_id: 'my-session',
},
user_prompt_id: 'my-prompt',
});
});
it('should convert a request with sessionId', () => {
const genaiReq: GenerateContentParameters = {
model: 'gemini-pro',
contents: [{ role: 'user', parts: [{ text: 'Hello' }] }],
};
const codeAssistReq = toGenerateContentRequest(
genaiReq,
'my-prompt',
'my-project',
'session-123',
);
expect(codeAssistReq).toEqual({
model: 'gemini-pro',
project: 'my-project',
request: {
contents: [{ role: 'user', parts: [{ text: 'Hello' }] }],
systemInstruction: undefined,
cachedContent: undefined,
tools: undefined,
toolConfig: undefined,
labels: undefined,
safetySettings: undefined,
generationConfig: undefined,
session_id: 'session-123',
},
user_prompt_id: 'my-prompt',
});
});
it('should handle string content', () => {
const genaiReq: GenerateContentParameters = {
model: 'gemini-pro',
contents: 'Hello',
};
const codeAssistReq = toGenerateContentRequest(
genaiReq,
'my-prompt',
'my-project',
'my-session',
);
expect(codeAssistReq.request.contents).toEqual([
{ role: 'user', parts: [{ text: 'Hello' }] },
]);
});
it('should handle Part[] content', () => {
const genaiReq: GenerateContentParameters = {
model: 'gemini-pro',
contents: [{ text: 'Hello' }, { text: 'World' }],
};
const codeAssistReq = toGenerateContentRequest(
genaiReq,
'my-prompt',
'my-project',
'my-session',
);
expect(codeAssistReq.request.contents).toEqual([
{ role: 'user', parts: [{ text: 'Hello' }] },
{ role: 'user', parts: [{ text: 'World' }] },
]);
});
it('should handle system instructions', () => {
const genaiReq: GenerateContentParameters = {
model: 'gemini-pro',
contents: 'Hello',
config: {
systemInstruction: 'You are a helpful assistant.',
},
};
const codeAssistReq = toGenerateContentRequest(
genaiReq,
'my-prompt',
'my-project',
'my-session',
);
expect(codeAssistReq.request.systemInstruction).toEqual({
role: 'user',
parts: [{ text: 'You are a helpful assistant.' }],
});
});
it('should handle generation config', () => {
const genaiReq: GenerateContentParameters = {
model: 'gemini-pro',
contents: 'Hello',
config: {
temperature: 0.8,
topK: 40,
},
};
const codeAssistReq = toGenerateContentRequest(
genaiReq,
'my-prompt',
'my-project',
'my-session',
);
expect(codeAssistReq.request.generationConfig).toEqual({
temperature: 0.8,
topK: 40,
});
});
it('should handle all generation config fields', () => {
const genaiReq: GenerateContentParameters = {
model: 'gemini-pro',
contents: 'Hello',
config: {
temperature: 0.1,
topP: 0.2,
topK: 3,
candidateCount: 4,
maxOutputTokens: 5,
stopSequences: ['a'],
responseLogprobs: true,
logprobs: 6,
presencePenalty: 0.7,
frequencyPenalty: 0.8,
seed: 9,
responseMimeType: 'application/json',
},
};
const codeAssistReq = toGenerateContentRequest(
genaiReq,
'my-prompt',
'my-project',
'my-session',
);
expect(codeAssistReq.request.generationConfig).toEqual({
temperature: 0.1,
topP: 0.2,
topK: 3,
candidateCount: 4,
maxOutputTokens: 5,
stopSequences: ['a'],
responseLogprobs: true,
logprobs: 6,
presencePenalty: 0.7,
frequencyPenalty: 0.8,
seed: 9,
responseMimeType: 'application/json',
});
});
});
describe('fromCodeAssistResponse', () => {
it('should convert a simple response', () => {
const codeAssistRes: CaGenerateContentResponse = {
response: {
candidates: [
{
index: 0,
content: {
role: 'model',
parts: [{ text: 'Hi there!' }],
},
finishReason: FinishReason.STOP,
safetyRatings: [],
},
],
},
};
const genaiRes = fromGenerateContentResponse(codeAssistRes);
expect(genaiRes).toBeInstanceOf(GenerateContentResponse);
expect(genaiRes.candidates).toEqual(codeAssistRes.response.candidates);
});
it('should handle prompt feedback and usage metadata', () => {
const codeAssistRes: CaGenerateContentResponse = {
response: {
candidates: [],
promptFeedback: {
blockReason: BlockedReason.SAFETY,
safetyRatings: [],
},
usageMetadata: {
promptTokenCount: 10,
candidatesTokenCount: 20,
totalTokenCount: 30,
},
},
};
const genaiRes = fromGenerateContentResponse(codeAssistRes);
expect(genaiRes.promptFeedback).toEqual(
codeAssistRes.response.promptFeedback,
);
expect(genaiRes.usageMetadata).toEqual(
codeAssistRes.response.usageMetadata,
);
});
it('should handle automatic function calling history', () => {
const codeAssistRes: CaGenerateContentResponse = {
response: {
candidates: [],
automaticFunctionCallingHistory: [
{
role: 'model',
parts: [
{
functionCall: {
name: 'test_function',
args: {
foo: 'bar',
},
},
},
],
},
],
},
};
const genaiRes = fromGenerateContentResponse(codeAssistRes);
expect(genaiRes.automaticFunctionCallingHistory).toEqual(
codeAssistRes.response.automaticFunctionCallingHistory,
);
});
it('should handle modelVersion', () => {
const codeAssistRes: CaGenerateContentResponse = {
response: {
candidates: [],
modelVersion: 'qwen3-coder-plus',
},
};
const genaiRes = fromGenerateContentResponse(codeAssistRes);
expect(genaiRes.modelVersion).toEqual('qwen3-coder-plus');
});
});
describe('toContents', () => {
it('should handle Content', () => {
const content: ContentListUnion = {
role: 'user',
parts: [{ text: 'hello' }],
};
expect(toContents(content)).toEqual([
{ role: 'user', parts: [{ text: 'hello' }] },
]);
});
it('should handle array of Contents', () => {
const contents: ContentListUnion = [
{ role: 'user', parts: [{ text: 'hello' }] },
{ role: 'model', parts: [{ text: 'hi' }] },
];
expect(toContents(contents)).toEqual([
{ role: 'user', parts: [{ text: 'hello' }] },
{ role: 'model', parts: [{ text: 'hi' }] },
]);
});
it('should handle Part', () => {
const part: ContentListUnion = { text: 'a part' };
expect(toContents(part)).toEqual([
{ role: 'user', parts: [{ text: 'a part' }] },
]);
});
it('should handle array of Parts', () => {
const parts = [{ text: 'part 1' }, 'part 2'];
expect(toContents(parts)).toEqual([
{ role: 'user', parts: [{ text: 'part 1' }] },
{ role: 'user', parts: [{ text: 'part 2' }] },
]);
});
it('should handle string', () => {
const str: ContentListUnion = 'a string';
expect(toContents(str)).toEqual([
{ role: 'user', parts: [{ text: 'a string' }] },
]);
});
it('should handle array of strings', () => {
const strings: ContentListUnion = ['string 1', 'string 2'];
expect(toContents(strings)).toEqual([
{ role: 'user', parts: [{ text: 'string 1' }] },
{ role: 'user', parts: [{ text: 'string 2' }] },
]);
});
it('should convert thought parts to text parts for API compatibility', () => {
const contentWithThought: ContentListUnion = {
role: 'model',
parts: [
{ text: 'regular text' },
{ thought: 'thinking about the problem' } as Part & {
thought: string;
},
{ text: 'more text' },
],
};
expect(toContents(contentWithThought)).toEqual([
{
role: 'model',
parts: [
{ text: 'regular text' },
{ text: '[Thought: thinking about the problem]' },
{ text: 'more text' },
],
},
]);
});
it('should combine text and thought for text parts with thoughts', () => {
const contentWithTextAndThought: ContentListUnion = {
role: 'model',
parts: [
{
text: 'Here is my response',
thought: 'I need to be careful here',
} as Part & { thought: string },
],
};
expect(toContents(contentWithTextAndThought)).toEqual([
{
role: 'model',
parts: [
{
text: 'Here is my response\n[Thought: I need to be careful here]',
},
],
},
]);
});
it('should preserve non-thought properties while removing thought', () => {
const contentWithComplexPart: ContentListUnion = {
role: 'model',
parts: [
{
functionCall: { name: 'calculate', args: { x: 5, y: 10 } },
thought: 'Performing calculation',
} as Part & { thought: string },
],
};
expect(toContents(contentWithComplexPart)).toEqual([
{
role: 'model',
parts: [
{
functionCall: { name: 'calculate', args: { x: 5, y: 10 } },
},
],
},
]);
});
it('should convert invalid text content to valid text part with thought', () => {
const contentWithInvalidText: ContentListUnion = {
role: 'model',
parts: [
{
text: 123, // Invalid - should be string
thought: 'Processing number',
} as Part & { thought: string; text: number },
],
};
expect(toContents(contentWithInvalidText)).toEqual([
{
role: 'model',
parts: [
{
text: '123\n[Thought: Processing number]',
},
],
},
]);
});
});
});

View File

@@ -1,285 +0,0 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import type {
Content,
ContentListUnion,
ContentUnion,
GenerateContentConfig,
GenerateContentParameters,
CountTokensParameters,
CountTokensResponse,
GenerationConfigRoutingConfig,
MediaResolution,
Candidate,
ModelSelectionConfig,
GenerateContentResponsePromptFeedback,
GenerateContentResponseUsageMetadata,
Part,
SafetySetting,
PartUnion,
SpeechConfigUnion,
ThinkingConfig,
ToolListUnion,
ToolConfig,
} from '@google/genai';
import { GenerateContentResponse } from '@google/genai';
export interface CAGenerateContentRequest {
model: string;
project?: string;
user_prompt_id?: string;
request: VertexGenerateContentRequest;
}
interface VertexGenerateContentRequest {
contents: Content[];
systemInstruction?: Content;
cachedContent?: string;
tools?: ToolListUnion;
toolConfig?: ToolConfig;
labels?: Record<string, string>;
safetySettings?: SafetySetting[];
generationConfig?: VertexGenerationConfig;
session_id?: string;
}
interface VertexGenerationConfig {
temperature?: number;
topP?: number;
topK?: number;
candidateCount?: number;
maxOutputTokens?: number;
stopSequences?: string[];
responseLogprobs?: boolean;
logprobs?: number;
presencePenalty?: number;
frequencyPenalty?: number;
seed?: number;
responseMimeType?: string;
responseJsonSchema?: unknown;
responseSchema?: unknown;
routingConfig?: GenerationConfigRoutingConfig;
modelSelectionConfig?: ModelSelectionConfig;
responseModalities?: string[];
mediaResolution?: MediaResolution;
speechConfig?: SpeechConfigUnion;
audioTimestamp?: boolean;
thinkingConfig?: ThinkingConfig;
}
export interface CaGenerateContentResponse {
response: VertexGenerateContentResponse;
}
interface VertexGenerateContentResponse {
candidates: Candidate[];
automaticFunctionCallingHistory?: Content[];
promptFeedback?: GenerateContentResponsePromptFeedback;
usageMetadata?: GenerateContentResponseUsageMetadata;
modelVersion?: string;
}
export interface CaCountTokenRequest {
request: VertexCountTokenRequest;
}
interface VertexCountTokenRequest {
model: string;
contents: Content[];
}
export interface CaCountTokenResponse {
totalTokens: number;
}
export function toCountTokenRequest(
req: CountTokensParameters,
): CaCountTokenRequest {
return {
request: {
model: 'models/' + req.model,
contents: toContents(req.contents),
},
};
}
export function fromCountTokenResponse(
res: CaCountTokenResponse,
): CountTokensResponse {
return {
totalTokens: res.totalTokens,
};
}
export function toGenerateContentRequest(
req: GenerateContentParameters,
userPromptId: string,
project?: string,
sessionId?: string,
): CAGenerateContentRequest {
return {
model: req.model,
project,
user_prompt_id: userPromptId,
request: toVertexGenerateContentRequest(req, sessionId),
};
}
export function fromGenerateContentResponse(
res: CaGenerateContentResponse,
): GenerateContentResponse {
const inres = res.response;
const out = new GenerateContentResponse();
out.candidates = inres.candidates;
out.automaticFunctionCallingHistory = inres.automaticFunctionCallingHistory;
out.promptFeedback = inres.promptFeedback;
out.usageMetadata = inres.usageMetadata;
out.modelVersion = inres.modelVersion;
return out;
}
function toVertexGenerateContentRequest(
req: GenerateContentParameters,
sessionId?: string,
): VertexGenerateContentRequest {
return {
contents: toContents(req.contents),
systemInstruction: maybeToContent(req.config?.systemInstruction),
cachedContent: req.config?.cachedContent,
tools: req.config?.tools,
toolConfig: req.config?.toolConfig,
labels: req.config?.labels,
safetySettings: req.config?.safetySettings,
generationConfig: toVertexGenerationConfig(req.config),
session_id: sessionId,
};
}
export function toContents(contents: ContentListUnion): Content[] {
if (Array.isArray(contents)) {
// it's a Content[] or a PartsUnion[]
return contents.map(toContent);
}
// it's a Content or a PartsUnion
return [toContent(contents)];
}
function maybeToContent(content?: ContentUnion): Content | undefined {
if (!content) {
return undefined;
}
return toContent(content);
}
function toContent(content: ContentUnion): Content {
if (Array.isArray(content)) {
// it's a PartsUnion[]
return {
role: 'user',
parts: toParts(content),
};
}
if (typeof content === 'string') {
// it's a string
return {
role: 'user',
parts: [{ text: content }],
};
}
if ('parts' in content) {
// it's a Content - process parts to handle thought filtering
return {
...content,
parts: content.parts
? toParts(content.parts.filter((p) => p != null))
: [],
};
}
// it's a Part
return {
role: 'user',
parts: [toPart(content as Part)],
};
}
export function toParts(parts: PartUnion[]): Part[] {
return parts.map(toPart);
}
function toPart(part: PartUnion): Part {
if (typeof part === 'string') {
// it's a string
return { text: part };
}
// Handle thought parts for CountToken API compatibility
// The CountToken API expects parts to have certain required "oneof" fields initialized,
// but thought parts don't conform to this schema and cause API failures
if ('thought' in part && part.thought) {
const thoughtText = `[Thought: ${part.thought}]`;
const newPart = { ...part };
delete (newPart as Record<string, unknown>)['thought'];
const hasApiContent =
'functionCall' in newPart ||
'functionResponse' in newPart ||
'inlineData' in newPart ||
'fileData' in newPart;
if (hasApiContent) {
// It's a functionCall or other non-text part. Just strip the thought.
return newPart;
}
// If no other valid API content, this must be a text part.
// Combine existing text (if any) with the thought, preserving other properties.
const text = (newPart as { text?: unknown }).text;
const existingText = text ? String(text) : '';
const combinedText = existingText
? `${existingText}\n${thoughtText}`
: thoughtText;
return {
...newPart,
text: combinedText,
};
}
return part;
}
function toVertexGenerationConfig(
config?: GenerateContentConfig,
): VertexGenerationConfig | undefined {
if (!config) {
return undefined;
}
return {
temperature: config.temperature,
topP: config.topP,
topK: config.topK,
candidateCount: config.candidateCount,
maxOutputTokens: config.maxOutputTokens,
stopSequences: config.stopSequences,
responseLogprobs: config.responseLogprobs,
logprobs: config.logprobs,
presencePenalty: config.presencePenalty,
frequencyPenalty: config.frequencyPenalty,
seed: config.seed,
responseMimeType: config.responseMimeType,
responseSchema: config.responseSchema,
responseJsonSchema: config.responseJsonSchema,
routingConfig: config.routingConfig,
modelSelectionConfig: config.modelSelectionConfig,
responseModalities: config.responseModalities,
mediaResolution: config.mediaResolution,
speechConfig: config.speechConfig,
audioTimestamp: config.audioTimestamp,
thinkingConfig: config.thinkingConfig,
};
}
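
An illustrative sketch of the thought-part normalization that toPart/toParts performs; the casts mirror the ones used in the test file, and the input values are invented:

import { toParts } from './converter.js';
import type { Part } from '@google/genai';

const normalized = toParts([
  'plain string',
  { text: 'Answer: 42', thought: 'double-check the math' } as Part & {
    thought: string;
  },
  { functionCall: { name: 'lookup', args: {} }, thought: 'call a tool' } as Part & {
    thought: string;
  },
]);
// normalized[0] => { text: 'plain string' }
// normalized[1] => { text: 'Answer: 42\n[Thought: double-check the math]' }
// normalized[2] => { functionCall: { name: 'lookup', args: {} } } (thought stripped)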

View File

@@ -1,217 +0,0 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { type Credentials } from 'google-auth-library';
import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
import { OAuthCredentialStorage } from './oauth-credential-storage.js';
import type { OAuthCredentials } from '../mcp/token-storage/types.js';
import * as path from 'node:path';
import * as os from 'node:os';
import { promises as fs } from 'node:fs';
// Mock external dependencies
const mockHybridTokenStorage = vi.hoisted(() => ({
getCredentials: vi.fn(),
setCredentials: vi.fn(),
deleteCredentials: vi.fn(),
}));
vi.mock('../mcp/token-storage/hybrid-token-storage.js', () => ({
HybridTokenStorage: vi.fn(() => mockHybridTokenStorage),
}));
vi.mock('node:fs', () => ({
promises: {
readFile: vi.fn(),
rm: vi.fn(),
},
}));
vi.mock('node:os');
vi.mock('node:path');
describe('OAuthCredentialStorage', () => {
const mockCredentials: Credentials = {
access_token: 'mock_access_token',
refresh_token: 'mock_refresh_token',
expiry_date: Date.now() + 3600 * 1000,
token_type: 'Bearer',
scope: 'email profile',
};
const mockMcpCredentials: OAuthCredentials = {
serverName: 'main-account',
token: {
accessToken: 'mock_access_token',
refreshToken: 'mock_refresh_token',
tokenType: 'Bearer',
scope: 'email profile',
expiresAt: mockCredentials.expiry_date!,
},
updatedAt: expect.any(Number),
};
const oldFilePath = '/mock/home/.qwen/oauth.json';
beforeEach(() => {
vi.spyOn(mockHybridTokenStorage, 'getCredentials').mockResolvedValue(null);
vi.spyOn(mockHybridTokenStorage, 'setCredentials').mockResolvedValue(
undefined,
);
vi.spyOn(mockHybridTokenStorage, 'deleteCredentials').mockResolvedValue(
undefined,
);
vi.spyOn(fs, 'readFile').mockRejectedValue(new Error('File not found'));
vi.spyOn(fs, 'rm').mockResolvedValue(undefined);
vi.spyOn(os, 'homedir').mockReturnValue('/mock/home');
vi.spyOn(path, 'join').mockReturnValue(oldFilePath);
});
afterEach(() => {
vi.restoreAllMocks();
});
describe('loadCredentials', () => {
it('should load credentials from HybridTokenStorage if available', async () => {
vi.spyOn(mockHybridTokenStorage, 'getCredentials').mockResolvedValue(
mockMcpCredentials,
);
const result = await OAuthCredentialStorage.loadCredentials();
expect(mockHybridTokenStorage.getCredentials).toHaveBeenCalledWith(
'main-account',
);
expect(result).toEqual(mockCredentials);
});
it('should fallback to migrateFromFileStorage if no credentials in HybridTokenStorage', async () => {
vi.spyOn(mockHybridTokenStorage, 'getCredentials').mockResolvedValue(
null,
);
vi.spyOn(fs, 'readFile').mockResolvedValue(
JSON.stringify(mockCredentials),
);
const result = await OAuthCredentialStorage.loadCredentials();
expect(mockHybridTokenStorage.getCredentials).toHaveBeenCalledWith(
'main-account',
);
expect(fs.readFile).toHaveBeenCalledWith(oldFilePath, 'utf-8');
expect(mockHybridTokenStorage.setCredentials).toHaveBeenCalled(); // Verify credentials were saved
expect(fs.rm).toHaveBeenCalledWith(oldFilePath, { force: true }); // Verify old file was removed
expect(result).toEqual(mockCredentials);
});
it('should return null if no credentials found and no old file to migrate', async () => {
vi.spyOn(fs, 'readFile').mockRejectedValue({
message: 'File not found',
code: 'ENOENT',
});
const result = await OAuthCredentialStorage.loadCredentials();
expect(result).toBeNull();
});
it('should throw an error if loading fails', async () => {
vi.spyOn(mockHybridTokenStorage, 'getCredentials').mockRejectedValue(
new Error('Loading error'),
);
await expect(OAuthCredentialStorage.loadCredentials()).rejects.toThrow(
'Failed to load OAuth credentials',
);
});
it('should throw an error if read file fails', async () => {
vi.spyOn(mockHybridTokenStorage, 'getCredentials').mockResolvedValue(
null,
);
vi.spyOn(fs, 'readFile').mockRejectedValue(
new Error('Permission denied'),
);
await expect(OAuthCredentialStorage.loadCredentials()).rejects.toThrow(
'Failed to load OAuth credentials',
);
});
it('should not throw error if migration file removal failed', async () => {
vi.spyOn(mockHybridTokenStorage, 'getCredentials').mockResolvedValue(
null,
);
vi.spyOn(fs, 'readFile').mockResolvedValue(
JSON.stringify(mockCredentials),
);
vi.spyOn(OAuthCredentialStorage, 'saveCredentials').mockResolvedValue(
undefined,
);
vi.spyOn(fs, 'rm').mockRejectedValue(new Error('Deletion failed'));
const result = await OAuthCredentialStorage.loadCredentials();
expect(result).toEqual(mockCredentials);
});
});
describe('saveCredentials', () => {
it('should save credentials to HybridTokenStorage', async () => {
await OAuthCredentialStorage.saveCredentials(mockCredentials);
expect(mockHybridTokenStorage.setCredentials).toHaveBeenCalledWith(
mockMcpCredentials,
);
});
it('should throw an error if access_token is missing', async () => {
const invalidCredentials: Credentials = {
...mockCredentials,
access_token: undefined,
};
await expect(
OAuthCredentialStorage.saveCredentials(invalidCredentials),
).rejects.toThrow(
'Attempted to save credentials without an access token.',
);
});
});
describe('clearCredentials', () => {
it('should delete credentials from HybridTokenStorage', async () => {
await OAuthCredentialStorage.clearCredentials();
expect(mockHybridTokenStorage.deleteCredentials).toHaveBeenCalledWith(
'main-account',
);
});
it('should attempt to remove the old file-based storage', async () => {
await OAuthCredentialStorage.clearCredentials();
expect(fs.rm).toHaveBeenCalledWith(oldFilePath, { force: true });
});
it('should not throw an error if deleting old file fails', async () => {
vi.spyOn(fs, 'rm').mockRejectedValue(new Error('File deletion failed'));
await expect(
OAuthCredentialStorage.clearCredentials(),
).resolves.toBeUndefined();
});
it('should throw an error if clearing from HybridTokenStorage fails', async () => {
vi.spyOn(mockHybridTokenStorage, 'deleteCredentials').mockRejectedValue(
new Error('Deletion error'),
);
await expect(OAuthCredentialStorage.clearCredentials()).rejects.toThrow(
'Failed to clear OAuth credentials',
);
});
});
});

View File

@@ -1,130 +0,0 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { type Credentials } from 'google-auth-library';
import { HybridTokenStorage } from '../mcp/token-storage/hybrid-token-storage.js';
import { OAUTH_FILE } from '../config/storage.js';
import type { OAuthCredentials } from '../mcp/token-storage/types.js';
import * as path from 'node:path';
import * as os from 'node:os';
import { promises as fs } from 'node:fs';
const QWEN_DIR = '.qwen';
const KEYCHAIN_SERVICE_NAME = 'qwen-code-oauth';
const MAIN_ACCOUNT_KEY = 'main-account';
export class OAuthCredentialStorage {
private static storage: HybridTokenStorage = new HybridTokenStorage(
KEYCHAIN_SERVICE_NAME,
);
/**
* Load cached OAuth credentials
*/
static async loadCredentials(): Promise<Credentials | null> {
try {
const credentials = await this.storage.getCredentials(MAIN_ACCOUNT_KEY);
if (credentials?.token) {
const { accessToken, refreshToken, expiresAt, tokenType, scope } =
credentials.token;
// Convert from OAuthCredentials format to Google Credentials format
const googleCreds: Credentials = {
access_token: accessToken,
refresh_token: refreshToken || undefined,
token_type: tokenType || undefined,
scope: scope || undefined,
};
if (expiresAt) {
googleCreds.expiry_date = expiresAt;
}
return googleCreds;
}
// Fallback: Try to migrate from old file-based storage
return await this.migrateFromFileStorage();
} catch (error: unknown) {
console.error(error);
throw new Error('Failed to load OAuth credentials');
}
}
/**
* Save OAuth credentials
*/
static async saveCredentials(credentials: Credentials): Promise<void> {
if (!credentials.access_token) {
throw new Error('Attempted to save credentials without an access token.');
}
// Convert Google Credentials to OAuthCredentials format
const mcpCredentials: OAuthCredentials = {
serverName: MAIN_ACCOUNT_KEY,
token: {
accessToken: credentials.access_token,
refreshToken: credentials.refresh_token || undefined,
tokenType: credentials.token_type || 'Bearer',
scope: credentials.scope || undefined,
expiresAt: credentials.expiry_date || undefined,
},
updatedAt: Date.now(),
};
await this.storage.setCredentials(mcpCredentials);
}
/**
* Clear cached OAuth credentials
*/
static async clearCredentials(): Promise<void> {
try {
await this.storage.deleteCredentials(MAIN_ACCOUNT_KEY);
// Also try to remove the old file if it exists
const oldFilePath = path.join(os.homedir(), QWEN_DIR, OAUTH_FILE);
await fs.rm(oldFilePath, { force: true }).catch(() => {});
} catch (error: unknown) {
console.error(error);
throw new Error('Failed to clear OAuth credentials');
}
}
/**
* Migrate credentials from old file-based storage to keychain
*/
private static async migrateFromFileStorage(): Promise<Credentials | null> {
const oldFilePath = path.join(os.homedir(), QWEN_DIR, OAUTH_FILE);
let credsJson: string;
try {
credsJson = await fs.readFile(oldFilePath, 'utf-8');
} catch (error: unknown) {
if (
typeof error === 'object' &&
error !== null &&
'code' in error &&
error.code === 'ENOENT'
) {
// File doesn't exist, so no migration.
return null;
}
// Other read errors should propagate.
throw error;
}
const credentials = JSON.parse(credsJson) as Credentials;
// Save to new storage
await this.saveCredentials(credentials);
// Remove old file after successful migration
await fs.rm(oldFilePath, { force: true }).catch(() => {});
return credentials;
}
}
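
A brief usage sketch, assuming an async context; loadCredentials checks the keychain-backed storage first and transparently migrates a legacy ~/.qwen/oauth.json if one exists:

import { OAuthCredentialStorage } from './oauth-credential-storage.js';

async function checkStoredCredentials(): Promise<void> {
  const creds = await OAuthCredentialStorage.loadCredentials();
  if (!creds) {
    console.log('No stored credentials; interactive login required.');
  } else if (creds.expiry_date && creds.expiry_date < Date.now()) {
    console.log('Stored token expired; OAuth2Client refreshes it on use.');
  }
}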

File diff suppressed because it is too large

View File

@@ -1,563 +0,0 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import type { Credentials } from 'google-auth-library';
import {
CodeChallengeMethod,
Compute,
OAuth2Client,
} from 'google-auth-library';
import crypto from 'node:crypto';
import { promises as fs } from 'node:fs';
import * as http from 'node:http';
import * as net from 'node:net';
import path from 'node:path';
import readline from 'node:readline';
import url from 'node:url';
import open from 'open';
import type { Config } from '../config/config.js';
import { Storage } from '../config/storage.js';
import { AuthType } from '../core/contentGenerator.js';
import { FatalAuthenticationError, getErrorMessage } from '../utils/errors.js';
import { UserAccountManager } from '../utils/userAccountManager.js';
import { OAuthCredentialStorage } from './oauth-credential-storage.js';
import { FORCE_ENCRYPTED_FILE_ENV_VAR } from '../mcp/token-storage/index.js';
const userAccountManager = new UserAccountManager();
// OAuth Client ID used to initiate OAuth2Client class.
const OAUTH_CLIENT_ID =
'681255809395-oo8ft2oprdrnp9e3aqf6av3hmdib135j.apps.googleusercontent.com';
// OAuth Secret value used to initiate OAuth2Client class.
// Note: It's ok to save this in git because this is an installed application
// as described here: https://developers.google.com/identity/protocols/oauth2#installed
// "The process results in a client ID and, in some cases, a client secret,
// which you embed in the source code of your application. (In this context,
// the client secret is obviously not treated as a secret.)"
const OAUTH_CLIENT_SECRET = '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';
// OAuth Scopes for Cloud Code authorization.
const OAUTH_SCOPE = [
'https://www.googleapis.com/auth/cloud-platform',
'https://www.googleapis.com/auth/userinfo.email',
'https://www.googleapis.com/auth/userinfo.profile',
];
const HTTP_REDIRECT = 301;
const SIGN_IN_SUCCESS_URL =
'https://developers.google.com/gemini-code-assist/auth_success_gemini';
const SIGN_IN_FAILURE_URL =
'https://developers.google.com/gemini-code-assist/auth_failure_gemini';
/**
* An authentication URL for updating the credentials of an OAuth2Client,
* as well as a promise that resolves once the credentials have been
* refreshed (or rejects if refreshing the credentials failed).
*/
export interface OauthWebLogin {
authUrl: string;
loginCompletePromise: Promise<void>;
}
const oauthClientPromises = new Map<AuthType, Promise<OAuth2Client>>();
function getUseEncryptedStorageFlag() {
return process.env[FORCE_ENCRYPTED_FILE_ENV_VAR] === 'true';
}
async function initOauthClient(
authType: AuthType,
config: Config,
): Promise<OAuth2Client> {
const client = new OAuth2Client({
clientId: OAUTH_CLIENT_ID,
clientSecret: OAUTH_CLIENT_SECRET,
transporterOptions: {
proxy: config.getProxy(),
},
});
const useEncryptedStorage = getUseEncryptedStorageFlag();
if (
process.env['GOOGLE_GENAI_USE_GCA'] &&
process.env['GOOGLE_CLOUD_ACCESS_TOKEN']
) {
client.setCredentials({
access_token: process.env['GOOGLE_CLOUD_ACCESS_TOKEN'],
});
await fetchAndCacheUserInfo(client);
return client;
}
client.on('tokens', async (tokens: Credentials) => {
if (useEncryptedStorage) {
await OAuthCredentialStorage.saveCredentials(tokens);
} else {
await cacheCredentials(tokens);
}
});
// If there are cached creds on disk, they always take precedence
if (await loadCachedCredentials(client)) {
// Found valid cached credentials.
// Check if we need to retrieve Google Account ID or Email
if (!userAccountManager.getCachedGoogleAccount()) {
try {
await fetchAndCacheUserInfo(client);
} catch (error) {
// Non-fatal, continue with existing auth.
console.warn('Failed to fetch user info:', getErrorMessage(error));
}
}
console.log('Loaded cached credentials.');
return client;
}
// In Google Cloud Shell, we can use Application Default Credentials (ADC)
// provided via its metadata server to authenticate non-interactively using
// the identity of the user logged into Cloud Shell.
if (authType === AuthType.CLOUD_SHELL) {
try {
console.log("Attempting to authenticate via Cloud Shell VM's ADC.");
const computeClient = new Compute({
// We can leave this empty, since the metadata server will provide
// the service account email.
});
await computeClient.getAccessToken();
console.log('Authentication successful.');
// Do not cache creds in this case; note that Compute client will handle its own refresh
return computeClient;
} catch (e) {
throw new Error(
`Could not authenticate using Cloud Shell credentials. Please select a different authentication method or ensure you are in a properly configured environment. Error: ${getErrorMessage(
e,
)}`,
);
}
}
if (config.isBrowserLaunchSuppressed()) {
let success = false;
const maxRetries = 2;
for (let i = 0; !success && i < maxRetries; i++) {
success = await authWithUserCode(client);
if (!success) {
console.error(
'\nFailed to authenticate with user code.',
i === maxRetries - 1 ? '' : 'Retrying...\n',
);
}
}
if (!success) {
throw new FatalAuthenticationError(
'Failed to authenticate with user code.',
);
}
} else {
const webLogin = await authWithWeb(client);
console.log(
`\n\nCode Assist login required.\n` +
`Attempting to open authentication page in your browser.\n` +
`Otherwise navigate to:\n\n${webLogin.authUrl}\n\n`,
);
try {
// Attempt to open the authentication URL in the default browser.
// We do not use the `wait` option here because the main script's execution
// is already paused by `loginCompletePromise`, which awaits the server callback.
const childProcess = await open(webLogin.authUrl);
// IMPORTANT: Attach an error handler to the returned child process.
// Without this, if `open` fails to spawn a process (e.g., `xdg-open` is not found
// in a minimal Docker container), it will emit an unhandled 'error' event,
// causing the entire Node.js process to crash.
childProcess.on('error', (error) => {
console.error(
'Failed to open browser automatically. Please try running again with NO_BROWSER=true set.',
);
console.error('Browser error details:', getErrorMessage(error));
});
} catch (err) {
console.error(
'An unexpected error occurred while trying to open the browser:',
getErrorMessage(err),
'\nThis might be due to browser compatibility issues or system configuration.',
'\nPlease try running again with NO_BROWSER=true set for manual authentication.',
);
throw new FatalAuthenticationError(
`Failed to open browser: ${getErrorMessage(err)}`,
);
}
console.log('Waiting for authentication...');
// Add timeout to prevent infinite waiting when browser tab gets stuck
const authTimeout = 5 * 60 * 1000; // 5 minutes timeout
const timeoutPromise = new Promise<never>((_, reject) => {
setTimeout(() => {
reject(
new FatalAuthenticationError(
'Authentication timed out after 5 minutes. The browser tab may have gotten stuck in a loading state. ' +
'Please try again or use NO_BROWSER=true for manual authentication.',
),
);
}, authTimeout);
});
await Promise.race([webLogin.loginCompletePromise, timeoutPromise]);
}
return client;
}
export async function getOauthClient(
authType: AuthType,
config: Config,
): Promise<OAuth2Client> {
if (!oauthClientPromises.has(authType)) {
oauthClientPromises.set(authType, initOauthClient(authType, config));
}
return oauthClientPromises.get(authType)!;
}
async function authWithUserCode(client: OAuth2Client): Promise<boolean> {
const redirectUri = 'https://codeassist.google.com/authcode';
const codeVerifier = await client.generateCodeVerifierAsync();
const state = crypto.randomBytes(32).toString('hex');
const authUrl: string = client.generateAuthUrl({
redirect_uri: redirectUri,
access_type: 'offline',
scope: OAUTH_SCOPE,
code_challenge_method: CodeChallengeMethod.S256,
code_challenge: codeVerifier.codeChallenge,
state,
});
console.log('Please visit the following URL to authorize the application:');
console.log('');
console.log(authUrl);
console.log('');
const code = await new Promise<string>((resolve) => {
const rl = readline.createInterface({
input: process.stdin,
output: process.stdout,
});
rl.question('Enter the authorization code: ', (code) => {
rl.close();
resolve(code.trim());
});
});
if (!code) {
console.error('Authorization code is required.');
return false;
}
try {
const { tokens } = await client.getToken({
code,
codeVerifier: codeVerifier.codeVerifier,
redirect_uri: redirectUri,
});
client.setCredentials(tokens);
} catch (error) {
console.error(
'Failed to authenticate with authorization code:',
getErrorMessage(error),
);
return false;
}
return true;
}
async function authWithWeb(client: OAuth2Client): Promise<OauthWebLogin> {
const port = await getAvailablePort();
// The hostname used for the HTTP server binding (e.g., '0.0.0.0' in Docker).
const host = process.env['OAUTH_CALLBACK_HOST'] || 'localhost';
// The `redirectUri` sent to Google's authorization server MUST use a loopback IP literal
// (i.e., 'localhost' or '127.0.0.1'). This is a strict security policy for credentials of
// type 'Desktop app' or 'Web application' (when using loopback flow) to mitigate
// authorization code interception attacks.
const redirectUri = `http://localhost:${port}/oauth2callback`;
const state = crypto.randomBytes(32).toString('hex');
const authUrl = client.generateAuthUrl({
redirect_uri: redirectUri,
access_type: 'offline',
scope: OAUTH_SCOPE,
state,
});
const loginCompletePromise = new Promise<void>((resolve, reject) => {
const server = http.createServer(async (req, res) => {
try {
if (req.url!.indexOf('/oauth2callback') === -1) {
res.writeHead(HTTP_REDIRECT, { Location: SIGN_IN_FAILURE_URL });
res.end();
reject(
new FatalAuthenticationError(
'OAuth callback not received. Unexpected request: ' + req.url,
),
);
// Return early so an unrelated request (e.g. /favicon.ico) is not
// parsed as an OAuth callback below.
return;
}
// acquire the code from the querystring, and close the web server.
const qs = new url.URL(req.url!, 'http://localhost:3000').searchParams;
if (qs.get('error')) {
res.writeHead(HTTP_REDIRECT, { Location: SIGN_IN_FAILURE_URL });
res.end();
const errorCode = qs.get('error');
const errorDescription =
qs.get('error_description') || 'No additional details provided';
reject(
new FatalAuthenticationError(
`Google OAuth error: ${errorCode}. ${errorDescription}`,
),
);
} else if (qs.get('state') !== state) {
res.end('State mismatch. Possible CSRF attack');
reject(
new FatalAuthenticationError(
'OAuth state mismatch. Possible CSRF attack or browser session issue.',
),
);
} else if (qs.get('code')) {
try {
const { tokens } = await client.getToken({
code: qs.get('code')!,
redirect_uri: redirectUri,
});
client.setCredentials(tokens);
// Retrieve and cache Google Account ID during authentication
try {
await fetchAndCacheUserInfo(client);
} catch (error) {
console.warn(
'Failed to retrieve Google Account ID during authentication:',
getErrorMessage(error),
);
// Don't fail the auth flow if Google Account ID retrieval fails
}
res.writeHead(HTTP_REDIRECT, { Location: SIGN_IN_SUCCESS_URL });
res.end();
resolve();
} catch (error) {
res.writeHead(HTTP_REDIRECT, { Location: SIGN_IN_FAILURE_URL });
res.end();
reject(
new FatalAuthenticationError(
`Failed to exchange authorization code for tokens: ${getErrorMessage(error)}`,
),
);
}
} else {
reject(
new FatalAuthenticationError(
'No authorization code received from Google OAuth. Please try authenticating again.',
),
);
}
} catch (e) {
// Provide more specific error message for unexpected errors during OAuth flow
if (e instanceof FatalAuthenticationError) {
reject(e);
} else {
reject(
new FatalAuthenticationError(
`Unexpected error during OAuth authentication: ${getErrorMessage(e)}`,
),
);
}
} finally {
server.close();
}
});
server.listen(port, host, () => {
// Server started successfully
});
server.on('error', (err) => {
reject(
new FatalAuthenticationError(
`OAuth callback server error: ${getErrorMessage(err)}`,
),
);
});
});
return {
authUrl,
loginCompletePromise,
};
}
export function getAvailablePort(): Promise<number> {
return new Promise((resolve, reject) => {
let port = 0;
try {
const portStr = process.env['OAUTH_CALLBACK_PORT'];
if (portStr) {
port = parseInt(portStr, 10);
if (isNaN(port) || port <= 0 || port > 65535) {
return reject(
new Error(`Invalid value for OAUTH_CALLBACK_PORT: "${portStr}"`),
);
}
return resolve(port);
}
const server = net.createServer();
server.listen(0, () => {
const address = server.address()! as net.AddressInfo;
port = address.port;
});
server.on('listening', () => {
server.close();
server.unref();
});
server.on('error', (e) => reject(e));
server.on('close', () => resolve(port));
} catch (e) {
reject(e);
}
});
}
async function loadCachedCredentials(client: OAuth2Client): Promise<boolean> {
const useEncryptedStorage = getUseEncryptedStorageFlag();
if (useEncryptedStorage) {
const credentials = await OAuthCredentialStorage.loadCredentials();
if (credentials) {
client.setCredentials(credentials);
return true;
}
return false;
}
const pathsToTry = [
Storage.getOAuthCredsPath(),
process.env['GOOGLE_APPLICATION_CREDENTIALS'],
].filter((p): p is string => !!p);
for (const keyFile of pathsToTry) {
try {
const creds = await fs.readFile(keyFile, 'utf-8');
client.setCredentials(JSON.parse(creds));
// This will verify locally that the credentials look good.
const { token } = await client.getAccessToken();
if (!token) {
continue;
}
// This will check with the server to see if it hasn't been revoked.
await client.getTokenInfo(token);
return true;
} catch (error) {
// Log specific error for debugging, but continue trying other paths
console.debug(
`Failed to load credentials from ${keyFile}:`,
getErrorMessage(error),
);
}
}
return false;
}
async function cacheCredentials(credentials: Credentials) {
const filePath = Storage.getOAuthCredsPath();
await fs.mkdir(path.dirname(filePath), { recursive: true });
const credString = JSON.stringify(credentials, null, 2);
await fs.writeFile(filePath, credString, { mode: 0o600 });
try {
await fs.chmod(filePath, 0o600);
} catch {
/* empty */
}
}
export function clearOauthClientCache() {
oauthClientPromises.clear();
}
export async function clearCachedCredentialFile() {
try {
const useEncryptedStorage = getUseEncryptedStorageFlag();
if (useEncryptedStorage) {
await OAuthCredentialStorage.clearCredentials();
} else {
await fs.rm(Storage.getOAuthCredsPath(), { force: true });
}
// Clear the Google Account ID cache when credentials are cleared
await userAccountManager.clearCachedGoogleAccount();
// Clear the in-memory OAuth client cache to force re-authentication
clearOauthClientCache();
/**
* Also clear Qwen SharedTokenManager cache and credentials file to prevent stale credentials
* when switching between auth types
* TODO: We do not depend on code_assist, we'll have to build an independent auth-cleaning procedure.
*/
try {
const { SharedTokenManager } = await import(
'../qwen/sharedTokenManager.js'
);
const { clearQwenCredentials } = await import('../qwen/qwenOAuth2.js');
const sharedManager = SharedTokenManager.getInstance();
sharedManager.clearCache();
await clearQwenCredentials();
} catch (qwenError) {
console.debug('Could not clear Qwen credentials:', qwenError);
}
} catch (e) {
console.error('Failed to clear cached credentials:', e);
}
}
async function fetchAndCacheUserInfo(client: OAuth2Client): Promise<void> {
try {
const { token } = await client.getAccessToken();
if (!token) {
return;
}
const response = await fetch(
'https://www.googleapis.com/oauth2/v2/userinfo',
{
headers: {
Authorization: `Bearer ${token}`,
},
},
);
if (!response.ok) {
console.error(
'Failed to fetch user info:',
response.status,
response.statusText,
);
return;
}
const userInfo = await response.json();
await userAccountManager.cacheGoogleAccount(userInfo.email);
} catch (error) {
console.error('Error retrieving user info:', error);
}
}
// Helper to ensure test isolation
export function resetOauthClientForTesting() {
oauthClientPromises.clear();
}
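
A hedged bootstrap sketch, assuming a Config instance; getOauthClient memoizes one client promise per AuthType, so concurrent callers share a single login flow:

import { getOauthClient } from './oauth2.js';
import { AuthType } from '../core/contentGenerator.js';
import type { Config } from '../config/config.js';

async function bootstrap(config: Config): Promise<void> {
  const client = await getOauthClient(AuthType.LOGIN_WITH_GOOGLE, config);
  const { token } = await client.getAccessToken();
  console.log('Access token available:', Boolean(token));
}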

View File

@@ -1,255 +0,0 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { beforeEach, describe, it, expect, vi } from 'vitest';
import { CodeAssistServer } from './server.js';
import { OAuth2Client } from 'google-auth-library';
import { UserTierId } from './types.js';
vi.mock('google-auth-library');
describe('CodeAssistServer', () => {
beforeEach(() => {
vi.resetAllMocks();
});
it('should be able to be constructed', () => {
const auth = new OAuth2Client();
const server = new CodeAssistServer(
auth,
'test-project',
{},
'test-session',
UserTierId.FREE,
);
expect(server).toBeInstanceOf(CodeAssistServer);
});
it('should call the generateContent endpoint', async () => {
const client = new OAuth2Client();
const server = new CodeAssistServer(
client,
'test-project',
{},
'test-session',
UserTierId.FREE,
);
const mockResponse = {
response: {
candidates: [
{
index: 0,
content: {
role: 'model',
parts: [{ text: 'response' }],
},
finishReason: 'STOP',
safetyRatings: [],
},
],
},
};
vi.spyOn(server, 'requestPost').mockResolvedValue(mockResponse);
const response = await server.generateContent(
{
model: 'test-model',
contents: [{ role: 'user', parts: [{ text: 'request' }] }],
},
'user-prompt-id',
);
expect(server.requestPost).toHaveBeenCalledWith(
'generateContent',
expect.any(Object),
undefined,
);
expect(response.candidates?.[0]?.content?.parts?.[0]?.text).toBe(
'response',
);
});
it('should call the generateContentStream endpoint', async () => {
const client = new OAuth2Client();
const server = new CodeAssistServer(
client,
'test-project',
{},
'test-session',
UserTierId.FREE,
);
const mockResponse = (async function* () {
yield {
response: {
candidates: [
{
index: 0,
content: {
role: 'model',
parts: [{ text: 'response' }],
},
finishReason: 'STOP',
safetyRatings: [],
},
],
},
};
})();
vi.spyOn(server, 'requestStreamingPost').mockResolvedValue(mockResponse);
const stream = await server.generateContentStream(
{
model: 'test-model',
contents: [{ role: 'user', parts: [{ text: 'request' }] }],
},
'user-prompt-id',
);
for await (const res of stream) {
expect(server.requestStreamingPost).toHaveBeenCalledWith(
'streamGenerateContent',
expect.any(Object),
undefined,
);
expect(res.candidates?.[0]?.content?.parts?.[0]?.text).toBe('response');
}
});
it('should call the onboardUser endpoint', async () => {
const client = new OAuth2Client();
const server = new CodeAssistServer(
client,
'test-project',
{},
'test-session',
UserTierId.FREE,
);
const mockResponse = {
name: 'operations/123',
done: true,
};
vi.spyOn(server, 'requestPost').mockResolvedValue(mockResponse);
const response = await server.onboardUser({
tierId: 'test-tier',
cloudaicompanionProject: 'test-project',
metadata: {},
});
expect(server.requestPost).toHaveBeenCalledWith(
'onboardUser',
expect.any(Object),
);
expect(response.name).toBe('operations/123');
});
it('should call the loadCodeAssist endpoint', async () => {
const client = new OAuth2Client();
const server = new CodeAssistServer(
client,
'test-project',
{},
'test-session',
UserTierId.FREE,
);
const mockResponse = {
currentTier: {
id: UserTierId.FREE,
name: 'Free',
description: 'free tier',
},
allowedTiers: [],
ineligibleTiers: [],
cloudaicompanionProject: 'projects/test',
};
vi.spyOn(server, 'requestPost').mockResolvedValue(mockResponse);
const response = await server.loadCodeAssist({
metadata: {},
});
expect(server.requestPost).toHaveBeenCalledWith(
'loadCodeAssist',
expect.any(Object),
);
expect(response).toEqual(mockResponse);
});
it('should call the countTokens endpoint', async () => {
const client = new OAuth2Client();
const server = new CodeAssistServer(
client,
'test-project',
{},
'test-session',
UserTierId.FREE,
);
const mockResponse = {
totalTokens: 100,
};
vi.spyOn(server, 'requestPost').mockResolvedValue(mockResponse);
const response = await server.countTokens({
model: 'test-model',
contents: [{ role: 'user', parts: [{ text: 'request' }] }],
});
expect(response.totalTokens).toBe(100);
});
it('should throw an error for embedContent', async () => {
const client = new OAuth2Client();
const server = new CodeAssistServer(
client,
'test-project',
{},
'test-session',
UserTierId.FREE,
);
await expect(
server.embedContent({
model: 'test-model',
contents: [{ role: 'user', parts: [{ text: 'request' }] }],
}),
).rejects.toThrow();
});
it('should handle VPC-SC errors when calling loadCodeAssist', async () => {
const client = new OAuth2Client();
const server = new CodeAssistServer(
client,
'test-project',
{},
'test-session',
UserTierId.FREE,
);
const mockVpcScError = {
response: {
data: {
error: {
details: [
{
reason: 'SECURITY_POLICY_VIOLATED',
},
],
},
},
},
};
vi.spyOn(server, 'requestPost').mockRejectedValue(mockVpcScError);
const response = await server.loadCodeAssist({
metadata: {},
});
expect(server.requestPost).toHaveBeenCalledWith(
'loadCodeAssist',
expect.any(Object),
);
expect(response).toEqual({
currentTier: { id: UserTierId.STANDARD },
});
});
});

View File

@@ -1,253 +0,0 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import type { OAuth2Client } from 'google-auth-library';
import type {
CodeAssistGlobalUserSettingResponse,
GoogleRpcResponse,
LoadCodeAssistRequest,
LoadCodeAssistResponse,
LongRunningOperationResponse,
OnboardUserRequest,
SetCodeAssistGlobalUserSettingRequest,
} from './types.js';
import type {
CountTokensParameters,
CountTokensResponse,
EmbedContentParameters,
EmbedContentResponse,
GenerateContentParameters,
GenerateContentResponse,
} from '@google/genai';
import * as readline from 'node:readline';
import type { ContentGenerator } from '../core/contentGenerator.js';
import { UserTierId } from './types.js';
import type {
CaCountTokenResponse,
CaGenerateContentResponse,
} from './converter.js';
import {
fromCountTokenResponse,
fromGenerateContentResponse,
toCountTokenRequest,
toGenerateContentRequest,
} from './converter.js';
/** HTTP options to be used in each of the requests. */
export interface HttpOptions {
/** Additional HTTP headers to be sent with the request. */
headers?: Record<string, string>;
}
export const CODE_ASSIST_ENDPOINT = 'https://localhost:0'; // Disable Google Code Assist API Request
export const CODE_ASSIST_API_VERSION = 'v1internal';
export class CodeAssistServer implements ContentGenerator {
constructor(
readonly client: OAuth2Client,
readonly projectId?: string,
readonly httpOptions: HttpOptions = {},
readonly sessionId?: string,
readonly userTier?: UserTierId,
) {}
async generateContentStream(
req: GenerateContentParameters,
userPromptId: string,
): Promise<AsyncGenerator<GenerateContentResponse>> {
const resps = await this.requestStreamingPost<CaGenerateContentResponse>(
'streamGenerateContent',
toGenerateContentRequest(
req,
userPromptId,
this.projectId,
this.sessionId,
),
req.config?.abortSignal,
);
return (async function* (): AsyncGenerator<GenerateContentResponse> {
for await (const resp of resps) {
yield fromGenerateContentResponse(resp);
}
})();
}
async generateContent(
req: GenerateContentParameters,
userPromptId: string,
): Promise<GenerateContentResponse> {
const resp = await this.requestPost<CaGenerateContentResponse>(
'generateContent',
toGenerateContentRequest(
req,
userPromptId,
this.projectId,
this.sessionId,
),
req.config?.abortSignal,
);
return fromGenerateContentResponse(resp);
}
async onboardUser(
req: OnboardUserRequest,
): Promise<LongRunningOperationResponse> {
return await this.requestPost<LongRunningOperationResponse>(
'onboardUser',
req,
);
}
async loadCodeAssist(
req: LoadCodeAssistRequest,
): Promise<LoadCodeAssistResponse> {
try {
return await this.requestPost<LoadCodeAssistResponse>(
'loadCodeAssist',
req,
);
} catch (e) {
if (isVpcScAffectedUser(e)) {
return {
currentTier: { id: UserTierId.STANDARD },
};
} else {
throw e;
}
}
}
async getCodeAssistGlobalUserSetting(): Promise<CodeAssistGlobalUserSettingResponse> {
return await this.requestGet<CodeAssistGlobalUserSettingResponse>(
'getCodeAssistGlobalUserSetting',
);
}
async setCodeAssistGlobalUserSetting(
req: SetCodeAssistGlobalUserSettingRequest,
): Promise<CodeAssistGlobalUserSettingResponse> {
return await this.requestPost<CodeAssistGlobalUserSettingResponse>(
'setCodeAssistGlobalUserSetting',
req,
);
}
async countTokens(req: CountTokensParameters): Promise<CountTokensResponse> {
const resp = await this.requestPost<CaCountTokenResponse>(
'countTokens',
toCountTokenRequest(req),
);
return fromCountTokenResponse(resp);
}
async embedContent(
_req: EmbedContentParameters,
): Promise<EmbedContentResponse> {
throw new Error('embedContent is not supported by the Code Assist server.');
}
async requestPost<T>(
method: string,
req: object,
signal?: AbortSignal,
): Promise<T> {
const res = await this.client.request({
url: this.getMethodUrl(method),
method: 'POST',
headers: {
'Content-Type': 'application/json',
...this.httpOptions.headers,
},
responseType: 'json',
body: JSON.stringify(req),
signal,
});
return res.data as T;
}
async requestGet<T>(method: string, signal?: AbortSignal): Promise<T> {
const res = await this.client.request({
url: this.getMethodUrl(method),
method: 'GET',
headers: {
'Content-Type': 'application/json',
...this.httpOptions.headers,
},
responseType: 'json',
signal,
});
return res.data as T;
}
async requestStreamingPost<T>(
method: string,
req: object,
signal?: AbortSignal,
): Promise<AsyncGenerator<T>> {
const res = await this.client.request({
url: this.getMethodUrl(method),
method: 'POST',
params: {
alt: 'sse',
},
headers: {
'Content-Type': 'application/json',
...this.httpOptions.headers,
},
responseType: 'stream',
body: JSON.stringify(req),
signal,
});
return (async function* (): AsyncGenerator<T> {
const rl = readline.createInterface({
input: res.data as NodeJS.ReadableStream,
crlfDelay: Infinity, // Recognizes '\r\n' and '\n' as line breaks
});
let bufferedLines: string[] = [];
for await (const line of rl) {
// blank lines are used to separate JSON objects in the stream
if (line === '') {
if (bufferedLines.length === 0) {
continue; // no data to yield
}
yield JSON.parse(bufferedLines.join('\n')) as T;
bufferedLines = []; // Reset the buffer after yielding
} else if (line.startsWith('data: ')) {
bufferedLines.push(line.slice(6).trim());
} else {
throw new Error(`Unexpected line format in response: ${line}`);
}
}
})();
}
getMethodUrl(method: string): string {
const endpoint =
process.env['CODE_ASSIST_ENDPOINT'] ?? CODE_ASSIST_ENDPOINT;
return `${endpoint}/${CODE_ASSIST_API_VERSION}:${method}`;
}
}
function isVpcScAffectedUser(error: unknown): boolean {
if (error && typeof error === 'object' && 'response' in error) {
const gaxiosError = error as {
response?: {
data?: unknown;
};
};
const response = gaxiosError.response?.data as
| GoogleRpcResponse
| undefined;
if (Array.isArray(response?.error?.details)) {
return response.error.details.some(
(detail) => detail.reason === 'SECURITY_POLICY_VIOLATED',
);
}
}
return false;
}
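
requestStreamingPost above parses Google-style server-sent events: 'data: ' lines are buffered until a blank line, then joined and parsed as a single JSON object. A self-contained sketch of just that framing logic:

function* parseSseLines<T>(lines: Iterable<string>): Generator<T> {
  let buffered: string[] = [];
  for (const line of lines) {
    if (line === '') {
      // A blank line terminates one event; skip empty buffers.
      if (buffered.length > 0) {
        yield JSON.parse(buffered.join('\n')) as T;
        buffered = [];
      }
    } else if (line.startsWith('data: ')) {
      buffered.push(line.slice(6).trim());
    }
    // The real implementation throws on any other line format.
  }
}

// [...parseSseLines(['data: {"totalTokens":', 'data: 30}', ''])]
//   => [{ totalTokens: 30 }]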

View File

@@ -1,224 +0,0 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
import { setupUser, ProjectIdRequiredError } from './setup.js';
import { CodeAssistServer } from '../code_assist/server.js';
import type { OAuth2Client } from 'google-auth-library';
import type { GeminiUserTier } from './types.js';
import { UserTierId } from './types.js';
vi.mock('../code_assist/server.js');
const mockPaidTier: GeminiUserTier = {
id: UserTierId.STANDARD,
name: 'paid',
description: 'Paid tier',
isDefault: true,
};
const mockFreeTier: GeminiUserTier = {
id: UserTierId.FREE,
name: 'free',
description: 'Free tier',
isDefault: true,
};
describe('setupUser for existing user', () => {
let mockLoad: ReturnType<typeof vi.fn>;
let mockOnboardUser: ReturnType<typeof vi.fn>;
beforeEach(() => {
vi.resetAllMocks();
mockLoad = vi.fn();
mockOnboardUser = vi.fn().mockResolvedValue({
done: true,
response: {
cloudaicompanionProject: {
id: 'server-project',
},
},
});
vi.mocked(CodeAssistServer).mockImplementation(
() =>
({
loadCodeAssist: mockLoad,
onboardUser: mockOnboardUser,
}) as unknown as CodeAssistServer,
);
});
afterEach(() => {
vi.unstubAllEnvs();
});
it('should use GOOGLE_CLOUD_PROJECT when set and project from server is undefined', async () => {
vi.stubEnv('GOOGLE_CLOUD_PROJECT', 'test-project');
mockLoad.mockResolvedValue({
currentTier: mockPaidTier,
});
await setupUser({} as OAuth2Client);
expect(CodeAssistServer).toHaveBeenCalledWith(
{},
'test-project',
{},
'',
undefined,
);
});
it('should ignore GOOGLE_CLOUD_PROJECT when project from server is set', async () => {
vi.stubEnv('GOOGLE_CLOUD_PROJECT', 'test-project');
mockLoad.mockResolvedValue({
cloudaicompanionProject: 'server-project',
currentTier: mockPaidTier,
});
const projectId = await setupUser({} as OAuth2Client);
expect(CodeAssistServer).toHaveBeenCalledWith(
{},
'test-project',
{},
'',
undefined,
);
expect(projectId).toEqual({
projectId: 'server-project',
userTier: 'standard-tier',
});
});
it('should throw ProjectIdRequiredError when no project ID is available', async () => {
vi.stubEnv('GOOGLE_CLOUD_PROJECT', '');
// And the server itself requires a project ID internally
vi.mocked(CodeAssistServer).mockImplementation(() => {
throw new ProjectIdRequiredError();
});
await expect(setupUser({} as OAuth2Client)).rejects.toThrow(
ProjectIdRequiredError,
);
});
});
describe('setupUser for new user', () => {
let mockLoad: ReturnType<typeof vi.fn>;
let mockOnboardUser: ReturnType<typeof vi.fn>;
beforeEach(() => {
vi.resetAllMocks();
mockLoad = vi.fn();
mockOnboardUser = vi.fn().mockResolvedValue({
done: true,
response: {
cloudaicompanionProject: {
id: 'server-project',
},
},
});
vi.mocked(CodeAssistServer).mockImplementation(
() =>
({
loadCodeAssist: mockLoad,
onboardUser: mockOnboardUser,
}) as unknown as CodeAssistServer,
);
});
afterEach(() => {
vi.unstubAllEnvs();
});
it('should use GOOGLE_CLOUD_PROJECT when set and onboard a new paid user', async () => {
vi.stubEnv('GOOGLE_CLOUD_PROJECT', 'test-project');
mockLoad.mockResolvedValue({
allowedTiers: [mockPaidTier],
});
const userData = await setupUser({} as OAuth2Client);
expect(CodeAssistServer).toHaveBeenCalledWith(
{},
'test-project',
{},
'',
undefined,
);
expect(mockLoad).toHaveBeenCalled();
expect(mockOnboardUser).toHaveBeenCalledWith({
tierId: 'standard-tier',
cloudaicompanionProject: 'test-project',
metadata: {
ideType: 'IDE_UNSPECIFIED',
platform: 'PLATFORM_UNSPECIFIED',
pluginType: 'GEMINI',
duetProject: 'test-project',
},
});
expect(userData).toEqual({
projectId: 'server-project',
userTier: 'standard-tier',
});
});
it('should onboard a new free user when GOOGLE_CLOUD_PROJECT is not set', async () => {
vi.stubEnv('GOOGLE_CLOUD_PROJECT', '');
mockLoad.mockResolvedValue({
allowedTiers: [mockFreeTier],
});
const userData = await setupUser({} as OAuth2Client);
expect(CodeAssistServer).toHaveBeenCalledWith(
{},
undefined,
{},
'',
undefined,
);
expect(mockLoad).toHaveBeenCalled();
expect(mockOnboardUser).toHaveBeenCalledWith({
tierId: 'free-tier',
cloudaicompanionProject: undefined,
metadata: {
ideType: 'IDE_UNSPECIFIED',
platform: 'PLATFORM_UNSPECIFIED',
pluginType: 'GEMINI',
},
});
expect(userData).toEqual({
projectId: 'server-project',
userTier: 'free-tier',
});
});
it('should use GOOGLE_CLOUD_PROJECT when onboard response has no project ID', async () => {
vi.stubEnv('GOOGLE_CLOUD_PROJECT', 'test-project');
mockLoad.mockResolvedValue({
allowedTiers: [mockPaidTier],
});
mockOnboardUser.mockResolvedValue({
done: true,
response: {
cloudaicompanionProject: undefined,
},
});
const userData = await setupUser({} as OAuth2Client);
expect(userData).toEqual({
projectId: 'test-project',
userTier: 'standard-tier',
});
});
it('should throw ProjectIdRequiredError when no project ID is available', async () => {
vi.stubEnv('GOOGLE_CLOUD_PROJECT', '');
mockLoad.mockResolvedValue({
allowedTiers: [mockPaidTier],
});
mockOnboardUser.mockResolvedValue({
done: true,
response: {},
});
await expect(setupUser({} as OAuth2Client)).rejects.toThrow(
ProjectIdRequiredError,
);
});
});

View File

@@ -1,124 +0,0 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import type {
ClientMetadata,
GeminiUserTier,
LoadCodeAssistResponse,
OnboardUserRequest,
} from './types.js';
import { UserTierId } from './types.js';
import { CodeAssistServer } from './server.js';
import type { OAuth2Client } from 'google-auth-library';
export class ProjectIdRequiredError extends Error {
constructor() {
super(
'This account requires setting the GOOGLE_CLOUD_PROJECT env var. See https://goo.gle/gemini-cli-auth-docs#workspace-gca',
);
}
}
export interface UserData {
projectId: string;
userTier: UserTierId;
}
/**
 * Onboards the user with the Code Assist server and resolves their project.
 * @param client the user's authenticated OAuth2 client
 * @returns the user's resolved project id and user tier
*/
export async function setupUser(client: OAuth2Client): Promise<UserData> {
const projectId = process.env['GOOGLE_CLOUD_PROJECT'] || undefined;
const caServer = new CodeAssistServer(client, projectId, {}, '', undefined);
const coreClientMetadata: ClientMetadata = {
ideType: 'IDE_UNSPECIFIED',
platform: 'PLATFORM_UNSPECIFIED',
pluginType: 'GEMINI',
};
const loadRes = await caServer.loadCodeAssist({
cloudaicompanionProject: projectId,
metadata: {
...coreClientMetadata,
duetProject: projectId,
},
});
if (loadRes.currentTier) {
if (!loadRes.cloudaicompanionProject) {
if (projectId) {
return {
projectId,
userTier: loadRes.currentTier.id,
};
}
throw new ProjectIdRequiredError();
}
return {
projectId: loadRes.cloudaicompanionProject,
userTier: loadRes.currentTier.id,
};
}
const tier = getOnboardTier(loadRes);
let onboardReq: OnboardUserRequest;
if (tier.id === UserTierId.FREE) {
// The free tier uses a managed Google Cloud project. Setting a project in the `onboardUser` request causes a `Precondition Failed` error.
onboardReq = {
tierId: tier.id,
cloudaicompanionProject: undefined,
metadata: coreClientMetadata,
};
} else {
onboardReq = {
tierId: tier.id,
cloudaicompanionProject: projectId,
metadata: {
...coreClientMetadata,
duetProject: projectId,
},
};
}
// Poll onboardUser until the long-running operation completes.
let lroRes = await caServer.onboardUser(onboardReq);
while (!lroRes.done) {
await new Promise((f) => setTimeout(f, 5000));
lroRes = await caServer.onboardUser(onboardReq);
}
if (!lroRes.response?.cloudaicompanionProject?.id) {
if (projectId) {
return {
projectId,
userTier: tier.id,
};
}
throw new ProjectIdRequiredError();
}
return {
projectId: lroRes.response.cloudaicompanionProject.id,
userTier: tier.id,
};
}
function getOnboardTier(res: LoadCodeAssistResponse): GeminiUserTier {
for (const tier of res.allowedTiers || []) {
if (tier.isDefault) {
return tier;
}
}
return {
name: '',
description: '',
id: UserTierId.LEGACY,
userDefinedCloudaicompanionProject: true,
};
}

View File

@@ -1,201 +0,0 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
export interface ClientMetadata {
ideType?: ClientMetadataIdeType;
ideVersion?: string;
pluginVersion?: string;
platform?: ClientMetadataPlatform;
updateChannel?: string;
duetProject?: string;
pluginType?: ClientMetadataPluginType;
ideName?: string;
}
export type ClientMetadataIdeType =
| 'IDE_UNSPECIFIED'
| 'VSCODE'
| 'INTELLIJ'
| 'VSCODE_CLOUD_WORKSTATION'
| 'INTELLIJ_CLOUD_WORKSTATION'
| 'CLOUD_SHELL';
export type ClientMetadataPlatform =
| 'PLATFORM_UNSPECIFIED'
| 'DARWIN_AMD64'
| 'DARWIN_ARM64'
| 'LINUX_AMD64'
| 'LINUX_ARM64'
| 'WINDOWS_AMD64';
export type ClientMetadataPluginType =
| 'PLUGIN_UNSPECIFIED'
| 'CLOUD_CODE'
| 'GEMINI'
| 'AIPLUGIN_INTELLIJ'
| 'AIPLUGIN_STUDIO';
export interface LoadCodeAssistRequest {
cloudaicompanionProject?: string;
metadata: ClientMetadata;
}
/**
* Represents LoadCodeAssistResponse proto json field
* http://google3/google/internal/cloud/code/v1internal/cloudcode.proto;l=224
*/
export interface LoadCodeAssistResponse {
currentTier?: GeminiUserTier | null;
allowedTiers?: GeminiUserTier[] | null;
ineligibleTiers?: IneligibleTier[] | null;
cloudaicompanionProject?: string | null;
}
/**
* GeminiUserTier reflects the structure received from CodeAssist when calling LoadCodeAssist.
*/
export interface GeminiUserTier {
id: UserTierId;
name?: string;
description?: string;
// Declares whether a given tier requires the user to configure the project in the IDE settings.
userDefinedCloudaicompanionProject?: boolean | null;
isDefault?: boolean;
privacyNotice?: PrivacyNotice;
hasAcceptedTos?: boolean;
hasOnboardedPreviously?: boolean;
}
/**
* Includes information specifying the reasons for a user's ineligibility for a specific tier.
* @param reasonCode mnemonic code representing the reason for ineligibility.
* @param reasonMessage message to display to the user.
* @param tierId id of the tier.
* @param tierName name of the tier.
*/
export interface IneligibleTier {
reasonCode: IneligibleTierReasonCode;
reasonMessage: string;
tierId: UserTierId;
tierName: string;
}
/**
* Predefined reason codes explaining why a user is blocked from a specific tier.
* https://source.corp.google.com/piper///depot/google3/google/internal/cloud/code/v1internal/cloudcode.proto;l=378
*/
export enum IneligibleTierReasonCode {
// go/keep-sorted start
DASHER_USER = 'DASHER_USER',
INELIGIBLE_ACCOUNT = 'INELIGIBLE_ACCOUNT',
NON_USER_ACCOUNT = 'NON_USER_ACCOUNT',
RESTRICTED_AGE = 'RESTRICTED_AGE',
RESTRICTED_NETWORK = 'RESTRICTED_NETWORK',
UNKNOWN = 'UNKNOWN',
UNKNOWN_LOCATION = 'UNKNOWN_LOCATION',
UNSUPPORTED_LOCATION = 'UNSUPPORTED_LOCATION',
// go/keep-sorted end
}
/**
* UserTierId enumerates the IDs returned by the Cloud Code Private API for a user's tier.
*
* //depot/google3/cloud/developer_experience/cloudcode/pa/service/usertier.go;l=16
*/
export enum UserTierId {
FREE = 'free-tier',
LEGACY = 'legacy-tier',
STANDARD = 'standard-tier',
}
/**
* PrivacyNotice reflects the structure received from CodeAssist regarding a tier's
* privacy notice.
*/
export interface PrivacyNotice {
showNotice: boolean;
noticeText?: string;
}
/**
* Proto signature of OnboardUserRequest as payload to OnboardUser call
*/
export interface OnboardUserRequest {
tierId: string | undefined;
cloudaicompanionProject: string | undefined;
metadata: ClientMetadata | undefined;
}
/**
* Represents LongRunningOperation proto
* http://google3/google/longrunning/operations.proto;rcl=698857719;l=107
*/
export interface LongRunningOperationResponse {
name: string;
done?: boolean;
response?: OnboardUserResponse;
}
/**
* Represents OnboardUserResponse proto
* http://google3/google/internal/cloud/code/v1internal/cloudcode.proto;l=215
*/
export interface OnboardUserResponse {
// tslint:disable-next-line:enforce-name-casing This is the name of the field in the proto.
cloudaicompanionProject?: {
id: string;
name: string;
};
}
/**
* Status code of the user's license status.
* It does not strictly correspond to the proto;
* Error is an additional value assigned to error responses from OnboardUser.
*/
export enum OnboardUserStatusCode {
Default = 'DEFAULT',
Notice = 'NOTICE',
Warning = 'WARNING',
Error = 'ERROR',
}
/**
* Status of a user onboarded to Gemini.
*/
export interface OnboardUserStatus {
statusCode: OnboardUserStatusCode;
displayMessage: string;
helpLink: HelpLinkUrl | undefined;
}
export interface HelpLinkUrl {
description: string;
url: string;
}
export interface SetCodeAssistGlobalUserSettingRequest {
cloudaicompanionProject?: string;
freeTierDataCollectionOptin: boolean;
}
export interface CodeAssistGlobalUserSettingResponse {
cloudaicompanionProject?: string;
freeTierDataCollectionOptin: boolean;
}
/**
* Relevant fields that can be returned from a Google RPC response
*/
export interface GoogleRpcResponse {
error?: {
details?: GoogleRpcErrorInfo[];
};
}
/**
* Relevant fields that can be returned in the details of an error returned from GoogleRPCs
*/
interface GoogleRpcErrorInfo {
reason?: string;
}

View File

@@ -16,7 +16,6 @@ import {
QwenLogger,
} from '../telemetry/index.js';
import type { ContentGeneratorConfig } from '../core/contentGenerator.js';
import { DEFAULT_DASHSCOPE_BASE_URL } from '../core/openaiContentGenerator/constants.js';
import {
AuthType,
createContentGeneratorConfig,
@@ -273,7 +272,7 @@ describe('Server Config (config.ts)', () => {
authType,
{
model: MODEL,
baseUrl: DEFAULT_DASHSCOPE_BASE_URL,
baseUrl: undefined,
},
);
// Verify that contentGeneratorConfig is updated
@@ -283,23 +282,6 @@ describe('Server Config (config.ts)', () => {
expect(config.isInFallbackMode()).toBe(false);
});
it('should strip thoughts when switching from GenAI to Vertex', async () => {
const config = new Config(baseParams);
vi.mocked(createContentGeneratorConfig).mockImplementation(
(_: Config, authType: AuthType | undefined) =>
({ authType }) as unknown as ContentGeneratorConfig,
);
await config.refreshAuth(AuthType.USE_GEMINI);
await config.refreshAuth(AuthType.LOGIN_WITH_GOOGLE);
expect(
config.getGeminiClient().stripThoughtsFromHistory,
).toHaveBeenCalledWith();
});
it('should not strip thoughts when switching from Vertex to GenAI', async () => {
const config = new Config(baseParams);

View File

@@ -16,6 +16,7 @@ import { ProxyAgent, setGlobalDispatcher } from 'undici';
import type {
ContentGenerator,
ContentGeneratorConfig,
AuthType,
} from '../core/contentGenerator.js';
import type { FallbackModelHandler } from '../fallback/types.js';
import type { MCPOAuthConfig } from '../mcp/oauth-provider.js';
@@ -26,7 +27,6 @@ import type { AnyToolInvocation } from '../tools/tools.js';
import { BaseLlmClient } from '../core/baseLlmClient.js';
import { GeminiClient } from '../core/client.js';
import {
AuthType,
createContentGenerator,
createContentGeneratorConfig,
} from '../core/contentGenerator.js';
@@ -54,6 +54,7 @@ import { canUseRipgrep } from '../utils/ripgrepUtils.js';
import { RipGrepTool } from '../tools/ripGrep.js';
import { ShellTool } from '../tools/shell.js';
import { SmartEditTool } from '../tools/smart-edit.js';
import { SkillTool } from '../tools/skill.js';
import { TaskTool } from '../tools/task.js';
import { TodoWriteTool } from '../tools/todoWrite.js';
import { ToolRegistry } from '../tools/tool-registry.js';
@@ -65,6 +66,7 @@ import { WriteFileTool } from '../tools/write-file.js';
import { ideContextStore } from '../ide/ideContext.js';
import { InputFormat, OutputFormat } from '../output/types.js';
import { PromptRegistry } from '../prompts/prompt-registry.js';
import { SkillManager } from '../skills/skill-manager.js';
import { SubagentManager } from '../subagents/subagent-manager.js';
import type { SubagentConfig } from '../subagents/types.js';
import {
@@ -94,7 +96,6 @@ import {
} from './constants.js';
import { DEFAULT_QWEN_EMBEDDING_MODEL, DEFAULT_QWEN_MODEL } from './models.js';
import { Storage } from './storage.js';
import { DEFAULT_DASHSCOPE_BASE_URL } from '../core/openaiContentGenerator/constants.js';
import { ChatRecordingService } from '../services/chatRecordingService.js';
import {
SessionService,
@@ -305,6 +306,7 @@ export interface ConfigParameters {
extensionContextFilePaths?: string[];
maxSessionTurns?: number;
sessionTokenLimit?: number;
experimentalSkills?: boolean;
experimentalZedIntegration?: boolean;
listExtensions?: boolean;
extensions?: GeminiCLIExtension[];
@@ -389,6 +391,7 @@ export class Config {
private toolRegistry!: ToolRegistry;
private promptRegistry!: PromptRegistry;
private subagentManager!: SubagentManager;
private skillManager!: SkillManager;
private fileSystemService: FileSystemService;
private contentGeneratorConfig!: ContentGeneratorConfig;
private contentGenerator!: ContentGenerator;
@@ -458,6 +461,7 @@ export class Config {
| undefined;
private readonly cliVersion?: string;
private readonly experimentalZedIntegration: boolean = false;
private readonly experimentalSkills: boolean = false;
private readonly chatRecordingEnabled: boolean;
private readonly loadMemoryFromIncludeDirectories: boolean = false;
private readonly webSearch?: {
@@ -557,6 +561,7 @@ export class Config {
this.sessionTokenLimit = params.sessionTokenLimit ?? -1;
this.experimentalZedIntegration =
params.experimentalZedIntegration ?? false;
this.experimentalSkills = params.experimentalSkills ?? false;
this.listExtensions = params.listExtensions ?? false;
this._extensions = params.extensions ?? [];
this._blockedMcpServers = params.blockedMcpServers ?? [];
@@ -568,7 +573,7 @@ export class Config {
this._generationConfig = {
model: params.model,
...(params.generationConfig || {}),
baseUrl: params.generationConfig?.baseUrl || DEFAULT_DASHSCOPE_BASE_URL,
baseUrl: params.generationConfig?.baseUrl,
};
this.contentGeneratorConfig = this
._generationConfig as ContentGeneratorConfig;
@@ -644,6 +649,7 @@ export class Config {
}
this.promptRegistry = new PromptRegistry();
this.subagentManager = new SubagentManager(this);
this.skillManager = new SkillManager(this);
// Load session subagents if they were provided before initialization
if (this.sessionSubagents.length > 0) {
@@ -684,16 +690,6 @@ export class Config {
}
async refreshAuth(authMethod: AuthType, isInitialAuth?: boolean) {
// Vertex and GenAI have incompatible encryption, and sending history with
// thoughtSignature from GenAI to Vertex will fail, so we need to strip them.
if (
this.contentGeneratorConfig?.authType === AuthType.USE_GEMINI &&
authMethod === AuthType.LOGIN_WITH_GOOGLE
) {
// Strip thought signatures before the history is reused by the new client
this.geminiClient.stripThoughtsFromHistory();
}
const newContentGeneratorConfig = createContentGeneratorConfig(
this,
authMethod,
@@ -1076,6 +1072,10 @@ export class Config {
return this.experimentalZedIntegration;
}
getExperimentalSkills(): boolean {
return this.experimentalSkills;
}
getListExtensions(): boolean {
return this.listExtensions;
}
@@ -1306,6 +1306,10 @@ export class Config {
return this.subagentManager;
}
getSkillManager(): SkillManager {
return this.skillManager;
}
async createToolRegistry(
sendSdkMcpMessage?: SendSdkMcpMessage,
): Promise<ToolRegistry> {
@@ -1348,6 +1352,9 @@ export class Config {
};
registerCoreTool(TaskTool, this);
if (this.getExperimentalSkills()) {
registerCoreTool(SkillTool, this);
}
registerCoreTool(LSTool, this);
registerCoreTool(ReadFileTool, this);

View File

@@ -31,7 +31,7 @@ describe('Flash Model Fallback Configuration', () => {
config as unknown as { contentGeneratorConfig: unknown }
).contentGeneratorConfig = {
model: DEFAULT_GEMINI_MODEL,
authType: 'oauth-personal',
authType: 'gemini-api-key',
};
});

View File

@@ -126,6 +126,10 @@ export class Storage {
return path.join(this.getExtensionsDir(), 'qwen-extension.json');
}
getUserSkillsDir(): string {
return path.join(Storage.getGlobalQwenDir(), 'skills');
}
getHistoryFilePath(): string {
return path.join(this.getProjectTempDir(), 'shell_history');
}

View File

@@ -73,6 +73,7 @@ describe('OpenAIContentGenerator Timeout Handling', () => {
}),
buildClient: vi.fn().mockReturnValue(mockOpenAIClient),
buildRequest: vi.fn().mockImplementation((req) => req),
getDefaultGenerationConfig: vi.fn().mockReturnValue({}),
};
// Create generator instance
@@ -299,6 +300,7 @@ describe('OpenAIContentGenerator Timeout Handling', () => {
}),
buildClient: vi.fn().mockReturnValue(mockOpenAIClient),
buildRequest: vi.fn().mockImplementation((req) => req),
getDefaultGenerationConfig: vi.fn().mockReturnValue({}),
};
new OpenAIContentGenerator(
@@ -333,6 +335,7 @@ describe('OpenAIContentGenerator Timeout Handling', () => {
}),
buildClient: vi.fn().mockReturnValue(mockOpenAIClient),
buildRequest: vi.fn().mockImplementation((req) => req),
getDefaultGenerationConfig: vi.fn().mockReturnValue({}),
};
new OpenAIContentGenerator(

View File

@@ -0,0 +1,500 @@
/**
* @license
* Copyright 2025 Qwen
* SPDX-License-Identifier: Apache-2.0
*/
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
import type {
CountTokensParameters,
GenerateContentParameters,
} from '@google/genai';
import { FinishReason, GenerateContentResponse } from '@google/genai';
// Mock the request tokenizer module BEFORE importing the class that uses it.
const mockTokenizer = {
calculateTokens: vi.fn(),
dispose: vi.fn(),
};
vi.mock('../../utils/request-tokenizer/index.js', () => ({
getDefaultTokenizer: vi.fn(() => mockTokenizer),
DefaultRequestTokenizer: vi.fn(() => mockTokenizer),
disposeDefaultTokenizer: vi.fn(),
}));
type AnthropicCreateArgs = [unknown, { signal?: AbortSignal }?];
const anthropicMockState: {
constructorOptions?: Record<string, unknown>;
lastCreateArgs?: AnthropicCreateArgs;
createImpl: ReturnType<typeof vi.fn>;
} = {
constructorOptions: undefined,
lastCreateArgs: undefined,
createImpl: vi.fn(),
};
vi.mock('@anthropic-ai/sdk', () => {
class AnthropicMock {
messages: { create: (...args: AnthropicCreateArgs) => unknown };
constructor(options: Record<string, unknown>) {
anthropicMockState.constructorOptions = options;
this.messages = {
create: (...args: AnthropicCreateArgs) => {
anthropicMockState.lastCreateArgs = args;
return anthropicMockState.createImpl(...args);
},
};
}
}
return {
default: AnthropicMock,
__anthropicState: anthropicMockState,
};
});
// Now import the modules that depend on the mocked modules.
import type { Config } from '../../config/config.js';
const importGenerator = async (): Promise<{
AnthropicContentGenerator: typeof import('./anthropicContentGenerator.js').AnthropicContentGenerator;
}> => import('./anthropicContentGenerator.js');
const importConverter = async (): Promise<{
AnthropicContentConverter: typeof import('./converter.js').AnthropicContentConverter;
}> => import('./converter.js');
describe('AnthropicContentGenerator', () => {
let mockConfig: Config;
let anthropicState: {
constructorOptions?: Record<string, unknown>;
lastCreateArgs?: AnthropicCreateArgs;
createImpl: ReturnType<typeof vi.fn>;
};
beforeEach(async () => {
vi.clearAllMocks();
vi.resetModules();
mockTokenizer.calculateTokens.mockResolvedValue({
totalTokens: 50,
breakdown: {
textTokens: 50,
imageTokens: 0,
audioTokens: 0,
otherTokens: 0,
},
processingTime: 1,
});
anthropicState = anthropicMockState;
anthropicState.createImpl.mockReset();
anthropicState.lastCreateArgs = undefined;
anthropicState.constructorOptions = undefined;
mockConfig = {
getCliVersion: vi.fn().mockReturnValue('1.2.3'),
} as unknown as Config;
});
afterEach(() => {
vi.restoreAllMocks();
});
it('passes a QwenCode User-Agent header to the Anthropic SDK', async () => {
const { AnthropicContentGenerator } = await importGenerator();
void new AnthropicContentGenerator(
{
model: 'claude-test',
apiKey: 'test-key',
baseUrl: 'https://example.invalid',
timeout: 10_000,
maxRetries: 2,
samplingParams: {},
schemaCompliance: 'auto',
},
mockConfig,
);
const headers = (anthropicState.constructorOptions?.['defaultHeaders'] ||
{}) as Record<string, string>;
expect(headers['User-Agent']).toContain('QwenCode/1.2.3');
expect(headers['User-Agent']).toContain(
`(${process.platform}; ${process.arch})`,
);
});
it('adds the effort beta header when reasoning.effort is set', async () => {
const { AnthropicContentGenerator } = await importGenerator();
void new AnthropicContentGenerator(
{
model: 'claude-test',
apiKey: 'test-key',
baseUrl: 'https://example.invalid',
timeout: 10_000,
maxRetries: 2,
samplingParams: {},
schemaCompliance: 'auto',
reasoning: { effort: 'medium' },
},
mockConfig,
);
const headers = (anthropicState.constructorOptions?.['defaultHeaders'] ||
{}) as Record<string, string>;
expect(headers['anthropic-beta']).toContain('effort-2025-11-24');
});
it('does not add the effort beta header when reasoning.effort is not set', async () => {
const { AnthropicContentGenerator } = await importGenerator();
void new AnthropicContentGenerator(
{
model: 'claude-test',
apiKey: 'test-key',
baseUrl: 'https://example.invalid',
timeout: 10_000,
maxRetries: 2,
samplingParams: {},
schemaCompliance: 'auto',
},
mockConfig,
);
const headers = (anthropicState.constructorOptions?.['defaultHeaders'] ||
{}) as Record<string, string>;
expect(headers['anthropic-beta']).not.toContain('effort-2025-11-24');
});
it('omits the anthropic beta header when reasoning is disabled', async () => {
const { AnthropicContentGenerator } = await importGenerator();
void new AnthropicContentGenerator(
{
model: 'claude-test',
apiKey: 'test-key',
baseUrl: 'https://example.invalid',
timeout: 10_000,
maxRetries: 2,
samplingParams: {},
schemaCompliance: 'auto',
reasoning: false,
},
mockConfig,
);
const headers = (anthropicState.constructorOptions?.['defaultHeaders'] ||
{}) as Record<string, string>;
expect(headers['anthropic-beta']).toBeUndefined();
});
describe('generateContent', () => {
it('builds request with config sampling params (config overrides request) and thinking budget', async () => {
const { AnthropicContentConverter } = await importConverter();
const { AnthropicContentGenerator } = await importGenerator();
const convertResponseSpy = vi
.spyOn(
AnthropicContentConverter.prototype,
'convertAnthropicResponseToGemini',
)
.mockReturnValue(
(() => {
const r = new GenerateContentResponse();
r.responseId = 'gemini-1';
return r;
})(),
);
anthropicState.createImpl.mockResolvedValue({
id: 'anthropic-1',
model: 'claude-test',
content: [{ type: 'text', text: 'hi' }],
});
const generator = new AnthropicContentGenerator(
{
model: 'claude-test',
apiKey: 'test-key',
baseUrl: 'https://example.invalid',
timeout: 10_000,
maxRetries: 2,
samplingParams: {
temperature: 0.7,
max_tokens: 1000,
top_p: 0.9,
top_k: 20,
},
schemaCompliance: 'auto',
reasoning: { effort: 'high', budget_tokens: 1000 },
},
mockConfig,
);
const abortController = new AbortController();
const request: GenerateContentParameters = {
model: 'models/ignored',
contents: 'Hello',
config: {
temperature: 0.1,
maxOutputTokens: 200,
topP: 0.5,
topK: 5,
abortSignal: abortController.signal,
},
};
const result = await generator.generateContent(request);
expect(result.responseId).toBe('gemini-1');
expect(anthropicState.lastCreateArgs).toBeDefined();
const [anthropicRequest, options] =
anthropicState.lastCreateArgs as AnthropicCreateArgs;
expect(options?.signal).toBe(abortController.signal);
expect(anthropicRequest).toEqual(
expect.objectContaining({
model: 'claude-test',
max_tokens: 1000,
temperature: 0.7,
top_p: 0.9,
top_k: 20,
thinking: { type: 'enabled', budget_tokens: 1000 },
output_config: { effort: 'high' },
}),
);
expect(convertResponseSpy).toHaveBeenCalledTimes(1);
});
it('omits thinking when request.config.thinkingConfig.includeThoughts is false', async () => {
const { AnthropicContentGenerator } = await importGenerator();
anthropicState.createImpl.mockResolvedValue({
id: 'anthropic-1',
model: 'claude-test',
content: [{ type: 'text', text: 'hi' }],
});
const generator = new AnthropicContentGenerator(
{
model: 'claude-test',
apiKey: 'test-key',
timeout: 10_000,
maxRetries: 2,
samplingParams: { max_tokens: 500 },
schemaCompliance: 'auto',
reasoning: { effort: 'high' },
},
mockConfig,
);
await generator.generateContent({
model: 'models/ignored',
contents: 'Hello',
config: { thinkingConfig: { includeThoughts: false } },
} as unknown as GenerateContentParameters);
const [anthropicRequest] =
anthropicState.lastCreateArgs as AnthropicCreateArgs;
expect(anthropicRequest).toEqual(
expect.not.objectContaining({ thinking: expect.anything() }),
);
});
});
describe('countTokens', () => {
it('counts tokens using the request tokenizer', async () => {
const { AnthropicContentGenerator } = await importGenerator();
const generator = new AnthropicContentGenerator(
{
model: 'claude-test',
apiKey: 'test-key',
timeout: 10_000,
maxRetries: 2,
samplingParams: {},
schemaCompliance: 'auto',
},
mockConfig,
);
const request: CountTokensParameters = {
contents: [{ role: 'user', parts: [{ text: 'Hello world' }] }],
model: 'claude-test',
};
const result = await generator.countTokens(request);
expect(mockTokenizer.calculateTokens).toHaveBeenCalledWith(request, {
textEncoding: 'cl100k_base',
});
expect(result.totalTokens).toBe(50);
});
it('falls back to character approximation when tokenizer throws', async () => {
const { AnthropicContentGenerator } = await importGenerator();
mockTokenizer.calculateTokens.mockRejectedValueOnce(new Error('boom'));
const generator = new AnthropicContentGenerator(
{
model: 'claude-test',
apiKey: 'test-key',
timeout: 10_000,
maxRetries: 2,
samplingParams: {},
schemaCompliance: 'auto',
},
mockConfig,
);
const request: CountTokensParameters = {
contents: [{ role: 'user', parts: [{ text: 'Hello' }] }],
model: 'claude-test',
};
const content = JSON.stringify(request.contents);
const expected = Math.ceil(content.length / 4);
const result = await generator.countTokens(request);
expect(result.totalTokens).toBe(expected);
});
});
describe('generateContentStream', () => {
it('requests stream=true and converts streamed events into Gemini chunks', async () => {
const { AnthropicContentGenerator } = await importGenerator();
anthropicState.createImpl.mockResolvedValue(
(async function* () {
yield {
type: 'message_start',
message: {
id: 'msg-1',
model: 'claude-test',
usage: { cache_read_input_tokens: 2, input_tokens: 3 },
},
};
yield {
type: 'content_block_start',
index: 0,
content_block: { type: 'text' },
};
yield {
type: 'content_block_delta',
index: 0,
delta: { type: 'text_delta', text: 'Hello' },
};
yield { type: 'content_block_stop', index: 0 };
yield {
type: 'content_block_start',
index: 1,
content_block: { type: 'thinking', signature: '' },
};
yield {
type: 'content_block_delta',
index: 1,
delta: { type: 'thinking_delta', thinking: 'Think' },
};
yield {
type: 'content_block_delta',
index: 1,
delta: { type: 'signature_delta', signature: 'abc' },
};
yield { type: 'content_block_stop', index: 1 };
yield {
type: 'content_block_start',
index: 2,
content_block: {
type: 'tool_use',
id: 't1',
name: 'tool',
input: {},
},
};
yield {
type: 'content_block_delta',
index: 2,
delta: { type: 'input_json_delta', partial_json: '{"x":' },
};
yield {
type: 'content_block_delta',
index: 2,
delta: { type: 'input_json_delta', partial_json: '1}' },
};
yield { type: 'content_block_stop', index: 2 };
yield {
type: 'message_delta',
delta: { stop_reason: 'end_turn' },
usage: {
output_tokens: 5,
input_tokens: 7,
cache_read_input_tokens: 2,
},
};
yield { type: 'message_stop' };
})(),
);
const generator = new AnthropicContentGenerator(
{
model: 'claude-test',
apiKey: 'test-key',
timeout: 10_000,
maxRetries: 2,
samplingParams: { max_tokens: 123 },
schemaCompliance: 'auto',
},
mockConfig,
);
const stream = await generator.generateContentStream({
model: 'models/ignored',
contents: 'Hello',
} as unknown as GenerateContentParameters);
const chunks: GenerateContentResponse[] = [];
for await (const chunk of stream) {
chunks.push(chunk);
}
const [anthropicRequest] =
anthropicState.lastCreateArgs as AnthropicCreateArgs;
expect(anthropicRequest).toEqual(
expect.objectContaining({ stream: true }),
);
// Text chunk.
expect(chunks[0]?.candidates?.[0]?.content?.parts?.[0]).toEqual({
text: 'Hello',
});
// Thinking chunk.
expect(chunks[1]?.candidates?.[0]?.content?.parts?.[0]).toEqual({
text: 'Think',
thought: true,
});
// Signature chunk.
expect(chunks[2]?.candidates?.[0]?.content?.parts?.[0]).toEqual({
thought: true,
thoughtSignature: 'abc',
});
// Tool call chunk.
expect(chunks[3]?.candidates?.[0]?.content?.parts?.[0]).toEqual({
functionCall: { id: 't1', name: 'tool', args: { x: 1 } },
});
// Usage/finish chunks exist; check the last one.
const last = chunks[chunks.length - 1]!;
expect(last.candidates?.[0]?.finishReason).toBe(FinishReason.STOP);
expect(last.usageMetadata).toEqual({
cachedContentTokenCount: 2,
promptTokenCount: 9, // cached(2) + input(7)
candidatesTokenCount: 5,
totalTokenCount: 14,
});
});
});
});

View File

@@ -0,0 +1,502 @@
/**
* @license
* Copyright 2025 Qwen
* SPDX-License-Identifier: Apache-2.0
*/
import Anthropic from '@anthropic-ai/sdk';
import type {
CountTokensParameters,
CountTokensResponse,
EmbedContentParameters,
EmbedContentResponse,
GenerateContentParameters,
GenerateContentResponseUsageMetadata,
Part,
} from '@google/genai';
import { GenerateContentResponse } from '@google/genai';
import type { Config } from '../../config/config.js';
import type {
ContentGenerator,
ContentGeneratorConfig,
} from '../contentGenerator.js';
type Message = Anthropic.Message;
type MessageCreateParamsNonStreaming =
Anthropic.MessageCreateParamsNonStreaming;
type MessageCreateParamsStreaming = Anthropic.MessageCreateParamsStreaming;
type RawMessageStreamEvent = Anthropic.RawMessageStreamEvent;
import { getDefaultTokenizer } from '../../utils/request-tokenizer/index.js';
import { safeJsonParse } from '../../utils/safeJsonParse.js';
import { AnthropicContentConverter } from './converter.js';
type StreamingBlockState = {
type: string;
id?: string;
name?: string;
inputJson: string;
signature: string;
};
type MessageCreateParamsWithThinking = MessageCreateParamsNonStreaming & {
thinking?: { type: 'enabled'; budget_tokens: number };
// Anthropic beta feature: output_config.effort (requires beta header effort-2025-11-24)
// This is not yet represented in the official SDK types we depend on.
output_config?: { effort: 'low' | 'medium' | 'high' };
};
export class AnthropicContentGenerator implements ContentGenerator {
private client: Anthropic;
private converter: AnthropicContentConverter;
constructor(
private contentGeneratorConfig: ContentGeneratorConfig,
private readonly cliConfig: Config,
) {
const defaultHeaders = this.buildHeaders();
const baseURL = contentGeneratorConfig.baseUrl;
this.client = new Anthropic({
apiKey: contentGeneratorConfig.apiKey,
baseURL,
timeout: contentGeneratorConfig.timeout,
maxRetries: contentGeneratorConfig.maxRetries,
defaultHeaders,
});
this.converter = new AnthropicContentConverter(
contentGeneratorConfig.model,
contentGeneratorConfig.schemaCompliance,
);
}
async generateContent(
request: GenerateContentParameters,
): Promise<GenerateContentResponse> {
const anthropicRequest = await this.buildRequest(request);
const response = (await this.client.messages.create(anthropicRequest, {
signal: request.config?.abortSignal,
})) as Message;
return this.converter.convertAnthropicResponseToGemini(response);
}
async generateContentStream(
request: GenerateContentParameters,
): Promise<AsyncGenerator<GenerateContentResponse>> {
const anthropicRequest = await this.buildRequest(request);
const streamingRequest: MessageCreateParamsStreaming & {
thinking?: { type: 'enabled'; budget_tokens: number };
} = {
...anthropicRequest,
stream: true,
};
const stream = (await this.client.messages.create(
streamingRequest as MessageCreateParamsStreaming,
{
signal: request.config?.abortSignal,
},
)) as AsyncIterable<RawMessageStreamEvent>;
return this.processStream(stream);
}
async countTokens(
request: CountTokensParameters,
): Promise<CountTokensResponse> {
try {
const tokenizer = getDefaultTokenizer();
const result = await tokenizer.calculateTokens(request, {
textEncoding: 'cl100k_base',
});
return {
totalTokens: result.totalTokens,
};
} catch (error) {
console.warn(
'Failed to calculate tokens with tokenizer, ' +
'falling back to simple method:',
error,
);
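// Fallback heuristic (matches the tests above): roughly 4 characters per token.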
const content = JSON.stringify(request.contents);
const totalTokens = Math.ceil(content.length / 4);
return {
totalTokens,
};
}
}
async embedContent(
_request: EmbedContentParameters,
): Promise<EmbedContentResponse> {
throw new Error('Anthropic does not support embeddings.');
}
useSummarizedThinking(): boolean {
return false;
}
private buildHeaders(): Record<string, string> {
const version = this.cliConfig.getCliVersion() || 'unknown';
const userAgent = `QwenCode/${version} (${process.platform}; ${process.arch})`;
const betas: string[] = [];
const reasoning = this.contentGeneratorConfig.reasoning;
// Interleaved thinking is used when we send the `thinking` field.
if (reasoning !== false) {
betas.push('interleaved-thinking-2025-05-14');
}
// Effort (beta) is enabled when reasoning.effort is set.
if (reasoning !== false && reasoning?.effort !== undefined) {
betas.push('effort-2025-11-24');
}
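// For example, with reasoning = { effort: 'high' } the resulting header is:
//   anthropic-beta: interleaved-thinking-2025-05-14,effort-2025-11-24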
const headers: Record<string, string> = {
'User-Agent': userAgent,
};
if (betas.length) {
headers['anthropic-beta'] = betas.join(',');
}
return headers;
}
private async buildRequest(
request: GenerateContentParameters,
): Promise<MessageCreateParamsWithThinking> {
const { system, messages } =
this.converter.convertGeminiRequestToAnthropic(request);
const tools = request.config?.tools
? await this.converter.convertGeminiToolsToAnthropic(request.config.tools)
: undefined;
const sampling = this.buildSamplingParameters(request);
const thinking = this.buildThinkingConfig(request);
const outputConfig = this.buildOutputConfig();
return {
model: this.contentGeneratorConfig.model,
system,
messages,
tools,
...sampling,
...(thinking ? { thinking } : {}),
...(outputConfig ? { output_config: outputConfig } : {}),
};
}
private buildSamplingParameters(request: GenerateContentParameters): {
max_tokens: number;
temperature?: number;
top_p?: number;
top_k?: number;
} {
const configSamplingParams = this.contentGeneratorConfig.samplingParams;
const requestConfig = request.config || {};
const getParam = <T>(
configKey: keyof NonNullable<typeof configSamplingParams>,
requestKey?: keyof NonNullable<typeof requestConfig>,
): T | undefined => {
const configValue = configSamplingParams?.[configKey] as T | undefined;
const requestValue = requestKey
? (requestConfig[requestKey] as T | undefined)
: undefined;
return configValue !== undefined ? configValue : requestValue;
};
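// Precedence: explicit samplingParams from the generator config win over the
// per-request Gemini config (e.g. a config temperature of 0.7 beats a request
// temperature of 0.1, as exercised in the tests above).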
const maxTokens =
getParam<number>('max_tokens', 'maxOutputTokens') ?? 10_000;
return {
max_tokens: maxTokens,
temperature: getParam<number>('temperature', 'temperature') ?? 1,
top_p: getParam<number>('top_p', 'topP'),
top_k: getParam<number>('top_k', 'topK'),
};
}
private buildThinkingConfig(
request: GenerateContentParameters,
): { type: 'enabled'; budget_tokens: number } | undefined {
if (request.config?.thinkingConfig?.includeThoughts === false) {
return undefined;
}
const reasoning = this.contentGeneratorConfig.reasoning;
if (reasoning === false) {
return undefined;
}
if (reasoning?.budget_tokens !== undefined) {
return {
type: 'enabled',
budget_tokens: reasoning.budget_tokens,
};
}
const effort = reasoning?.effort;
// With interleaved thinking and tools, the thinking budget can extend up to the entire context window (200k tokens).
const budgetTokens =
effort === 'low' ? 16_000 : effort === 'high' ? 64_000 : 32_000;
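// Unset or 'medium' effort falls back to the 32_000-token budget.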
return {
type: 'enabled',
budget_tokens: budgetTokens,
};
}
private buildOutputConfig():
| { effort: 'low' | 'medium' | 'high' }
| undefined {
const reasoning = this.contentGeneratorConfig.reasoning;
if (reasoning === false || reasoning === undefined) {
return undefined;
}
if (reasoning.effort === undefined) {
return undefined;
}
return { effort: reasoning.effort };
}
private async *processStream(
stream: AsyncIterable<RawMessageStreamEvent>,
): AsyncGenerator<GenerateContentResponse> {
let messageId: string | undefined;
let model = this.contentGeneratorConfig.model;
let cachedTokens = 0;
let promptTokens = 0;
let completionTokens = 0;
let finishReason: string | undefined;
const blocks = new Map<number, StreamingBlockState>();
const collectedResponses: GenerateContentResponse[] = [];
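// Translate Anthropic's block-indexed stream into flat Gemini chunks: text and
// thinking deltas are yielded immediately, while tool_use input JSON is
// accumulated per block and emitted once on content_block_stop.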
for await (const event of stream) {
switch (event.type) {
case 'message_start': {
messageId = event.message.id ?? messageId;
model = event.message.model ?? model;
cachedTokens =
event.message.usage?.cache_read_input_tokens ?? cachedTokens;
promptTokens = event.message.usage?.input_tokens ?? promptTokens;
break;
}
case 'content_block_start': {
const index = event.index ?? 0;
const type = String(event.content_block.type || 'text');
const initialInput =
type === 'tool_use' && 'input' in event.content_block
? JSON.stringify(event.content_block.input)
: '';
blocks.set(index, {
type,
id:
'id' in event.content_block ? event.content_block.id : undefined,
name:
'name' in event.content_block
? event.content_block.name
: undefined,
inputJson: initialInput !== '{}' ? initialInput : '',
signature:
type === 'thinking' &&
'signature' in event.content_block &&
typeof event.content_block.signature === 'string'
? event.content_block.signature
: '',
});
break;
}
case 'content_block_delta': {
const index = event.index ?? 0;
const deltaType = (event.delta as { type?: string }).type || '';
const blockState = blocks.get(index);
if (deltaType === 'text_delta') {
const text = 'text' in event.delta ? event.delta.text : '';
if (text) {
const chunk = this.buildGeminiChunk({ text }, messageId, model);
collectedResponses.push(chunk);
yield chunk;
}
} else if (deltaType === 'thinking_delta') {
const thinking =
(event.delta as { thinking?: string }).thinking || '';
if (thinking) {
const chunk = this.buildGeminiChunk(
{ text: thinking, thought: true },
messageId,
model,
);
collectedResponses.push(chunk);
yield chunk;
}
} else if (deltaType === 'signature_delta' && blockState) {
const signature =
(event.delta as { signature?: string }).signature || '';
if (signature) {
blockState.signature += signature;
const chunk = this.buildGeminiChunk(
{ thought: true, thoughtSignature: signature },
messageId,
model,
);
collectedResponses.push(chunk);
yield chunk;
}
} else if (deltaType === 'input_json_delta' && blockState) {
const jsonDelta =
(event.delta as { partial_json?: string }).partial_json || '';
if (jsonDelta) {
blockState.inputJson += jsonDelta;
}
}
break;
}
case 'content_block_stop': {
const index = event.index ?? 0;
const blockState = blocks.get(index);
if (blockState?.type === 'tool_use') {
const args = safeJsonParse(blockState.inputJson || '{}', {});
const chunk = this.buildGeminiChunk(
{
functionCall: {
id: blockState.id,
name: blockState.name,
args,
},
},
messageId,
model,
);
collectedResponses.push(chunk);
yield chunk;
}
blocks.delete(index);
break;
}
case 'message_delta': {
const stopReasonValue = event.delta.stop_reason;
if (stopReasonValue) {
finishReason = stopReasonValue;
}
// Some Anthropic-compatible providers may include additional usage fields
// (e.g. `input_tokens`, `cache_read_input_tokens`) even though the official
// Anthropic SDK types only expose `output_tokens` here.
const usageUnknown = event.usage as unknown;
const usageRecord =
usageUnknown && typeof usageUnknown === 'object'
? (usageUnknown as Record<string, unknown>)
: undefined;
if (event.usage?.output_tokens !== undefined) {
completionTokens = event.usage.output_tokens;
}
if (usageRecord?.['input_tokens'] !== undefined) {
const inputTokens = usageRecord['input_tokens'];
if (typeof inputTokens === 'number') {
promptTokens = inputTokens;
}
}
if (usageRecord?.['cache_read_input_tokens'] !== undefined) {
const cacheRead = usageRecord['cache_read_input_tokens'];
if (typeof cacheRead === 'number') {
cachedTokens = cacheRead;
}
}
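// Prompt tokens reported to Gemini include the cached prefix:
// e.g. cached(2) + input(7) = 9 prompt tokens, and 9 + output(5) = 14 total,
// as asserted in the streaming test above.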
if (finishReason || event.usage) {
const chunk = this.buildGeminiChunk(
undefined,
messageId,
model,
finishReason,
{
cachedContentTokenCount: cachedTokens,
promptTokenCount: cachedTokens + promptTokens,
candidatesTokenCount: completionTokens,
totalTokenCount: cachedTokens + promptTokens + completionTokens,
},
);
collectedResponses.push(chunk);
yield chunk;
}
break;
}
case 'message_stop': {
if (promptTokens || completionTokens) {
const chunk = this.buildGeminiChunk(
undefined,
messageId,
model,
finishReason,
{
cachedContentTokenCount: cachedTokens,
promptTokenCount: cachedTokens + promptTokens,
candidatesTokenCount: completionTokens,
totalTokenCount: cachedTokens + promptTokens + completionTokens,
},
);
collectedResponses.push(chunk);
yield chunk;
}
break;
}
default:
break;
}
}
}
private buildGeminiChunk(
part?: {
text?: string;
thought?: boolean;
thoughtSignature?: string;
functionCall?: unknown;
},
responseId?: string,
model?: string,
finishReason?: string,
usageMetadata?: GenerateContentResponseUsageMetadata,
): GenerateContentResponse {
const response = new GenerateContentResponse();
response.responseId = responseId;
response.createTime = Date.now().toString();
response.modelVersion = model || this.contentGeneratorConfig.model;
response.promptFeedback = { safetyRatings: [] };
const candidateParts = part ? [part as unknown as Part] : [];
const mappedFinishReason =
finishReason !== undefined
? this.converter.mapAnthropicFinishReasonToGemini(finishReason)
: undefined;
response.candidates = [
{
content: {
parts: candidateParts,
role: 'model' as const,
},
index: 0,
safetyRatings: [],
...(mappedFinishReason ? { finishReason: mappedFinishReason } : {}),
},
];
if (usageMetadata) {
response.usageMetadata = usageMetadata;
}
return response;
}
}

View File

@@ -0,0 +1,377 @@
/**
* @license
* Copyright 2025 Qwen
* SPDX-License-Identifier: Apache-2.0
*/
import { beforeEach, describe, expect, it, vi } from 'vitest';
import type { CallableTool, Content, Tool } from '@google/genai';
import { FinishReason } from '@google/genai';
import type Anthropic from '@anthropic-ai/sdk';
// Mock schema conversion so we can force edge cases (e.g. missing `type`).
vi.mock('../../utils/schemaConverter.js', () => ({
convertSchema: vi.fn((schema: unknown) => schema),
}));
import { convertSchema } from '../../utils/schemaConverter.js';
import { AnthropicContentConverter } from './converter.js';
describe('AnthropicContentConverter', () => {
let converter: AnthropicContentConverter;
beforeEach(() => {
vi.clearAllMocks();
converter = new AnthropicContentConverter('test-model', 'auto');
});
describe('convertGeminiRequestToAnthropic', () => {
it('extracts systemInstruction text from string', () => {
const { system } = converter.convertGeminiRequestToAnthropic({
model: 'models/test',
contents: 'hi',
config: { systemInstruction: 'sys' },
});
expect(system).toBe('sys');
});
it('extracts systemInstruction text from parts and joins with newlines', () => {
const { system } = converter.convertGeminiRequestToAnthropic({
model: 'models/test',
contents: 'hi',
config: {
systemInstruction: {
role: 'system',
parts: [{ text: 'a' }, { text: 'b' }],
} as unknown as Content,
},
});
expect(system).toBe('a\nb');
});
it('converts a plain string content into a user message', () => {
const { messages } = converter.convertGeminiRequestToAnthropic({
model: 'models/test',
contents: 'Hello',
});
expect(messages).toEqual([
{ role: 'user', content: [{ type: 'text', text: 'Hello' }] },
]);
});
it('converts user content parts into a user message with text blocks', () => {
const { messages } = converter.convertGeminiRequestToAnthropic({
model: 'models/test',
contents: [
{
role: 'user',
parts: [{ text: 'Hello' }, { text: 'World' }],
},
],
});
expect(messages).toEqual([
{
role: 'user',
content: [
{ type: 'text', text: 'Hello' },
{ type: 'text', text: 'World' },
],
},
]);
});
it('converts assistant thought parts into Anthropic thinking blocks', () => {
const { messages } = converter.convertGeminiRequestToAnthropic({
model: 'models/test',
contents: [
{
role: 'model',
parts: [
{ text: 'internal', thought: true, thoughtSignature: 'sig' },
{ text: 'visible' },
],
},
],
});
expect(messages).toEqual([
{
role: 'assistant',
content: [
{ type: 'thinking', thinking: 'internal', signature: 'sig' },
{ type: 'text', text: 'visible' },
],
},
]);
});
it('converts functionCall parts from model role into tool_use blocks', () => {
const { messages } = converter.convertGeminiRequestToAnthropic({
model: 'models/test',
contents: [
{
role: 'model',
parts: [
{ text: 'preface' },
{
functionCall: {
id: 'call-1',
name: 'tool_name',
args: { a: 1 },
},
},
],
},
],
});
expect(messages).toEqual([
{
role: 'assistant',
content: [
{ type: 'text', text: 'preface' },
{
type: 'tool_use',
id: 'call-1',
name: 'tool_name',
input: { a: 1 },
},
],
},
]);
});
it('converts functionResponse parts into user tool_result messages', () => {
const { messages } = converter.convertGeminiRequestToAnthropic({
model: 'models/test',
contents: [
{
role: 'user',
parts: [
{
functionResponse: {
id: 'call-1',
name: 'tool_name',
response: { output: 'ok' },
},
},
],
},
],
});
expect(messages).toEqual([
{
role: 'user',
content: [
{
type: 'tool_result',
tool_use_id: 'call-1',
content: 'ok',
},
],
},
]);
});
it('extracts function response error field when present', () => {
const { messages } = converter.convertGeminiRequestToAnthropic({
model: 'models/test',
contents: [
{
role: 'user',
parts: [
{
functionResponse: {
id: 'call-1',
name: 'tool_name',
response: { error: 'boom' },
},
},
],
},
],
});
expect(messages[0]).toEqual({
role: 'user',
content: [
{
type: 'tool_result',
tool_use_id: 'call-1',
content: 'boom',
},
],
});
});
});
describe('convertGeminiToolsToAnthropic', () => {
it('converts Tool.functionDeclarations to Anthropic tools and runs schema conversion', async () => {
const tools = [
{
functionDeclarations: [
{
name: 'get_weather',
description: 'Get weather',
parametersJsonSchema: {
type: 'object',
properties: { location: { type: 'string' } },
required: ['location'],
},
},
],
},
] as Tool[];
const result = await converter.convertGeminiToolsToAnthropic(tools);
expect(result).toHaveLength(1);
expect(result[0]).toEqual({
name: 'get_weather',
description: 'Get weather',
input_schema: {
type: 'object',
properties: { location: { type: 'string' } },
required: ['location'],
},
});
expect(vi.mocked(convertSchema)).toHaveBeenCalledTimes(1);
});
it('resolves CallableTool.tool() and converts its functionDeclarations', async () => {
const callable = [
{
tool: async () =>
({
functionDeclarations: [
{
name: 'dynamic_tool',
description: 'resolved tool',
parametersJsonSchema: { type: 'object', properties: {} },
},
],
}) as unknown as Tool,
},
] as CallableTool[];
const result = await converter.convertGeminiToolsToAnthropic(callable);
expect(result).toHaveLength(1);
expect(result[0].name).toBe('dynamic_tool');
});
it('defaults missing parameters to an empty object schema', async () => {
const tools = [
{
functionDeclarations: [
{ name: 'no_params', description: 'no params' },
],
},
] as Tool[];
const result = await converter.convertGeminiToolsToAnthropic(tools);
expect(result).toHaveLength(1);
expect(result[0]).toEqual({
name: 'no_params',
description: 'no params',
input_schema: { type: 'object', properties: {} },
});
});
it('forces input_schema.type to "object" when schema conversion yields no type', async () => {
vi.mocked(convertSchema).mockImplementationOnce(() => ({
properties: {},
}));
const tools = [
{
functionDeclarations: [
{
name: 'edge',
description: 'edge',
parametersJsonSchema: { type: 'object', properties: {} },
},
],
},
] as Tool[];
const result = await converter.convertGeminiToolsToAnthropic(tools);
expect(result[0]?.input_schema?.type).toBe('object');
});
});
describe('convertAnthropicResponseToGemini', () => {
it('converts text, tool_use, thinking, and redacted_thinking blocks', () => {
const response = converter.convertAnthropicResponseToGemini({
id: 'msg-1',
model: 'claude-test',
stop_reason: 'end_turn',
content: [
{ type: 'thinking', thinking: 'thought', signature: 'sig' },
{ type: 'text', text: 'hello' },
{ type: 'tool_use', id: 't1', name: 'tool', input: { x: 1 } },
{ type: 'redacted_thinking' },
],
usage: { input_tokens: 3, output_tokens: 5 },
} as unknown as Anthropic.Message);
expect(response.responseId).toBe('msg-1');
expect(response.modelVersion).toBe('claude-test');
expect(response.candidates?.[0]?.finishReason).toBe(FinishReason.STOP);
expect(response.usageMetadata).toEqual({
promptTokenCount: 3,
candidatesTokenCount: 5,
totalTokenCount: 8,
});
const parts = response.candidates?.[0]?.content?.parts || [];
expect(parts).toEqual([
{ text: 'thought', thought: true, thoughtSignature: 'sig' },
{ text: 'hello' },
{ functionCall: { id: 't1', name: 'tool', args: { x: 1 } } },
{ text: '', thought: true },
]);
});
it('handles tool_use input that is a JSON string', () => {
const response = converter.convertAnthropicResponseToGemini({
id: 'msg-1',
model: 'claude-test',
stop_reason: null,
content: [
{ type: 'tool_use', id: 't1', name: 'tool', input: '{"x":1}' },
],
} as unknown as Anthropic.Message);
const parts = response.candidates?.[0]?.content?.parts || [];
expect(parts).toEqual([
{ functionCall: { id: 't1', name: 'tool', args: { x: 1 } } },
]);
});
});
describe('mapAnthropicFinishReasonToGemini', () => {
it('maps known reasons', () => {
expect(converter.mapAnthropicFinishReasonToGemini('end_turn')).toBe(
FinishReason.STOP,
);
expect(converter.mapAnthropicFinishReasonToGemini('max_tokens')).toBe(
FinishReason.MAX_TOKENS,
);
expect(converter.mapAnthropicFinishReasonToGemini('content_filter')).toBe(
FinishReason.SAFETY,
);
});
it('returns undefined for null/empty', () => {
expect(converter.mapAnthropicFinishReasonToGemini(null)).toBeUndefined();
expect(converter.mapAnthropicFinishReasonToGemini('')).toBeUndefined();
});
});
});

View File

@@ -0,0 +1,448 @@
/**
* @license
* Copyright 2025 Qwen
* SPDX-License-Identifier: Apache-2.0
*/
import type {
Candidate,
CallableTool,
Content,
ContentListUnion,
ContentUnion,
FunctionCall,
FunctionResponse,
GenerateContentParameters,
Part,
PartUnion,
Tool,
ToolListUnion,
} from '@google/genai';
import { FinishReason, GenerateContentResponse } from '@google/genai';
import type Anthropic from '@anthropic-ai/sdk';
import { safeJsonParse } from '../../utils/safeJsonParse.js';
import {
convertSchema,
type SchemaComplianceMode,
} from '../../utils/schemaConverter.js';
type AnthropicMessageParam = Anthropic.MessageParam;
type AnthropicToolParam = Anthropic.Tool;
type AnthropicContentBlockParam = Anthropic.ContentBlockParam;
type ThoughtPart = { text: string; signature?: string };
interface ParsedParts {
thoughtParts: ThoughtPart[];
contentParts: string[];
functionCalls: FunctionCall[];
functionResponses: FunctionResponse[];
}
export class AnthropicContentConverter {
private model: string;
private schemaCompliance: SchemaComplianceMode;
constructor(model: string, schemaCompliance: SchemaComplianceMode = 'auto') {
this.model = model;
this.schemaCompliance = schemaCompliance;
}
convertGeminiRequestToAnthropic(request: GenerateContentParameters): {
system?: string;
messages: AnthropicMessageParam[];
} {
const messages: AnthropicMessageParam[] = [];
const system = this.extractTextFromContentUnion(
request.config?.systemInstruction,
);
this.processContents(request.contents, messages);
return {
system: system || undefined,
messages,
};
}
async convertGeminiToolsToAnthropic(
geminiTools: ToolListUnion,
): Promise<AnthropicToolParam[]> {
const tools: AnthropicToolParam[] = [];
for (const tool of geminiTools) {
let actualTool: Tool;
if ('tool' in tool) {
actualTool = await (tool as CallableTool).tool();
} else {
actualTool = tool as Tool;
}
if (!actualTool.functionDeclarations) {
continue;
}
for (const func of actualTool.functionDeclarations) {
if (!func.name) continue;
let inputSchema: Record<string, unknown> | undefined;
if (func.parametersJsonSchema) {
inputSchema = {
...(func.parametersJsonSchema as Record<string, unknown>),
};
} else if (func.parameters) {
inputSchema = func.parameters as Record<string, unknown>;
}
if (!inputSchema) {
inputSchema = { type: 'object', properties: {} };
}
inputSchema = convertSchema(inputSchema, this.schemaCompliance);
if (typeof inputSchema['type'] !== 'string') {
inputSchema['type'] = 'object';
}
tools.push({
name: func.name,
description: func.description,
input_schema: inputSchema as Anthropic.Tool.InputSchema,
});
}
}
return tools;
}
convertAnthropicResponseToGemini(
response: Anthropic.Message,
): GenerateContentResponse {
const geminiResponse = new GenerateContentResponse();
const parts: Part[] = [];
for (const block of response.content || []) {
const blockType = String((block as { type?: string })['type'] || '');
if (blockType === 'text') {
const text =
typeof (block as { text?: string }).text === 'string'
? (block as { text?: string }).text
: '';
if (text) {
parts.push({ text });
}
} else if (blockType === 'tool_use') {
const toolUse = block as {
id?: string;
name?: string;
input?: unknown;
};
parts.push({
functionCall: {
id: typeof toolUse.id === 'string' ? toolUse.id : undefined,
name: typeof toolUse.name === 'string' ? toolUse.name : undefined,
args: this.safeInputToArgs(toolUse.input),
},
});
} else if (blockType === 'thinking') {
const thinking =
typeof (block as { thinking?: string }).thinking === 'string'
? (block as { thinking?: string }).thinking
: '';
const signature =
typeof (block as { signature?: string }).signature === 'string'
? (block as { signature?: string }).signature
: '';
if (thinking || signature) {
const thoughtPart: Part = {
text: thinking,
thought: true,
thoughtSignature: signature,
};
parts.push(thoughtPart);
}
} else if (blockType === 'redacted_thinking') {
parts.push({ text: '', thought: true });
}
}
const candidate: Candidate = {
content: {
parts,
role: 'model' as const,
},
index: 0,
safetyRatings: [],
};
const finishReason = this.mapAnthropicFinishReasonToGemini(
response.stop_reason,
);
if (finishReason) {
candidate.finishReason = finishReason;
}
geminiResponse.candidates = [candidate];
geminiResponse.responseId = response.id;
geminiResponse.createTime = Date.now().toString();
geminiResponse.modelVersion = response.model || this.model;
geminiResponse.promptFeedback = { safetyRatings: [] };
if (response.usage) {
const promptTokens = response.usage.input_tokens || 0;
const completionTokens = response.usage.output_tokens || 0;
geminiResponse.usageMetadata = {
promptTokenCount: promptTokens,
candidatesTokenCount: completionTokens,
totalTokenCount: promptTokens + completionTokens,
};
}
return geminiResponse;
}
private processContents(
contents: ContentListUnion,
messages: AnthropicMessageParam[],
): void {
if (Array.isArray(contents)) {
for (const content of contents) {
this.processContent(content, messages);
}
} else if (contents) {
this.processContent(contents, messages);
}
}
private processContent(
content: ContentUnion | PartUnion,
messages: AnthropicMessageParam[],
): void {
if (typeof content === 'string') {
messages.push({
role: 'user',
content: [{ type: 'text', text: content }],
});
return;
}
if (!this.isContentObject(content)) return;
const parsed = this.parseParts(content.parts || []);
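// Function responses become `user` messages carrying `tool_result` blocks,
// one message per response, so Anthropic can pair each with its prior tool_use id.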
if (parsed.functionResponses.length > 0) {
for (const response of parsed.functionResponses) {
messages.push({
role: 'user',
content: [
{
type: 'tool_result',
tool_use_id: response.id || '',
content: this.extractFunctionResponseContent(response.response),
},
],
});
}
return;
}
if (content.role === 'model' && parsed.functionCalls.length > 0) {
const thinkingBlocks: AnthropicContentBlockParam[] =
parsed.thoughtParts.map((part) => {
const thinkingBlock: unknown = {
type: 'thinking',
thinking: part.text,
};
if (part.signature) {
(thinkingBlock as { signature?: string }).signature =
part.signature;
}
return thinkingBlock as AnthropicContentBlockParam;
});
const toolUses: AnthropicContentBlockParam[] = parsed.functionCalls.map(
(call, index) => ({
type: 'tool_use',
id: call.id || `tool_${index}`,
name: call.name || '',
input: (call.args as Record<string, unknown>) || {},
}),
);
const textBlocks: AnthropicContentBlockParam[] = parsed.contentParts.map(
(text) => ({
type: 'text' as const,
text,
}),
);
messages.push({
role: 'assistant',
content: [...thinkingBlocks, ...textBlocks, ...toolUses],
});
return;
}
const role = content.role === 'model' ? 'assistant' : 'user';
const thinkingBlocks: AnthropicContentBlockParam[] =
role === 'assistant'
? parsed.thoughtParts.map((part) => {
const thinkingBlock: unknown = {
type: 'thinking',
thinking: part.text,
};
if (part.signature) {
(thinkingBlock as { signature?: string }).signature =
part.signature;
}
return thinkingBlock as AnthropicContentBlockParam;
})
: [];
const textBlocks: AnthropicContentBlockParam[] = [
...thinkingBlocks,
...parsed.contentParts.map((text) => ({
type: 'text' as const,
text,
})),
];
if (textBlocks.length > 0) {
messages.push({ role, content: textBlocks });
}
}
private parseParts(parts: Part[]): ParsedParts {
const thoughtParts: ThoughtPart[] = [];
const contentParts: string[] = [];
const functionCalls: FunctionCall[] = [];
const functionResponses: FunctionResponse[] = [];
for (const part of parts) {
if (typeof part === 'string') {
contentParts.push(part);
} else if (
'text' in part &&
part.text &&
!('thought' in part && part.thought)
) {
contentParts.push(part.text);
} else if ('text' in part && 'thought' in part && part.thought) {
thoughtParts.push({
text: part.text || '',
signature:
'thoughtSignature' in part &&
typeof part.thoughtSignature === 'string'
? part.thoughtSignature
: undefined,
});
} else if ('functionCall' in part && part.functionCall) {
functionCalls.push(part.functionCall);
} else if ('functionResponse' in part && part.functionResponse) {
functionResponses.push(part.functionResponse);
}
}
return {
thoughtParts,
contentParts,
functionCalls,
functionResponses,
};
}
private extractTextFromContentUnion(contentUnion: unknown): string {
if (typeof contentUnion === 'string') {
return contentUnion;
}
if (Array.isArray(contentUnion)) {
return contentUnion
.map((item) => this.extractTextFromContentUnion(item))
.filter(Boolean)
.join('\n');
}
if (typeof contentUnion === 'object' && contentUnion !== null) {
if ('parts' in contentUnion) {
const content = contentUnion as Content;
return (
content.parts
?.map((part: Part) => {
if (typeof part === 'string') return part;
if ('text' in part) return part.text || '';
return '';
})
.filter(Boolean)
.join('\n') || ''
);
}
}
return '';
}
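  /**
   * Renders a function response as the string content of a tool_result block,
   * preferring `output`, then `error`, then a JSON serialization.
   */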
private extractFunctionResponseContent(response: unknown): string {
if (response === null || response === undefined) {
return '';
}
if (typeof response === 'string') {
return response;
}
if (typeof response === 'object') {
const responseObject = response as Record<string, unknown>;
const output = responseObject['output'];
if (typeof output === 'string') {
return output;
}
const error = responseObject['error'];
if (typeof error === 'string') {
return error;
}
}
try {
const serialized = JSON.stringify(response);
return serialized ?? String(response);
} catch {
return String(response);
}
}
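  /**
   * Coerces a tool_use input into a plain args object, tolerating stringified
   * JSON and falling back to an empty object.
   */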
private safeInputToArgs(input: unknown): Record<string, unknown> {
if (input && typeof input === 'object') {
return input as Record<string, unknown>;
}
if (typeof input === 'string') {
return safeJsonParse(input, {});
}
return {};
}
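  /**
   * Maps Anthropic stop reasons onto Gemini FinishReason values; unknown
   * reasons fall back to FINISH_REASON_UNSPECIFIED.
   */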
mapAnthropicFinishReasonToGemini(
reason?: string | null,
): FinishReason | undefined {
if (!reason) return undefined;
const mapping: Record<string, FinishReason> = {
end_turn: FinishReason.STOP,
stop_sequence: FinishReason.STOP,
tool_use: FinishReason.STOP,
max_tokens: FinishReason.MAX_TOKENS,
content_filter: FinishReason.SAFETY,
};
return mapping[reason] || FinishReason.FINISH_REASON_UNSPECIFIED;
}
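  /**
   * Type guard for Content-like objects that carry a role and a parts array.
   */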
private isContentObject(
content: unknown,
): content is { role: string; parts: Part[] } {
return (
typeof content === 'object' &&
content !== null &&
'role' in content &&
'parts' in content &&
Array.isArray((content as Record<string, unknown>)['parts'])
);
}
}
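To make the mapping above concrete, here is a hand-written input/output pair for processContent (illustrative only; the tool name, IDs, and signature are invented, not taken from this diff). A model turn carrying a thought and a function call becomes one Anthropic assistant message with a thinking block followed by a tool_use block:

// Hypothetical Gemini-style content (all values are placeholders):
const geminiContent = {
  role: 'model',
  parts: [
    { thought: true, text: 'List the files first.', thoughtSignature: 'sig-1' },
    { functionCall: { id: 'call_1', name: 'list_files', args: { path: '.' } } },
  ],
};
// processContent would push roughly this Anthropic message:
// {
//   role: 'assistant',
//   content: [
//     { type: 'thinking', thinking: 'List the files first.', signature: 'sig-1' },
//     { type: 'tool_use', id: 'call_1', name: 'list_files', input: { path: '.' } },
//   ],
// }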

View File

@@ -0,0 +1,21 @@
/**
* @license
* Copyright 2025 Qwen
* SPDX-License-Identifier: Apache-2.0
*/
import type {
ContentGenerator,
ContentGeneratorConfig,
} from '../contentGenerator.js';
import type { Config } from '../../config/config.js';
import { AnthropicContentGenerator } from './anthropicContentGenerator.js';
export { AnthropicContentGenerator } from './anthropicContentGenerator.js';
export function createAnthropicContentGenerator(
contentGeneratorConfig: ContentGeneratorConfig,
cliConfig: Config,
): ContentGenerator {
return new AnthropicContentGenerator(contentGeneratorConfig, cliConfig);
}
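A minimal usage sketch for this factory (hedged: the import path and variable names are assumptions; only createAnthropicContentGenerator itself and the two-argument generateContent shape, visible in the test expectations later in this diff, come from the source):

// Hypothetical wiring; in the CLI both configs come from the normal
// auth/env resolution rather than being constructed by hand.
import { createAnthropicContentGenerator } from './anthropic/index.js';

const generator = createAnthropicContentGenerator(contentGeneratorConfig, cliConfig);
const response = await generator.generateContent(request, 'prompt-id');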

View File

@@ -146,12 +146,11 @@ describe('BaseLlmClient', () => {
// Validate the parameters passed to the underlying generator
expect(mockGenerateContent).toHaveBeenCalledTimes(1);
expect(mockGenerateContent).toHaveBeenCalledWith(
-      {
+      expect.objectContaining({
        model: 'test-model',
        contents: defaultOptions.contents,
-        config: {
+        config: expect.objectContaining({
          abortSignal: defaultOptions.abortSignal,
-          topP: 0.8,
          tools: [
            {
              functionDeclarations: [
@@ -163,9 +162,8 @@
            ],
          },
        ],
-        // Crucial: systemInstruction should NOT be in the config object if not provided
-      },
-    },
+        }),
+      }),
'test-prompt-id',
);
});
@@ -188,7 +186,6 @@ describe('BaseLlmClient', () => {
expect.objectContaining({
config: expect.objectContaining({
temperature: 0.8,
-          topP: 0.8, // Default should remain if not overridden
topK: 10,
tools: expect.any(Array),
}),

View File

@@ -64,11 +64,6 @@ export interface GenerateJsonOptions {
* A client dedicated to stateless, utility-focused LLM calls.
*/
export class BaseLlmClient {
-  // Default configuration for utility tasks
-  private readonly defaultUtilityConfig: GenerateContentConfig = {
-    topP: 0.8,
-  };
constructor(
private readonly contentGenerator: ContentGenerator,
private readonly config: Config,
@@ -89,7 +84,6 @@ export class BaseLlmClient {
const requestConfig: GenerateContentConfig = {
abortSignal,
-      ...this.defaultUtilityConfig,
...options.config,
...(systemInstruction && { systemInstruction }),
};

View File

@@ -15,11 +15,7 @@ import {
} from 'vitest';
import type { Content, GenerateContentResponse, Part } from '@google/genai';
-import {
-  isThinkingDefault,
-  isThinkingSupported,
-  GeminiClient,
-} from './client.js';
+import { GeminiClient } from './client.js';
import { findCompressSplitPoint } from '../services/chatCompressionService.js';
import {
AuthType,
@@ -247,40 +243,6 @@ describe('findCompressSplitPoint', () => {
});
});
-describe('isThinkingSupported', () => {
-  it('should return true for gemini-2.5', () => {
-    expect(isThinkingSupported('gemini-2.5')).toBe(true);
-  });
-  it('should return true for gemini-2.5-pro', () => {
-    expect(isThinkingSupported('gemini-2.5-pro')).toBe(true);
-  });
-  it('should return false for other models', () => {
-    expect(isThinkingSupported('gemini-1.5-flash')).toBe(false);
-    expect(isThinkingSupported('some-other-model')).toBe(false);
-  });
-});
-describe('isThinkingDefault', () => {
-  it('should return false for gemini-2.5-flash-lite', () => {
-    expect(isThinkingDefault('gemini-2.5-flash-lite')).toBe(false);
-  });
-  it('should return true for gemini-2.5', () => {
-    expect(isThinkingDefault('gemini-2.5')).toBe(true);
-  });
-  it('should return true for gemini-2.5-pro', () => {
-    expect(isThinkingDefault('gemini-2.5-pro')).toBe(true);
-  });
-  it('should return false for other models', () => {
-    expect(isThinkingDefault('gemini-1.5-flash')).toBe(false);
-    expect(isThinkingDefault('some-other-model')).toBe(false);
-  });
-});
describe('Gemini Client (client.ts)', () => {
let mockContentGenerator: ContentGenerator;
let mockConfig: Config;
@@ -2304,16 +2266,15 @@ ${JSON.stringify(
);
expect(mockContentGenerator.generateContent).toHaveBeenCalledWith(
-      {
+      expect.objectContaining({
        model: DEFAULT_GEMINI_FLASH_MODEL,
-        config: {
+        config: expect.objectContaining({
          abortSignal,
          systemInstruction: getCoreSystemPrompt(''),
          temperature: 0.5,
-          topP: 0.8,
-        },
+        }),
        contents,
-      },
+      }),
'test-session-id',
);
});

View File

@@ -15,11 +15,7 @@ import type {
// Config
import { ApprovalMode, type Config } from '../config/config.js';
-import {
-  DEFAULT_GEMINI_FLASH_MODEL,
-  DEFAULT_GEMINI_MODEL_AUTO,
-  DEFAULT_THINKING_MODE,
-} from '../config/models.js';
+import { DEFAULT_GEMINI_FLASH_MODEL } from '../config/models.js';
// Core modules
import type { ContentGenerator } from './contentGenerator.js';
@@ -78,24 +74,10 @@ import { type File, type IdeContext } from '../ide/types.js';
// Fallback handling
import { handleFallback } from '../fallback/handler.js';
-export function isThinkingSupported(model: string) {
-  return model.startsWith('gemini-2.5') || model === DEFAULT_GEMINI_MODEL_AUTO;
-}
-export function isThinkingDefault(model: string) {
-  if (model.startsWith('gemini-2.5-flash-lite')) {
-    return false;
-  }
-  return model.startsWith('gemini-2.5') || model === DEFAULT_GEMINI_MODEL_AUTO;
-}
const MAX_TURNS = 100;
export class GeminiClient {
private chat?: GeminiChat;
-  private readonly generateContentConfig: GenerateContentConfig = {
-    topP: 0.8,
-  };
private sessionTurnCount = 0;
private readonly loopDetector: LoopDetectionService;
@@ -207,20 +189,10 @@ export class GeminiClient {
const model = this.config.getModel();
const systemInstruction = getCoreSystemPrompt(userMemory, model);
-    const config: GenerateContentConfig = { ...this.generateContentConfig };
-    if (isThinkingSupported(model)) {
-      config.thinkingConfig = {
-        includeThoughts: true,
-        thinkingBudget: DEFAULT_THINKING_MODE,
-      };
-    }
return new GeminiChat(
this.config,
{
systemInstruction,
-        ...config,
tools,
},
history,
@@ -617,11 +589,6 @@ export class GeminiClient {
): Promise<GenerateContentResponse> {
let currentAttemptModel: string = model;
-    const configToUse: GenerateContentConfig = {
-      ...this.generateContentConfig,
-      ...generationConfig,
-    };
try {
const userMemory = this.config.getUserMemory();
const finalSystemInstruction = generationConfig.systemInstruction
@@ -630,7 +597,7 @@ export class GeminiClient {
const requestConfig: GenerateContentConfig = {
abortSignal,
-        ...configToUse,
+        ...generationConfig,
systemInstruction: finalSystemInstruction,
};
@@ -671,7 +638,7 @@ export class GeminiClient {
`Error generating content via API with model ${currentAttemptModel}.`,
{
requestContents: contents,
-          requestConfig: configToUse,
+          requestConfig: generationConfig,
},
'generateContent-api',
);
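Taken together, these client.ts hunks strip GeminiClient of its implicit defaults: the topP: 0.8 base config, and the thinkingConfig that isThinkingSupported/isThinkingDefault used to gate. After the change, generateContent forwards the caller's generationConfig untouched, so the request config reduces to the following (names taken from the diff itself):

const requestConfig: GenerateContentConfig = {
  abortSignal,
  ...generationConfig, // no implicit topP or thinkingConfig merged in anymore
  systemInstruction: finalSystemInstruction,
};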

Some files were not shown because too many files have changed in this diff.