Compare commits

..

229 Commits

Author SHA1 Message Date
yiliang114
8111511a89 chore(vscode-ide-companion): add comments under window 2026-01-13 10:19:43 +08:00
yiliang114
c845049d26 Merge branch 'main' into fix/vscode-run 2026-01-13 00:27:44 +08:00
qwen-code-ci-bot
09196c6e19 Merge pull request #1470 from QwenLM/release/sdk-typescript/v0.1.2
chore(release): sdk-typescript v0.1.2
2026-01-12 16:26:57 +08:00
github-actions[bot]
4bd01d592b chore(release): sdk-typescript v0.1.2 2026-01-12 08:25:25 +00:00
Mingholy
1aed5ce858 Merge pull request #1462 from QwenLM/mingholy/feat/sdk-skills
fix: SDK release workflow and stability improvements
2026-01-12 15:06:55 +08:00
Mingholy
bad5b0485d Merge pull request #1457 from liqiongyu/fix/1118-auth-fetch-failed-diagnostics
fix(core): improve OAuth fetch-failed diagnostics
2026-01-12 15:06:16 +08:00
tanzhenxin
5a6e5bb452 Merge pull request #1427 from liqiongyu/fix/1333-legacy-settings-alias
fix(cli): warn on deprecated/unknown settings keys
2026-01-12 14:43:27 +08:00
tanzhenxin
5f8e1ebc94 chore(settings): update legacy settings alias implementation and tests
Co-authored-by: Qwen-Coder <qwen-coder@alibabacloud.com>
2026-01-12 14:29:40 +08:00
mingholy.lmh
9670456a56 fix: simplify JavaScript runtime detection to fix powershell spawning process issue 2026-01-12 13:42:24 +08:00
liqoingyu
4c186e7c92 refactor(core): extract fetch error troubleshooting 2026-01-12 12:00:01 +08:00
tanzhenxin
2f6b0b233a Merge pull request #1464 from xuewenjie123/fix/windows-background-terminal-execute-x
fix(shell): prevent console window flash on Windows for foreground tasks
2026-01-12 11:48:10 +08:00
xuewenjie
9a8ce605c5 test: update shellExecutionService test for Windows spawn config changes 2026-01-12 11:22:54 +08:00
tanzhenxin
afc693a4ab Merge pull request #1453 from liqiongyu/fix/1359-sandbox-uidgid-default-linux
fix(cli): default sandbox UID/GID mapping on Linux
2026-01-12 11:08:47 +08:00
xuewenjie
7173cba844 fix(shell): prevent console window flash on Windows for foreground tasks 2026-01-12 11:04:05 +08:00
yiliang114
ec8cccafd7 Merge branch 'main' of https://github.com/QwenLM/qwen-code into fix/vscode-run 2026-01-12 10:57:56 +08:00
liqoingyu
8c56b612fb fix(cli): warn on deprecated/unknown settings keys 2026-01-12 10:49:37 +08:00
mingholy.lmh
7d40e1470c chore: add CODEOWNERS for SDK TypeScript package and remove legacy CLI path alias 2026-01-11 21:24:45 +08:00
yiliang114
82c524f87d fix(vscode-ide-companion): window qwen code run command 2026-01-10 22:30:10 +08:00
yiliang114
df75aa06b6 fix(vscode-ide-companion): window qwen code run command 2026-01-10 22:08:14 +08:00
yiliang114
8ea9871d23 fix(vscode-ide-companion): fix positional argument problem due to special handling for Electron app of yargs
- Remove isNodeAvailable function and related child_process import
- Update command execution logic to properly handle ELECTRON_RUN_AS_NODE
- Add proper quoting mechanisms for different platforms (PowerShell vs POSIX)
- Bump version from 0.6.1 to 0.6.2
2026-01-10 19:52:25 +08:00
liqoingyu
097482910e fix(core): improve OAuth fetch-failed diagnostics 2026-01-10 16:49:56 +08:00
liqoingyu
9b78c17638 fix(cli): default sandbox UID/GID mapping on Linux
Fixes #1359.

Default container sandboxing on Linux to use host UID/GID so qwen runs under a user that matches the mounted home directory and persists auth/settings in ~/.qwen.

Also gate the informational log behind DEBUG/DEBUG_MODE and clarify docs about Linux UID/GID mapping and ~/.qwen persistence.
2026-01-10 14:31:08 +08:00
tanzhenxin
bde31d1261 Merge pull request #1448 from QwenLM/fix/openai-compatible
fix(core): handle missing delta in OpenAI stream chunks
2026-01-09 21:38:31 +08:00
mingholy.lmh
7f15256eba fix: improve release workflow 2026-01-09 18:00:01 +08:00
mingholy.lmh
587fc82fbc chore: update version to 0.1.1 in package.json 2026-01-09 17:54:59 +08:00
tanzhenxin
cba9c424eb fix(core): handle missing delta in OpenAI stream chunks
Some OpenAI-compatible providers occasionally emit chat.completion.chunk choices
without a delta object. Guard optional reasoning_content access and add a
regression test to ensure chunk conversion does not throw.
2026-01-09 17:41:07 +08:00
yiliang114
b7828ac765 Merge branch 'main' into fix/vscode-run 2026-01-09 16:39:12 +08:00
mingholy.lmh
8705f734d0 fix: improve bundled CLI path finding and support --experimental-skills 2026-01-09 16:32:55 +08:00
tanzhenxin
95efe89ac0 fix positional argument problem due to special handling for Electron app of yargs 2026-01-09 14:49:57 +08:00
tanzhenxin
6714f9ce3c Merge pull request #1351 from xuewenjie123/fix/editor-launch-issues
fix: resolve external editor launch failure on macOS and Windows
2026-01-09 11:07:43 +08:00
tanzhenxin
155d1f9518 Merge pull request #1428 from liqiongyu/fix/727-memory-show-respects-context-file
fix(cli): /memory show respects context.fileName
2026-01-09 10:29:27 +08:00
Mingholy
f776075aa8 Merge pull request #1439 from QwenLM/mingholy/fix/multi-provider-cold-start
fix: multi provider cold start issue
2026-01-08 18:54:42 +08:00
mingholy.lmh
36c142951a fix: default authType fallback 2026-01-08 18:38:23 +08:00
mingholy.lmh
2b511d0b83 fix: cold start issue and acp integration tests 2026-01-08 18:08:56 +08:00
mingholy.lmh
85bc0833b4 fix: remove authType fallback option for cold start case 2026-01-08 18:08:56 +08:00
tanzhenxin
2662639280 Merge pull request #1424 from xuewenjie123/refactor/ide-context-plain-text
refactor: convert IDE context from JSON to plain text format
2026-01-08 17:11:31 +08:00
Mingholy
b7ac94ecf6 Merge pull request #1291 from QwenLM/mingholy/feat/multi-provider-support
feat: multi-provider models config support
2026-01-08 16:27:00 +08:00
tanzhenxin
be8259b218 Merge pull request #1433 from BlockHand/userandwork
feat: Modify the selection order of user Settings and workspace Settings
2026-01-08 16:07:02 +08:00
xwj02155382
ca4c36f233 feat: wrap selected text in code blocks for IDE context 2026-01-08 16:06:21 +08:00
Mingholy
f41308f34c Merge pull request #1426 from QwenLM/fix/oauth-headless-auth-url-display
fix(core): ensure OAuth URL always displayed in headless mode
2026-01-08 16:03:05 +08:00
Mingholy
0a33510304 Merge pull request #1434 from QwenLM/chore/v0.7.0
chore: bump version to 0.7.0
2026-01-08 15:48:30 +08:00
刘伟光
82cbdee3b4 feat: restore the component to use the modified default value 2026-01-08 15:35:17 +08:00
mingholy.lmh
81de79c899 fix: best effort to use resolved authType/model across the repo 2026-01-08 12:11:23 +08:00
刘伟光
f6a753cf78 feat: Modify the selection order of user Settings and workspace Settings 2026-01-08 11:50:55 +08:00
刘伟光
509d304742 feat: Modify the selection order of user Settings and workspace Settings 2026-01-08 11:36:07 +08:00
刘伟光
6319a6ed56 feat: modify the selection order of user settings and workspace settings 2026-01-08 10:45:22 +08:00
mingholy.lmh
ab07c2d89c chore: bump version to 0.7.0 2026-01-08 10:21:55 +08:00
mingholy.lmh
5ea841dd02 fix: refine auth message to give explicit tip 2026-01-07 22:58:11 +08:00
mingholy.lmh
ded1ebcdff fix: fallback and auth issues when configuring a duplicate model id 2026-01-07 22:58:11 +08:00
mingholy.lmh
afe6ba255e fix: align authType & model persisting behavior across dialogs 2026-01-07 22:58:11 +08:00
mingholy.lmh
fe2ed889b9 docs: add modelProviders documents 2026-01-07 22:58:10 +08:00
mingholy.lmh
8da376637a fix: remove detailed generationConfig 2026-01-07 22:58:10 +08:00
mingholy.lmh
15f4c1ebd6 chore: add i18n 2026-01-07 22:58:10 +08:00
mingholy.lmh
492da0c8c0 chore: update copyright notice in modelConfigUtils.ts 2026-01-07 22:58:09 +08:00
mingholy.lmh
90855c93d1 fix: lint & ci issues 2026-01-07 22:58:09 +08:00
mingholy.lmh
db12796df5 refactor: update authentication handling and model configuration
- Enhanced authentication method validation in `auth.ts` and `auth.test.ts`.
- Introduced new model provider configuration logic
- Updated environment variable handling for various auth types.
- Removed deprecated utility functions and tests related to fallback mechanisms.
2026-01-07 22:58:09 +08:00
mingholy.lmh
aa9cdf2a3c review: stage1 2026-01-07 22:58:08 +08:00
yiliang114
052337861b Fix #1416 2026-01-07 21:05:49 +08:00
liqoingyu
0a0ab64da0 test(cli): make memoryCommand path assertions cross-platform 2026-01-07 20:28:28 +08:00
liqoingyu
8a15017593 fix(cli): /memory show respects context.fileName 2026-01-07 20:07:41 +08:00
LaZzyMan
4d54a231b3 fix(core): ensure OAuth URL always displayed in headless mode
- Always show authentication URL before attempting browser launch
- Fixes issue where browser launch silently fails in headless environments
- Improves error logging for browser launch failures

Fixes #1425
2026-01-07 19:37:47 +08:00
tanzhenxin
570ec432af Merge pull request #1282 from BlockHand/fix-sandbox-ideInstall
feat: Optimize the issue where an error message indicating unfriendli…
2026-01-07 17:40:45 +08:00
tanzhenxin
bfc3bbfa9c update user messages 2026-01-07 17:25:27 +08:00
tanzhenxin
91af9bf6c8 Merge branch 'main' into fix-sandbox-ideInstall 2026-01-07 17:12:22 +08:00
tanzhenxin
f6771c0858 Merge pull request #1393 from Weaxs/main
[OpenaiContentGenerate] convertOpenAIResponseToGemini record thoughtsTokenCount
2026-01-07 17:05:28 +08:00
tanzhenxin
2c8be05029 Merge pull request #1415 from QwenLM/fix/openai-reasoning-config
fix(core): don’t force reasoning/topP defaults for OpenAI-compatible APIs
2026-01-07 16:59:26 +08:00
tanzhenxin
4744af1ea8 Merge pull request #1406 from QwenLM/fix/non-interactive-tool-permission
fix(cli,core): honor `tools.core` / `tools.allowed` in non-interactive runs
2026-01-07 16:59:19 +08:00
Mingholy
2c285394c7 Merge pull request #1423 from QwenLM/chore/release-v0.6.1
chore: bump version to 0.6.1
2026-01-07 16:55:45 +08:00
xwj02155382
0f1cb162c9 refactor: convert IDE context from JSON to plain text format
Fixes #1418

- Remove JSON.stringify() and code fences from getIdeContextParts()
- Use human-readable plain text format for better LLM comprehension
  - Full context: 'Active file:', 'Cursor: line X, character Y'
  - Delta updates: 'Files opened:', 'Files closed:', 'Cursor moved:', etc.
- Update all related tests to match new plain text format
- All 49 tests passing

This change improves the model's ability to read and reason about IDE
state by eliminating escaped characters and rigid JSON structure that
can confuse LLMs when interpreting file paths, cursor positions, or
selection ranges.
2026-01-07 16:46:48 +08:00
xwj02155382
3d059b71de refactor: improve IDE context format and editor command error handling
- Change IDE context from JSON to plain text format for better LLM comprehension
  - Remove JSON.stringify() and code fences from getIdeContextParts()
  - Use human-readable format: 'Active file:', 'Cursor: line X, character Y'
  - Apply same format to delta updates: 'Files opened:', 'Files closed:', etc.
  - Update all related tests to match new plain text format

- Fix editor command fallback logic in useLaunchEditor
  - Throw clear error when no editor command is available
  - Remove meaningless fallback to last command in list
  - Provide helpful error message with tried commands and solution
2026-01-07 16:34:12 +08:00
mingholy.lmh
f2d941e469 chore: bump version to 0.6.1 2026-01-07 16:25:14 +08:00
tanzhenxin
9b2dfe1e06 Merge pull request #1374 from QwenLM/fix/resume-command-broken-after-new-chat
Fix resume command broken after new chat
2026-01-07 16:21:47 +08:00
tanzhenxin
3e695cd82b Merge pull request #1146 from QwenLM/fix/windows-background-terminal-execute-x
fix: improve windows background process handling and cleanup
2026-01-07 16:21:14 +08:00
xwj02155382
177a91f1d5 Merge branch 'main' of github.com:QwenLM/qwen-code into fix/windows-background-terminal-execute-x 2026-01-07 15:50:03 +08:00
Weaxs
870d207f18 revert enable_thinking & thinking_budget 2026-01-07 10:25:34 +08:00
tanzhenxin
3f512528cb Merge pull request #1391 from tt-a1i/feat/approval-mode-direct-arg
feat(cli): add direct argument support for /approval-mode command
2026-01-07 09:39:33 +08:00
yiliang114
361492247e fix(vscode-ide-companion): fix cross-platform CLI execution in terminal
- Add platform.ts utility with isWindows constant
- Fix Windows PowerShell execution with & call operator
- Fix macOS Electron helper with ELECTRON_RUN_AS_NODE=1
- Prefer system Node.js, fallback to VS Code runtime
- Refactor platform detection in acpMessageHandler and acpSessionManager

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
2026-01-07 01:35:05 +08:00
Tu Shaokun
0878ee4cbd fix: handle setApprovalMode error in untrusted folders
Add try/catch to gracefully handle errors when setting privileged
modes (yolo/auto-edit) in untrusted folders, returning an error
message instead of throwing.
2026-01-06 22:04:43 +08:00
Tu Shaokun
bfe7298858 refactor: apply session-only approval mode per review feedback
- Remove persistence to user settings (no setValue call)
- Only use config.setApprovalMode() for session scope
- Remove autocomplete feature for simplicity
- Align with Shift+Tab behavior
2026-01-06 21:57:21 +08:00
Tu Shaokun
2f2937aafe test: add explicit assertions for setApprovalMode argument
Verify the exact mode value passed to config.setApprovalMode to catch
potential regressions in settings merge/update mechanism.
2026-01-06 21:54:33 +08:00
Tu Shaokun
8fcdd86b91 feat(cli): add direct argument support for /approval-mode command
Allow users to set approval mode directly via argument instead of
opening the dialog. For example:
- /approval-mode plan
- /approval-mode yolo
- /approval-mode auto-edit
- /approval-mode default

If no argument is provided, the dialog opens as before.
If an invalid argument is provided, an error message shows valid options.

Also adds tab completion for mode arguments.

Fixes #1353
2026-01-06 21:54:33 +08:00
tanzhenxin
d7d7bf0c39 fix default values of reasoning config for openai compatible api 2026-01-06 19:39:28 +08:00
gwinthis
b95d9a8d2d Merge pull request #1414 from QwenLM/doc/qwencode-java
Doc/qwencode java
2026-01-06 17:54:47 +08:00
顾盼
6f39ae120c Merge pull request #1355 from QwenLM/feat/stable-acp-flag
feat: graduate `--experimental-acp` to stable `--acp` flag
2026-01-06 17:51:43 +08:00
顾盼
627857621a Merge pull request #1365 from QwenLM/fix/missing-whitespaces
fix: preserve whitespace in thinking content for stream-json output format
2026-01-06 17:51:26 +08:00
顾盼
65c7cf5d8f Merge pull request #1376 from QwenLM/fix/missing-error-throw-nonInteractive
fix: exit with non-zero code on API errors in text mode
2026-01-06 17:51:13 +08:00
顾盼
7a823060ac Merge pull request #1383 from QwenLM/fix/tool-result-text-mode
fix: improve tool execution feedback in non-interactive mode
2026-01-06 17:50:58 +08:00
xwj02155382
2c88ea6dc1 Merge branch 'main' of github.com:QwenLM/qwen-code into fix/windows-background-terminal-execute-x 2026-01-06 17:26:01 +08:00
skyfire
ad3086f7dd add qwencode-sdk java doc 2026-01-06 17:18:41 +08:00
skyfire
8f3bbef575 add qwencode-sdk java doc 2026-01-06 17:11:47 +08:00
xwj02155382
e2d6ab9b7e refactor: simplify background shell command handling
- Remove ineffective error detection for background processes
  (stdio is detached/ignored, so cumulativeOutput is always empty)
- Add kill command hints for both Windows and macOS/Linux
- Simplify code from 40 lines to 12 lines with clearer logic
- Add explanatory comment about why startup errors cannot be reliably detected
2026-01-06 16:46:56 +08:00
Weaxs
35bf5ef4d0 remove duplicate reasoning_content handle 2026-01-06 14:38:01 +08:00
Weaxs
1d16513e27 remove duplicate reasoning_content handle && remove extra_body.enable_thinking 2026-01-06 14:38:01 +08:00
Weaxs
731fd99800 remove duplicate reasoning_content handle && remove extra_body.enable_thinking 2026-01-06 14:21:42 +08:00
skyfire
c6ae0a8be7 for alpha stage 2026-01-06 11:16:47 +08:00
xwj02155382
87dc618a21 revert: restore original editor command fallback logic for zed support
- Revert getExecutableCommand to use original fallback logic
- Revert getDiffCommand to use slice(0, -1) pattern
- Maintain proper support for zed editor with multiple command options ['zed', 'zeditor']
- Keep the caching optimization for commandExists
2026-01-06 11:09:29 +08:00
xwj02155382
94a5d828bd refactor: optimize commandExists with caching and simplify editor command logic
- Add caching layer for commandExists in useLaunchEditor.ts to avoid repeated execSync calls
- Import commandExists from core and wrap it with cache in CLI layer
- Simplify getExecutableCommand and getDiffCommand logic to remove redundant fallback
- For editors with single command, directly use first command instead of meaningless self-fallback
- Maintain support for editors with multiple commands (e.g., zed with 'zed' and 'zeditor')
2026-01-06 11:05:03 +08:00
gwinthis
49892a8e17 Merge pull request #1412 from QwenLM/feat/javasdk
Feat/javasdk
2026-01-06 09:49:36 +08:00
skyfire
d1a3e828b7 add license 2026-01-06 09:21:58 +08:00
pomelo
b19bb6cb20 Merge pull request #1378 from afarber/add-german-language-support
feat(i18n): add German language support
2026-01-05 22:14:00 +08:00
skyfire
e8625658ba publish 0.0.1-alpha 2026-01-05 20:27:37 +08:00
tanzhenxin
19f8f631b4 Respect 'tools.core' and 'tools.allowed' settings in non-interactive mode, for both agent execution and custom command 2026-01-05 19:28:52 +08:00
skyfire
a4eb3adea8 for pom 2026-01-05 19:22:50 +08:00
skyfire
7dc7c6380d for pom 2026-01-05 18:14:40 +08:00
skyfire
d2d2b845c5 for README.md 2026-01-05 18:12:48 +08:00
skyfire
96080f84a6 for README.md 2026-01-05 18:00:38 +08:00
skyfire
2b6218e564 for README.md 2026-01-05 17:49:43 +08:00
skyfire
24edf32da8 for README.md 2026-01-05 17:46:18 +08:00
skyfire
51b08f700c for examples 2026-01-05 17:44:07 +08:00
tanzhenxin
58eac7f595 Merge pull request #1397 from liqiongyu/fix/1304-disable-update-nag
fix(cli): skip update check when disableUpdateNag is true
2026-01-05 14:19:13 +08:00
skyfire
32e8b01cf0 for javadoc 2026-01-04 19:39:00 +08:00
skyfire
db9d5cb45d add javadoc 2026-01-04 18:07:56 +08:00
liqoingyu
473cb7b951 fix(cli): skip update check when disableUpdateNag is true 2026-01-04 14:32:38 +08:00
Weaxs
e5cced8813 buildRequest add thinking config && convert Handle reasoning content 2026-01-02 18:59:23 +08:00
skyfire
73848d3867 fix arg 2026-01-01 01:30:58 +08:00
skyfire
6a62167f79 for README.md 2025-12-31 23:36:17 +08:00
skyfire
6ff437671e for README.md 2025-12-31 23:26:20 +08:00
skyfire
30f9e9c782 for README.md 2025-12-31 22:57:20 +08:00
skyfire
e4caa7a856 for partial message processing and event timeout processing 2025-12-31 20:15:51 +08:00
LaZzyMan
aaa66b3172 fix: add tool result and deny warning in text mode 2025-12-31 17:38:33 +08:00
Alexander Farber
0ae59b900c Add German umlauts 2025-12-30 16:50:23 +01:00
Alexander Farber
5a5dae1987 Add German language support and remove a misleading witty phrase 2025-12-30 16:35:34 +01:00
skyfire
ac7ba95d65 add permission 2025-12-30 20:08:05 +08:00
LaZzyMan
15912892f2 fix: missing error throw in non-interactive mode 2025-12-30 19:40:24 +08:00
cris
e3c20b03bd resolve blank 2025-12-30 16:03:11 +08:00
cris
4db50d4158 fix resume not working on Windows 2025-12-30 16:00:55 +08:00
skyfire
4154493640 message and session use 2025-12-29 21:44:02 +08:00
Mingholy
105ad743fa Merge pull request #1284 from tt-a1i/fix/boolean-string-coercion
fix(core): coerce string boolean values in schema validation
2025-12-29 18:27:36 +08:00
mingholy.lmh
ac3f7cb8c8 fix: ts errors in test file 2025-12-29 17:20:25 +08:00
LaZzyMan
61aad5a162 fix: missing whitespaces for stream-json/json output format via GLM 4.7 model 2025-12-29 16:59:09 +08:00
xuewenjie
98c043bf50 test: update tests for detached process changes 2025-12-29 11:37:54 +08:00
顾盼
e27e9a5f18 Merge pull request #1288 from Weaxs/main
support merge ChatCompletionContentPart && add filterEmptyMessages
2025-12-29 10:50:30 +08:00
pomelo
2578d8c151 Merge pull request #1360 from IceyLiu/icey-feat
docs: add AionUi to ecosystem section
2025-12-29 10:12:53 +08:00
cris
f610133660 improve ad hoc method for windows background terminal task 2025-12-28 22:14:16 +08:00
VeryLiu-lab
a877fedc52 docs: add AionUi to ecosystem section
Add AionUi as a graphical interface option for Qwen Code users.
AionUi provides a modern GUI for command-line AI tools including
Qwen Code, offering an alternative to the terminal interface.
2025-12-28 21:56:59 +08:00
pomelo
2bc8079519 Merge pull request #1332 from QwenLM/fix-language
Fix multi-language and documentation related issues.
2025-12-26 23:02:37 +08:00
Mingholy
17eb20c134 Merge pull request #1322 from QwenLM/mingholy/feat/headless-slash-commands
feat: support /compress and /summary commands for non-interactive & ACP
2025-12-26 18:10:55 +08:00
mingholy.lmh
5d59ceb6f3 fix: explicit output if command is not supported 2025-12-26 17:55:03 +08:00
mingholy.lmh
7f645b9726 fix: wrong slash_command in systemMessage 2025-12-26 17:55:03 +08:00
mingholy.lmh
8c109be48c refactor: unified allow list of supported commands in ACP or non-interactive mode 2025-12-26 17:55:03 +08:00
mingholy.lmh
e9a1d9a927 fix: failed unit test cases 2025-12-26 17:55:02 +08:00
mingholy.lmh
8aceddffa2 feat: support /compress and /summary commands for non-interactive & ACP
integration
2025-12-26 17:55:02 +08:00
tanzhenxin
cebe0448d0 Merge pull request #1327 from QwenLM/feat/vscode-ide-companion-context-left
context left on vscode ide companion
2025-12-26 17:26:35 +08:00
LaZzyMan
fe7ff5b148 feat: stable-acp-flag 2025-12-26 17:09:16 +08:00
tanzhenxin
919560e3a4 Merge pull request #1345 from QwenLM/feat/vscode-ida-companion-bash-toolcall-click-2
feat(vscode-ide-companion): in/output part in the bash toolcall can be clicked to open a temporary file
2025-12-26 16:55:35 +08:00
Mingholy
26bd4f882d Merge pull request #1334 from QwenLM/mingholy/chore/skip-bumping-unstable-sdk-version
chore: improve release-sdk workflow
2025-12-26 16:21:53 +08:00
xwj02155382
fd41309ed2 refactor: share editorCommands between core and cli packages
- Export editorCommands from @qwen-code/qwen-code-core
- Remove duplicate editorCommands definition in useLaunchEditor
- Import shared editorCommands configuration in CLI package
- Reduces code duplication and ensures consistency

This change makes the editor configuration a single source of truth,
making it easier to maintain and add new editors in the future.
2025-12-26 16:03:05 +08:00
xwj02155382
48bc0f35d7 perf: add cache for commandExists to fix CI timeout
- Add commandExistsCache Map to avoid repeated execSync calls
- Cache command existence check results to improve test performance
- Fix CI test timeout issue (was timing out after 7m)

The commandExists() function was being called frequently during tests,
causing slow test execution due to repeated system command calls.
By caching the results, we significantly improve performance in test
environments while maintaining the same functionality.
2025-12-26 13:52:37 +08:00
xwj02155382
e30c2dbe23 Merge branch 'fix/editor-launch-issues' of https://github.com/xuewenjie123/qwen-code into fix/editor-launch-issues 2025-12-26 11:22:22 +08:00
xwj02155382
e9204ecba9 fix: resolve editor launch issue on macOS for subagent editing
- Fixed ENOENT error when launching external editors (VS Code, etc.)
- Added proper editor command mapping (vscode -> code, neovim -> nvim, etc.)
- Implemented command existence check to find available editor executable
- Supports both macOS and Windows platform-specific commands

Fixes #1180
2025-12-26 11:11:24 +08:00
xwj02155382
f24bda3d7b fix: resolve editor launch issue on macOS for subagent editing
- Fixed ENOENT error when launching external editors (VS Code, etc.)
- Added proper editor command mapping (vscode -> code, neovim -> nvim, etc.)
- Implemented command existence check to find available editor executable
- Supports both macOS and Windows platform-specific commands

Fixes #1180
2025-12-26 10:17:52 +08:00
tanzhenxin
3787e95572 Merge pull request #1349 from QwenLM/fix/integration-test-3
fix one flaky integration test
2025-12-26 09:43:02 +08:00
tanzhenxin
7233d37bd1 fix one flaky integration test 2025-12-26 09:20:24 +08:00
yiliang114
93dcca5147 fix(vscode-ide-companion): fix test 2025-12-26 00:28:45 +08:00
cwtuan
f7d04323f3 Enhance VS Code extension description with download link (#1341)
Updated the VS Code extension note with a download link for the Qwen Code Companion.
2025-12-25 23:58:52 +08:00
yiliang114
9a27857f10 feat(vscode-ide-companion): support context left 2025-12-25 23:53:55 +08:00
yiliang114
452f4f3c0e Merge branch 'main' of https://github.com/QwenLM/qwen-code into feat/vscode-ide-companion-context-left 2025-12-25 23:51:57 +08:00
yiliang114
5cc01e5e09 feat(vscode-ide-companion): support context left 2025-12-25 23:51:50 +08:00
yiliang114
ac0be9fb84 feat(vscode-ide-companion): in/output part in the bash toolcall can be clicked to open a temporary file. 2025-12-25 16:59:32 +08:00
xuewenjie
5417de4219 Merge branch 'main' of github.com:QwenLM/qwen-code into fix/windows-background-terminal-execute-x 2025-12-25 16:44:30 +08:00
tanzhenxin
257c6705e1 Merge pull request #1343 from QwenLM/fix/integration-test-2
fix one flaky integration test
2025-12-25 16:08:54 +08:00
tanzhenxin
27e7438b75 fix one flaky integration test 2025-12-25 16:08:06 +08:00
tanzhenxin
8a3ff8db12 Merge pull request #1340 from QwenLM/feat/anthropic-provider-1
Follow up on pr #1331
2025-12-25 15:44:52 +08:00
tanzhenxin
26f8b67d4f add missing file 2025-12-25 15:24:56 +08:00
tanzhenxin
b64d636280 anthropic provider support follow-up 2025-12-25 15:24:42 +08:00
tanzhenxin
781c57b438 Merge pull request #1331 from QwenLM/feat/support-anthropic-provider
feat: add Anthropic provider, normalize auth/env config, and centralize logging
2025-12-25 11:44:38 +08:00
mingholy.lmh
c81c24d45d chore: improve release-sdk workflow 2025-12-25 10:46:57 +08:00
tanzhenxin
c53bdde747 support reasoning.budget_tokens config option 2025-12-25 10:18:38 +08:00
tanzhenxin
99db18069d add interleaved-thinking-2025-05-14 beta header for anthropic content generator 2025-12-25 09:42:06 +08:00
skyfire
422998d7f0 add ProcessTransport unitTest and fix bug 2025-12-24 21:20:47 +08:00
tanzhenxin
a0a5b831d4 add a few more tests 2025-12-24 20:54:40 +08:00
skyfire
68628bf952 add ProcessTransport 2025-12-24 20:45:17 +08:00
tanzhenxin
8f74dd224c add tests for loggingContentGenerator 2025-12-24 19:41:46 +08:00
tanzhenxin
b931d28f35 feat(core,cli): add Anthropic provider, normalize auth/env config, and centralize logging 2025-12-24 19:00:56 +08:00
mingholy.lmh
4407597794 chore: skip bumping sdk version when release nightly/preview or dry run 2025-12-24 18:12:23 +08:00
Mingholy
9f65bd3b39 Merge pull request #1325 from QwenLM/mingholy/chore/revert-sdk-version
chore: revert sdk-typescript version to 0.1.0 and update release workflow
2025-12-24 17:22:02 +08:00
pomelo
2b3830cf83 Merge pull request #1314 from QwenLM/feat/skills
Add experimental Skills feature
2025-12-24 17:01:28 +08:00
yiliang114
90bf101040 chore(vscode-ide-companion): simplify the implementation of context remaining 2025-12-24 14:29:25 +08:00
mingholy.lmh
2b9140940d chore: update release-sdk.yml to sync lockfile 2025-12-24 11:43:46 +08:00
mingholy.lmh
4efdea0981 chore: revert sdk-typescript version to 0.1.0 and update release workflow 2025-12-24 11:04:33 +08:00
pomelo
05791d4200 Merge pull request #1302 from afarber/1095-figma-mcp-client-name
fix(mcp): update OAuth client name for Figma MCP server compatibility
2025-12-24 10:41:51 +08:00
Mingholy
add35d2904 Merge pull request #1321 from QwenLM/mingholy/fix/sdk-cli-path
fix: cli path parsing issue in Windows
2025-12-24 10:35:52 +08:00
yiliang114
660901e1fd Merge branch 'main' of https://github.com/QwenLM/qwen-code into feat/vscode-ide-companion-context-left 2025-12-24 10:10:32 +08:00
skyfire
e5efad89e0 Merge branch 'feat/javasdk' of github.com:QwenLM/qwen-code into feat/javasdk 2025-12-24 10:01:28 +08:00
yiliang114
8e64c5acaf feat(vscode-ide-companion): support context left 2025-12-24 01:09:21 +08:00
tanzhenxin
bc2a7efcb3 Merge pull request #1297 from QwenLM/feat/gemini-3-integration
Add Gemini provider, remove legacy Google OAuth, and tune generation …
2025-12-23 22:16:39 +08:00
tanzhenxin
1dfd880e17 reset default topP to 0.95 as claude models do not allow topP smaller than 0.95 2025-12-23 21:58:28 +08:00
skyfire
e09bb5f5c0 modify junit version to 5 and add org developers 2025-12-23 20:14:11 +08:00
乾离
24d11179d8 modify junit version to 5 and add org developers 2025-12-23 20:04:58 +08:00
mingholy.lmh
4f970c9987 fix: cli path parsing issue in Windows 2025-12-23 19:00:43 +08:00
乾离
2ef8b6f350 ProcessTransport stru init 2025-12-23 17:44:28 +08:00
乾离
5779f7ab1d project initialize 2025-12-23 17:20:12 +08:00
tanzhenxin
251031cfc5 fix a link in skills.md 2025-12-23 15:09:23 +08:00
tanzhenxin
10a0c843c1 fix flaky tests 2025-12-23 14:52:03 +08:00
tanzhenxin
77c257d9d0 fix flaky tests 2025-12-23 14:50:47 +08:00
tanzhenxin
955547d523 minor updates to address review comments 2025-12-23 14:35:41 +08:00
tanzhenxin
3bc862df89 unset temperature, and set topP=0.8 for default provider 2025-12-23 13:56:06 +08:00
tanzhenxin
4311af96eb add docs 2025-12-23 10:53:09 +08:00
tanzhenxin
b49c11e9a2 add experimental-skills flag to enable skills feature 2025-12-23 10:24:57 +08:00
tanzhenxin
9cdd85c62a Merge branch 'main' into feat/skills 2025-12-22 16:00:57 +08:00
tanzhenxin
87d8d82be7 special handling for summarized thinking 2025-12-22 14:07:23 +08:00
刘伟光
43e0815def feat: update the check logic run before connecting to the IDE, detecting whether the IDE extension is installed 2025-12-22 11:22:51 +08:00
刘伟光
0c14f4ce08 Merge branch 'main' into fix-sandbox-ideInstall 2025-12-22 11:00:01 +08:00
tanzhenxin
fefc138485 Merge branch 'main' into feat/gemini-3-integration 2025-12-22 10:08:15 +08:00
Alexander Farber
18e9b2340b Change header to: Copyright 2025 Qwen Team 2025-12-20 09:37:57 +01:00
Alexander Farber
ad427da340 Move constants to a new file for SSOT 2025-12-20 09:35:12 +01:00
Alexander Farber
484e0fd943 Change the client name to: Gemini CLI MCP Client 2025-12-20 09:21:15 +01:00
tanzhenxin
b8a16d362a Merge branch 'main' into feat/gemini-3-integration 2025-12-19 16:39:42 +08:00
tanzhenxin
17129024f4 Add Gemini provider, remove legacy Google OAuth, and tune generation defaults 2025-12-19 16:26:54 +08:00
刘伟光
34d8dbf9b2 feat: make the install prompt compatible with the host machine across different IDEs 2025-12-19 11:07:33 +08:00
刘伟光
b3b2bc6ad5 feat: make the install prompt compatible with the host machine across different IDEs 2025-12-19 10:39:05 +08:00
Weaxs
d2bc46cbb4 remove filterEmptyMessages 2025-12-18 00:55:47 +08:00
Weaxs
84eb5c562f support merge ChatCompletionContentPart && add filterEmptyMessages 2025-12-18 00:46:48 +08:00
Tu Shaokun
7b01b26ff5 perf(core): avoid recompiling schema on retry 2025-12-17 16:27:42 +08:00
Tu Shaokun
0f3e97ea1c fix(core): coerce string boolean values in schema validation
Self-hosted LLMs sometimes return "true"/"false" strings instead of
actual boolean values for tool parameters like `is_background`. This
causes schema validation to fail with type errors.

Fixes #1267
2025-12-17 16:14:02 +08:00
刘伟光
6ca54beba2 feat: improve the unfriendly error message shown after executing the ideinstall command in the sandbox environment 2025-12-17 13:38:38 +08:00
xuewenjie
8673426d5c fix(core): use current chunk for shell output update instead of cumulative 2025-12-16 10:26:20 +08:00
tanzhenxin
177fc42f04 Merge branch 'main' into feat/skills 2025-12-15 14:25:56 +08:00
xuewenjie
b272ac0119 Fix: Make cleanup strategy dynamic to support testing mocks 2025-12-12 17:47:03 +08:00
xuewenjie
574d89da14 Refactor ShellExecutionService cleanup to use strategy pattern 2025-12-12 17:03:04 +08:00
tanzhenxin
2560c2d1a2 Merge branch 'main' into feat/skills 2025-12-11 14:50:07 +08:00
tanzhenxin
bd6e16d41b draft version of skill tool feature 2025-12-10 17:18:44 +08:00
xuewenjie
16939c0bc8 Refactor ShellTool: remove ping hack and timeout, optimize cleanup 2025-12-10 13:49:51 +08:00
xuewenjie
6fc09a82fb fix: use && for windows background keep-alive ping and add test 2025-12-09 13:33:42 +08:00
xuewenjie
d622f8d1bf Merge branch 'main' of github.com:QwenLM/qwen-code into fix/windows-background-terminal-execute-x 2025-12-09 11:32:17 +08:00
xuewenjie
28d178b5c1 fix: handle windows background execution errors and add tests 2025-12-09 11:24:30 +08:00
xuewenjie
4c69d536ac test: fix shell tool tests by updating pid expectation and AbortSignal matching 2025-12-05 10:47:06 +08:00
xuewenjie
403fd06117 chore: update .gitignore 2025-12-04 15:55:17 +08:00
xuewenjie
d9928eab66 fix: improve windows background process handling and cleanup 2025-12-04 15:55:11 +08:00
363 changed files with 30121 additions and 11977 deletions

.github/CODEOWNERS (vendored, new file, +3)

@@ -0,0 +1,3 @@
* @tanzhenxin @DennisYu07 @gwinthis @LaZzyMan @pomelo-nwu @Mingholy
# SDK TypeScript package changes require review from Mingholy
packages/sdk-typescript/** @Mingholy


@@ -33,6 +33,10 @@ on:
type: 'boolean'
default: false
concurrency:
group: '${{ github.workflow }}'
cancel-in-progress: false
jobs:
release-sdk:
runs-on: 'ubuntu-latest'
@@ -46,6 +50,7 @@ jobs:
packages: 'write'
id-token: 'write'
issues: 'write'
pull-requests: 'write'
outputs:
RELEASE_TAG: '${{ steps.version.outputs.RELEASE_TAG }}'
@@ -86,6 +91,8 @@ jobs:
with:
node-version-file: '.nvmrc'
cache: 'npm'
registry-url: 'https://registry.npmjs.org'
scope: '@qwen-code'
- name: 'Install Dependencies'
run: |-
@@ -121,6 +128,14 @@ jobs:
IS_PREVIEW: '${{ steps.vars.outputs.is_preview }}'
MANUAL_VERSION: '${{ inputs.version }}'
- name: 'Set SDK package version (local only)'
env:
RELEASE_VERSION: '${{ steps.version.outputs.RELEASE_VERSION }}'
run: |-
# Ensure the package version matches the computed release version.
# This is required for nightly/preview because npm does not allow re-publishing the same version.
npm version -w @qwen-code/sdk "${RELEASE_VERSION}" --no-git-tag-version --allow-same-version
- name: 'Build CLI Bundle'
run: |
npm run build
@@ -153,7 +168,21 @@ jobs:
git config user.name "github-actions[bot]"
git config user.email "github-actions[bot]@users.noreply.github.com"
- name: 'Build SDK'
working-directory: 'packages/sdk-typescript'
run: |-
npm run build
- name: 'Publish @qwen-code/sdk'
working-directory: 'packages/sdk-typescript'
run: |-
npm publish --access public --tag=${{ steps.version.outputs.NPM_TAG }} ${{ steps.vars.outputs.is_dry_run == 'true' && '--dry-run' || '' }}
env:
NODE_AUTH_TOKEN: '${{ secrets.NPM_TOKEN }}'
- name: 'Create and switch to a release branch'
if: |-
${{ steps.vars.outputs.is_dry_run == 'false' && steps.vars.outputs.is_nightly == 'false' && steps.vars.outputs.is_preview == 'false' }}
id: 'release_branch'
env:
RELEASE_TAG: '${{ steps.version.outputs.RELEASE_TAG }}'
@@ -162,50 +191,22 @@ jobs:
git switch -c "${BRANCH_NAME}"
echo "BRANCH_NAME=${BRANCH_NAME}" >> "${GITHUB_OUTPUT}"
- name: 'Update package version'
working-directory: 'packages/sdk-typescript'
env:
RELEASE_VERSION: '${{ steps.version.outputs.RELEASE_VERSION }}'
run: |-
npm version "${RELEASE_VERSION}" --no-git-tag-version --allow-same-version
- name: 'Commit and Conditionally Push package version'
- name: 'Commit and Push package version (stable only)'
if: |-
${{ steps.vars.outputs.is_dry_run == 'false' && steps.vars.outputs.is_nightly == 'false' && steps.vars.outputs.is_preview == 'false' }}
env:
BRANCH_NAME: '${{ steps.release_branch.outputs.BRANCH_NAME }}'
IS_DRY_RUN: '${{ steps.vars.outputs.is_dry_run }}'
RELEASE_TAG: '${{ steps.version.outputs.RELEASE_TAG }}'
run: |-
git add packages/sdk-typescript/package.json
# Only persist version bumps after a successful publish.
git add packages/sdk-typescript/package.json package-lock.json
if git diff --staged --quiet; then
echo "No version changes to commit"
else
git commit -m "chore(release): sdk-typescript ${RELEASE_TAG}"
fi
if [[ "${IS_DRY_RUN}" == "false" ]]; then
echo "Pushing release branch to remote..."
git push --set-upstream origin "${BRANCH_NAME}" --follow-tags
else
echo "Dry run enabled. Skipping push."
fi
- name: 'Build SDK'
working-directory: 'packages/sdk-typescript'
run: |-
npm run build
- name: 'Configure npm for publishing'
uses: 'actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020' # ratchet:actions/setup-node@v4
with:
node-version-file: '.nvmrc'
registry-url: 'https://registry.npmjs.org'
scope: '@qwen-code'
- name: 'Publish @qwen-code/sdk'
working-directory: 'packages/sdk-typescript'
run: |-
npm publish --access public --tag=${{ steps.version.outputs.NPM_TAG }} ${{ steps.vars.outputs.is_dry_run == 'true' && '--dry-run' || '' }}
env:
NODE_AUTH_TOKEN: '${{ secrets.NPM_TOKEN }}'
echo "Pushing release branch to remote..."
git push --set-upstream origin "${BRANCH_NAME}" --follow-tags
- name: 'Create GitHub Release and Tag'
if: |-
@@ -215,12 +216,57 @@ jobs:
RELEASE_BRANCH: '${{ steps.release_branch.outputs.BRANCH_NAME }}'
RELEASE_TAG: '${{ steps.version.outputs.RELEASE_TAG }}'
PREVIOUS_RELEASE_TAG: '${{ steps.version.outputs.PREVIOUS_RELEASE_TAG }}'
IS_NIGHTLY: '${{ steps.vars.outputs.is_nightly }}'
IS_PREVIEW: '${{ steps.vars.outputs.is_preview }}'
REF: '${{ github.event.inputs.ref || github.sha }}'
run: |-
# For stable releases, use the release branch; for nightly/preview, use the current ref
if [[ "${IS_NIGHTLY}" == "true" || "${IS_PREVIEW}" == "true" ]]; then
TARGET="${REF}"
PRERELEASE_FLAG="--prerelease"
else
TARGET="${RELEASE_BRANCH}"
PRERELEASE_FLAG=""
fi
gh release create "sdk-typescript-${RELEASE_TAG}" \
--target "$RELEASE_BRANCH" \
--target "${TARGET}" \
--title "SDK TypeScript Release ${RELEASE_TAG}" \
--notes-start-tag "sdk-typescript-${PREVIOUS_RELEASE_TAG}" \
--generate-notes
--generate-notes \
${PRERELEASE_FLAG}
- name: 'Create PR to merge release branch into main'
if: |-
${{ steps.vars.outputs.is_dry_run == 'false' && steps.vars.outputs.is_nightly == 'false' && steps.vars.outputs.is_preview == 'false' }}
id: 'pr'
env:
GITHUB_TOKEN: '${{ secrets.CI_BOT_PAT }}'
RELEASE_BRANCH: '${{ steps.release_branch.outputs.BRANCH_NAME }}'
RELEASE_TAG: '${{ steps.version.outputs.RELEASE_TAG }}'
run: |-
set -euo pipefail
pr_url="$(gh pr list --head "${RELEASE_BRANCH}" --base main --json url --jq '.[0].url')"
if [[ -z "${pr_url}" ]]; then
pr_url="$(gh pr create \
--base main \
--head "${RELEASE_BRANCH}" \
--title "chore(release): sdk-typescript ${RELEASE_TAG}" \
--body "Automated release PR for sdk-typescript ${RELEASE_TAG}.")"
fi
echo "PR_URL=${pr_url}" >> "${GITHUB_OUTPUT}"
- name: 'Enable auto-merge for release PR'
if: |-
${{ steps.vars.outputs.is_dry_run == 'false' && steps.vars.outputs.is_nightly == 'false' && steps.vars.outputs.is_preview == 'false' }}
env:
GITHUB_TOKEN: '${{ secrets.CI_BOT_PAT }}'
PR_URL: '${{ steps.pr.outputs.PR_URL }}'
run: |-
set -euo pipefail
gh pr merge "${PR_URL}" --merge --auto --delete-branch
- name: 'Create Issue on Failure'
if: |-

.gitignore (vendored, +1)

@@ -23,6 +23,7 @@ package-lock.json
.idea
*.iml
.cursor
.qoder
# OS metadata
.DS_Store


@@ -191,6 +191,7 @@ See [settings](https://qwenlm.github.io/qwen-code-docs/en/users/configuration/se
Looking for a graphical interface?
- [**AionUi**](https://github.com/iOfficeAI/AionUi) A modern GUI for command-line AI tools including Qwen Code
- [**Gemini CLI Desktop**](https://github.com/Piebald-AI/gemini-cli-desktop) A cross-platform desktop/web/mobile UI for Qwen Code
## Troubleshooting


@@ -11,6 +11,7 @@ export default {
type: 'separator',
},
'sdk-typescript': 'Typescript SDK',
'sdk-java': 'Java SDK(alpha)',
'Dive Into Qwen Code': {
title: 'Dive Into Qwen Code',
type: 'separator',

docs/developers/sdk-java.md (new file, +312)

@@ -0,0 +1,312 @@
# Qwen Code Java SDK
The Qwen Code Java SDK is a minimal, experimental SDK for programmatic access to Qwen Code functionality. It provides a Java interface for interacting with the Qwen Code CLI, allowing developers to integrate Qwen Code capabilities into their Java applications.
## Requirements
- Java >= 1.8
- Maven >= 3.6.0 (for building from source)
- qwen-code >= 0.5.0
### Dependencies
- **Logging**: ch.qos.logback:logback-classic
- **Utilities**: org.apache.commons:commons-lang3
- **JSON Processing**: com.alibaba.fastjson2:fastjson2
- **Testing**: JUnit 5 (org.junit.jupiter:junit-jupiter)
## Installation
Add the following dependency to your Maven `pom.xml`:
```xml
<dependency>
<groupId>com.alibaba</groupId>
<artifactId>qwencode-sdk</artifactId>
<version>{$version}</version>
</dependency>
```
Or if using Gradle, add to your `build.gradle`:
```gradle
implementation 'com.alibaba:qwencode-sdk:{$version}'
```
## Building and Running
### Build Commands
```bash
# Compile the project
mvn compile
# Run tests
mvn test
# Package the JAR
mvn package
# Install to local repository
mvn install
```
## Quick Start
The simplest way to use the SDK is through the `QwenCodeCli.simpleQuery()` method:
```java
public static void runSimpleExample() {
List<String> result = QwenCodeCli.simpleQuery("hello world");
result.forEach(logger::info);
}
```
For more advanced usage with custom transport options:
```java
public static void runTransportOptionsExample() {
TransportOptions options = new TransportOptions()
.setModel("qwen3-coder-flash")
.setPermissionMode(PermissionMode.AUTO_EDIT)
.setCwd("./")
.setEnv(new HashMap<String, String>() {{put("CUSTOM_VAR", "value");}})
.setIncludePartialMessages(true)
.setTurnTimeout(new Timeout(120L, TimeUnit.SECONDS))
.setMessageTimeout(new Timeout(90L, TimeUnit.SECONDS))
.setAllowedTools(Arrays.asList("read_file", "write_file", "list_directory"));
List<String> result = QwenCodeCli.simpleQuery("who are you, what are your capabilities?", options);
result.forEach(logger::info);
}
```
For streaming content handling with custom content consumers:
```java
public static void runStreamingExample() {
QwenCodeCli.simpleQuery("who are you, what are your capabilities?",
new TransportOptions().setMessageTimeout(new Timeout(10L, TimeUnit.SECONDS)), new AssistantContentSimpleConsumers() {
@Override
public void onText(Session session, TextAssistantContent textAssistantContent) {
logger.info("Text content received: {}", textAssistantContent.getText());
}
@Override
public void onThinking(Session session, ThingkingAssistantContent thingkingAssistantContent) {
logger.info("Thinking content received: {}", thingkingAssistantContent.getThinking());
}
@Override
public void onToolUse(Session session, ToolUseAssistantContent toolUseContent) {
logger.info("Tool use content received: {} with arguments: {}",
toolUseContent, toolUseContent.getInput());
}
@Override
public void onToolResult(Session session, ToolResultAssistantContent toolResultContent) {
logger.info("Tool result content received: {}", toolResultContent.getContent());
}
@Override
public void onOtherContent(Session session, AssistantContent<?> other) {
logger.info("Other content received: {}", other);
}
@Override
public void onUsage(Session session, AssistantUsage assistantUsage) {
logger.info("Usage information received: Input tokens: {}, Output tokens: {}",
assistantUsage.getUsage().getInputTokens(), assistantUsage.getUsage().getOutputTokens());
}
}.setDefaultPermissionOperation(Operation.allow));
logger.info("Streaming example completed.");
}
```
For more examples, see `src/test/java/com/alibaba/qwen/code/cli/example`.
## Architecture
The SDK follows a layered architecture:
- **API Layer**: Provides the main entry points through `QwenCodeCli` class with simple static methods for basic usage
- **Session Layer**: Manages communication sessions with the Qwen Code CLI through the `Session` class
- **Transport Layer**: Handles the communication mechanism between the SDK and CLI process (currently using process transport via `ProcessTransport`)
- **Protocol Layer**: Defines data structures for communication based on the CLI protocol
- **Utils**: Common utilities for concurrent execution, timeout handling, and error management
## Key Features
### Permission Modes
The SDK supports different permission modes for controlling tool execution:
- **`default`**: Write tools are denied unless approved via `canUseTool` callback or in `allowedTools`. Read-only tools execute without confirmation.
- **`plan`**: Blocks all write tools, instructing AI to present a plan first.
- **`auto-edit`**: Auto-approve edit tools (edit, write_file) while other tools require confirmation.
- **`yolo`**: All tools execute automatically without confirmation.
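As a rough sketch of how a permission mode combines with pre-approved tools, the example below reuses only the `TransportOptions` setters already shown in the Quick Start (`setPermissionMode`, `setAllowedTools`); SDK imports are omitted because the package layout is not documented here, and any other detail should be treated as an assumption.
```java
public static void runPermissionModeExample() {
    // Sketch: auto-approve edit tools, and pre-approve a few read-only tools so they
    // run without a canUseTool confirmation. Everything else follows the default rules.
    TransportOptions options = new TransportOptions()
            .setPermissionMode(PermissionMode.AUTO_EDIT)
            .setAllowedTools(Arrays.asList("read_file", "list_directory"));

    List<String> result = QwenCodeCli.simpleQuery("tidy up the project README", options);
    result.forEach(logger::info);
}
```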
### Session Event Consumers and Assistant Content Consumers
The SDK provides two key interfaces for handling events and content from the CLI:
#### SessionEventConsumers Interface
The `SessionEventConsumers` interface provides callbacks for different types of messages during a session:
- `onSystemMessage`: Handles system messages from the CLI (receives Session and SDKSystemMessage)
- `onResultMessage`: Handles result messages from the CLI (receives Session and SDKResultMessage)
- `onAssistantMessage`: Handles assistant messages (AI responses) (receives Session and SDKAssistantMessage)
- `onPartialAssistantMessage`: Handles partial assistant messages during streaming (receives Session and SDKPartialAssistantMessage)
- `onUserMessage`: Handles user messages (receives Session and SDKUserMessage)
- `onOtherMessage`: Handles other types of messages (receives Session and String message)
- `onControlResponse`: Handles control responses (receives Session and CLIControlResponse)
- `onControlRequest`: Handles control requests (receives Session and CLIControlRequest, returns CLIControlResponse)
- `onPermissionRequest`: Handles permission requests (receives Session and CLIControlRequest<CLIControlPermissionRequest>, returns Behavior)
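The snippet below is a speculative sketch of a message-level consumer. It assumes `SessionEventSimpleConsumers` (referenced under the timeout defaults below) implements this interface with overridable no-op defaults, and that handlers without a documented return value return `void`; how the consumer is attached to a session is not shown in this document and is therefore omitted.
```java
public static SessionEventConsumers buildEventConsumers() {
    // Sketch only: assumes SessionEventSimpleConsumers provides default implementations,
    // so only the callbacks of interest need to be overridden.
    return new SessionEventSimpleConsumers() {
        @Override
        public void onAssistantMessage(Session session, SDKAssistantMessage message) {
            // Complete assistant responses arrive here.
            logger.info("Assistant message: {}", message);
        }

        @Override
        public void onResultMessage(Session session, SDKResultMessage result) {
            // Final result of the turn.
            logger.info("Result message: {}", result);
        }
    };
}
```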
#### AssistantContentConsumers Interface
The `AssistantContentConsumers` interface handles different types of content within assistant messages:
- `onText`: Handles text content (receives Session and TextAssistantContent)
- `onThinking`: Handles thinking content (receives Session and ThingkingAssistantContent)
- `onToolUse`: Handles tool use content (receives Session and ToolUseAssistantContent)
- `onToolResult`: Handles tool result content (receives Session and ToolResultAssistantContent)
- `onOtherContent`: Handles other content types (receives Session and AssistantContent)
- `onUsage`: Handles usage information (receives Session and AssistantUsage)
- `onPermissionRequest`: Handles permission requests (receives Session and CLIControlPermissionRequest, returns Behavior)
- `onOtherControlRequest`: Handles other control requests (receives Session and ControlRequestPayload, returns ControlResponsePayload)
#### Relationship Between the Interfaces
**Important Note on Event Hierarchy:**
- `SessionEventConsumers` is the **high-level** event processor that handles different message types (system, assistant, user, etc.)
- `AssistantContentConsumers` is the **low-level** content processor that handles different types of content within assistant messages (text, tools, thinking, etc.)
**Processor Relationship:**
- `SessionEventConsumers` → `AssistantContentConsumers` (SessionEventConsumers uses AssistantContentConsumers to process content within assistant messages)
**Event Derivation Relationships:**
- `onAssistantMessage` → `onText`, `onThinking`, `onToolUse`, `onToolResult`, `onOtherContent`, `onUsage`
- `onPartialAssistantMessage` → `onText`, `onThinking`, `onToolUse`, `onToolResult`, `onOtherContent`
- `onControlRequest` → `onPermissionRequest`, `onOtherControlRequest`
**Event Timeout Relationships:**
Each event handler method has a corresponding timeout method that allows customizing the timeout behavior for that specific event:
- `onSystemMessage` → `onSystemMessageTimeout`
- `onResultMessage` → `onResultMessageTimeout`
- `onAssistantMessage` → `onAssistantMessageTimeout`
- `onPartialAssistantMessage` → `onPartialAssistantMessageTimeout`
- `onUserMessage` → `onUserMessageTimeout`
- `onOtherMessage` → `onOtherMessageTimeout`
- `onControlResponse` → `onControlResponseTimeout`
- `onControlRequest` → `onControlRequestTimeout`
For AssistantContentConsumers timeout methods:
- `onText` → `onTextTimeout`
- `onThinking` → `onThinkingTimeout`
- `onToolUse` → `onToolUseTimeout`
- `onToolResult` → `onToolResultTimeout`
- `onOtherContent` → `onOtherContentTimeout`
- `onPermissionRequest` → `onPermissionRequestTimeout`
- `onOtherControlRequest` → `onOtherControlRequestTimeout`
**Default Timeout Values:**
- `SessionEventSimpleConsumers` default timeout: 180 seconds (Timeout.TIMEOUT_180_SECONDS)
- `AssistantContentSimpleConsumers` default timeout: 60 seconds (Timeout.TIMEOUT_60_SECONDS)
**Timeout Hierarchy Requirements:**
For proper operation, the following timeout relationships should be maintained:
- `onAssistantMessageTimeout` return value should be greater than `onTextTimeout`, `onThinkingTimeout`, `onToolUseTimeout`, `onToolResultTimeout`, and `onOtherContentTimeout` return values
- `onControlRequestTimeout` return value should be greater than `onPermissionRequestTimeout` and `onOtherControlRequestTimeout` return values
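As an illustration of the per-event timeout overrides, the sketch below assumes each `*Timeout` method is a no-arg override returning a `Timeout`; only the `Timeout(long, TimeUnit)` constructor already used in the Quick Start is relied on, and the exact signatures are assumptions.
```java
public static AssistantContentConsumers buildContentConsumers() {
    return new AssistantContentSimpleConsumers() {
        @Override
        public void onText(Session session, TextAssistantContent text) {
            logger.info("Text: {}", text.getText());
        }

        @Override
        public Timeout onTextTimeout() {
            // Assumed signature: shrink the per-text-chunk timeout below the 60-second default.
            return new Timeout(30L, TimeUnit.SECONDS);
        }
    };
}
```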
### Transport Options
The `TransportOptions` class allows configuration of how the SDK communicates with the Qwen Code CLI:
- `pathToQwenExecutable`: Path to the Qwen Code CLI executable
- `cwd`: Working directory for the CLI process
- `model`: AI model to use for the session
- `permissionMode`: Permission mode that controls tool execution
- `env`: Environment variables to pass to the CLI process
- `maxSessionTurns`: Limits the number of conversation turns in a session
- `coreTools`: List of core tools that should be available to the AI
- `excludeTools`: List of tools to exclude from being available to the AI
- `allowedTools`: List of tools that are pre-approved for use without additional confirmation
- `authType`: Authentication type to use for the session
- `includePartialMessages`: Enables receiving partial messages during streaming responses
- `skillsEnable`: Enables or disables skills functionality for the session
- `turnTimeout`: Timeout for a complete turn of conversation
- `messageTimeout`: Timeout for individual messages within a turn
- `resumeSessionId`: ID of a previous session to resume
- `otherOptions`: Additional command-line options to pass to the CLI
### Session Control Features
- **Session creation**: Use `QwenCodeCli.newSession()` to create a new session with custom options
- **Session management**: The `Session` class provides methods to send prompts, handle responses, and manage session state
- **Session cleanup**: Always close sessions using `session.close()` to properly terminate the CLI process
- **Session resumption**: Use `setResumeSessionId()` in `TransportOptions` to resume a previous session
- **Session interruption**: Use `session.interrupt()` to interrupt a currently running prompt
- **Dynamic model switching**: Use `session.setModel()` to change the model during a session
- **Dynamic permission mode switching**: Use `session.setPermissionMode()` to change the permission mode during a session
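Putting these controls together, the following is an illustrative lifecycle sketch rather than verbatim API usage: `newSession`, `setModel`, `setPermissionMode`, and `close` are listed above, while the exact `newSession` signature and the `sendPrompt` method name are assumptions made for the example.
```java
public static void runSessionLifecycleSketch() {
    TransportOptions options = new TransportOptions()
            .setModel("qwen3-coder-flash")
            .setResumeSessionId("session-id-from-a-previous-run"); // optional: resume an earlier session

    Session session = QwenCodeCli.newSession(options); // signature assumed
    try {
        session.sendPrompt("summarize the open TODOs in this repository"); // hypothetical method name
        session.setPermissionMode(PermissionMode.AUTO_EDIT); // switch permission mode mid-session
        session.setModel("qwen3-coder-plus");                // switch model mid-session
    } finally {
        session.close(); // always terminate the underlying CLI process
    }
}
```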
### Thread Pool Configuration
The SDK uses a thread pool for managing concurrent operations with the following default configuration:
- **Core Pool Size**: 30 threads
- **Maximum Pool Size**: 100 threads
- **Keep-Alive Time**: 60 seconds
- **Queue Capacity**: 300 tasks (using LinkedBlockingQueue)
- **Thread Naming**: "qwen_code_cli-pool-{number}"
- **Daemon Threads**: false
- **Rejected Execution Handler**: CallerRunsPolicy
## Error Handling
The SDK provides specific exception types for different error scenarios:
- `SessionControlException`: Thrown when there's an issue with session control (creation, initialization, etc.)
- `SessionSendPromptException`: Thrown when there's an issue sending a prompt or receiving a response
- `SessionClosedException`: Thrown when attempting to use a closed session
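The sketch below shows one way these exceptions might be handled around a session; whether they are checked, which methods throw them, and their inheritance relationships are not documented here, so the catch structure and the `sendPrompt` call are assumptions.
```java
public static void runErrorHandlingSketch() {
    Session session = null;
    try {
        session = QwenCodeCli.newSession(new TransportOptions());
        session.sendPrompt("hello"); // hypothetical method name, as noted above
    } catch (SessionClosedException e) {
        logger.error("Session was already closed", e);
    } catch (SessionSendPromptException e) {
        logger.error("Failed to send the prompt or receive a response", e);
    } catch (SessionControlException e) {
        logger.error("Failed to create or initialize the session", e);
    } finally {
        if (session != null) {
            session.close();
        }
    }
}
```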
## FAQ / Troubleshooting
### Q: Do I need to install the Qwen CLI separately?
A: Yes. The Qwen Code CLI must be installed separately (version 0.5.5 or higher).
### Q: What Java versions are supported?
A: The SDK requires Java 1.8 or higher.
### Q: How do I handle long-running requests?
A: The SDK includes timeout utilities. You can configure timeouts using the `Timeout` class in `TransportOptions`.
### Q: Why are some tools not executing?
A: This is likely due to permission modes. Check your permission mode settings and consider using `allowedTools` to pre-approve certain tools.
### Q: How do I resume a previous session?
A: Use the `setResumeSessionId()` method in `TransportOptions` to resume a previous session.
### Q: Can I customize the environment for the CLI process?
A: Yes, use the `setEnv()` method in `TransportOptions` to pass environment variables to the CLI process.
## License
Apache-2.0 - see [LICENSE](./LICENSE) for details.


@@ -43,6 +43,7 @@ Qwen Code uses JSON settings files for persistent configuration. There are four
In addition to a project settings file, a project's `.qwen` directory can contain other project-specific files related to Qwen Code's operation, such as:
- [Custom sandbox profiles](../features/sandbox) (e.g. `.qwen/sandbox-macos-custom.sb`, `.qwen/sandbox.Dockerfile`).
- [Agent Skills](../features/skills) (experimental) under `.qwen/skills/` (each Skill is a directory containing a `SKILL.md`).
### Available settings in `settings.json`
@@ -135,6 +136,95 @@ Settings are organized into categories. All settings should be placed within the
- `"./custom-logs"` - Logs to `./custom-logs` relative to current directory
- `"/tmp/openai-logs"` - Logs to absolute path `/tmp/openai-logs`
#### modelProviders
Use `modelProviders` to declare curated model lists per auth type that the `/model` picker can switch between. Keys must be valid auth types (`openai`, `anthropic`, `gemini`, `vertex-ai`, etc.). Each entry requires an `id` and **must include `envKey`**, with optional `name`, `description`, `baseUrl`, and `generationConfig`. Credentials are never persisted in settings; the runtime reads them from `process.env[envKey]`. Qwen OAuth models remain hard-coded and cannot be overridden.
##### Example
```json
{
"modelProviders": {
"openai": [
{
"id": "gpt-4o",
"name": "GPT-4o",
"envKey": "OPENAI_API_KEY",
"baseUrl": "https://api.openai.com/v1",
"generationConfig": {
"timeout": 60000,
"maxRetries": 3,
"samplingParams": { "temperature": 0.2 }
}
}
],
"anthropic": [
{
"id": "claude-3-5-sonnet",
"envKey": "ANTHROPIC_API_KEY",
"baseUrl": "https://api.anthropic.com/v1"
}
],
"gemini": [
{
"id": "gemini-2.0-flash",
"name": "Gemini 2.0 Flash",
"envKey": "GEMINI_API_KEY",
"baseUrl": "https://generativelanguage.googleapis.com"
}
],
"vertex-ai": [
{
"id": "gemini-1.5-pro-vertex",
"envKey": "GOOGLE_API_KEY",
"baseUrl": "https://generativelanguage.googleapis.com"
}
]
}
}
```
> [!note]
> Only the `/model` command exposes non-default auth types. Anthropic, Gemini, Vertex AI, etc., must be defined via `modelProviders`. The `/auth` command intentionally lists only the built-in Qwen OAuth and OpenAI flows.
##### Resolution layers and atomicity
The effective auth/model/credential values are chosen per field using the following precedence (first present wins). You can combine `--auth-type` with `--model` to point directly at a provider entry; these CLI flags run before other layers.
| Layer (highest → lowest) | authType | model | apiKey | baseUrl | apiKeyEnvKey | proxy |
| -------------------------- | ----------------------------------- | ----------------------------------------------- | --------------------------------------------------- | ---------------------------------------------------- | ---------------------- | --------------------------------- |
| Programmatic overrides | `/auth ` | `/auth` input | `/auth` input | `/auth` input | — | — |
| Model provider selection | — | `modelProvider.id` | `env[modelProvider.envKey]` | `modelProvider.baseUrl` | `modelProvider.envKey` | — |
| CLI arguments | `--auth-type` | `--model` | `--openaiApiKey` (or provider-specific equivalents) | `--openaiBaseUrl` (or provider-specific equivalents) | — | — |
| Environment variables | — | Provider-specific mapping (e.g. `OPENAI_MODEL`) | Provider-specific mapping (e.g. `OPENAI_API_KEY`) | Provider-specific mapping (e.g. `OPENAI_BASE_URL`) | — | — |
| Settings (`settings.json`) | `security.auth.selectedType` | `model.name` | `security.auth.apiKey` | `security.auth.baseUrl` | — | — |
| Default / computed | Falls back to `AuthType.QWEN_OAUTH` | Built-in default (OpenAI ⇒ `qwen3-coder-plus`) | — | — | — | `Config.getProxy()` if configured |
\*When present, CLI auth flags override settings. Otherwise, `security.auth.selectedType` or the implicit default determine the auth type. Qwen OAuth and OpenAI are the only auth types surfaced without extra configuration.
Model-provider sourced values are applied atomically: once a provider model is active, every field it defines is protected from lower layers until you manually clear credentials via `/auth`. The final `generationConfig` is the projection across all layers—lower layers only fill gaps left by higher ones, and the provider layer remains impenetrable.
The merge strategy for `modelProviders` is REPLACE: the entire `modelProviders` from project settings will override the corresponding section in user settings, rather than merging the two.
##### Generation config layering
Per-field precedence for `generationConfig`:
1. Programmatic overrides (e.g. runtime `/model`, `/auth` changes)
2. `modelProviders[authType][].generationConfig`
3. `settings.model.generationConfig`
4. Content-generator defaults (`getDefaultGenerationConfig` for OpenAI, `getParameterValue` for Gemini, etc.)
`samplingParams` is treated atomically; provider values replace the entire object. Defaults from the content generator apply last so each provider retains its tuned baseline.
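As a sketch of this layering (hypothetical types and helper, not the core package's actual API), the merge below fills each field from the lowest layer upward while swapping `samplingParams` out as one atomic object:
```ts
// Hypothetical shapes for illustration only.
interface SamplingParams {
  temperature?: number;
  top_p?: number;
}

interface GenerationConfig {
  timeout?: number;
  maxRetries?: number;
  samplingParams?: SamplingParams;
}

// Merge from lowest to highest precedence: later layers win per field,
// and samplingParams is replaced wholesale rather than key-by-key.
function mergeGenerationConfig(
  ...layersLowToHigh: Array<GenerationConfig | undefined>
): GenerationConfig {
  return layersLowToHigh.reduce<GenerationConfig>((acc, layer) => {
    if (!layer) return acc;
    return {
      ...acc,
      ...layer,
      samplingParams: layer.samplingParams ?? acc.samplingParams,
    };
  }, {});
}

// Example: the provider's samplingParams replaces the settings-level object entirely,
// so top_p from settings does not leak into the provider's tuned baseline.
const generatorDefaults: GenerationConfig = { timeout: 30000, maxRetries: 2 };
const settingsLayer: GenerationConfig = { samplingParams: { temperature: 0.7, top_p: 0.9 } };
const providerLayer: GenerationConfig = { timeout: 60000, samplingParams: { temperature: 0.2 } };

console.log(mergeGenerationConfig(generatorDefaults, settingsLayer, providerLayer));
// { timeout: 60000, maxRetries: 2, samplingParams: { temperature: 0.2 } }
```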
##### Selection persistence and recommendations
> [!important]
> Define `modelProviders` in the user-scope `~/.qwen/settings.json` whenever possible and avoid persisting credential overrides in any scope. Keeping the provider catalog in user settings prevents merge/override conflicts between project and user scopes and ensures `/auth` and `/model` updates always write back to a consistent scope.
- `/model` and `/auth` persist `model.name` (where applicable) and `security.auth.selectedType` to the closest writable scope that already defines `modelProviders`; otherwise they fall back to the user scope. This keeps workspace/user files in sync with the active provider catalog (the scope selection is sketched below).
- Without `modelProviders`, the resolver mixes CLI/env/settings layers, which works for single-provider setups but becomes cumbersome when you switch models frequently. Define a provider catalog whenever multi-model workflows are common so that switches stay atomic, source-attributed, and easy to debug.
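A minimal sketch of that scope-selection rule (illustrative types; the real settings code may differ in detail):
```ts
// Prefer the closest writable scope that already defines modelProviders;
// otherwise fall back to the user scope.
interface SettingsScope {
  name: 'workspace' | 'user';
  writable: boolean;
  definesModelProviders: boolean;
}

function pickPersistenceScope(scopesClosestFirst: SettingsScope[]): SettingsScope['name'] {
  const match = scopesClosestFirst.find((s) => s.writable && s.definesModelProviders);
  return match ? match.name : 'user';
}

// Example: a workspace settings file without modelProviders is skipped,
// so /model writes back to the user scope.
console.log(
  pickPersistenceScope([
    { name: 'workspace', writable: true, definesModelProviders: false },
    { name: 'user', writable: true, definesModelProviders: true },
  ]),
); // 'user'
```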
#### context
| Setting | Type | Description | Default |
@@ -380,6 +470,8 @@ Arguments passed directly when running the CLI can override other configurations
| `--telemetry-otlp-protocol` | | Sets the OTLP protocol for telemetry (`grpc` or `http`). | | Defaults to `grpc`. See [telemetry](../../developers/development/telemetry) for more information. |
| `--telemetry-log-prompts` | | Enables logging of prompts for telemetry. | | See [telemetry](../../developers/development/telemetry) for more information. |
| `--checkpointing` | | Enables [checkpointing](../features/checkpointing). | | |
| `--acp` | | Enables ACP mode (Agent Client Protocol). Useful for IDE/editor integrations like [Zed](../integration-zed). | | Stable. Replaces the deprecated `--experimental-acp` flag. |
| `--experimental-skills` | | Enables experimental [Agent Skills](../features/skills) (registers the `skill` tool and loads Skills from `.qwen/skills/` and `~/.qwen/skills/`). | | Experimental. |
| `--extensions` | `-e` | Specifies a list of extensions to use for the session. | Extension names | If not provided, all available extensions are used. Use the special term `qwen -e none` to disable all extensions. Example: `qwen -e my-extension -e my-other-extension` |
| `--list-extensions` | `-l` | Lists all available extensions and exits. | | |
| `--proxy` | | Sets the proxy for the CLI. | Proxy URL | Example: `--proxy http://localhost:7890`. |

View File

@@ -1,6 +1,7 @@
export default {
commands: 'Commands',
'sub-agents': 'SubAgents',
skills: 'Skills (Experimental)',
headless: 'Headless Mode',
checkpointing: {
display: 'hidden',

View File

@@ -189,19 +189,20 @@ qwen -p "Write code" --output-format stream-json --include-partial-messages | jq
Key command-line options for headless usage:
| Option | Description | Example |
| ---------------------------- | --------------------------------------------------- | ------------------------------------------------------------------------ |
| `--prompt`, `-p` | Run in headless mode | `qwen -p "query"` |
| `--output-format`, `-o` | Specify output format (text, json, stream-json) | `qwen -p "query" --output-format json` |
| `--input-format` | Specify input format (text, stream-json) | `qwen --input-format text --output-format stream-json` |
| `--include-partial-messages` | Include partial messages in stream-json output | `qwen -p "query" --output-format stream-json --include-partial-messages` |
| `--debug`, `-d` | Enable debug mode | `qwen -p "query" --debug` |
| `--all-files`, `-a` | Include all files in context | `qwen -p "query" --all-files` |
| `--include-directories` | Include additional directories | `qwen -p "query" --include-directories src,docs` |
| `--yolo`, `-y` | Auto-approve all actions | `qwen -p "query" --yolo` |
| `--approval-mode` | Set approval mode | `qwen -p "query" --approval-mode auto_edit` |
| `--continue` | Resume the most recent session for this project | `qwen --continue -p "Pick up where we left off"` |
| `--resume [sessionId]` | Resume a specific session (or choose interactively) | `qwen --resume 123e... -p "Finish the refactor"` |
| Option | Description | Example |
| ---------------------------- | ------------------------------------------------------- | ------------------------------------------------------------------------ |
| `--prompt`, `-p` | Run in headless mode | `qwen -p "query"` |
| `--output-format`, `-o` | Specify output format (text, json, stream-json) | `qwen -p "query" --output-format json` |
| `--input-format` | Specify input format (text, stream-json) | `qwen --input-format text --output-format stream-json` |
| `--include-partial-messages` | Include partial messages in stream-json output | `qwen -p "query" --output-format stream-json --include-partial-messages` |
| `--debug`, `-d` | Enable debug mode | `qwen -p "query" --debug` |
| `--all-files`, `-a` | Include all files in context | `qwen -p "query" --all-files` |
| `--include-directories` | Include additional directories | `qwen -p "query" --include-directories src,docs` |
| `--yolo`, `-y` | Auto-approve all actions | `qwen -p "query" --yolo` |
| `--approval-mode` | Set approval mode | `qwen -p "query" --approval-mode auto_edit` |
| `--continue` | Resume the most recent session for this project | `qwen --continue -p "Pick up where we left off"` |
| `--resume [sessionId]` | Resume a specific session (or choose interactively) | `qwen --resume 123e... -p "Finish the refactor"` |
| `--experimental-skills` | Enable experimental Skills (registers the `skill` tool) | `qwen --experimental-skills -p "What Skills are available?"` |
For complete details on all available configuration options, settings files, and environment variables, see the [Configuration Guide](../configuration/settings).

View File

@@ -49,6 +49,8 @@ Cross-platform sandboxing with complete process isolation.
By default, Qwen Code uses a published sandbox image (configured in the CLI package) and will pull it as needed.
The container sandbox mounts your workspace and your `~/.qwen` directory into the container so auth and settings persist between runs.
**Best for**: Strong isolation on any OS, consistent tooling inside a known image.
### Choosing a method
@@ -157,7 +159,7 @@ For a working allowlist-style proxy example, see: [Example Proxy Script](/develo
## Linux UID/GID handling
The sandbox automatically handles user permissions on Linux. Override these permissions with:
On Linux, Qwen Code defaults to enabling UID/GID mapping so the sandbox runs as your user (and reuses the mounted `~/.qwen`). Override with:
```bash
export SANDBOX_SET_UID_GID=true # Force host UID/GID

View File

@@ -0,0 +1,282 @@
# Agent Skills (Experimental)
> Create, manage, and share Skills to extend Qwen Code's capabilities.
This guide shows you how to create, use, and manage Agent Skills in **Qwen Code**. Skills are modular capabilities that extend the model's effectiveness through organized folders containing instructions (and optionally scripts/resources).
> [!note]
>
> Skills are currently **experimental** and must be enabled with `--experimental-skills`.
## Prerequisites
- Qwen Code (recent version)
- Run with the experimental flag enabled:
```bash
qwen --experimental-skills
```
- Basic familiarity with Qwen Code ([Quickstart](../quickstart.md))
## What are Agent Skills?
Agent Skills package expertise into discoverable capabilities. Each Skill consists of a `SKILL.md` file with instructions that the model can load when relevant, plus optional supporting files like scripts and templates.
### How Skills are invoked
Skills are **model-invoked** — the model autonomously decides when to use them based on your request and the Skill's description. This is different from slash commands, which are **user-invoked** (you explicitly type `/command`).
### Benefits
- Extend Qwen Code for your workflows
- Share expertise across your team via git
- Reduce repetitive prompting
- Compose multiple Skills for complex tasks
## Create a Skill
Skills are stored as directories containing a `SKILL.md` file.
### Personal Skills
Personal Skills are available across all your projects. Store them in `~/.qwen/skills/`:
```bash
mkdir -p ~/.qwen/skills/my-skill-name
```
Use personal Skills for:
- Your individual workflows and preferences
- Experimental Skills you're developing
- Personal productivity helpers
### Project Skills
Project Skills are shared with your team. Store them in `.qwen/skills/` within your project:
```bash
mkdir -p .qwen/skills/my-skill-name
```
Use project Skills for:
- Team workflows and conventions
- Project-specific expertise
- Shared utilities and scripts
Project Skills can be checked into git and automatically become available to teammates.
## Write `SKILL.md`
Create a `SKILL.md` file with YAML frontmatter and Markdown content:
```yaml
---
name: your-skill-name
description: Brief description of what this Skill does and when to use it
---
# Your Skill Name
## Instructions
Provide clear, step-by-step guidance for Qwen Code.
## Examples
Show concrete examples of using this Skill.
```
### Field requirements
Qwen Code currently validates that:
- `name` is a non-empty string
- `description` is a non-empty string
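In practice, those two checks amount to something like the following sketch (a rough illustration, not Qwen Code's actual loader; the `yaml` package import is an assumption):
```ts
import { readFileSync } from 'node:fs';
import { parse } from 'yaml'; // assumed dependency for this sketch

interface SkillMetadata {
  name: string;
  description: string;
}

// Pull the YAML frontmatter out of SKILL.md and verify the two required fields
// are non-empty strings.
function validateSkillFile(path: string): SkillMetadata {
  const text = readFileSync(path, 'utf8');
  const match = text.match(/^---\r?\n([\s\S]*?)\r?\n---/);
  if (!match) {
    throw new Error(`${path}: missing YAML frontmatter`);
  }
  const data = parse(match[1]) as Partial<SkillMetadata>;
  if (typeof data.name !== 'string' || data.name.trim() === '') {
    throw new Error(`${path}: 'name' must be a non-empty string`);
  }
  if (typeof data.description !== 'string' || data.description.trim() === '') {
    throw new Error(`${path}: 'description' must be a non-empty string`);
  }
  return { name: data.name, description: data.description };
}

// Example (hypothetical path):
// validateSkillFile(`${process.env['HOME']}/.qwen/skills/my-skill/SKILL.md`);
```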
Recommended conventions (not strictly enforced yet):
- Use lowercase letters, numbers, and hyphens in `name`
- Make `description` specific: include both **what** the Skill does and **when** to use it (key words users will naturally mention)
## Add supporting files
Create additional files alongside `SKILL.md`:
```text
my-skill/
├── SKILL.md (required)
├── reference.md (optional documentation)
├── examples.md (optional examples)
├── scripts/
│   └── helper.py (optional utility)
└── templates/
    └── template.txt (optional template)
```
Reference these files from `SKILL.md`:
````markdown
For advanced usage, see [reference.md](reference.md).
Run the helper script:
```bash
python scripts/helper.py input.txt
```
````
## View available Skills
When `--experimental-skills` is enabled, Qwen Code discovers Skills from:
- Personal Skills: `~/.qwen/skills/`
- Project Skills: `.qwen/skills/`
To view available Skills, ask Qwen Code directly:
```text
What Skills are available?
```
Or inspect the filesystem:
```bash
# List personal Skills
ls ~/.qwen/skills/
# List project Skills (if in a project directory)
ls .qwen/skills/
# View a specific Skill's content
cat ~/.qwen/skills/my-skill/SKILL.md
```
## Test a Skill
After creating a Skill, test it by asking questions that match your description.
Example: if your description mentions “PDF files”:
```text
Can you help me extract text from this PDF?
```
The model autonomously decides to use your Skill if it matches the request — you don't need to explicitly invoke it.
## Debug a Skill
If Qwen Code doesn't use your Skill, check these common issues:
### Make the description specific
Too vague:
```yaml
description: Helps with documents
```
Specific:
```yaml
description: Extract text and tables from PDF files, fill forms, merge documents. Use when working with PDFs, forms, or document extraction.
```
### Verify file path
- Personal Skills: `~/.qwen/skills/<skill-name>/SKILL.md`
- Project Skills: `.qwen/skills/<skill-name>/SKILL.md`
```bash
# Personal
ls ~/.qwen/skills/my-skill/SKILL.md
# Project
ls .qwen/skills/my-skill/SKILL.md
```
### Check YAML syntax
Invalid YAML prevents the Skill metadata from loading correctly.
```bash
head -n 15 SKILL.md
```
Ensure:
- Opening `---` on line 1
- Closing `---` before Markdown content
- Valid YAML syntax (no tabs, correct indentation)
### View errors
Run Qwen Code with debug mode to see Skill loading errors:
```bash
qwen --experimental-skills --debug
```
## Share Skills with your team
You can share Skills through project repositories:
1. Add the Skill under `.qwen/skills/`
2. Commit and push
3. Teammates pull the changes and run with `--experimental-skills`
```bash
git add .qwen/skills/
git commit -m "Add team Skill for PDF processing"
git push
```
## Update a Skill
Edit `SKILL.md` directly:
```bash
# Personal Skill
code ~/.qwen/skills/my-skill/SKILL.md
# Project Skill
code .qwen/skills/my-skill/SKILL.md
```
Changes take effect the next time you start Qwen Code. If Qwen Code is already running, restart it to load the updates.
## Remove a Skill
Delete the Skill directory:
```bash
# Personal
rm -rf ~/.qwen/skills/my-skill
# Project
git rm -r .qwen/skills/my-skill
git commit -m "Remove unused Skill"
```
## Best practices
### Keep Skills focused
One Skill should address one capability:
- Focused: “PDF form filling”, “Excel analysis”, “Git commit messages”
- Too broad: “Document processing” (split into smaller Skills)
### Write clear descriptions
Help the model discover when to use Skills by including specific triggers:
```yaml
description: Analyze Excel spreadsheets, create pivot tables, and generate charts. Use when working with Excel files, spreadsheets, or .xlsx data.
```
### Test with your team
- Does the Skill activate when expected?
- Are the instructions clear?
- Are there missing examples or edge cases?

View File

@@ -32,7 +32,7 @@
"Qwen Code": {
"type": "custom",
"command": "qwen",
"args": ["--experimental-acp"],
"args": ["--acp"],
"env": {}
}
```

View File

@@ -1,5 +1,8 @@
# Qwen Code overview
[![@qwen-code/qwen-code downloads](https://img.shields.io/npm/dw/@qwen-code/qwen-code.svg)](https://npm-compare.com/@qwen-code/qwen-code)
[![@qwen-code/qwen-code version](https://img.shields.io/npm/v/@qwen-code/qwen-code.svg)](https://www.npmjs.com/package/@qwen-code/qwen-code)
> Learn about Qwen Code, Qwen's agentic coding tool that lives in your terminal and helps you turn ideas into code faster than ever before.
## Get started in 30 seconds
@@ -46,7 +49,7 @@ You'll be prompted to log in on first use. That's it! [Continue with Quickstart
> [!note]
>
> **New VS Code Extension (Beta)**: Prefer a graphical interface? Our new **VS Code extension** provides an easy-to-use native IDE experience without requiring terminal familiarity. Simply install from the marketplace and start coding with Qwen Code directly in your sidebar. You can search for **Qwen Code** in the VS Code Marketplace and download it.
> **New VS Code Extension (Beta)**: Prefer a graphical interface? Our new **VS Code extension** provides an easy-to-use native IDE experience without requiring terminal familiarity. Simply install from the marketplace and start coding with Qwen Code directly in your sidebar. Download and install the [Qwen Code Companion](https://marketplace.visualstudio.com/items?itemName=qwenlm.qwen-code-vscode-ide-companion) now.
## What Qwen Code does for you

View File

@@ -9,11 +9,18 @@ This guide provides solutions to common issues and debugging tips, including top
## Authentication or login errors
- **Error: `UNABLE_TO_GET_ISSUER_CERT_LOCALLY` or `unable to get local issuer certificate`**
- **Error: `UNABLE_TO_GET_ISSUER_CERT_LOCALLY`, `UNABLE_TO_VERIFY_LEAF_SIGNATURE`, or `unable to get local issuer certificate`**
- **Cause:** You may be on a corporate network with a firewall that intercepts and inspects SSL/TLS traffic. This often requires a custom root CA certificate to be trusted by Node.js.
- **Solution:** Set the `NODE_EXTRA_CA_CERTS` environment variable to the absolute path of your corporate root CA certificate file.
- Example: `export NODE_EXTRA_CA_CERTS=/path/to/your/corporate-ca.crt`
- **Error: `Device authorization flow failed: fetch failed`**
- **Cause:** Node.js could not reach Qwen OAuth endpoints (often a proxy or SSL/TLS trust issue). When available, Qwen Code will also print the underlying error cause (for example: `UNABLE_TO_VERIFY_LEAF_SIGNATURE`).
- **Solution:**
- Confirm you can access `https://chat.qwen.ai` from the same machine/network.
- If you are behind a proxy, set it via `qwen --proxy <url>` (or the `proxy` setting in `settings.json`).
- If your network uses a corporate TLS inspection CA, set `NODE_EXTRA_CA_CERTS` as described above.
- **Issue: Unable to display UI after authentication failure**
- **Cause:** If authentication fails after selecting an authentication type, the `security.auth.selectedType` setting may be persisted in `settings.json`. On restart, the CLI may get stuck trying to authenticate with the failed auth type and fail to display the UI.
- **Solution:** Clear the `security.auth.selectedType` configuration item in your `settings.json` file:

View File

@@ -80,10 +80,11 @@ type PermissionHandler = (
/**
* Sets up an ACP test environment with all necessary utilities.
* @param useNewFlag - If true, uses --acp; if false, uses --experimental-acp (for backward compatibility testing)
*/
function setupAcpTest(
rig: TestRig,
options?: { permissionHandler?: PermissionHandler },
options?: { permissionHandler?: PermissionHandler; useNewFlag?: boolean },
) {
const pending = new Map<number, PendingRequest>();
let nextRequestId = 1;
@@ -95,9 +96,13 @@ function setupAcpTest(
const permissionHandler =
options?.permissionHandler ?? (() => ({ optionId: 'proceed_once' }));
// Use --acp by default, but allow testing with --experimental-acp for backward compatibility
const acpFlag =
options?.useNewFlag !== false ? '--acp' : '--experimental-acp';
const agent = spawn(
'node',
[rig.bundlePath, '--experimental-acp', '--no-chat-recording'],
[rig.bundlePath, acpFlag, '--no-chat-recording'],
{
cwd: rig.testDir!,
stdio: ['pipe', 'pipe', 'pipe'],
@@ -621,3 +626,99 @@ function setupAcpTest(
}
});
});
(IS_SANDBOX ? describe.skip : describe)(
'acp flag backward compatibility',
() => {
it('should work with deprecated --experimental-acp flag and show warning', async () => {
const rig = new TestRig();
rig.setup('acp backward compatibility');
const { sendRequest, cleanup, stderr } = setupAcpTest(rig, {
useNewFlag: false,
});
try {
const initResult = await sendRequest('initialize', {
protocolVersion: 1,
clientCapabilities: {
fs: { readTextFile: true, writeTextFile: true },
},
});
expect(initResult).toBeDefined();
// Verify deprecation warning is shown
const stderrOutput = stderr.join('');
expect(stderrOutput).toContain('--experimental-acp is deprecated');
expect(stderrOutput).toContain('Please use --acp instead');
await sendRequest('authenticate', { methodId: 'openai' });
const newSession = (await sendRequest('session/new', {
cwd: rig.testDir!,
mcpServers: [],
})) as { sessionId: string };
expect(newSession.sessionId).toBeTruthy();
// Verify functionality still works
const promptResult = await sendRequest('session/prompt', {
sessionId: newSession.sessionId,
prompt: [{ type: 'text', text: 'Say hello.' }],
});
expect(promptResult).toBeDefined();
} catch (e) {
if (stderr.length) {
console.error('Agent stderr:', stderr.join(''));
}
throw e;
} finally {
await cleanup();
}
});
it('should work with new --acp flag without warnings', async () => {
const rig = new TestRig();
rig.setup('acp new flag');
const { sendRequest, cleanup, stderr } = setupAcpTest(rig, {
useNewFlag: true,
});
try {
const initResult = await sendRequest('initialize', {
protocolVersion: 1,
clientCapabilities: {
fs: { readTextFile: true, writeTextFile: true },
},
});
expect(initResult).toBeDefined();
// Verify no deprecation warning is shown
const stderrOutput = stderr.join('');
expect(stderrOutput).not.toContain('--experimental-acp is deprecated');
await sendRequest('authenticate', { methodId: 'openai' });
const newSession = (await sendRequest('session/new', {
cwd: rig.testDir!,
mcpServers: [],
})) as { sessionId: string };
expect(newSession.sessionId).toBeTruthy();
// Verify functionality works
const promptResult = await sendRequest('session/prompt', {
sessionId: newSession.sessionId,
prompt: [{ type: 'text', text: 'Say hello.' }],
});
expect(promptResult).toBeDefined();
} catch (e) {
if (stderr.length) {
console.error('Agent stderr:', stderr.join(''));
}
throw e;
} finally {
await cleanup();
}
});
},
);

View File

@@ -5,8 +5,6 @@
*/
import { describe, it, expect } from 'vitest';
import { existsSync } from 'node:fs';
import * as path from 'node:path';
import { TestRig, printDebugInfo, validateModelOutput } from './test-helper.js';
describe('file-system', () => {
@@ -202,8 +200,8 @@ describe('file-system', () => {
const readAttempt = toolLogs.find(
(log) => log.toolRequest.name === 'read_file',
);
const writeAttempt = toolLogs.find(
(log) => log.toolRequest.name === 'write_file',
const editAttempt = toolLogs.find(
(log) => log.toolRequest.name === 'edit_file',
);
const successfulReplace = toolLogs.find(
(log) => log.toolRequest.name === 'replace' && log.toolRequest.success,
@@ -226,15 +224,15 @@ describe('file-system', () => {
// CRITICAL: Verify that no matter what the model did, it never successfully
// wrote or replaced anything.
if (writeAttempt) {
if (editAttempt) {
console.error(
'A write_file attempt was made when no file should be written.',
'An edit_file attempt was made when no file should be written.',
);
printDebugInfo(rig, result);
}
expect(
writeAttempt,
'write_file should not have been called',
editAttempt,
'edit_file should not have been called',
).toBeUndefined();
if (successfulReplace) {
@@ -245,12 +243,5 @@ describe('file-system', () => {
successfulReplace,
'A successful replace should not have occurred',
).toBeUndefined();
// Final verification: ensure the file was not created.
const filePath = path.join(rig.testDir!, fileName);
const fileExists = existsSync(filePath);
expect(fileExists, 'The non-existent file should not be created').toBe(
false,
);
});
});

View File

@@ -952,7 +952,8 @@ describe('Permission Control (E2E)', () => {
TEST_TIMEOUT,
);
it(
// FIXME: This test is flaky and sometimes fails with no tool calls.
it.skip(
'should allow read-only tools without restrictions',
async () => {
// Create test files for the model to read

View File

@@ -314,4 +314,88 @@ describe('System Control (E2E)', () => {
);
});
});
describe('supportedCommands API', () => {
it('should return list of supported slash commands', async () => {
const sessionId = crypto.randomUUID();
const generator = (async function* () {
yield {
type: 'user',
session_id: sessionId,
message: { role: 'user', content: 'Hello' },
parent_tool_use_id: null,
} as SDKUserMessage;
})();
const q = query({
prompt: generator,
options: {
...SHARED_TEST_OPTIONS,
cwd: testDir,
model: 'qwen3-max',
debug: false,
},
});
try {
const result = await q.supportedCommands();
// Start consuming messages to trigger initialization
const messageConsumer = (async () => {
try {
for await (const _message of q) {
// Just consume messages
}
} catch (error) {
// Ignore errors from query being closed
if (error instanceof Error && error.message !== 'Query is closed') {
throw error;
}
}
})();
// Verify result structure
expect(result).toBeDefined();
expect(result).toHaveProperty('commands');
expect(Array.isArray(result?.['commands'])).toBe(true);
const commands = result?.['commands'] as string[];
// Verify default allowed built-in commands are present
expect(commands).toContain('init');
expect(commands).toContain('summary');
expect(commands).toContain('compress');
// Verify commands are sorted
const sortedCommands = [...commands].sort();
expect(commands).toEqual(sortedCommands);
// Verify all commands are strings
commands.forEach((cmd) => {
expect(typeof cmd).toBe('string');
expect(cmd.length).toBeGreaterThan(0);
});
await q.close();
await messageConsumer;
} catch (error) {
await q.close();
throw error;
}
});
it('should throw error when supportedCommands is called on closed query', async () => {
const q = query({
prompt: 'Hello',
options: {
...SHARED_TEST_OPTIONS,
cwd: testDir,
model: 'qwen3-max',
},
});
await q.close();
await expect(q.supportedCommands()).rejects.toThrow('Query is closed');
});
});
});

2086
package-lock.json generated

File diff suppressed because it is too large

View File

@@ -1,6 +1,6 @@
{
"name": "@qwen-code/qwen-code",
"version": "0.6.0",
"version": "0.7.0",
"engines": {
"node": ">=20.0.0"
},
@@ -13,14 +13,11 @@
"url": "git+https://github.com/QwenLM/qwen-code.git"
},
"config": {
"sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.6.0"
"sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.7.0"
},
"scripts": {
"start": "cross-env node scripts/start.js",
"debug": "cross-env DEBUG=1 node --inspect-brk scripts/start.js",
"auth:npm": "npx google-artifactregistry-auth",
"auth:docker": "gcloud auth configure-docker us-west1-docker.pkg.dev",
"auth": "npm run auth:npm && npm run auth:docker",
"generate": "node scripts/generate-git-commit-info.js",
"build": "node scripts/build.js",
"build-and-start": "npm run build && npm run start",
@@ -95,7 +92,6 @@
"eslint-plugin-react-hooks": "^5.2.0",
"glob": "^10.5.0",
"globals": "^16.0.0",
"google-artifactregistry-auth": "^3.4.0",
"husky": "^9.1.7",
"json": "^11.0.0",
"lint-staged": "^16.1.6",

View File

@@ -1,6 +1,6 @@
{
"name": "@qwen-code/qwen-code",
"version": "0.6.0",
"version": "0.7.0",
"description": "Qwen Code",
"repository": {
"type": "git",
@@ -33,13 +33,13 @@
"dist"
],
"config": {
"sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.6.0"
"sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.7.0"
},
"dependencies": {
"@google/genai": "1.16.0",
"@google/genai": "1.30.0",
"@iarna/toml": "^2.2.5",
"@qwen-code/qwen-code-core": "file:../core",
"@modelcontextprotocol/sdk": "^1.15.1",
"@modelcontextprotocol/sdk": "^1.25.1",
"@types/update-notifier": "^6.0.8",
"ansi-regex": "^6.2.2",
"command-exists": "^1.2.9",

View File

@@ -98,6 +98,14 @@ export class AgentSideConnection implements Client {
);
}
/**
* Sends a custom notification to the client.
* Used for extension-specific notifications that are not part of the core ACP protocol.
*/
async sendCustomNotification<T>(method: string, params: T): Promise<void> {
return await this.#connection.sendNotification(method, params);
}
/**
* Request permission before running a tool
*
@@ -374,6 +382,7 @@ export interface Client {
): Promise<schema.RequestPermissionResponse>;
sessionUpdate(params: schema.SessionNotification): Promise<void>;
authenticateUpdate(params: schema.AuthenticateUpdate): Promise<void>;
sendCustomNotification<T>(method: string, params: T): Promise<void>;
writeTextFile(
params: schema.WriteTextFileRequest,
): Promise<schema.WriteTextFileResponse>;

View File

@@ -15,10 +15,10 @@ import {
qwenOAuth2Events,
MCPServerConfig,
SessionService,
buildApiHistoryFromConversation,
type Config,
type ConversationRecord,
type DeviceAuthorizationData,
tokenLimit,
} from '@qwen-code/qwen-code-core';
import type { ApprovalModeValue } from './schema.js';
import * as acp from './acp.js';
@@ -165,9 +165,30 @@ class GeminiAgent {
this.setupFileSystem(config);
const session = await this.createAndStoreSession(config);
const configuredModel = (
config.getModel() ||
this.config.getModel() ||
''
).trim();
const modelId = configuredModel || 'default';
const modelName = configuredModel || modelId;
return {
sessionId: session.getId(),
models: {
currentModelId: modelId,
availableModels: [
{
modelId,
name: modelName,
description: null,
_meta: {
contextLimit: tokenLimit(modelId),
},
},
],
_meta: null,
},
};
}
@@ -327,12 +348,20 @@ class GeminiAgent {
const sessionId = config.getSessionId();
const geminiClient = config.getGeminiClient();
const history = conversation
? buildApiHistoryFromConversation(conversation)
: undefined;
const chat = history
? await geminiClient.startChat(history)
: await geminiClient.startChat();
// Use GeminiClient to manage chat lifecycle properly
// This ensures geminiClient.chat is in sync with the session's chat
//
// Note: When loading a session, config.initialize() has already been called
// in newSessionConfig(), which in turn calls geminiClient.initialize().
// The GeminiClient.initialize() method checks config.getResumedSessionData()
// and automatically loads the conversation history into the chat instance.
// So we only need to initialize if it hasn't been done yet.
if (!geminiClient.isInitialized()) {
await geminiClient.initialize();
}
// Now get the chat instance that's managed by GeminiClient
const chat = geminiClient.getChat();
const session = new Session(
sessionId,

View File

@@ -93,6 +93,7 @@ export type ModeInfo = z.infer<typeof modeInfoSchema>;
export type ModesData = z.infer<typeof modesDataSchema>;
export type AgentInfo = z.infer<typeof agentInfoSchema>;
export type ModelInfo = z.infer<typeof modelInfoSchema>;
export type PromptCapabilities = z.infer<typeof promptCapabilitiesSchema>;
@@ -254,8 +255,26 @@ export const authenticateUpdateSchema = z.object({
export type AuthenticateUpdate = z.infer<typeof authenticateUpdateSchema>;
export const acpMetaSchema = z.record(z.unknown()).nullable().optional();
export const modelIdSchema = z.string();
export const modelInfoSchema = z.object({
_meta: acpMetaSchema,
description: z.string().nullable().optional(),
modelId: modelIdSchema,
name: z.string(),
});
export const sessionModelStateSchema = z.object({
_meta: acpMetaSchema,
availableModels: z.array(modelInfoSchema),
currentModelId: modelIdSchema,
});
export const newSessionResponseSchema = z.object({
sessionId: z.string(),
models: sessionModelStateSchema,
});
export const loadSessionResponseSchema = z.null();
@@ -514,6 +533,13 @@ export const currentModeUpdateSchema = z.object({
export type CurrentModeUpdate = z.infer<typeof currentModeUpdateSchema>;
export const currentModelUpdateSchema = z.object({
sessionUpdate: z.literal('current_model_update'),
model: modelInfoSchema,
});
export type CurrentModelUpdate = z.infer<typeof currentModelUpdateSchema>;
export const sessionUpdateSchema = z.union([
z.object({
content: contentBlockSchema,
@@ -555,6 +581,7 @@ export const sessionUpdateSchema = z.union([
sessionUpdate: z.literal('plan'),
}),
currentModeUpdateSchema,
currentModelUpdateSchema,
availableCommandsUpdateSchema,
]);

View File

@@ -41,9 +41,11 @@ import * as fs from 'node:fs/promises';
import * as path from 'node:path';
import { z } from 'zod';
import { getErrorMessage } from '../../utils/errors.js';
import { normalizePartList } from '../../utils/nonInteractiveHelpers.js';
import {
handleSlashCommand,
getAvailableCommands,
type NonInteractiveSlashCommandResult,
} from '../../nonInteractiveCliCommands.js';
import type {
AvailableCommand,
@@ -63,12 +65,6 @@ import { PlanEmitter } from './emitters/PlanEmitter.js';
import { MessageEmitter } from './emitters/MessageEmitter.js';
import { SubAgentTracker } from './SubAgentTracker.js';
/**
* Built-in commands that are allowed in ACP integration mode.
* Only safe, read-only commands that don't require interactive UI.
*/
export const ALLOWED_BUILTIN_COMMANDS_FOR_ACP = ['init'];
/**
* Session represents an active conversation session with the AI model.
* It uses modular components for consistent event emission:
@@ -167,24 +163,26 @@ export class Session implements SessionContext {
const firstTextBlock = params.prompt.find((block) => block.type === 'text');
const inputText = firstTextBlock?.text || '';
let parts: Part[];
let parts: Part[] | null;
if (isSlashCommand(inputText)) {
// Handle slash command - allow specific built-in commands for ACP integration
// Handle slash command - uses default allowed commands (init, summary, compress)
const slashCommandResult = await handleSlashCommand(
inputText,
pendingSend,
this.config,
this.settings,
ALLOWED_BUILTIN_COMMANDS_FOR_ACP,
);
if (slashCommandResult) {
// Use the result from the slash command
parts = slashCommandResult as Part[];
} else {
// Slash command didn't return a prompt, continue with normal processing
parts = await this.#resolvePrompt(params.prompt, pendingSend.signal);
parts = await this.#processSlashCommandResult(
slashCommandResult,
params.prompt,
);
// If parts is null, the command was fully handled (e.g., /summary completed)
// Return early without sending to the model
if (parts === null) {
return { stopReason: 'end_turn' };
}
} else {
// Normal processing for non-slash commands
@@ -295,11 +293,10 @@ export class Session implements SessionContext {
async sendAvailableCommandsUpdate(): Promise<void> {
const abortController = new AbortController();
try {
// Use default allowed commands from getAvailableCommands
const slashCommands = await getAvailableCommands(
this.config,
this.settings,
abortController.signal,
ALLOWED_BUILTIN_COMMANDS_FOR_ACP,
);
// Convert SlashCommand[] to AvailableCommand[] format for ACP protocol
@@ -647,6 +644,103 @@ export class Session implements SessionContext {
}
}
/**
* Processes the result of a slash command execution.
*
* Supported result types in ACP mode:
* - submit_prompt: Submits content to the model
* - stream_messages: Streams multiple messages to the client (ACP-specific)
* - unsupported: Command cannot be executed in ACP mode
* - no_command: No command was found, use original prompt
*
* Note: 'message' results are forwarded to the client as a custom notification for
* compatibility, but commands should prefer 'stream_messages' in ACP mode for
* consistent async handling.
*
* @param result The result from handleSlashCommand
* @param originalPrompt The original prompt blocks
* @returns Parts to use for the prompt, or null if command was handled without needing model interaction
*/
async #processSlashCommandResult(
result: NonInteractiveSlashCommandResult,
originalPrompt: acp.ContentBlock[],
): Promise<Part[] | null> {
switch (result.type) {
case 'submit_prompt':
// Command wants to submit a prompt to the model
// Convert PartListUnion to Part[]
return normalizePartList(result.content);
case 'message': {
// 'message' type is not ideal for ACP mode, but we handle it for compatibility
// by converting it to a stream_messages-like notification
await this.client.sendCustomNotification('_qwencode/slash_command', {
sessionId: this.sessionId,
command: originalPrompt
.filter((block) => block.type === 'text')
.map((block) => (block.type === 'text' ? block.text : ''))
.join(' '),
messageType: result.messageType,
message: result.content || '',
});
if (result.messageType === 'error') {
// Throw error to stop execution
throw new Error(result.content || 'Slash command failed.');
}
// For info messages, return null to indicate command was handled
return null;
}
case 'stream_messages': {
// Command returns multiple messages via async generator (ACP-preferred)
const command = originalPrompt
.filter((block) => block.type === 'text')
.map((block) => (block.type === 'text' ? block.text : ''))
.join(' ');
// Stream all messages to the client
for await (const msg of result.messages) {
await this.client.sendCustomNotification('_qwencode/slash_command', {
sessionId: this.sessionId,
command,
messageType: msg.messageType,
message: msg.content,
});
// If we encounter an error message, throw after sending
if (msg.messageType === 'error') {
throw new Error(msg.content || 'Slash command failed.');
}
}
// All messages sent successfully, return null to indicate command was handled
return null;
}
case 'unsupported': {
// Command returned an unsupported result type
const unsupportedError = `Slash command not supported in ACP integration: ${result.reason}`;
throw new Error(unsupportedError);
}
case 'no_command':
// No command was found or executed, use original prompt
return originalPrompt.map((block) => {
if (block.type === 'text') {
return { text: block.text };
}
throw new Error(`Unsupported block type: ${block.type}`);
});
default: {
// Exhaustiveness check
const _exhaustive: never = result;
const unknownError = `Unknown slash command result type: ${(_exhaustive as NonInteractiveSlashCommandResult).type}`;
throw new Error(unknownError);
}
}
}
async #resolvePrompt(
message: acp.ContentBlock[],
abortSignal: AbortSignal,

View File

@@ -1,41 +1,112 @@
/**
* @license
* Copyright 2025 Google LLC
* Copyright 2025 Qwen Team
* SPDX-License-Identifier: Apache-2.0
*/
import { AuthType } from '@qwen-code/qwen-code-core';
import { vi } from 'vitest';
import { validateAuthMethod } from './auth.js';
import * as settings from './settings.js';
vi.mock('./settings.js', () => ({
loadEnvironment: vi.fn(),
loadSettings: vi.fn().mockReturnValue({
merged: vi.fn().mockReturnValue({}),
merged: {},
}),
}));
describe('validateAuthMethod', () => {
beforeEach(() => {
vi.resetModules();
// Reset mock to default
vi.mocked(settings.loadSettings).mockReturnValue({
merged: {},
} as ReturnType<typeof settings.loadSettings>);
});
afterEach(() => {
vi.unstubAllEnvs();
delete process.env['OPENAI_API_KEY'];
delete process.env['CUSTOM_API_KEY'];
delete process.env['GEMINI_API_KEY'];
delete process.env['GEMINI_API_KEY_ALTERED'];
delete process.env['ANTHROPIC_API_KEY'];
delete process.env['ANTHROPIC_BASE_URL'];
delete process.env['GOOGLE_API_KEY'];
});
it('should return null for USE_OPENAI', () => {
it('should return null for USE_OPENAI with default env key', () => {
process.env['OPENAI_API_KEY'] = 'fake-key';
expect(validateAuthMethod(AuthType.USE_OPENAI)).toBeNull();
});
it('should return an error message for USE_OPENAI if OPENAI_API_KEY is not set', () => {
delete process.env['OPENAI_API_KEY'];
it('should return an error message for USE_OPENAI if no API key is available', () => {
expect(validateAuthMethod(AuthType.USE_OPENAI)).toBe(
'OPENAI_API_KEY environment variable not found. You can enter it interactively or add it to your .env file.',
"Missing API key for OpenAI-compatible auth. Set settings.security.auth.apiKey, or set the 'OPENAI_API_KEY' environment variable.",
);
});
it('should return null for USE_OPENAI with custom envKey from modelProviders', () => {
vi.mocked(settings.loadSettings).mockReturnValue({
merged: {
model: { name: 'custom-model' },
modelProviders: {
openai: [{ id: 'custom-model', envKey: 'CUSTOM_API_KEY' }],
},
},
} as unknown as ReturnType<typeof settings.loadSettings>);
process.env['CUSTOM_API_KEY'] = 'custom-key';
expect(validateAuthMethod(AuthType.USE_OPENAI)).toBeNull();
});
it('should return error with custom envKey hint when modelProviders envKey is set but env var is missing', () => {
vi.mocked(settings.loadSettings).mockReturnValue({
merged: {
model: { name: 'custom-model' },
modelProviders: {
openai: [{ id: 'custom-model', envKey: 'CUSTOM_API_KEY' }],
},
},
} as unknown as ReturnType<typeof settings.loadSettings>);
const result = validateAuthMethod(AuthType.USE_OPENAI);
expect(result).toContain('CUSTOM_API_KEY');
});
it('should return null for USE_GEMINI with custom envKey', () => {
vi.mocked(settings.loadSettings).mockReturnValue({
merged: {
model: { name: 'gemini-1.5-flash' },
modelProviders: {
gemini: [
{ id: 'gemini-1.5-flash', envKey: 'GEMINI_API_KEY_ALTERED' },
],
},
},
} as unknown as ReturnType<typeof settings.loadSettings>);
process.env['GEMINI_API_KEY_ALTERED'] = 'altered-key';
expect(validateAuthMethod(AuthType.USE_GEMINI)).toBeNull();
});
it('should return error with custom envKey for USE_GEMINI when env var is missing', () => {
vi.mocked(settings.loadSettings).mockReturnValue({
merged: {
model: { name: 'gemini-1.5-flash' },
modelProviders: {
gemini: [
{ id: 'gemini-1.5-flash', envKey: 'GEMINI_API_KEY_ALTERED' },
],
},
},
} as unknown as ReturnType<typeof settings.loadSettings>);
const result = validateAuthMethod(AuthType.USE_GEMINI);
expect(result).toContain('GEMINI_API_KEY_ALTERED');
});
it('should return null for QWEN_OAUTH', () => {
expect(validateAuthMethod(AuthType.QWEN_OAUTH)).toBeNull();
});
@@ -45,4 +116,115 @@ describe('validateAuthMethod', () => {
'Invalid auth method selected.',
);
});
it('should return null for USE_ANTHROPIC with custom envKey and baseUrl', () => {
vi.mocked(settings.loadSettings).mockReturnValue({
merged: {
model: { name: 'claude-3' },
modelProviders: {
anthropic: [
{
id: 'claude-3',
envKey: 'CUSTOM_ANTHROPIC_KEY',
baseUrl: 'https://api.anthropic.com',
},
],
},
},
} as unknown as ReturnType<typeof settings.loadSettings>);
process.env['CUSTOM_ANTHROPIC_KEY'] = 'custom-anthropic-key';
expect(validateAuthMethod(AuthType.USE_ANTHROPIC)).toBeNull();
});
it('should return error for USE_ANTHROPIC when baseUrl is missing', () => {
vi.mocked(settings.loadSettings).mockReturnValue({
merged: {
model: { name: 'claude-3' },
modelProviders: {
anthropic: [{ id: 'claude-3', envKey: 'CUSTOM_ANTHROPIC_KEY' }],
},
},
} as unknown as ReturnType<typeof settings.loadSettings>);
process.env['CUSTOM_ANTHROPIC_KEY'] = 'custom-key';
const result = validateAuthMethod(AuthType.USE_ANTHROPIC);
expect(result).toContain('modelProviders[].baseUrl');
});
it('should return null for USE_VERTEX_AI with custom envKey', () => {
vi.mocked(settings.loadSettings).mockReturnValue({
merged: {
model: { name: 'vertex-model' },
modelProviders: {
'vertex-ai': [
{ id: 'vertex-model', envKey: 'GOOGLE_API_KEY_VERTEX' },
],
},
},
} as unknown as ReturnType<typeof settings.loadSettings>);
process.env['GOOGLE_API_KEY_VERTEX'] = 'vertex-key';
expect(validateAuthMethod(AuthType.USE_VERTEX_AI)).toBeNull();
});
it('should use config.modelsConfig.getModel() when Config is provided', () => {
// Settings has a different model
vi.mocked(settings.loadSettings).mockReturnValue({
merged: {
model: { name: 'settings-model' },
modelProviders: {
openai: [
{ id: 'settings-model', envKey: 'SETTINGS_API_KEY' },
{ id: 'cli-model', envKey: 'CLI_API_KEY' },
],
},
},
} as unknown as ReturnType<typeof settings.loadSettings>);
// Mock Config object that returns a different model (e.g., from CLI args)
const mockConfig = {
modelsConfig: {
getModel: vi.fn().mockReturnValue('cli-model'),
},
} as unknown as import('@qwen-code/qwen-code-core').Config;
// Set the env key for the CLI model, not the settings model
process.env['CLI_API_KEY'] = 'cli-key';
// Should use 'cli-model' from config.modelsConfig.getModel(), not 'settings-model'
const result = validateAuthMethod(AuthType.USE_OPENAI, mockConfig);
expect(result).toBeNull();
expect(mockConfig.modelsConfig.getModel).toHaveBeenCalled();
});
it('should fail validation when Config provides different model without matching env key', () => {
// Clean up any existing env keys first
delete process.env['CLI_API_KEY'];
delete process.env['SETTINGS_API_KEY'];
delete process.env['OPENAI_API_KEY'];
vi.mocked(settings.loadSettings).mockReturnValue({
merged: {
model: { name: 'settings-model' },
modelProviders: {
openai: [
{ id: 'settings-model', envKey: 'SETTINGS_API_KEY' },
{ id: 'cli-model', envKey: 'CLI_API_KEY' },
],
},
},
} as unknown as ReturnType<typeof settings.loadSettings>);
const mockConfig = {
modelsConfig: {
getModel: vi.fn().mockReturnValue('cli-model'),
},
} as unknown as import('@qwen-code/qwen-code-core').Config;
// Don't set CLI_API_KEY - validation should fail
const result = validateAuthMethod(AuthType.USE_OPENAI, mockConfig);
expect(result).not.toBeNull();
expect(result).toContain('CLI_API_KEY');
});
});

View File

@@ -1,21 +1,169 @@
/**
* @license
* Copyright 2025 Google LLC
* Copyright 2025 Qwen Team
* SPDX-License-Identifier: Apache-2.0
*/
import { AuthType } from '@qwen-code/qwen-code-core';
import { loadEnvironment, loadSettings } from './settings.js';
import {
AuthType,
type Config,
type ModelProvidersConfig,
type ProviderModelConfig,
} from '@qwen-code/qwen-code-core';
import { loadEnvironment, loadSettings, type Settings } from './settings.js';
import { t } from '../i18n/index.js';
export function validateAuthMethod(authMethod: string): string | null {
/**
* Default environment variable names for each auth type
*/
const DEFAULT_ENV_KEYS: Record<string, string> = {
[AuthType.USE_OPENAI]: 'OPENAI_API_KEY',
[AuthType.USE_ANTHROPIC]: 'ANTHROPIC_API_KEY',
[AuthType.USE_GEMINI]: 'GEMINI_API_KEY',
[AuthType.USE_VERTEX_AI]: 'GOOGLE_API_KEY',
};
/**
* Find model configuration from modelProviders by authType and modelId
*/
function findModelConfig(
modelProviders: ModelProvidersConfig | undefined,
authType: string,
modelId: string | undefined,
): ProviderModelConfig | undefined {
if (!modelProviders || !modelId) {
return undefined;
}
const models = modelProviders[authType];
if (!Array.isArray(models)) {
return undefined;
}
return models.find((m) => m.id === modelId);
}
/**
* Check if API key is available for the given auth type and model configuration.
* Prioritizes custom envKey from modelProviders over default environment variables.
*/
function hasApiKeyForAuth(
authType: string,
settings: Settings,
config?: Config,
): {
hasKey: boolean;
checkedEnvKey: string | undefined;
isExplicitEnvKey: boolean;
} {
const modelProviders = settings.modelProviders as
| ModelProvidersConfig
| undefined;
// Use config.modelsConfig.getModel() if available for accurate model ID resolution
// that accounts for CLI args, env vars, and settings. Fall back to settings.model.name.
const modelId = config?.modelsConfig.getModel() ?? settings.model?.name;
// Try to find model-specific envKey from modelProviders
const modelConfig = findModelConfig(modelProviders, authType, modelId);
if (modelConfig?.envKey) {
// Explicit envKey configured - only check this env var, no apiKey fallback
const hasKey = !!process.env[modelConfig.envKey];
return {
hasKey,
checkedEnvKey: modelConfig.envKey,
isExplicitEnvKey: true,
};
}
// Using default environment variable - apiKey fallback is allowed
const defaultEnvKey = DEFAULT_ENV_KEYS[authType];
if (defaultEnvKey) {
const hasKey = !!process.env[defaultEnvKey];
if (hasKey) {
return { hasKey, checkedEnvKey: defaultEnvKey, isExplicitEnvKey: false };
}
}
// Also check settings.security.auth.apiKey as fallback (only for default env key)
if (settings.security?.auth?.apiKey) {
return {
hasKey: true,
checkedEnvKey: defaultEnvKey || undefined,
isExplicitEnvKey: false,
};
}
return {
hasKey: false,
checkedEnvKey: defaultEnvKey,
isExplicitEnvKey: false,
};
}
/**
* Generate API key error message based on auth check result.
* Returns null if API key is present, otherwise returns the appropriate error message.
*/
function getApiKeyError(
authMethod: string,
settings: Settings,
config?: Config,
): string | null {
const { hasKey, checkedEnvKey, isExplicitEnvKey } = hasApiKeyForAuth(
authMethod,
settings,
config,
);
if (hasKey) {
return null;
}
const envKeyHint = checkedEnvKey || DEFAULT_ENV_KEYS[authMethod];
if (isExplicitEnvKey) {
return t(
'{{envKeyHint}} environment variable not found. Please set it in your .env file or environment variables.',
{ envKeyHint },
);
}
return t(
'{{envKeyHint}} environment variable not found (or set settings.security.auth.apiKey). Please set it in your .env file or environment variables.',
{ envKeyHint },
);
}
/**
* Validate that the required credentials and configuration exist for the given auth method.
*/
export function validateAuthMethod(
authMethod: string,
config?: Config,
): string | null {
const settings = loadSettings();
loadEnvironment(settings.merged);
if (authMethod === AuthType.USE_OPENAI) {
const hasApiKey =
process.env['OPENAI_API_KEY'] || settings.merged.security?.auth?.apiKey;
if (!hasApiKey) {
return 'OPENAI_API_KEY environment variable not found. You can enter it interactively or add it to your .env file.';
const { hasKey, checkedEnvKey, isExplicitEnvKey } = hasApiKeyForAuth(
authMethod,
settings.merged,
config,
);
if (!hasKey) {
const envKeyHint = checkedEnvKey
? `'${checkedEnvKey}'`
: "'OPENAI_API_KEY'";
if (isExplicitEnvKey) {
// Explicit envKey configured - only suggest setting the env var
return t(
'Missing API key for OpenAI-compatible auth. Set the {{envKeyHint}} environment variable.',
{ envKeyHint },
);
}
// Default env key - can use either apiKey or env var
return t(
'Missing API key for OpenAI-compatible auth. Set settings.security.auth.apiKey, or set the {{envKeyHint}} environment variable.',
{ envKeyHint },
);
}
return null;
}
@@ -26,5 +174,50 @@ export function validateAuthMethod(authMethod: string): string | null {
return null;
}
return 'Invalid auth method selected.';
if (authMethod === AuthType.USE_ANTHROPIC) {
const apiKeyError = getApiKeyError(authMethod, settings.merged, config);
if (apiKeyError) {
return apiKeyError;
}
// Check baseUrl - can come from modelProviders or environment
const modelProviders = settings.merged.modelProviders as
| ModelProvidersConfig
| undefined;
// Use config.modelsConfig.getModel() if available for accurate model ID
const modelId =
config?.modelsConfig.getModel() ?? settings.merged.model?.name;
const modelConfig = findModelConfig(modelProviders, authMethod, modelId);
if (modelConfig && !modelConfig.baseUrl) {
return t(
'Anthropic provider missing required baseUrl in modelProviders[].baseUrl.',
);
}
if (!modelConfig && !process.env['ANTHROPIC_BASE_URL']) {
return t('ANTHROPIC_BASE_URL environment variable not found.');
}
return null;
}
if (authMethod === AuthType.USE_GEMINI) {
const apiKeyError = getApiKeyError(authMethod, settings.merged, config);
if (apiKeyError) {
return apiKeyError;
}
return null;
}
if (authMethod === AuthType.USE_VERTEX_AI) {
const apiKeyError = getApiKeyError(authMethod, settings.merged, config);
if (apiKeyError) {
return apiKeyError;
}
process.env['GOOGLE_GENAI_USE_VERTEXAI'] = 'true';
return null;
}
return t('Invalid auth method selected.');
}

View File

@@ -77,10 +77,8 @@ vi.mock('read-package-up', () => ({
),
}));
vi.mock('@qwen-code/qwen-code-core', async () => {
const actualServer = await vi.importActual<typeof ServerConfig>(
'@qwen-code/qwen-code-core',
);
vi.mock('@qwen-code/qwen-code-core', async (importOriginal) => {
const actualServer = await importOriginal<typeof ServerConfig>();
return {
...actualServer,
IdeClient: {
@@ -1597,6 +1595,58 @@ describe('Approval mode tool exclusion logic', () => {
expect(excludedTools).toContain(WriteFileTool.Name);
});
it('should not exclude a tool explicitly allowed in tools.allowed', async () => {
process.argv = ['node', 'script.js', '-p', 'test'];
const argv = await parseArguments({} as Settings);
const settings: Settings = {
tools: {
allowed: [ShellTool.Name],
},
};
const extensions: Extension[] = [];
const config = await loadCliConfig(
settings,
extensions,
new ExtensionEnablementManager(
ExtensionStorage.getUserExtensionsDir(),
argv.extensions,
),
argv,
);
const excludedTools = config.getExcludeTools();
expect(excludedTools).not.toContain(ShellTool.Name);
expect(excludedTools).toContain(EditTool.Name);
expect(excludedTools).toContain(WriteFileTool.Name);
});
it('should not exclude a tool explicitly allowed in tools.core', async () => {
process.argv = ['node', 'script.js', '-p', 'test'];
const argv = await parseArguments({} as Settings);
const settings: Settings = {
tools: {
core: [ShellTool.Name],
},
};
const extensions: Extension[] = [];
const config = await loadCliConfig(
settings,
extensions,
new ExtensionEnablementManager(
ExtensionStorage.getUserExtensionsDir(),
argv.extensions,
),
argv,
);
const excludedTools = config.getExcludeTools();
expect(excludedTools).not.toContain(ShellTool.Name);
expect(excludedTools).toContain(EditTool.Name);
expect(excludedTools).toContain(WriteFileTool.Name);
});
it('should exclude only shell tools in non-interactive mode with auto-edit approval mode', async () => {
process.argv = [
'node',
@@ -2114,7 +2164,14 @@ describe('loadCliConfig model selection', () => {
});
it('always prefers model from argvs', async () => {
process.argv = ['node', 'script.js', '--model', 'qwen3-coder-plus'];
process.argv = [
'node',
'script.js',
'--auth-type',
'openai',
'--model',
'qwen3-coder-plus',
];
const argv = await parseArguments({} as Settings);
const config = await loadCliConfig(
{
@@ -2134,7 +2191,14 @@ describe('loadCliConfig model selection', () => {
});
it('selects the model from argvs if provided', async () => {
process.argv = ['node', 'script.js', '--model', 'qwen3-coder-plus'];
process.argv = [
'node',
'script.js',
'--auth-type',
'openai',
'--model',
'qwen3-coder-plus',
];
const argv = await parseArguments({} as Settings);
const config = await loadCliConfig(
{

View File

@@ -10,25 +10,31 @@ import {
Config,
DEFAULT_QWEN_EMBEDDING_MODEL,
DEFAULT_MEMORY_FILE_FILTERING_OPTIONS,
EditTool,
FileDiscoveryService,
getCurrentGeminiMdFilename,
loadServerHierarchicalMemory,
setGeminiMdFilename as setServerGeminiMdFilename,
ShellTool,
WriteFileTool,
resolveTelemetrySettings,
FatalConfigError,
Storage,
InputFormat,
OutputFormat,
isToolEnabled,
SessionService,
type ResumedSessionData,
type FileFilteringOptions,
type MCPServerConfig,
type ToolName,
EditTool,
ShellTool,
WriteFileTool,
} from '@qwen-code/qwen-code-core';
import { extensionsCommand } from '../commands/extensions.js';
import type { Settings } from './settings.js';
import {
resolveCliGenerationConfig,
getAuthTypeFromEnv,
} from '../utils/modelConfigUtils.js';
import yargs, { type Argv } from 'yargs';
import { hideBin } from 'yargs/helpers';
import * as fs from 'node:fs';
@@ -111,7 +117,9 @@ export interface CliArgs {
telemetryOutfile: string | undefined;
allowedMcpServerNames: string[] | undefined;
allowedTools: string[] | undefined;
acp: boolean | undefined;
experimentalAcp: boolean | undefined;
experimentalSkills: boolean | undefined;
extensions: string[] | undefined;
listExtensions: boolean | undefined;
openaiLogging: boolean | undefined;
@@ -162,7 +170,17 @@ function normalizeOutputFormat(
}
export async function parseArguments(settings: Settings): Promise<CliArgs> {
const rawArgv = hideBin(process.argv);
let rawArgv = hideBin(process.argv);
// hack: if the first argument is the CLI entry point, remove it
if (
rawArgv.length > 0 &&
(rawArgv[0].endsWith('/dist/qwen-cli/cli.js') ||
rawArgv[0].endsWith('/dist/cli.js'))
) {
rawArgv = rawArgv.slice(1);
}
const yargsInstance = yargs(rawArgv)
.locale('en')
.scriptName('qwen')
@@ -303,10 +321,21 @@ export async function parseArguments(settings: Settings): Promise<CliArgs> {
description: 'Enables checkpointing of file edits',
default: false,
})
.option('experimental-acp', {
.option('acp', {
type: 'boolean',
description: 'Starts the agent in ACP mode',
})
.option('experimental-acp', {
type: 'boolean',
description:
'Starts the agent in ACP mode (deprecated, use --acp instead)',
hidden: true,
})
.option('experimental-skills', {
type: 'boolean',
description: 'Enable experimental Skills feature',
default: false,
})
.option('channel', {
type: 'string',
choices: ['VSCode', 'ACP', 'SDK', 'CI'],
@@ -460,7 +489,13 @@ export async function parseArguments(settings: Settings): Promise<CliArgs> {
})
.option('auth-type', {
type: 'string',
choices: [AuthType.USE_OPENAI, AuthType.QWEN_OAUTH],
choices: [
AuthType.USE_OPENAI,
AuthType.USE_ANTHROPIC,
AuthType.QWEN_OAUTH,
AuthType.USE_GEMINI,
AuthType.USE_VERTEX_AI,
],
description: 'Authentication type',
})
.deprecateOption(
@@ -577,8 +612,19 @@ export async function parseArguments(settings: Settings): Promise<CliArgs> {
// The import format is now only controlled by settings.memoryImportFormat
// We no longer accept it as a CLI argument
// Apply ACP fallback: if experimental-acp is present but no explicit --channel, treat as ACP
if (result['experimentalAcp'] && !result['channel']) {
// Handle deprecated --experimental-acp flag
if (result['experimentalAcp']) {
console.warn(
'\x1b[33m⚠ Warning: --experimental-acp is deprecated and will be removed in a future release. Please use --acp instead.\x1b[0m',
);
// Map experimental-acp to acp if acp is not explicitly set
if (!result['acp']) {
(result as Record<string, unknown>)['acp'] = true;
}
}
// Apply ACP fallback: if acp or experimental-acp is present but no explicit --channel, treat as ACP
if ((result['acp'] || result['experimentalAcp']) && !result['channel']) {
(result as Record<string, unknown>)['channel'] = 'ACP';
}
@@ -806,6 +852,28 @@ export async function loadCliConfig(
// However, if stream-json input is used, control can be requested via JSON messages,
// so tools should not be excluded in that case.
const extraExcludes: string[] = [];
const resolvedCoreTools = argv.coreTools || settings.tools?.core || [];
const resolvedAllowedTools =
argv.allowedTools || settings.tools?.allowed || [];
const isExplicitlyEnabled = (toolName: ToolName): boolean => {
if (resolvedCoreTools.length > 0) {
if (isToolEnabled(toolName, resolvedCoreTools, [])) {
return true;
}
}
if (resolvedAllowedTools.length > 0) {
if (isToolEnabled(toolName, resolvedAllowedTools, [])) {
return true;
}
}
return false;
};
const excludeUnlessExplicit = (toolName: ToolName): void => {
if (!isExplicitlyEnabled(toolName)) {
extraExcludes.push(toolName);
}
};
if (
!interactive &&
!argv.experimentalAcp &&
@@ -814,12 +882,15 @@ export async function loadCliConfig(
switch (approvalMode) {
case ApprovalMode.PLAN:
case ApprovalMode.DEFAULT:
// In default non-interactive mode, all tools that require approval are excluded.
extraExcludes.push(ShellTool.Name, EditTool.Name, WriteFileTool.Name);
// In default non-interactive mode, all tools that require approval are excluded,
// unless explicitly enabled via coreTools/allowedTools.
excludeUnlessExplicit(ShellTool.Name as ToolName);
excludeUnlessExplicit(EditTool.Name as ToolName);
excludeUnlessExplicit(WriteFileTool.Name as ToolName);
break;
case ApprovalMode.AUTO_EDIT:
// In auto-edit non-interactive mode, only tools that still require a prompt are excluded.
extraExcludes.push(ShellTool.Name);
excludeUnlessExplicit(ShellTool.Name as ToolName);
break;
case ApprovalMode.YOLO:
// No extra excludes for YOLO mode.
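The switch above only excludes approval-requiring tools from non-interactive runs when they have not been explicitly enabled through coreTools or allowedTools. A self-contained sketch of that rule; the exact-name matching in isToolEnabled below is an assumption (the real helper may also understand patterns), and the tool names are placeholders:

// Illustrative re-implementation of the "exclude unless explicitly enabled" rule.
function isToolEnabled(toolName: string, enabledList: string[]): boolean {
  // Assumption: plain name matching; the real helper may support richer matching.
  return enabledList.includes(toolName);
}

function computeExtraExcludes(
  approvalTools: string[], // tools that normally require an approval prompt
  coreTools: string[],     // from --core-tools / settings.tools.core
  allowedTools: string[],  // from --allowed-tools / settings.tools.allowed
): string[] {
  const isExplicitlyEnabled = (name: string) =>
    isToolEnabled(name, coreTools) || isToolEnabled(name, allowedTools);
  return approvalTools.filter((name) => !isExplicitlyEnabled(name));
}

// computeExtraExcludes(['shell', 'edit', 'write_file'], ['shell'], [])
//   -> ['edit', 'write_file']  (shell stays available because it was explicitly enabled)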
@@ -865,11 +936,27 @@ export async function loadCliConfig(
);
}
const resolvedModel =
argv.model ||
process.env['OPENAI_MODEL'] ||
process.env['QWEN_MODEL'] ||
settings.model?.name;
const selectedAuthType =
(argv.authType as AuthType | undefined) ||
settings.security?.auth?.selectedType ||
/* If no authType was explicitly provided, getAuthTypeFromEnv infers it from environment variables */
getAuthTypeFromEnv();
// Unified resolution of generation config with source attribution
const resolvedCliConfig = resolveCliGenerationConfig({
argv: {
model: argv.model,
openaiApiKey: argv.openaiApiKey,
openaiBaseUrl: argv.openaiBaseUrl,
openaiLogging: argv.openaiLogging,
openaiLoggingDir: argv.openaiLoggingDir,
},
settings,
selectedAuthType,
env: process.env as Record<string, string | undefined>,
});
const { model: resolvedModel } = resolvedCliConfig;
const sandboxConfig = await loadSandboxConfig(settings, argv);
const screenReader =
@@ -903,6 +990,8 @@ export async function loadCliConfig(
}
}
const modelProvidersConfig = settings.modelProviders;
return new Config({
sessionId,
sessionData,
@@ -950,41 +1039,21 @@ export async function loadCliConfig(
sessionTokenLimit: settings.model?.sessionTokenLimit ?? -1,
maxSessionTurns:
argv.maxSessionTurns ?? settings.model?.maxSessionTurns ?? -1,
experimentalZedIntegration: argv.experimentalAcp || false,
experimentalZedIntegration: argv.acp || argv.experimentalAcp || false,
experimentalSkills: argv.experimentalSkills || false,
listExtensions: argv.listExtensions || false,
extensions: allExtensions,
blockedMcpServers,
noBrowser: !!process.env['NO_BROWSER'],
authType:
(argv.authType as AuthType | undefined) ||
settings.security?.auth?.selectedType,
authType: selectedAuthType,
inputFormat,
outputFormat,
includePartialMessages,
generationConfig: {
...(settings.model?.generationConfig || {}),
model: resolvedModel,
apiKey:
argv.openaiApiKey ||
process.env['OPENAI_API_KEY'] ||
settings.security?.auth?.apiKey,
baseUrl:
argv.openaiBaseUrl ||
process.env['OPENAI_BASE_URL'] ||
settings.security?.auth?.baseUrl,
enableOpenAILogging:
(typeof argv.openaiLogging === 'undefined'
? settings.model?.enableOpenAILogging
: argv.openaiLogging) ?? false,
openAILoggingDir:
argv.openaiLoggingDir || settings.model?.openAILoggingDir,
},
modelProvidersConfig,
generationConfigSources: resolvedCliConfig.sources,
generationConfig: resolvedCliConfig.generationConfig,
cliVersion: await getCliVersion(),
webSearch: buildWebSearchConfig(
argv,
settings,
settings.security?.auth?.selectedType,
),
webSearch: buildWebSearchConfig(argv, settings, selectedAuthType),
summarizeToolOutput: settings.model?.summarizeToolOutput,
ideMode,
chatCompression: settings.model?.chatCompression,

View File
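The loadCliConfig changes above replace the inline model/apiKey/baseUrl fallback chains with a single resolveCliGenerationConfig call that also records where each effective value came from (generationConfigSources). A rough sketch of that kind of CLI > environment > settings precedence with source attribution; the result shape and the pick helper below are assumptions based only on the fields visible in the diff:

// Rough sketch of precedence resolution with source attribution.
type ConfigSource = 'cli' | 'env' | 'settings' | 'default';

interface ResolvedValue<T> {
  value: T | undefined;
  source: ConfigSource;
}

function pick<T>(
  cli: T | undefined,
  env: T | undefined,
  settings: T | undefined,
): ResolvedValue<T> {
  if (cli !== undefined) return { value: cli, source: 'cli' };
  if (env !== undefined) return { value: env, source: 'env' };
  if (settings !== undefined) return { value: settings, source: 'settings' };
  return { value: undefined, source: 'default' };
}

// Mirrors the model fallback chain (argv.model, then OPENAI_MODEL/QWEN_MODEL,
// then settings.model?.name); 'qwen3-coder-plus' is an illustrative settings value.
const model = pick(
  undefined,                                                // argv.model not given
  process.env['OPENAI_MODEL'] ?? process.env['QWEN_MODEL'], // environment
  'qwen3-coder-plus',                                       // settings value
);
// model.source tells later UI layers where the effective value came from.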

@@ -56,6 +56,17 @@ vi.mock('simple-git', () => ({
}),
}));
vi.mock('./extensions/github.js', async (importOriginal) => {
const actual =
await importOriginal<typeof import('./extensions/github.js')>();
return {
...actual,
downloadFromGitHubRelease: vi
.fn()
.mockRejectedValue(new Error('Mocked GitHub release download failure')),
};
});
vi.mock('os', async (importOriginal) => {
const mockedOs = await importOriginal<typeof os>();
return {

View File

@@ -41,6 +41,17 @@ vi.mock('simple-git', () => ({
}),
}));
vi.mock('../extensions/github.js', async (importOriginal) => {
const actual =
await importOriginal<typeof import('../extensions/github.js')>();
return {
...actual,
downloadFromGitHubRelease: vi
.fn()
.mockRejectedValue(new Error('Mocked GitHub release download failure')),
};
});
vi.mock('os', async (importOriginal) => {
const mockedOs = await importOriginal<typeof os>();
return {

View File

@@ -0,0 +1,87 @@
/**
* @license
* Copyright 2025 Qwen Team
* SPDX-License-Identifier: Apache-2.0
*/
import { describe, expect, it } from 'vitest';
import { SettingScope } from './settings.js';
import { getPersistScopeForModelSelection } from './modelProvidersScope.js';
function makeSettings({
isTrusted,
userModelProviders,
workspaceModelProviders,
}: {
isTrusted: boolean;
userModelProviders?: unknown;
workspaceModelProviders?: unknown;
}) {
const userSettings: Record<string, unknown> = {};
const workspaceSettings: Record<string, unknown> = {};
// When undefined, treat as "not present in this scope" (the key is omitted),
// matching how LoadedSettings is shaped when a settings file doesn't define it.
if (userModelProviders !== undefined) {
userSettings['modelProviders'] = userModelProviders;
}
if (workspaceModelProviders !== undefined) {
workspaceSettings['modelProviders'] = workspaceModelProviders;
}
return {
isTrusted,
user: { settings: userSettings },
workspace: { settings: workspaceSettings },
} as unknown as import('./settings.js').LoadedSettings;
}
describe('getPersistScopeForModelSelection', () => {
it('prefers workspace when trusted and workspace defines modelProviders', () => {
const settings = makeSettings({
isTrusted: true,
workspaceModelProviders: {},
userModelProviders: { anything: true },
});
expect(getPersistScopeForModelSelection(settings)).toBe(
SettingScope.Workspace,
);
});
it('falls back to user when workspace does not define modelProviders', () => {
const settings = makeSettings({
isTrusted: true,
workspaceModelProviders: undefined,
userModelProviders: {},
});
expect(getPersistScopeForModelSelection(settings)).toBe(SettingScope.User);
});
it('ignores workspace modelProviders when workspace is untrusted', () => {
const settings = makeSettings({
isTrusted: false,
workspaceModelProviders: {},
userModelProviders: undefined,
});
expect(getPersistScopeForModelSelection(settings)).toBe(SettingScope.User);
});
it('falls back to legacy trust heuristic when neither scope defines modelProviders', () => {
const trusted = makeSettings({
isTrusted: true,
userModelProviders: undefined,
workspaceModelProviders: undefined,
});
expect(getPersistScopeForModelSelection(trusted)).toBe(SettingScope.User);
const untrusted = makeSettings({
isTrusted: false,
userModelProviders: undefined,
workspaceModelProviders: undefined,
});
expect(getPersistScopeForModelSelection(untrusted)).toBe(SettingScope.User);
});
});

View File

@@ -0,0 +1,48 @@
/**
* @license
* Copyright 2025 Qwen Team
* SPDX-License-Identifier: Apache-2.0
*/
import { SettingScope, type LoadedSettings } from './settings.js';
function hasOwnModelProviders(settingsObj: unknown): boolean {
if (!settingsObj || typeof settingsObj !== 'object') {
return false;
}
const obj = settingsObj as Record<string, unknown>;
// Treat an explicitly configured empty object (modelProviders: {}) as "owned"
// by this scope, which is important when mergeStrategy is REPLACE.
return Object.prototype.hasOwnProperty.call(obj, 'modelProviders');
}
/**
* Returns which writable scope (Workspace/User) owns the effective modelProviders
* configuration.
*
* Note: Workspace scope is only considered when the workspace is trusted.
*/
export function getModelProvidersOwnerScope(
settings: LoadedSettings,
): SettingScope | undefined {
if (settings.isTrusted && hasOwnModelProviders(settings.workspace.settings)) {
return SettingScope.Workspace;
}
if (hasOwnModelProviders(settings.user.settings)) {
return SettingScope.User;
}
return undefined;
}
/**
* Choose the settings scope to persist a model selection.
* Prefer persisting back to the scope that contains the effective modelProviders
* config, otherwise fall back to the legacy trust-based heuristic.
*/
export function getPersistScopeForModelSelection(
settings: LoadedSettings,
): SettingScope {
return getModelProvidersOwnerScope(settings) ?? SettingScope.User;
}
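A small usage sketch for the helper above, with the LoadedSettings literal abbreviated and cast the same way the accompanying tests do:

// Abbreviated usage sketch; the settings literal is cast as in the tests above.
import { SettingScope, type LoadedSettings } from './settings.js';
import { getPersistScopeForModelSelection } from './modelProvidersScope.js';

const settings = {
  isTrusted: true,
  user: { settings: {} },
  workspace: { settings: { modelProviders: {} } },
} as unknown as LoadedSettings;

// The trusted workspace owns modelProviders, so model selections persist there.
console.log(getPersistScopeForModelSelection(settings) === SettingScope.Workspace); // true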

View File

@@ -55,6 +55,7 @@ import { disableExtension } from './extension.js';
// These imports will get the versions from the vi.mock('./settings.js', ...) factory.
import {
getSettingsWarnings,
loadSettings,
USER_SETTINGS_PATH, // This IS the mocked path.
getSystemSettingsPath,
@@ -418,6 +419,86 @@ describe('Settings Loading and Merging', () => {
});
});
it('should warn about ignored legacy keys in a v2 settings file', () => {
(mockFsExistsSync as Mock).mockImplementation(
(p: fs.PathLike) => p === USER_SETTINGS_PATH,
);
const userSettingsContent = {
[SETTINGS_VERSION_KEY]: SETTINGS_VERSION,
usageStatisticsEnabled: false,
};
(fs.readFileSync as Mock).mockImplementation(
(p: fs.PathOrFileDescriptor) => {
if (p === USER_SETTINGS_PATH)
return JSON.stringify(userSettingsContent);
return '{}';
},
);
const settings = loadSettings(MOCK_WORKSPACE_DIR);
expect(getSettingsWarnings(settings)).toEqual(
expect.arrayContaining([
expect.stringContaining(
"Legacy setting 'usageStatisticsEnabled' will be ignored",
),
]),
);
expect(getSettingsWarnings(settings)).toEqual(
expect.arrayContaining([
expect.stringContaining("'privacy.usageStatisticsEnabled'"),
]),
);
});
it('should warn about unknown top-level keys in a v2 settings file', () => {
(mockFsExistsSync as Mock).mockImplementation(
(p: fs.PathLike) => p === USER_SETTINGS_PATH,
);
const userSettingsContent = {
[SETTINGS_VERSION_KEY]: SETTINGS_VERSION,
someUnknownKey: 'value',
};
(fs.readFileSync as Mock).mockImplementation(
(p: fs.PathOrFileDescriptor) => {
if (p === USER_SETTINGS_PATH)
return JSON.stringify(userSettingsContent);
return '{}';
},
);
const settings = loadSettings(MOCK_WORKSPACE_DIR);
expect(getSettingsWarnings(settings)).toEqual(
expect.arrayContaining([
expect.stringContaining(
"Unknown setting 'someUnknownKey' will be ignored",
),
]),
);
});
it('should not warn for valid v2 container keys', () => {
(mockFsExistsSync as Mock).mockImplementation(
(p: fs.PathLike) => p === USER_SETTINGS_PATH,
);
const userSettingsContent = {
[SETTINGS_VERSION_KEY]: SETTINGS_VERSION,
model: { name: 'qwen-coder' },
};
(fs.readFileSync as Mock).mockImplementation(
(p: fs.PathOrFileDescriptor) => {
if (p === USER_SETTINGS_PATH)
return JSON.stringify(userSettingsContent);
return '{}';
},
);
const settings = loadSettings(MOCK_WORKSPACE_DIR);
expect(getSettingsWarnings(settings)).toEqual([]);
});
it('should rewrite allowedTools to tools.allowed during migration', () => {
(mockFsExistsSync as Mock).mockImplementation(
(p: fs.PathLike) => p === USER_SETTINGS_PATH,

View File

@@ -344,6 +344,97 @@ const KNOWN_V2_CONTAINERS = new Set(
Object.values(MIGRATION_MAP).map((path) => path.split('.')[0]),
);
function getSettingsFileKeyWarnings(
settings: Record<string, unknown>,
settingsFilePath: string,
): string[] {
const version = settings[SETTINGS_VERSION_KEY];
if (typeof version !== 'number' || version < SETTINGS_VERSION) {
return [];
}
const warnings: string[] = [];
const ignoredLegacyKeys = new Set<string>();
// Ignored legacy keys (V1 top-level keys that moved to a nested V2 path).
for (const [oldKey, newPath] of Object.entries(MIGRATION_MAP)) {
if (oldKey === newPath) {
continue;
}
if (!(oldKey in settings)) {
continue;
}
const oldValue = settings[oldKey];
// If this key is a V2 container (like 'model') and it's already an object,
// it's likely already in V2 format. Don't warn.
if (
KNOWN_V2_CONTAINERS.has(oldKey) &&
typeof oldValue === 'object' &&
oldValue !== null &&
!Array.isArray(oldValue)
) {
continue;
}
ignoredLegacyKeys.add(oldKey);
warnings.push(
`⚠️ Legacy setting '${oldKey}' will be ignored in ${settingsFilePath}. Please use '${newPath}' instead.`,
);
}
// Unknown top-level keys.
const schemaKeys = new Set(Object.keys(getSettingsSchema()));
for (const key of Object.keys(settings)) {
if (key === SETTINGS_VERSION_KEY) {
continue;
}
if (ignoredLegacyKeys.has(key)) {
continue;
}
if (schemaKeys.has(key)) {
continue;
}
warnings.push(
`⚠️ Unknown setting '${key}' will be ignored in ${settingsFilePath}.`,
);
}
return warnings;
}
/**
* Collects warnings for ignored legacy and unknown settings keys.
*
* For `$version: 2` settings files, we do not apply implicit migrations.
* Instead, we surface actionable, de-duplicated warnings in the terminal UI.
*/
export function getSettingsWarnings(loadedSettings: LoadedSettings): string[] {
const warningSet = new Set<string>();
for (const scope of [SettingScope.User, SettingScope.Workspace]) {
const settingsFile = loadedSettings.forScope(scope);
if (settingsFile.rawJson === undefined) {
continue; // File not present / not loaded.
}
const settingsObject = settingsFile.originalSettings as unknown as Record<
string,
unknown
>;
for (const warning of getSettingsFileKeyWarnings(
settingsObject,
settingsFile.path,
)) {
warningSet.add(warning);
}
}
return [...warningSet];
}
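To make the warning rules concrete, an illustrative input and the warnings it would produce, using the message wording from the code above; the settings file path and the literal '$version' key are assumptions:

// Illustrative v2 settings object that still carries legacy and unknown keys.
const v2SettingsWithStrayKeys = {
  $version: 2,                   // assumed spelling of SETTINGS_VERSION_KEY / SETTINGS_VERSION
  usageStatisticsEnabled: false, // legacy V1 key that moved to privacy.usageStatisticsEnabled
  someUnknownKey: 'value',       // not present in the schema at all
  model: { name: 'qwen-coder' }, // valid V2 container: no warning
};

// Expected warnings (de-duplicated across scopes), assuming a user settings path:
//   ⚠️ Legacy setting 'usageStatisticsEnabled' will be ignored in ~/.qwen/settings.json. Please use 'privacy.usageStatisticsEnabled' instead.
//   ⚠️ Unknown setting 'someUnknownKey' will be ignored in ~/.qwen/settings.json.
void v2SettingsWithStrayKeys;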
export function migrateSettingsToV1(
v2Settings: Record<string, unknown>,
): Record<string, unknown> {

View File

@@ -10,6 +10,7 @@ import type {
TelemetrySettings,
AuthType,
ChatCompressionSettings,
ModelProvidersConfig,
} from '@qwen-code/qwen-code-core';
import {
ApprovalMode,
@@ -102,6 +103,19 @@ const SETTINGS_SCHEMA = {
mergeStrategy: MergeStrategy.SHALLOW_MERGE,
},
// Model providers configuration grouped by authType
modelProviders: {
type: 'object',
label: 'Model Providers',
category: 'Model',
requiresRestart: false,
default: {} as ModelProvidersConfig,
description:
'Model providers configuration grouped by authType. Each authType contains an array of model configurations.',
showInDialog: false,
mergeStrategy: MergeStrategy.REPLACE,
},
general: {
type: 'object',
label: 'General',
@@ -202,6 +216,7 @@ const SETTINGS_SCHEMA = {
{ value: 'en', label: 'English' },
{ value: 'zh', label: '中文 (Chinese)' },
{ value: 'ru', label: 'Русский (Russian)' },
{ value: 'de', label: 'Deutsch (German)' },
],
},
terminalBell: {

View File
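The new modelProviders schema entry above groups provider configuration by authType, with each authType holding an array of model entries and REPLACE merge semantics, so a workspace-level object overrides the user-level one wholesale rather than being deep-merged. An illustration of the implied shape; the authType keys and the per-entry field names are assumptions:

// Illustrative shape only: grouping by authType and the array-of-models layout come
// from the schema description above; the keys and entry fields are assumed.
const modelProviders = {
  openai: [{ id: 'my-openai-model', baseUrl: 'https://example.com/v1' }],
  anthropic: [{ id: 'my-anthropic-model', baseUrl: 'https://example.com/anthropic' }],
};

// With MergeStrategy.REPLACE, a workspace-level `modelProviders` replaces the
// user-level object entirely instead of being merged into it.
void modelProviders;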

@@ -45,7 +45,9 @@ export async function initializeApp(
// Auto-detect and set LLM output language on first use
initializeLlmOutputLanguage();
const authType = settings.merged.security?.auth?.selectedType;
// Use the authType from modelsConfig, which respects the CLI --auth-type argument
// over settings.security.auth.selectedType
const authType = config.modelsConfig.getCurrentAuthType();
const authError = await performInitialAuth(config, authType);
// Fallback to user select when initial authentication fails
@@ -59,7 +61,7 @@ export async function initializeApp(
const themeError = validateTheme(settings);
const shouldOpenAuthDialog =
settings.merged.security?.auth?.selectedType === undefined || !!authError;
!config.modelsConfig.wasAuthTypeExplicitlyProvided() || !!authError;
if (config.getIdeMode()) {
const ideClient = await IdeClient.getInstance();

View File

@@ -87,6 +87,15 @@ vi.mock('./config/sandboxConfig.js', () => ({
loadSandboxConfig: vi.fn(),
}));
vi.mock('./core/initializer.js', () => ({
initializeApp: vi.fn().mockResolvedValue({
authError: null,
themeError: null,
shouldOpenAuthDialog: false,
geminiMdFileCount: 0,
}),
}));
describe('gemini.tsx main function', () => {
let originalEnvGeminiSandbox: string | undefined;
let originalEnvSandbox: string | undefined;
@@ -362,7 +371,6 @@ describe('gemini.tsx main function', () => {
expect(inputArg).toBe('hello stream');
expect(validateAuthSpy).toHaveBeenCalledWith(
undefined,
undefined,
configStub,
expect.any(Object),
@@ -460,7 +468,9 @@ describe('gemini.tsx main function kitty protocol', () => {
telemetryOutfile: undefined,
allowedMcpServerNames: undefined,
allowedTools: undefined,
acp: undefined,
experimentalAcp: undefined,
experimentalSkills: undefined,
extensions: undefined,
listExtensions: undefined,
openaiLogging: undefined,
@@ -638,4 +648,37 @@ describe('startInteractiveUI', () => {
await new Promise((resolve) => setTimeout(resolve, 0));
expect(checkForUpdates).toHaveBeenCalledTimes(1);
});
it('should not check for updates when update nag is disabled', async () => {
const { checkForUpdates } = await import('./ui/utils/updateCheck.js');
const mockInitializationResult = {
authError: null,
themeError: null,
shouldOpenAuthDialog: false,
geminiMdFileCount: 0,
};
const settingsWithUpdateNagDisabled = {
merged: {
general: {
disableUpdateNag: true,
},
ui: {
hideWindowTitle: false,
},
},
} as LoadedSettings;
await startInteractiveUI(
mockConfig,
settingsWithUpdateNagDisabled,
mockStartupWarnings,
mockWorkspaceRoot,
mockInitializationResult,
);
await new Promise((resolve) => setTimeout(resolve, 0));
expect(checkForUpdates).not.toHaveBeenCalled();
});
});

View File

@@ -5,12 +5,7 @@
*/
import type { Config } from '@qwen-code/qwen-code-core';
import {
AuthType,
getOauthClient,
InputFormat,
logUserPrompt,
} from '@qwen-code/qwen-code-core';
import { InputFormat, logUserPrompt } from '@qwen-code/qwen-code-core';
import { render } from 'ink';
import dns from 'node:dns';
import os from 'node:os';
@@ -22,7 +17,11 @@ import * as cliConfig from './config/config.js';
import { loadCliConfig, parseArguments } from './config/config.js';
import { ExtensionStorage, loadExtensions } from './config/extension.js';
import type { DnsResolutionOrder, LoadedSettings } from './config/settings.js';
import { loadSettings, migrateDeprecatedSettings } from './config/settings.js';
import {
getSettingsWarnings,
loadSettings,
migrateDeprecatedSettings,
} from './config/settings.js';
import {
initializeApp,
type InitializationResult,
@@ -188,16 +187,18 @@ export async function startInteractiveUI(
},
);
checkForUpdates()
.then((info) => {
handleAutoUpdate(info, settings, config.getProjectRoot());
})
.catch((err) => {
// Silently ignore update check errors.
if (config.getDebugMode()) {
console.error('Update check failed:', err);
}
});
if (!settings.merged.general?.disableUpdateNag) {
checkForUpdates()
.then((info) => {
handleAutoUpdate(info, settings, config.getProjectRoot());
})
.catch((err) => {
// Silently ignore update check errors.
if (config.getDebugMode()) {
console.error('Update check failed:', err);
}
});
}
registerCleanup(() => instance.unmount());
}
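The update check above is now gated on settings.merged.general?.disableUpdateNag. A minimal sketch of the merged settings shape the gate reads; the surrounding wiring is omitted:

// Minimal sketch of the merged settings object inspected by the gate above.
const merged = {
  general: {
    disableUpdateNag: true, // suppresses checkForUpdates()/handleAutoUpdate at startup
  },
};

if (!merged.general?.disableUpdateNag) {
  // checkForUpdates() would only be scheduled on this branch.
}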
@@ -255,22 +256,20 @@ export async function main() {
argv,
);
if (
settings.merged.security?.auth?.selectedType &&
!settings.merged.security?.auth?.useExternal
) {
if (!settings.merged.security?.auth?.useExternal) {
// Validate authentication here because the sandbox will interfere with the Oauth2 web redirect.
try {
const err = validateAuthMethod(
settings.merged.security.auth.selectedType,
);
if (err) {
throw new Error(err);
}
const authType = partialConfig.modelsConfig.getCurrentAuthType();
// Fresh users may not have selected/persisted an authType yet.
// In that case, defer auth prompting/selection to the main interactive flow.
if (authType) {
const err = validateAuthMethod(authType, partialConfig);
if (err) {
throw new Error(err);
}
await partialConfig.refreshAuth(
settings.merged.security.auth.selectedType,
);
await partialConfig.refreshAuth(authType);
}
} catch (err) {
console.error('Error authenticating:', err);
process.exit(1);
@@ -399,27 +398,21 @@ export async function main() {
initializationResult = await initializeApp(config, settings);
}
if (
settings.merged.security?.auth?.selectedType ===
AuthType.LOGIN_WITH_GOOGLE &&
config.isBrowserLaunchSuppressed()
) {
// Do oauth before app renders to make copying the link possible.
await getOauthClient(settings.merged.security.auth.selectedType, config);
}
if (config.getExperimentalZedIntegration()) {
return runAcpAgent(config, settings, extensions, argv);
}
let input = config.getQuestion();
const startupWarnings = [
...(await getStartupWarnings()),
...(await getUserStartupWarnings({
workspaceRoot: process.cwd(),
useRipgrep: settings.merged.tools?.useRipgrep ?? true,
useBuiltinRipgrep: settings.merged.tools?.useBuiltinRipgrep ?? true,
})),
...new Set([
...(await getStartupWarnings()),
...(await getUserStartupWarnings({
workspaceRoot: process.cwd(),
useRipgrep: settings.merged.tools?.useRipgrep ?? true,
useBuiltinRipgrep: settings.merged.tools?.useBuiltinRipgrep ?? true,
})),
...getSettingsWarnings(settings),
]),
];
// Render UI, passing necessary config values. Check that there is no command line question.
@@ -452,8 +445,6 @@ export async function main() {
}
const nonInteractiveConfig = await validateNonInteractiveAuth(
(argv.authType as AuthType) ||
settings.merged.security?.auth?.selectedType,
settings.merged.security?.auth?.useExternal,
config,
settings,

File diff suppressed because it is too large

View File

@@ -89,6 +89,9 @@ export default {
'No tools available': 'No tools available',
'View or change the approval mode for tool usage':
'View or change the approval mode for tool usage',
'Invalid approval mode "{{arg}}". Valid modes: {{modes}}':
'Invalid approval mode "{{arg}}". Valid modes: {{modes}}',
'Approval mode set to "{{mode}}"': 'Approval mode set to "{{mode}}"',
'View or change the language setting': 'View or change the language setting',
'change the theme': 'change the theme',
'Select Theme': 'Select Theme',
@@ -258,6 +261,8 @@ export default {
', Tab to change focus': ', Tab to change focus',
'To see changes, Qwen Code must be restarted. Press r to exit and apply changes now.':
'To see changes, Qwen Code must be restarted. Press r to exit and apply changes now.',
'The command "/{{command}}" is not supported in non-interactive mode.':
'The command "/{{command}}" is not supported in non-interactive mode.',
// ============================================================================
// Settings Labels
// ============================================================================
@@ -590,6 +595,12 @@ export default {
'No conversation found to summarize.': 'No conversation found to summarize.',
'Failed to generate project context summary: {{error}}':
'Failed to generate project context summary: {{error}}',
'Saved project summary to {{filePathForDisplay}}.':
'Saved project summary to {{filePathForDisplay}}.',
'Saving project summary...': 'Saving project summary...',
'Generating project summary...': 'Generating project summary...',
'Failed to generate summary - no text content received from LLM response':
'Failed to generate summary - no text content received from LLM response',
// ============================================================================
// Commands - Model
@@ -759,6 +770,21 @@ export default {
'Authentication timed out. Please try again.',
'Waiting for auth... (Press ESC or CTRL+C to cancel)':
'Waiting for auth... (Press ESC or CTRL+C to cancel)',
'Missing API key for OpenAI-compatible auth. Set settings.security.auth.apiKey, or set the {{envKeyHint}} environment variable.':
'Missing API key for OpenAI-compatible auth. Set settings.security.auth.apiKey, or set the {{envKeyHint}} environment variable.',
'{{envKeyHint}} environment variable not found.':
'{{envKeyHint}} environment variable not found.',
'{{envKeyHint}} environment variable not found. Please set it in your .env file or environment variables.':
'{{envKeyHint}} environment variable not found. Please set it in your .env file or environment variables.',
'{{envKeyHint}} environment variable not found (or set settings.security.auth.apiKey). Please set it in your .env file or environment variables.':
'{{envKeyHint}} environment variable not found (or set settings.security.auth.apiKey). Please set it in your .env file or environment variables.',
'Missing API key for OpenAI-compatible auth. Set the {{envKeyHint}} environment variable.':
'Missing API key for OpenAI-compatible auth. Set the {{envKeyHint}} environment variable.',
'Anthropic provider missing required baseUrl in modelProviders[].baseUrl.':
'Anthropic provider missing required baseUrl in modelProviders[].baseUrl.',
'ANTHROPIC_BASE_URL environment variable not found.':
'ANTHROPIC_BASE_URL environment variable not found.',
'Invalid auth method selected.': 'Invalid auth method selected.',
'Failed to authenticate. Message: {{message}}':
'Failed to authenticate. Message: {{message}}',
'Authenticated successfully with {{authType}} credentials.':
@@ -780,6 +806,15 @@ export default {
// ============================================================================
'Select Model': 'Select Model',
'(Press Esc to close)': '(Press Esc to close)',
'Current (effective) configuration': 'Current (effective) configuration',
AuthType: 'AuthType',
'API Key': 'API Key',
unset: 'unset',
'(default)': '(default)',
'(set)': '(set)',
'(not set)': '(not set)',
"Failed to switch model to '{{modelId}}'.\n\n{{error}}":
"Failed to switch model to '{{modelId}}'.\n\n{{error}}",
'The latest Qwen Coder model from Alibaba Cloud ModelStudio (version: qwen3-coder-plus-2025-09-23)':
'The latest Qwen Coder model from Alibaba Cloud ModelStudio (version: qwen3-coder-plus-2025-09-23)',
'The latest Qwen Vision model from Alibaba Cloud ModelStudio (version: qwen3-vl-plus-2025-09-23)':
@@ -1029,7 +1064,6 @@ export default {
'Applying percussive maintenance...',
'Searching for the correct USB orientation...',
'Ensuring the magic smoke stays inside the wires...',
'Rewriting in Rust for no particular reason...',
'Trying to exit Vim...',
'Spinning up the hamster wheel...',
"That's not a bug, it's an undocumented feature...",

View File

@@ -89,6 +89,10 @@ export default {
'No tools available': 'Нет доступных инструментов',
'View or change the approval mode for tool usage':
'Просмотр или изменение режима подтверждения для использования инструментов',
'Invalid approval mode "{{arg}}". Valid modes: {{modes}}':
'Недопустимый режим подтверждения "{{arg}}". Допустимые режимы: {{modes}}',
'Approval mode set to "{{mode}}"':
'Режим подтверждения установлен на "{{mode}}"',
'View or change the language setting':
'Просмотр или изменение настроек языка',
'change the theme': 'Изменение темы',
@@ -260,7 +264,8 @@ export default {
', Tab to change focus': ', Tab для смены фокуса',
'To see changes, Qwen Code must be restarted. Press r to exit and apply changes now.':
'Для применения изменений необходимо перезапустить Qwen Code. Нажмите r для выхода и применения изменений.',
'The command "/{{command}}" is not supported in non-interactive mode.':
'Команда "/{{command}}" не поддерживается в неинтерактивном режиме.',
// ============================================================================
// Метки настроек
// ============================================================================
@@ -605,6 +610,12 @@ export default {
'Не найдено диалогов для создания сводки.',
'Failed to generate project context summary: {{error}}':
'Не удалось сгенерировать сводку контекста проекта: {{error}}',
'Saved project summary to {{filePathForDisplay}}.':
'Сводка проекта сохранена в {{filePathForDisplay}}',
'Saving project summary...': 'Сохранение сводки проекта...',
'Generating project summary...': 'Генерация сводки проекта...',
'Failed to generate summary - no text content received from LLM response':
'Не удалось сгенерировать сводку - не получен текстовый контент из ответа LLM',
// ============================================================================
// Команды - Модель
@@ -775,6 +786,21 @@ export default {
'Время ожидания авторизации истекло. Пожалуйста, попробуйте снова.',
'Waiting for auth... (Press ESC or CTRL+C to cancel)':
'Ожидание авторизации... (Нажмите ESC или CTRL+C для отмены)',
'Missing API key for OpenAI-compatible auth. Set settings.security.auth.apiKey, or set the {{envKeyHint}} environment variable.':
'Отсутствует API-ключ для аутентификации, совместимой с OpenAI. Укажите settings.security.auth.apiKey или переменную окружения {{envKeyHint}}.',
'{{envKeyHint}} environment variable not found.':
'Переменная окружения {{envKeyHint}} не найдена.',
'{{envKeyHint}} environment variable not found. Please set it in your .env file or environment variables.':
'Переменная окружения {{envKeyHint}} не найдена. Укажите её в файле .env или среди системных переменных.',
'{{envKeyHint}} environment variable not found (or set settings.security.auth.apiKey). Please set it in your .env file or environment variables.':
'Переменная окружения {{envKeyHint}} не найдена (или установите settings.security.auth.apiKey). Укажите её в файле .env или среди системных переменных.',
'Missing API key for OpenAI-compatible auth. Set the {{envKeyHint}} environment variable.':
'Отсутствует API-ключ для аутентификации, совместимой с OpenAI. Установите переменную окружения {{envKeyHint}}.',
'Anthropic provider missing required baseUrl in modelProviders[].baseUrl.':
'У провайдера Anthropic отсутствует обязательный baseUrl в modelProviders[].baseUrl.',
'ANTHROPIC_BASE_URL environment variable not found.':
'Переменная окружения ANTHROPIC_BASE_URL не найдена.',
'Invalid auth method selected.': 'Выбран недопустимый метод авторизации.',
'Failed to authenticate. Message: {{message}}':
'Не удалось авторизоваться. Сообщение: {{message}}',
'Authenticated successfully with {{authType}} credentials.':
@@ -796,6 +822,15 @@ export default {
// ============================================================================
'Select Model': 'Выбрать модель',
'(Press Esc to close)': '(Нажмите Esc для закрытия)',
'Current (effective) configuration': 'Текущая (фактическая) конфигурация',
AuthType: 'Тип авторизации',
'API Key': 'API-ключ',
unset: 'не задано',
'(default)': '(по умолчанию)',
'(set)': '(установлено)',
'(not set)': '(не задано)',
"Failed to switch model to '{{modelId}}'.\n\n{{error}}":
"Не удалось переключиться на модель '{{modelId}}'.\n\n{{error}}",
'The latest Qwen Coder model from Alibaba Cloud ModelStudio (version: qwen3-coder-plus-2025-09-23)':
'Последняя модель Qwen Coder от Alibaba Cloud ModelStudio (версия: qwen3-coder-plus-2025-09-23)',
'The latest Qwen Vision model from Alibaba Cloud ModelStudio (version: qwen3-vl-plus-2025-09-23)':
@@ -1049,7 +1084,6 @@ export default {
'Провожу настройку методом тыка...',
'Ищем, какой стороной вставлять флешку...',
'Следим, чтобы волшебный дым не вышел из проводов...',
'Переписываем всё на Rust без особой причины...',
'Пытаемся выйти из Vim...',
'Раскручиваем колесо для хомяка...',
'Это не баг, а фича...',

View File

@@ -88,6 +88,9 @@ export default {
'No tools available': '没有可用工具',
'View or change the approval mode for tool usage':
'查看或更改工具使用的审批模式',
'Invalid approval mode "{{arg}}". Valid modes: {{modes}}':
'无效的审批模式 "{{arg}}"。有效模式:{{modes}}',
'Approval mode set to "{{mode}}"': '审批模式已设置为 "{{mode}}"',
'View or change the language setting': '查看或更改语言设置',
'change the theme': '更改主题',
'Select Theme': '选择主题',
@@ -249,6 +252,8 @@ export default {
', Tab to change focus': 'Tab 切换焦点',
'To see changes, Qwen Code must be restarted. Press r to exit and apply changes now.':
'要查看更改,必须重启 Qwen Code。按 r 退出并立即应用更改。',
'The command "/{{command}}" is not supported in non-interactive mode.':
'不支持在非交互模式下使用命令 "/{{command}}"。',
// ============================================================================
// Settings Labels
// ============================================================================
@@ -560,6 +565,12 @@ export default {
'No conversation found to summarize.': '未找到要总结的对话',
'Failed to generate project context summary: {{error}}':
'生成项目上下文摘要失败:{{error}}',
'Saved project summary to {{filePathForDisplay}}.':
'项目摘要已保存到 {{filePathForDisplay}}',
'Saving project summary...': '正在保存项目摘要...',
'Generating project summary...': '正在生成项目摘要...',
'Failed to generate summary - no text content received from LLM response':
'生成摘要失败 - 未从 LLM 响应中接收到文本内容',
// ============================================================================
// Commands - Model
@@ -717,6 +728,21 @@ export default {
'Authentication timed out. Please try again.': '认证超时。请重试。',
'Waiting for auth... (Press ESC or CTRL+C to cancel)':
'正在等待认证...(按 ESC 或 CTRL+C 取消)',
'Missing API key for OpenAI-compatible auth. Set settings.security.auth.apiKey, or set the {{envKeyHint}} environment variable.':
'缺少 OpenAI 兼容认证的 API 密钥。请设置 settings.security.auth.apiKey 或设置 {{envKeyHint}} 环境变量。',
'{{envKeyHint}} environment variable not found.':
'未找到 {{envKeyHint}} 环境变量。',
'{{envKeyHint}} environment variable not found. Please set it in your .env file or environment variables.':
'未找到 {{envKeyHint}} 环境变量。请在 .env 文件或系统环境变量中进行设置。',
'{{envKeyHint}} environment variable not found (or set settings.security.auth.apiKey). Please set it in your .env file or environment variables.':
'未找到 {{envKeyHint}} 环境变量(或设置 settings.security.auth.apiKey)。请在 .env 文件或系统环境变量中进行设置。',
'Missing API key for OpenAI-compatible auth. Set the {{envKeyHint}} environment variable.':
'缺少 OpenAI 兼容认证的 API 密钥。请设置 {{envKeyHint}} 环境变量。',
'Anthropic provider missing required baseUrl in modelProviders[].baseUrl.':
'Anthropic 提供商缺少必需的 baseUrl，请在 modelProviders[].baseUrl 中配置。',
'ANTHROPIC_BASE_URL environment variable not found.':
'未找到 ANTHROPIC_BASE_URL 环境变量。',
'Invalid auth method selected.': '选择了无效的认证方式。',
'Failed to authenticate. Message: {{message}}': '认证失败。消息:{{message}}',
'Authenticated successfully with {{authType}} credentials.':
'使用 {{authType}} 凭据成功认证。',
@@ -736,6 +762,15 @@ export default {
// ============================================================================
'Select Model': '选择模型',
'(Press Esc to close)': '(按 Esc 关闭)',
'Current (effective) configuration': '当前(实际生效)配置',
AuthType: '认证方式',
'API Key': 'API 密钥',
unset: '未设置',
'(default)': '(默认)',
'(set)': '(已设置)',
'(not set)': '(未设置)',
"Failed to switch model to '{{modelId}}'.\n\n{{error}}":
"无法切换到模型 '{{modelId}}'.\n\n{{error}}",
'The latest Qwen Coder model from Alibaba Cloud ModelStudio (version: qwen3-coder-plus-2025-09-23)':
'来自阿里云 ModelStudio 的最新 Qwen Coder 模型(版本:qwen3-coder-plus-2025-09-23)',
'The latest Qwen Vision model from Alibaba Cloud ModelStudio (version: qwen3-vl-plus-2025-09-23)':

View File

@@ -20,8 +20,7 @@ import type {
CLIControlSetModelRequest,
CLIMcpServerConfig,
} from '../../types.js';
import { CommandService } from '../../../services/CommandService.js';
import { BuiltinCommandLoader } from '../../../services/BuiltinCommandLoader.js';
import { getAvailableCommands } from '../../../nonInteractiveCliCommands.js';
import {
MCPServerConfig,
AuthProviderType,
@@ -407,7 +406,7 @@ export class SystemController extends BaseController {
}
/**
* Load slash command names using CommandService
* Load slash command names using getAvailableCommands
*
* @param signal - AbortSignal to respect for cancellation
* @returns Promise resolving to array of slash command names
@@ -418,21 +417,14 @@ export class SystemController extends BaseController {
}
try {
const service = await CommandService.create(
[new BuiltinCommandLoader(this.context.config)],
signal,
);
const commands = await getAvailableCommands(this.context.config, signal);
if (signal.aborted) {
return [];
}
const names = new Set<string>();
const commands = service.getCommands();
for (const command of commands) {
names.add(command.name);
}
return Array.from(names).sort();
// Extract command names and sort
return commands.map((cmd) => cmd.name).sort();
} catch (error) {
// Check if the error is due to abort
if (signal.aborted) {

View File
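The SystemController change above drops the CommandService/BuiltinCommandLoader pair in favor of the shared getAvailableCommands helper. A simplified usage sketch that mirrors only the call shape visible in the diff; the relative import path is an assumption outside that file:

// Simplified sketch of listing slash command names via getAvailableCommands.
import type { Config } from '@qwen-code/qwen-code-core';
import { getAvailableCommands } from '../../../nonInteractiveCliCommands.js';

export async function listCommandNames(
  config: Config,
  signal: AbortSignal,
): Promise<string[]> {
  const commands = await getAvailableCommands(config, signal);
  if (signal.aborted) {
    return [];
  }
  // Extract the names and return them sorted, as the controller does.
  return commands.map((cmd) => cmd.name).sort();
}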

@@ -630,6 +630,67 @@ describe('BaseJsonOutputAdapter', () => {
expect(state.blocks).toHaveLength(0);
});
it('should preserve whitespace in thinking content', () => {
const state = adapter.exposeCreateMessageState();
adapter.startAssistantMessage();
adapter.exposeAppendThinking(
state,
'',
'The user just said "Hello"',
null,
);
expect(state.blocks).toHaveLength(1);
expect(state.blocks[0]).toMatchObject({
type: 'thinking',
thinking: 'The user just said "Hello"',
});
// Verify spaces are preserved
const block = state.blocks[0] as { thinking: string };
expect(block.thinking).toContain('user just');
expect(block.thinking).not.toContain('userjust');
});
it('should preserve whitespace when appending multiple thinking fragments', () => {
const state = adapter.exposeCreateMessageState();
adapter.startAssistantMessage();
// Simulate streaming thinking content in fragments
adapter.exposeAppendThinking(state, '', 'The user just', null);
adapter.exposeAppendThinking(state, '', ' said "Hello"', null);
adapter.exposeAppendThinking(
state,
'',
'. This is a simple greeting',
null,
);
expect(state.blocks).toHaveLength(1);
const block = state.blocks[0] as { thinking: string };
// Verify the complete text with all spaces preserved
expect(block.thinking).toBe(
'The user just said "Hello". This is a simple greeting',
);
// Verify specific space preservation
expect(block.thinking).toContain('user just ');
expect(block.thinking).toContain(' said');
expect(block.thinking).toContain('". This');
expect(block.thinking).not.toContain('userjust');
expect(block.thinking).not.toContain('justsaid');
});
it('should preserve leading and trailing whitespace in description', () => {
const state = adapter.exposeCreateMessageState();
adapter.startAssistantMessage();
adapter.exposeAppendThinking(state, '', ' content with spaces ', null);
expect(state.blocks).toHaveLength(1);
const block = state.blocks[0] as { thinking: string };
expect(block.thinking).toBe(' content with spaces ');
});
});
describe('appendToolUse', () => {

View File

@@ -610,8 +610,6 @@ export abstract class BaseJsonOutputAdapter {
const errorText = parseAndFormatApiError(
event.value.error,
this.config.getContentGeneratorConfig()?.authType,
undefined,
this.config.getModel(),
);
this.appendText(state, errorText, null);
break;
@@ -818,9 +816,18 @@ export abstract class BaseJsonOutputAdapter {
parentToolUseId?: string | null,
): void {
const actualParentToolUseId = parentToolUseId ?? null;
const fragment = [subject?.trim(), description?.trim()]
.filter((value) => value && value.length > 0)
.join(': ');
// Build fragment without trimming to preserve whitespace in streaming content
// Only filter out null/undefined/empty values
const parts: string[] = [];
if (subject && subject.length > 0) {
parts.push(subject);
}
if (description && description.length > 0) {
parts.push(description);
}
const fragment = parts.join(': ');
if (!fragment) {
return;
}
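The change above stops trimming streamed thinking fragments before they are appended, which is exactly what the new tests exercise. A tiny self-contained illustration of why trim-then-append loses the spaces at fragment boundaries:

// Why trimming streamed fragments breaks word boundaries (illustrative values).
const fragments = ['The user just', ' said "Hello"'];

// Old behavior: each fragment trimmed before being appended.
const trimmed = fragments.map((f) => f.trim()).join('');
// -> 'The user justsaid "Hello"'  (boundary space lost)

// New behavior: append fragments as-is, skipping only empty values.
const preserved = fragments.filter((f) => f.length > 0).join('');
// -> 'The user just said "Hello"'

console.log(trimmed, preserved);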

View File

@@ -323,6 +323,68 @@ describe('StreamJsonOutputAdapter', () => {
});
});
it('should preserve whitespace in thinking content (issue #1356)', () => {
adapter.processEvent({
type: GeminiEventType.Thought,
value: {
subject: '',
description: 'The user just said "Hello"',
},
});
const message = adapter.finalizeAssistantMessage();
expect(message.message.content).toHaveLength(1);
const block = message.message.content[0] as {
type: string;
thinking: string;
};
expect(block.type).toBe('thinking');
expect(block.thinking).toBe('The user just said "Hello"');
// Verify spaces are preserved
expect(block.thinking).toContain('user just');
expect(block.thinking).not.toContain('userjust');
});
it('should preserve whitespace when streaming multiple thinking fragments (issue #1356)', () => {
// Simulate streaming thinking content in multiple events
adapter.processEvent({
type: GeminiEventType.Thought,
value: {
subject: '',
description: 'The user just',
},
});
adapter.processEvent({
type: GeminiEventType.Thought,
value: {
subject: '',
description: ' said "Hello"',
},
});
adapter.processEvent({
type: GeminiEventType.Thought,
value: {
subject: '',
description: '. This is a simple greeting',
},
});
const message = adapter.finalizeAssistantMessage();
expect(message.message.content).toHaveLength(1);
const block = message.message.content[0] as {
type: string;
thinking: string;
};
expect(block.thinking).toBe(
'The user just said "Hello". This is a simple greeting',
);
// Verify specific spaces are preserved
expect(block.thinking).toContain('user just ');
expect(block.thinking).toContain(' said');
expect(block.thinking).not.toContain('userjust');
expect(block.thinking).not.toContain('justsaid');
});
it('should append tool use from ToolCallRequest events', () => {
adapter.processEvent({
type: GeminiEventType.ToolCallRequest,

View File

@@ -68,6 +68,7 @@ describe('runNonInteractive', () => {
let mockShutdownTelemetry: Mock;
let consoleErrorSpy: MockInstance;
let processStdoutSpy: MockInstance;
let processStderrSpy: MockInstance;
let mockGeminiClient: {
sendMessageStream: Mock;
getChatRecordingService: Mock;
@@ -86,6 +87,9 @@ describe('runNonInteractive', () => {
processStdoutSpy = vi
.spyOn(process.stdout, 'write')
.mockImplementation(() => true);
processStderrSpy = vi
.spyOn(process.stderr, 'write')
.mockImplementation(() => true);
vi.spyOn(process, 'exit').mockImplementation((code) => {
throw new Error(`process.exit(${code}) called`);
});
@@ -139,6 +143,8 @@ describe('runNonInteractive', () => {
setModel: vi.fn(async (model: string) => {
currentModel = model;
}),
getExperimentalZedIntegration: vi.fn().mockReturnValue(false),
isInteractive: vi.fn().mockReturnValue(false),
} as unknown as Config;
mockSettings = {
@@ -292,7 +298,9 @@ describe('runNonInteractive', () => {
mockConfig,
expect.objectContaining({ name: 'testTool' }),
expect.any(AbortSignal),
undefined,
expect.objectContaining({
outputUpdateHandler: expect.any(Function),
}),
);
// Verify first call has isContinuation: false
expect(mockGeminiClient.sendMessageStream).toHaveBeenNthCalledWith(
@@ -765,6 +773,52 @@ describe('runNonInteractive', () => {
);
});
it('should handle API errors in text mode and exit with error code', async () => {
(mockConfig.getOutputFormat as Mock).mockReturnValue(OutputFormat.TEXT);
setupMetricsMock();
// Simulate an API error event (like 401 unauthorized)
const apiErrorEvent: ServerGeminiStreamEvent = {
type: GeminiEventType.Error,
value: {
error: {
message: '401 Incorrect API key provided',
status: 401,
},
},
};
mockGeminiClient.sendMessageStream.mockReturnValue(
createStreamFromEvents([apiErrorEvent]),
);
let thrownError: Error | null = null;
try {
await runNonInteractive(
mockConfig,
mockSettings,
'Test input',
'prompt-id-api-error',
);
// Should not reach here
expect.fail('Expected error to be thrown');
} catch (error) {
thrownError = error as Error;
}
// Should throw with the API error message
expect(thrownError).toBeTruthy();
expect(thrownError?.message).toContain('401');
expect(thrownError?.message).toContain('Incorrect API key provided');
// Verify error was written to stderr
expect(processStderrSpy).toHaveBeenCalled();
const stderrCalls = processStderrSpy.mock.calls;
const errorOutput = stderrCalls.map((call) => call[0]).join('');
expect(errorOutput).toContain('401');
expect(errorOutput).toContain('Incorrect API key provided');
});
it('should handle FatalInputError with custom exit code in JSON format', async () => {
(mockConfig.getOutputFormat as Mock).mockReturnValue(OutputFormat.JSON);
setupMetricsMock();
@@ -852,7 +906,7 @@ describe('runNonInteractive', () => {
expect(processStdoutSpy).toHaveBeenCalledWith('Response from command');
});
it('should throw FatalInputError if a command requires confirmation', async () => {
it('should handle command that requires confirmation by returning early', async () => {
const mockCommand = {
name: 'confirm',
description: 'a command that needs confirmation',
@@ -864,15 +918,16 @@ describe('runNonInteractive', () => {
};
mockGetCommands.mockReturnValue([mockCommand]);
await expect(
runNonInteractive(
mockConfig,
mockSettings,
'/confirm',
'prompt-id-confirm',
),
).rejects.toThrow(
'Exiting due to a confirmation prompt requested by the command.',
await runNonInteractive(
mockConfig,
mockSettings,
'/confirm',
'prompt-id-confirm',
);
// Should write error message to stderr
expect(processStderrSpy).toHaveBeenCalledWith(
'Shell command confirmation is not supported in non-interactive mode. Use YOLO mode or pre-approve commands.\n',
);
});
@@ -909,7 +964,30 @@ describe('runNonInteractive', () => {
expect(processStdoutSpy).toHaveBeenCalledWith('Response to unknown');
});
it('should throw for unhandled command result types', async () => {
it('should handle known but unsupported slash commands like /help by returning early', async () => {
// Mock a built-in command that exists but is not in the allowed list
const mockHelpCommand = {
name: 'help',
description: 'Show help',
kind: CommandKind.BUILT_IN,
action: vi.fn(),
};
mockGetCommands.mockReturnValue([mockHelpCommand]);
await runNonInteractive(
mockConfig,
mockSettings,
'/help',
'prompt-id-help',
);
// Should write error message to stderr
expect(processStderrSpy).toHaveBeenCalledWith(
'The command "/help" is not supported in non-interactive mode.\n',
);
});
it('should handle unhandled command result types by returning early with error', async () => {
const mockCommand = {
name: 'noaction',
description: 'unhandled type',
@@ -920,15 +998,16 @@ describe('runNonInteractive', () => {
};
mockGetCommands.mockReturnValue([mockCommand]);
await expect(
runNonInteractive(
mockConfig,
mockSettings,
'/noaction',
'prompt-id-unhandled',
),
).rejects.toThrow(
'Exiting due to command result that is not supported in non-interactive mode.',
await runNonInteractive(
mockConfig,
mockSettings,
'/noaction',
'prompt-id-unhandled',
);
// Should write error message to stderr
expect(processStderrSpy).toHaveBeenCalledWith(
'Unknown command result type: unhandled\n',
);
});
@@ -1746,4 +1825,84 @@ describe('runNonInteractive', () => {
{ isContinuation: false },
);
});
it('should print tool output to console in text mode (non-Task tools)', async () => {
// Test that tool output is printed to stdout in text mode
const toolCallEvent: ServerGeminiStreamEvent = {
type: GeminiEventType.ToolCallRequest,
value: {
callId: 'tool-1',
name: 'run_in_terminal',
args: { command: 'npm outdated' },
isClientInitiated: false,
prompt_id: 'prompt-id-tool-output',
},
};
// Mock tool execution with outputUpdateHandler being called
mockCoreExecuteToolCall.mockImplementation(
async (_config, _request, _signal, options) => {
// Simulate tool calling outputUpdateHandler with output chunks
if (options?.outputUpdateHandler) {
options.outputUpdateHandler('tool-1', 'Package outdated\n');
options.outputUpdateHandler('tool-1', 'npm@1.0.0 -> npm@2.0.0\n');
}
return {
responseParts: [
{
functionResponse: {
id: 'tool-1',
name: 'run_in_terminal',
response: {
output: 'Package outdated\nnpm@1.0.0 -> npm@2.0.0',
},
},
},
],
};
},
);
const firstCallEvents: ServerGeminiStreamEvent[] = [
toolCallEvent,
{
type: GeminiEventType.Finished,
value: { reason: undefined, usageMetadata: { totalTokenCount: 5 } },
},
];
const secondCallEvents: ServerGeminiStreamEvent[] = [
{ type: GeminiEventType.Content, value: 'Dependencies checked' },
{
type: GeminiEventType.Finished,
value: { reason: undefined, usageMetadata: { totalTokenCount: 3 } },
},
];
mockGeminiClient.sendMessageStream
.mockReturnValueOnce(createStreamFromEvents(firstCallEvents))
.mockReturnValueOnce(createStreamFromEvents(secondCallEvents));
await runNonInteractive(
mockConfig,
mockSettings,
'Check dependencies',
'prompt-id-tool-output',
);
// Verify that executeToolCall was called with outputUpdateHandler
expect(mockCoreExecuteToolCall).toHaveBeenCalledWith(
mockConfig,
expect.objectContaining({ name: 'run_in_terminal' }),
expect.any(AbortSignal),
expect.objectContaining({
outputUpdateHandler: expect.any(Function),
}),
);
// Verify tool output was written to stdout
expect(processStdoutSpy).toHaveBeenCalledWith('Package outdated\n');
expect(processStdoutSpy).toHaveBeenCalledWith('npm@1.0.0 -> npm@2.0.0\n');
expect(processStdoutSpy).toHaveBeenCalledWith('Dependencies checked');
});
});

View File

@@ -4,7 +4,11 @@
* SPDX-License-Identifier: Apache-2.0
*/
import type { Config, ToolCallRequestInfo } from '@qwen-code/qwen-code-core';
import type {
Config,
ToolCallRequestInfo,
ToolResultDisplay,
} from '@qwen-code/qwen-code-core';
import { isSlashCommand } from './ui/utils/commandUtils.js';
import type { LoadedSettings } from './config/settings.js';
import {
@@ -42,6 +46,55 @@ import {
computeUsageFromMetrics,
} from './utils/nonInteractiveHelpers.js';
/**
* Emits a final message for slash command results.
* Note: systemMessage should already be emitted before calling this function.
*/
async function emitNonInteractiveFinalMessage(params: {
message: string;
isError: boolean;
adapter?: JsonOutputAdapterInterface;
config: Config;
startTimeMs: number;
}): Promise<void> {
const { message, isError, adapter, config } = params;
if (!adapter) {
// Text output mode: write directly to stdout/stderr
const target = isError ? process.stderr : process.stdout;
target.write(`${message}\n`);
return;
}
// JSON output mode: emit assistant message and result
// (systemMessage should already be emitted by caller)
adapter.startAssistantMessage();
adapter.processEvent({
type: GeminiEventType.Content,
value: message,
} as unknown as Parameters<JsonOutputAdapterInterface['processEvent']>[0]);
adapter.finalizeAssistantMessage();
const metrics = uiTelemetryService.getMetrics();
const usage = computeUsageFromMetrics(metrics);
const outputFormat = config.getOutputFormat();
const stats =
outputFormat === OutputFormat.JSON
? uiTelemetryService.getMetrics()
: undefined;
adapter.emitResult({
isError,
durationMs: Date.now() - params.startTimeMs,
apiDurationMs: 0,
numTurns: 0,
errorMessage: isError ? message : undefined,
usage,
stats,
summary: message,
});
}
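A hedged usage sketch for the helper above in text mode (no adapter): the message goes straight to stderr or stdout and the JSON result bookkeeping is skipped. The helper is module-local, so this call is assumed to live in the same file, and the Config value is assumed to be already initialized:

// Assumed to run inside the same module that defines emitNonInteractiveFinalMessage.
import type { Config } from '@qwen-code/qwen-code-core';
declare const config: Config; // assumed available in the surrounding scope

await emitNonInteractiveFinalMessage({
  message: 'The command "/help" is not supported in non-interactive mode.',
  isError: true,
  adapter: undefined, // text output mode
  config,
  startTimeMs: Date.now(),
});
// -> writes the message plus a trailing newline to process.stderr and returns.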
/**
* Provides optional overrides for `runNonInteractive` execution.
*
@@ -115,6 +168,16 @@ export async function runNonInteractive(
process.on('SIGINT', shutdownHandler);
process.on('SIGTERM', shutdownHandler);
// Emit systemMessage first (always the first message in JSON mode)
if (adapter) {
const systemMessage = await buildSystemMessage(
config,
sessionId,
permissionMode,
);
adapter.emitMessage(systemMessage);
}
let initialPartList: PartListUnion | null = extractPartsFromUserMessage(
options.userMessage,
);
@@ -128,10 +191,45 @@ export async function runNonInteractive(
config,
settings,
);
if (slashCommandResult) {
// A slash command can replace the prompt entirely; fall back to @-command processing otherwise.
initialPartList = slashCommandResult as PartListUnion;
slashHandled = true;
switch (slashCommandResult.type) {
case 'submit_prompt':
// A slash command can replace the prompt entirely; fall back to @-command processing otherwise.
initialPartList = slashCommandResult.content;
slashHandled = true;
break;
case 'message': {
// systemMessage already emitted above
await emitNonInteractiveFinalMessage({
message: slashCommandResult.content,
isError: slashCommandResult.messageType === 'error',
adapter,
config,
startTimeMs: startTime,
});
return;
}
case 'stream_messages':
throw new FatalInputError(
'Stream messages mode is not supported in non-interactive CLI',
);
case 'unsupported': {
await emitNonInteractiveFinalMessage({
message: slashCommandResult.reason,
isError: true,
adapter,
config,
startTimeMs: startTime,
});
return;
}
case 'no_command':
break;
default: {
const _exhaustive: never = slashCommandResult;
throw new FatalInputError(
`Unhandled slash command result type: ${(_exhaustive as { type: string }).type}`,
);
}
}
}
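The switch above leans on a discriminated union plus a never-typed default branch, so the compiler flags any result variant that is not handled. A stripped-down version of the same pattern with a smaller union:

// Stripped-down version of the exhaustiveness-checking pattern used above.
type Result =
  | { type: 'submit_prompt'; content: string }
  | { type: 'message'; content: string }
  | { type: 'no_command' };

function handle(result: Result): string {
  switch (result.type) {
    case 'submit_prompt':
      return `prompt: ${result.content}`;
    case 'message':
      return `message: ${result.content}`;
    case 'no_command':
      return 'nothing to do';
    default: {
      // Adding a new variant to Result without handling it here fails to compile,
      // because `result` can no longer be narrowed to `never`.
      const _exhaustive: never = result;
      throw new Error(
        `Unhandled result type: ${(_exhaustive as { type: string }).type}`,
      );
    }
  }
}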
@@ -163,15 +261,6 @@ export async function runNonInteractive(
const initialParts = normalizePartList(initialPartList);
let currentMessages: Content[] = [{ role: 'user', parts: initialParts }];
if (adapter) {
const systemMessage = await buildSystemMessage(
config,
sessionId,
permissionMode,
);
adapter.emitMessage(systemMessage);
}
let isFirstTurn = true;
while (true) {
turnCount++;
@@ -221,10 +310,10 @@ export async function runNonInteractive(
const errorText = parseAndFormatApiError(
event.value.error,
config.getContentGeneratorConfig()?.authType,
undefined,
config.getModel(),
);
process.stderr.write(`${errorText}\n`);
// Throw error to exit with non-zero code
throw new Error(errorText);
}
}
}
@@ -250,7 +339,7 @@ export async function runNonInteractive(
? options.controlService.permission.getToolCallUpdateCallback()
: undefined;
// Only pass outputUpdateHandler for Task tool
// Create output handler for Task tool (for subagent execution)
const isTaskTool = finalRequestInfo.name === 'task';
const taskToolProgress = isTaskTool
? createTaskToolProgressHandler(
@@ -260,20 +349,41 @@ export async function runNonInteractive(
)
: undefined;
const taskToolProgressHandler = taskToolProgress?.handler;
// Create output handler for non-Task tools in text mode (for console output)
const nonTaskOutputHandler =
!isTaskTool && !adapter
? (callId: string, outputChunk: ToolResultDisplay) => {
// Print tool output to console in text mode
if (typeof outputChunk === 'string') {
process.stdout.write(outputChunk);
} else if (
outputChunk &&
typeof outputChunk === 'object' &&
'ansiOutput' in outputChunk
) {
// Handle ANSI output - just print as string for now
process.stdout.write(String(outputChunk.ansiOutput));
}
}
: undefined;
// Combine output handlers
const outputUpdateHandler =
taskToolProgressHandler || nonTaskOutputHandler;
const toolResponse = await executeToolCall(
config,
finalRequestInfo,
abortController.signal,
isTaskTool && taskToolProgressHandler
outputUpdateHandler || toolCallUpdateCallback
? {
outputUpdateHandler: taskToolProgressHandler,
onToolCallsUpdate: toolCallUpdateCallback,
}
: toolCallUpdateCallback
? {
...(outputUpdateHandler && { outputUpdateHandler }),
...(toolCallUpdateCallback && {
onToolCallsUpdate: toolCallUpdateCallback,
}
: undefined,
}),
}
: undefined,
);
// Note: In JSON mode, subagent messages are automatically added to the main

View File

@@ -0,0 +1,242 @@
/**
* @license
* Copyright 2025 Qwen Team
* SPDX-License-Identifier: Apache-2.0
*/
import { describe, it, expect, vi, beforeEach } from 'vitest';
import { handleSlashCommand } from './nonInteractiveCliCommands.js';
import type { Config } from '@qwen-code/qwen-code-core';
import type { LoadedSettings } from './config/settings.js';
import { CommandKind } from './ui/commands/types.js';
// Mock the CommandService
const mockGetCommands = vi.hoisted(() => vi.fn());
const mockCommandServiceCreate = vi.hoisted(() => vi.fn());
vi.mock('./services/CommandService.js', () => ({
CommandService: {
create: mockCommandServiceCreate,
},
}));
describe('handleSlashCommand', () => {
let mockConfig: Config;
let mockSettings: LoadedSettings;
let abortController: AbortController;
beforeEach(() => {
mockCommandServiceCreate.mockResolvedValue({
getCommands: mockGetCommands,
});
mockConfig = {
getExperimentalZedIntegration: vi.fn().mockReturnValue(false),
isInteractive: vi.fn().mockReturnValue(false),
getSessionId: vi.fn().mockReturnValue('test-session'),
getFolderTrustFeature: vi.fn().mockReturnValue(false),
getFolderTrust: vi.fn().mockReturnValue(false),
getProjectRoot: vi.fn().mockReturnValue('/test/project'),
storage: {},
} as unknown as Config;
mockSettings = {
system: { path: '', settings: {} },
systemDefaults: { path: '', settings: {} },
user: { path: '', settings: {} },
workspace: { path: '', settings: {} },
} as LoadedSettings;
abortController = new AbortController();
});
it('should return no_command for non-slash input', async () => {
const result = await handleSlashCommand(
'regular text',
abortController,
mockConfig,
mockSettings,
);
expect(result.type).toBe('no_command');
});
it('should return no_command for unknown slash commands', async () => {
mockGetCommands.mockReturnValue([]);
const result = await handleSlashCommand(
'/unknowncommand',
abortController,
mockConfig,
mockSettings,
);
expect(result.type).toBe('no_command');
});
it('should return unsupported for known built-in commands not in allowed list', async () => {
const mockHelpCommand = {
name: 'help',
description: 'Show help',
kind: CommandKind.BUILT_IN,
action: vi.fn(),
};
mockGetCommands.mockReturnValue([mockHelpCommand]);
const result = await handleSlashCommand(
'/help',
abortController,
mockConfig,
mockSettings,
[], // Empty allowed list
);
expect(result.type).toBe('unsupported');
if (result.type === 'unsupported') {
expect(result.reason).toContain('/help');
expect(result.reason).toContain('not supported');
}
});
it('should return unsupported for /help when using default allowed list', async () => {
const mockHelpCommand = {
name: 'help',
description: 'Show help',
kind: CommandKind.BUILT_IN,
action: vi.fn(),
};
mockGetCommands.mockReturnValue([mockHelpCommand]);
const result = await handleSlashCommand(
'/help',
abortController,
mockConfig,
mockSettings,
// Default allowed list: ['init', 'summary', 'compress']
);
expect(result.type).toBe('unsupported');
if (result.type === 'unsupported') {
expect(result.reason).toBe(
'The command "/help" is not supported in non-interactive mode.',
);
}
});
it('should execute allowed built-in commands', async () => {
const mockInitCommand = {
name: 'init',
description: 'Initialize project',
kind: CommandKind.BUILT_IN,
action: vi.fn().mockResolvedValue({
type: 'message',
messageType: 'info',
content: 'Project initialized',
}),
};
mockGetCommands.mockReturnValue([mockInitCommand]);
const result = await handleSlashCommand(
'/init',
abortController,
mockConfig,
mockSettings,
['init'], // init is in the allowed list
);
expect(result.type).toBe('message');
if (result.type === 'message') {
expect(result.content).toBe('Project initialized');
}
});
it('should execute file commands regardless of allowed list', async () => {
const mockFileCommand = {
name: 'custom',
description: 'Custom file command',
kind: CommandKind.FILE,
action: vi.fn().mockResolvedValue({
type: 'submit_prompt',
content: [{ text: 'Custom prompt' }],
}),
};
mockGetCommands.mockReturnValue([mockFileCommand]);
const result = await handleSlashCommand(
'/custom',
abortController,
mockConfig,
mockSettings,
[], // Empty allowed list, but FILE commands should still work
);
expect(result.type).toBe('submit_prompt');
if (result.type === 'submit_prompt') {
expect(result.content).toEqual([{ text: 'Custom prompt' }]);
}
});
it('should return unsupported for other built-in commands like /quit', async () => {
const mockQuitCommand = {
name: 'quit',
description: 'Quit application',
kind: CommandKind.BUILT_IN,
action: vi.fn(),
};
mockGetCommands.mockReturnValue([mockQuitCommand]);
const result = await handleSlashCommand(
'/quit',
abortController,
mockConfig,
mockSettings,
);
expect(result.type).toBe('unsupported');
if (result.type === 'unsupported') {
expect(result.reason).toContain('/quit');
expect(result.reason).toContain('not supported');
}
});
it('should handle command with no action', async () => {
const mockCommand = {
name: 'noaction',
description: 'Command without action',
kind: CommandKind.FILE,
// No action property
};
mockGetCommands.mockReturnValue([mockCommand]);
const result = await handleSlashCommand(
'/noaction',
abortController,
mockConfig,
mockSettings,
);
expect(result.type).toBe('no_command');
});
it('should return message when command returns void', async () => {
const mockCommand = {
name: 'voidcmd',
description: 'Command that returns void',
kind: CommandKind.FILE,
action: vi.fn().mockResolvedValue(undefined),
};
mockGetCommands.mockReturnValue([mockCommand]);
const result = await handleSlashCommand(
'/voidcmd',
abortController,
mockConfig,
mockSettings,
);
expect(result.type).toBe('message');
if (result.type === 'message') {
expect(result.content).toBe('Command executed successfully.');
expect(result.messageType).toBe('info');
}
});
});

View File

@@ -7,7 +7,6 @@
import type { PartListUnion } from '@google/genai';
import { parseSlashCommand } from './utils/commands.js';
import {
FatalInputError,
Logger,
uiTelemetryService,
type Config,
@@ -19,10 +18,164 @@ import {
CommandKind,
type CommandContext,
type SlashCommand,
type SlashCommandActionReturn,
} from './ui/commands/types.js';
import { createNonInteractiveUI } from './ui/noninteractive/nonInteractiveUi.js';
import type { LoadedSettings } from './config/settings.js';
import type { SessionStatsState } from './ui/contexts/SessionContext.js';
import { t } from './i18n/index.js';
/**
* Built-in commands that are allowed in non-interactive modes (CLI and ACP).
* Only safe, read-only commands that don't require interactive UI.
*
* These commands are:
* - init: Initialize project configuration
* - summary: Generate session summary
* - compress: Compress conversation history
*/
export const ALLOWED_BUILTIN_COMMANDS_NON_INTERACTIVE = [
'init',
'summary',
'compress',
] as const;
/**
* Result of handling a slash command in non-interactive mode.
*
* Supported types:
* - 'submit_prompt': Submits content to the model (supports all modes)
* - 'message': Returns a single message (supports non-interactive JSON/text only)
* - 'stream_messages': Streams multiple messages (supports ACP only)
* - 'unsupported': Command cannot be executed in this mode
* - 'no_command': No command was found or executed
*/
export type NonInteractiveSlashCommandResult =
| {
type: 'submit_prompt';
content: PartListUnion;
}
| {
type: 'message';
messageType: 'info' | 'error';
content: string;
}
| {
type: 'stream_messages';
messages: AsyncGenerator<
{ messageType: 'info' | 'error'; content: string },
void,
unknown
>;
}
| {
type: 'unsupported';
reason: string;
originalType: string;
}
| {
type: 'no_command';
};
/**
* Converts a SlashCommandActionReturn to a NonInteractiveSlashCommandResult.
*
* Only the following result types are supported in non-interactive mode:
* - submit_prompt: Submits content to the model (all modes)
* - message: Returns a single message (non-interactive JSON/text only)
* - stream_messages: Streams multiple messages (ACP only)
*
* All other result types are converted to 'unsupported'.
*
* @param result The result from executing a slash command action
* @returns A NonInteractiveSlashCommandResult describing the outcome
*/
function handleCommandResult(
result: SlashCommandActionReturn,
): NonInteractiveSlashCommandResult {
switch (result.type) {
case 'submit_prompt':
return {
type: 'submit_prompt',
content: result.content,
};
case 'message':
return {
type: 'message',
messageType: result.messageType,
content: result.content,
};
case 'stream_messages':
return {
type: 'stream_messages',
messages: result.messages,
};
/**
* The return types below are currently never generated because of the
* whitelist of slash commands allowed in ACP and non-interactive mode.
* More supported return types may be added in the future.
*/
case 'tool':
return {
type: 'unsupported',
reason:
'Tool execution from slash commands is not supported in non-interactive mode.',
originalType: 'tool',
};
case 'quit':
return {
type: 'unsupported',
reason:
'Quit command is not supported in non-interactive mode. The process will exit naturally after completion.',
originalType: 'quit',
};
case 'dialog':
return {
type: 'unsupported',
reason: `Dialog '${result.dialog}' cannot be opened in non-interactive mode.`,
originalType: 'dialog',
};
case 'load_history':
return {
type: 'unsupported',
reason:
'Loading history is not supported in non-interactive mode. Each invocation starts with a fresh context.',
originalType: 'load_history',
};
case 'confirm_shell_commands':
return {
type: 'unsupported',
reason:
'Shell command confirmation is not supported in non-interactive mode. Use YOLO mode or pre-approve commands.',
originalType: 'confirm_shell_commands',
};
case 'confirm_action':
return {
type: 'unsupported',
reason:
'Action confirmation is not supported in non-interactive mode. Commands requiring confirmation cannot be executed.',
originalType: 'confirm_action',
};
default: {
// Exhaustiveness check
const _exhaustive: never = result;
return {
type: 'unsupported',
reason: `Unknown command result type: ${(_exhaustive as SlashCommandActionReturn).type}`,
originalType: 'unknown',
};
}
}
}
/**
* Filters commands based on the allowed built-in command names.
@@ -62,122 +215,146 @@ function filterCommandsForNonInteractive(
* @param config The configuration object
* @param settings The loaded settings
* @param allowedBuiltinCommandNames Optional array of built-in command names that are
* allowed. If not provided or empty, only file commands are available.
* @returns A Promise that resolves to `PartListUnion` if a valid command is
* found and results in a prompt, or `undefined` otherwise.
* @throws {FatalInputError} if the command result is not supported in
* non-interactive mode.
* allowed. Defaults to ALLOWED_BUILTIN_COMMANDS_NON_INTERACTIVE (init, summary, compress).
* Pass an empty array to only allow file commands.
* @returns A Promise that resolves to a `NonInteractiveSlashCommandResult` describing
* the outcome of the command execution.
*/
export const handleSlashCommand = async (
rawQuery: string,
abortController: AbortController,
config: Config,
settings: LoadedSettings,
allowedBuiltinCommandNames?: string[],
): Promise<PartListUnion | undefined> => {
allowedBuiltinCommandNames: string[] = [
...ALLOWED_BUILTIN_COMMANDS_NON_INTERACTIVE,
],
): Promise<NonInteractiveSlashCommandResult> => {
const trimmed = rawQuery.trim();
if (!trimmed.startsWith('/')) {
return;
return { type: 'no_command' };
}
const isAcpMode = config.getExperimentalZedIntegration();
const isInteractive = config.isInteractive();
const executionMode = isAcpMode
? 'acp'
: isInteractive
? 'interactive'
: 'non_interactive';
const allowedBuiltinSet = new Set(allowedBuiltinCommandNames ?? []);
// Only load BuiltinCommandLoader if there are allowed built-in commands
const loaders =
allowedBuiltinSet.size > 0
? [new BuiltinCommandLoader(config), new FileCommandLoader(config)]
: [new FileCommandLoader(config)];
// Load all commands to check if the command exists but is not allowed
const allLoaders = [
new BuiltinCommandLoader(config),
new FileCommandLoader(config),
];
const commandService = await CommandService.create(
loaders,
allLoaders,
abortController.signal,
);
const commands = commandService.getCommands();
const allCommands = commandService.getCommands();
const filteredCommands = filterCommandsForNonInteractive(
commands,
allCommands,
allowedBuiltinSet,
);
// First, try to parse with filtered commands
const { commandToExecute, args } = parseSlashCommand(
rawQuery,
filteredCommands,
);
if (commandToExecute) {
if (commandToExecute.action) {
// Not used by custom commands but may be in the future.
const sessionStats: SessionStatsState = {
sessionId: config?.getSessionId(),
sessionStartTime: new Date(),
metrics: uiTelemetryService.getMetrics(),
lastPromptTokenCount: 0,
promptCount: 1,
if (!commandToExecute) {
// Check if this is a known command that's just not allowed
const { commandToExecute: knownCommand } = parseSlashCommand(
rawQuery,
allCommands,
);
if (knownCommand) {
// Command exists but is not allowed in non-interactive mode
return {
type: 'unsupported',
reason: t(
'The command "/{{command}}" is not supported in non-interactive mode.',
{ command: knownCommand.name },
),
originalType: 'filtered_command',
};
const logger = new Logger(config?.getSessionId() || '', config?.storage);
const context: CommandContext = {
services: {
config,
settings,
git: undefined,
logger,
},
ui: createNonInteractiveUI(),
session: {
stats: sessionStats,
sessionShellAllowlist: new Set(),
},
invocation: {
raw: trimmed,
name: commandToExecute.name,
args,
},
};
const result = await commandToExecute.action(context, args);
if (result) {
switch (result.type) {
case 'submit_prompt':
return result.content;
case 'confirm_shell_commands':
// This result indicates a command attempted to confirm shell commands.
// However note that currently, ShellTool is excluded in non-interactive
// mode unless 'YOLO mode' is active, so confirmation actually won't
// occur because of YOLO mode.
// This ensures that if a command *does* request confirmation (e.g.
// in the future with more granular permissions), it's handled appropriately.
throw new FatalInputError(
'Exiting due to a confirmation prompt requested by the command.',
);
default:
throw new FatalInputError(
'Exiting due to command result that is not supported in non-interactive mode.',
);
}
}
}
return { type: 'no_command' };
}
return;
if (!commandToExecute.action) {
return { type: 'no_command' };
}
// Not used by custom commands but may be in the future.
const sessionStats: SessionStatsState = {
sessionId: config?.getSessionId(),
sessionStartTime: new Date(),
metrics: uiTelemetryService.getMetrics(),
lastPromptTokenCount: 0,
promptCount: 1,
};
const logger = new Logger(config?.getSessionId() || '', config?.storage);
const context: CommandContext = {
executionMode,
services: {
config,
settings,
git: undefined,
logger,
},
ui: createNonInteractiveUI(),
session: {
stats: sessionStats,
sessionShellAllowlist: new Set(),
},
invocation: {
raw: trimmed,
name: commandToExecute.name,
args,
},
};
const result = await commandToExecute.action(context, args);
if (!result) {
// Command executed but returned no result (e.g., void return)
return {
type: 'message',
messageType: 'info',
content: 'Command executed successfully.',
};
}
// Handle different result types
return handleCommandResult(result);
};
/**
* Retrieves all available slash commands for the current configuration.
*
* @param config The configuration object
* @param settings The loaded settings
* @param abortSignal Signal to cancel the loading process
* @param allowedBuiltinCommandNames Optional array of built-in command names that are
* allowed. If not provided or empty, only file commands are available.
* allowed. Defaults to ALLOWED_BUILTIN_COMMANDS_NON_INTERACTIVE (init, summary, compress).
* Pass an empty array to only include file commands.
* @returns A Promise that resolves to an array of SlashCommand objects
*/
export const getAvailableCommands = async (
config: Config,
settings: LoadedSettings,
abortSignal: AbortSignal,
allowedBuiltinCommandNames?: string[],
allowedBuiltinCommandNames: string[] = [
...ALLOWED_BUILTIN_COMMANDS_NON_INTERACTIVE,
],
): Promise<SlashCommand[]> => {
try {
const allowedBuiltinSet = new Set(allowedBuiltinCommandNames ?? []);
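For orientation, a minimal sketch of how a non-interactive caller might branch on the new result union. The union is copied from the diff above so the snippet stays self-contained; the consumer function and its submitPrompt callback are hypothetical and not part of the change.

import type { PartListUnion } from '@google/genai';

// Local copy of the result union from the diff, so this sketch compiles on its own.
type NonInteractiveSlashCommandResult =
  | { type: 'submit_prompt'; content: PartListUnion }
  | { type: 'message'; messageType: 'info' | 'error'; content: string }
  | {
      type: 'stream_messages';
      messages: AsyncGenerator<
        { messageType: 'info' | 'error'; content: string },
        void,
        unknown
      >;
    }
  | { type: 'unsupported'; reason: string; originalType: string }
  | { type: 'no_command' };

// Hypothetical consumer: map a slash-command result onto an exit code.
async function consumeSlashCommandResult(
  result: NonInteractiveSlashCommandResult,
  submitPrompt: (content: PartListUnion) => Promise<void>, // assumed callback into the model loop
): Promise<number> {
  switch (result.type) {
    case 'submit_prompt':
      await submitPrompt(result.content);
      return 0;
    case 'message':
      (result.messageType === 'error' ? console.error : console.log)(result.content);
      return result.messageType === 'error' ? 1 : 0;
    case 'stream_messages':
      for await (const msg of result.messages) {
        (msg.messageType === 'error' ? console.error : console.log)(msg.content);
      }
      return 0;
    case 'unsupported':
      console.error(result.reason);
      return 1;
    case 'no_command':
      return 0;
    default: {
      const _exhaustive: never = result; // mirrors the exhaustiveness check above
      return _exhaustive;
    }
  }
}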

View File

@@ -28,7 +28,7 @@ const mockPrompt = {
{ name: 'trail', required: false, description: "The animal's trail." },
],
invoke: vi.fn().mockResolvedValue({
messages: [{ content: { text: 'Hello, world!' } }],
messages: [{ content: { type: 'text', text: 'Hello, world!' } }],
}),
};

View File

@@ -123,7 +123,10 @@ export class McpPromptLoader implements ICommandLoader {
};
}
if (!result.messages?.[0]?.content?.['text']) {
const firstMessage = result.messages?.[0];
const content = firstMessage?.content;
if (content?.type !== 'text') {
return {
type: 'message',
messageType: 'error',
@@ -134,7 +137,7 @@ export class McpPromptLoader implements ICommandLoader {
return {
type: 'submit_prompt',
content: JSON.stringify(result.messages[0].content.text),
content: JSON.stringify(content.text),
};
} catch (error) {
return {

View File

@@ -72,6 +72,7 @@ describe('ShellProcessor', () => {
getApprovalMode: vi.fn().mockReturnValue(ApprovalMode.DEFAULT),
getShouldUseNodePtyShell: vi.fn().mockReturnValue(false),
getShellExecutionConfig: vi.fn().mockReturnValue({}),
getAllowedTools: vi.fn().mockReturnValue([]),
};
context = createMockCommandContext({
@@ -196,6 +197,35 @@ describe('ShellProcessor', () => {
);
});
it('should NOT throw ConfirmationRequiredError when a command matches allowedTools', async () => {
const processor = new ShellProcessor('test-command');
const prompt: PromptPipelineContent = createPromptPipelineContent(
'Do something dangerous: !{rm -rf /}',
);
mockCheckCommandPermissions.mockReturnValue({
allAllowed: false,
disallowedCommands: ['rm -rf /'],
});
(mockConfig.getAllowedTools as Mock).mockReturnValue([
'ShellTool(rm -rf /)',
]);
mockShellExecute.mockReturnValue({
result: Promise.resolve({ ...SUCCESS_RESULT, output: 'deleted' }),
});
const result = await processor.process(prompt, context);
expect(mockShellExecute).toHaveBeenCalledWith(
'rm -rf /',
expect.any(String),
expect.any(Function),
expect.any(Object),
false,
expect.any(Object),
);
expect(result).toEqual([{ text: 'Do something dangerous: deleted' }]);
});
it('should NOT throw ConfirmationRequiredError if a command is not allowed but approval mode is YOLO', async () => {
const processor = new ShellProcessor('test-command');
const prompt: PromptPipelineContent = createPromptPipelineContent(

View File

@@ -7,11 +7,13 @@
import {
ApprovalMode,
checkCommandPermissions,
doesToolInvocationMatch,
escapeShellArg,
getShellConfiguration,
ShellExecutionService,
flatMapTextParts,
} from '@qwen-code/qwen-code-core';
import type { AnyToolInvocation } from '@qwen-code/qwen-code-core';
import type { CommandContext } from '../../ui/commands/types.js';
import type { IPromptProcessor, PromptPipelineContent } from './types.js';
@@ -124,6 +126,15 @@ export class ShellProcessor implements IPromptProcessor {
// Security check on the final, escaped command string.
const { allAllowed, disallowedCommands, blockReason, isHardDenial } =
checkCommandPermissions(command, config, sessionShellAllowlist);
const allowedTools = config.getAllowedTools() || [];
const invocation = {
params: { command },
} as AnyToolInvocation;
const isAllowedBySettings = doesToolInvocationMatch(
'run_shell_command',
invocation,
allowedTools,
);
if (!allAllowed) {
if (isHardDenial) {
@@ -132,10 +143,17 @@ export class ShellProcessor implements IPromptProcessor {
);
}
// If not a hard denial, respect YOLO mode and auto-approve.
if (config.getApprovalMode() !== ApprovalMode.YOLO) {
disallowedCommands.forEach((uc) => commandsToConfirm.add(uc));
// If the command is allowed by settings, skip confirmation.
if (isAllowedBySettings) {
continue;
}
// If not a hard denial, respect YOLO mode and auto-approve.
if (config.getApprovalMode() === ApprovalMode.YOLO) {
continue;
}
disallowedCommands.forEach((uc) => commandsToConfirm.add(uc));
}
}
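A condensed, illustrative restatement of the confirmation-gating order the updated ShellProcessor applies: hard denials fail outright, a matching allowedTools entry such as 'ShellTool(rm -rf /)' skips confirmation, YOLO mode auto-approves, and everything else is queued for confirmation. The helper and its error message below are a sketch, not the real implementation.

// Sketch of the gating order only; the real code uses checkCommandPermissions,
// doesToolInvocationMatch and Config from @qwen-code/qwen-code-core.
interface PermissionCheck {
  allAllowed: boolean;
  disallowedCommands: string[];
  isHardDenial: boolean;
  blockReason?: string;
}

function commandsNeedingConfirmation(
  check: PermissionCheck,
  isAllowedBySettings: boolean, // e.g. allowedTools contains 'ShellTool(rm -rf /)'
  isYoloMode: boolean,
): Set<string> {
  const toConfirm = new Set<string>();
  if (check.allAllowed) return toConfirm;
  if (check.isHardDenial) {
    throw new Error(check.blockReason ?? 'Command is explicitly blocked.');
  }
  // Settings-level approval or YOLO mode both skip confirmation.
  if (isAllowedBySettings || isYoloMode) return toConfirm;
  for (const cmd of check.disallowedCommands) toConfirm.add(cmd);
  return toConfirm;
}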

View File

@@ -6,10 +6,12 @@
import { render } from 'ink-testing-library';
import type React from 'react';
import type { Config } from '@qwen-code/qwen-code-core';
import { LoadedSettings } from '../config/settings.js';
import { KeypressProvider } from '../ui/contexts/KeypressContext.js';
import { SettingsContext } from '../ui/contexts/SettingsContext.js';
import { ShellFocusContext } from '../ui/contexts/ShellFocusContext.js';
import { ConfigContext } from '../ui/contexts/ConfigContext.js';
const mockSettings = new LoadedSettings(
{ path: '', settings: {}, originalSettings: {} },
@@ -22,14 +24,24 @@ const mockSettings = new LoadedSettings(
export const renderWithProviders = (
component: React.ReactElement,
{ shellFocus = true, settings = mockSettings } = {},
{
shellFocus = true,
settings = mockSettings,
config = undefined,
}: {
shellFocus?: boolean;
settings?: LoadedSettings;
config?: Config;
} = {},
): ReturnType<typeof render> =>
render(
<SettingsContext.Provider value={settings}>
<ShellFocusContext.Provider value={shellFocus}>
<KeypressProvider kittyProtocolEnabled={true}>
{component}
</KeypressProvider>
</ShellFocusContext.Provider>
<ConfigContext.Provider value={config}>
<ShellFocusContext.Provider value={shellFocus}>
<KeypressProvider kittyProtocolEnabled={true}>
{component}
</KeypressProvider>
</ShellFocusContext.Provider>
</ConfigContext.Provider>
</SettingsContext.Provider>,
);
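A minimal usage sketch for the extended test helper, now that a Config can be injected through ConfigContext. MyComponent, the Config stub, and the relative import paths are placeholders; only the { config } option itself comes from the change above.

import { describe, it, expect } from 'vitest';
import type { Config } from '@qwen-code/qwen-code-core';
import { renderWithProviders } from '../test-utils/render.js'; // path varies by test location
import { MyComponent } from './MyComponent.js'; // hypothetical component under test

describe('MyComponent', () => {
  it('renders with an injected Config', () => {
    // Stub only the members the component actually reads via useConfig().
    const mockConfig = { getAuthType: () => undefined } as unknown as Config;
    const { lastFrame } = renderWithProviders(<MyComponent />, {
      config: mockConfig,
    });
    expect(lastFrame()).toBeDefined();
  });
});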

View File

@@ -23,7 +23,6 @@ import {
} from '@qwen-code/qwen-code-core';
import type { LoadedSettings } from '../config/settings.js';
import type { InitializationResult } from '../core/initializer.js';
import { useQuotaAndFallback } from './hooks/useQuotaAndFallback.js';
import { UIStateContext, type UIState } from './contexts/UIStateContext.js';
import {
UIActionsContext,
@@ -56,7 +55,6 @@ vi.mock('./App.js', () => ({
App: TestContextConsumer,
}));
vi.mock('./hooks/useQuotaAndFallback.js');
vi.mock('./hooks/useHistoryManager.js');
vi.mock('./hooks/useThemeCommand.js');
vi.mock('./auth/useAuth.js');
@@ -122,7 +120,6 @@ describe('AppContainer State Management', () => {
let mockInitResult: InitializationResult;
// Create typed mocks for all hooks
const mockedUseQuotaAndFallback = useQuotaAndFallback as Mock;
const mockedUseHistory = useHistory as Mock;
const mockedUseThemeCommand = useThemeCommand as Mock;
const mockedUseAuthCommand = useAuthCommand as Mock;
@@ -164,10 +161,6 @@ describe('AppContainer State Management', () => {
capturedUIActions = null!;
// **Provide a default return value for EVERY mocked hook.**
mockedUseQuotaAndFallback.mockReturnValue({
proQuotaRequest: null,
handleProQuotaChoice: vi.fn(),
});
mockedUseHistory.mockReturnValue({
history: [],
addItem: vi.fn(),
@@ -567,75 +560,6 @@ describe('AppContainer State Management', () => {
});
});
describe('Quota and Fallback Integration', () => {
it('passes a null proQuotaRequest to UIStateContext by default', () => {
// The default mock from beforeEach already sets proQuotaRequest to null
render(
<AppContainer
config={mockConfig}
settings={mockSettings}
version="1.0.0"
initializationResult={mockInitResult}
/>,
);
// Assert that the context value is as expected
expect(capturedUIState.proQuotaRequest).toBeNull();
});
it('passes a valid proQuotaRequest to UIStateContext when provided by the hook', () => {
// Arrange: Create a mock request object that a UI dialog would receive
const mockRequest = {
failedModel: 'gemini-pro',
fallbackModel: 'gemini-flash',
resolve: vi.fn(),
};
mockedUseQuotaAndFallback.mockReturnValue({
proQuotaRequest: mockRequest,
handleProQuotaChoice: vi.fn(),
});
// Act: Render the container
render(
<AppContainer
config={mockConfig}
settings={mockSettings}
version="1.0.0"
initializationResult={mockInitResult}
/>,
);
// Assert: The mock request is correctly passed through the context
expect(capturedUIState.proQuotaRequest).toEqual(mockRequest);
});
it('passes the handleProQuotaChoice function to UIActionsContext', () => {
// Arrange: Create a mock handler function
const mockHandler = vi.fn();
mockedUseQuotaAndFallback.mockReturnValue({
proQuotaRequest: null,
handleProQuotaChoice: mockHandler,
});
// Act: Render the container
render(
<AppContainer
config={mockConfig}
settings={mockSettings}
version="1.0.0"
initializationResult={mockInitResult}
/>,
);
// Assert: The action in the context is the mock handler we provided
expect(capturedUIActions.handleProQuotaChoice).toBe(mockHandler);
// You can even verify that the plumbed function is callable
capturedUIActions.handleProQuotaChoice('auth');
expect(mockHandler).toHaveBeenCalledWith('auth');
});
});
describe('Terminal Title Update Feature', () => {
beforeEach(() => {
// Reset mock stdout for each test

View File

@@ -32,8 +32,6 @@ import {
type Config,
type IdeInfo,
type IdeContext,
type UserTierId,
DEFAULT_GEMINI_FLASH_MODEL,
IdeClient,
ideContextStore,
getErrorMessage,
@@ -48,7 +46,6 @@ import { useHistory } from './hooks/useHistoryManager.js';
import { useMemoryMonitor } from './hooks/useMemoryMonitor.js';
import { useThemeCommand } from './hooks/useThemeCommand.js';
import { useAuthCommand } from './auth/useAuth.js';
import { useQuotaAndFallback } from './hooks/useQuotaAndFallback.js';
import { useEditorSettings } from './hooks/useEditorSettings.js';
import { useSettingsCommand } from './hooks/useSettingsCommand.js';
import { useModelCommand } from './hooks/useModelCommand.js';
@@ -182,17 +179,10 @@ export const AppContainer = (props: AppContainerProps) => {
[],
);
// Helper to determine the effective model, considering the fallback state.
const getEffectiveModel = useCallback(() => {
if (config.isInFallbackMode()) {
return DEFAULT_GEMINI_FLASH_MODEL;
}
return config.getModel();
}, [config]);
// Helper to determine the current model (polled, since Config has no model-change event).
const getCurrentModel = useCallback(() => config.getModel(), [config]);
const [currentModel, setCurrentModel] = useState(getEffectiveModel());
const [userTier] = useState<UserTierId | undefined>(undefined);
const [currentModel, setCurrentModel] = useState(getCurrentModel());
const [isConfigInitialized, setConfigInitialized] = useState(false);
@@ -245,12 +235,12 @@ export const AppContainer = (props: AppContainerProps) => {
[historyManager.addItem],
);
// Watch for model changes (e.g., from Flash fallback)
// Watch for model changes (e.g., user switches model via /model)
useEffect(() => {
const checkModelChange = () => {
const effectiveModel = getEffectiveModel();
if (effectiveModel !== currentModel) {
setCurrentModel(effectiveModel);
const model = getCurrentModel();
if (model !== currentModel) {
setCurrentModel(model);
}
};
@@ -258,7 +248,7 @@ export const AppContainer = (props: AppContainerProps) => {
const interval = setInterval(checkModelChange, 1000); // Check every second
return () => clearInterval(interval);
}, [config, currentModel, getEffectiveModel]);
}, [config, currentModel, getCurrentModel]);
const {
consoleMessages,
@@ -367,14 +357,6 @@ export const AppContainer = (props: AppContainerProps) => {
cancelAuthentication,
} = useAuthCommand(settings, config, historyManager.addItem);
const { proQuotaRequest, handleProQuotaChoice } = useQuotaAndFallback({
config,
historyManager,
userTier,
setAuthState,
setModelSwitchedFromQuotaError,
});
useInitializationAuthError(initializationResult.authError, onAuthError);
// Sync user tier from config when authentication changes
@@ -388,37 +370,36 @@ export const AppContainer = (props: AppContainerProps) => {
// Check for enforced auth type mismatch
useEffect(() => {
// Check for initialization error first
const currentAuthType = config.modelsConfig.getCurrentAuthType();
if (
settings.merged.security?.auth?.enforcedType &&
settings.merged.security?.auth.selectedType &&
settings.merged.security?.auth.enforcedType !==
settings.merged.security?.auth.selectedType
currentAuthType &&
settings.merged.security?.auth.enforcedType !== currentAuthType
) {
onAuthError(
t(
'Authentication is enforced to be {{enforcedType}}, but you are currently using {{currentType}}.',
{
enforcedType: settings.merged.security?.auth.enforcedType,
currentType: settings.merged.security?.auth.selectedType,
enforcedType: String(settings.merged.security?.auth.enforcedType),
currentType: String(currentAuthType),
},
),
);
} else if (
settings.merged.security?.auth?.selectedType &&
!settings.merged.security?.auth?.useExternal
) {
const error = validateAuthMethod(
settings.merged.security.auth.selectedType,
);
if (error) {
onAuthError(error);
} else if (!settings.merged.security?.auth?.useExternal) {
// If no authType is selected yet, allow the auth UI flow to prompt the user.
// Only validate credentials once a concrete authType exists.
if (currentAuthType) {
const error = validateAuthMethod(currentAuthType, config);
if (error) {
onAuthError(error);
}
}
}
}, [
settings.merged.security?.auth?.selectedType,
settings.merged.security?.auth?.enforcedType,
settings.merged.security?.auth?.useExternal,
config,
onAuthError,
]);
@@ -752,8 +733,7 @@ export const AppContainer = (props: AppContainerProps) => {
!initError &&
!isProcessing &&
(streamingState === StreamingState.Idle ||
streamingState === StreamingState.Responding) &&
!proQuotaRequest;
streamingState === StreamingState.Responding);
const [controlsHeight, setControlsHeight] = useState(0);
@@ -938,7 +918,12 @@ export const AppContainer = (props: AppContainerProps) => {
const handleIdePromptComplete = useCallback(
(result: IdeIntegrationNudgeResult) => {
if (result.userSelection === 'yes') {
handleSlashCommand('/ide install');
// Check whether the extension has been pre-installed
if (result.isExtensionPreInstalled) {
handleSlashCommand('/ide enable');
} else {
handleSlashCommand('/ide install');
}
settings.setValue(SettingScope.User, 'ide.hasSeenNudge', true);
} else if (result.userSelection === 'dismiss') {
settings.setValue(SettingScope.User, 'ide.hasSeenNudge', true);
@@ -1206,7 +1191,6 @@ export const AppContainer = (props: AppContainerProps) => {
isAuthenticating ||
isEditorDialogOpen ||
showIdeRestartPrompt ||
!!proQuotaRequest ||
isSubagentCreateDialogOpen ||
isAgentsManagerDialogOpen ||
isApprovalModeDialogOpen ||
@@ -1277,8 +1261,6 @@ export const AppContainer = (props: AppContainerProps) => {
showWorkspaceMigrationDialog,
workspaceExtensions,
currentModel,
userTier,
proQuotaRequest,
contextFileNames,
errorCount,
availableTerminalHeight,
@@ -1367,8 +1349,6 @@ export const AppContainer = (props: AppContainerProps) => {
showAutoAcceptIndicator,
showWorkspaceMigrationDialog,
workspaceExtensions,
userTier,
proQuotaRequest,
contextFileNames,
errorCount,
availableTerminalHeight,
@@ -1430,7 +1410,6 @@ export const AppContainer = (props: AppContainerProps) => {
handleClearScreen,
onWorkspaceMigrationDialogOpen,
onWorkspaceMigrationDialogClose,
handleProQuotaChoice,
// Vision switch dialog
handleVisionSwitchSelect,
// Welcome back dialog
@@ -1468,7 +1447,6 @@ export const AppContainer = (props: AppContainerProps) => {
handleClearScreen,
onWorkspaceMigrationDialogOpen,
onWorkspaceMigrationDialogClose,
handleProQuotaChoice,
handleVisionSwitchSelect,
handleWelcomeBackSelection,
handleWelcomeBackClose,

View File

@@ -38,6 +38,7 @@ export function IdeIntegrationNudge({
);
const { displayName: ideName } = ide;
const isInSandbox = !!process.env['SANDBOX'];
// Assume the extension is already installed if the environment variables are set.
const isExtensionPreInstalled =
!!process.env['QWEN_CODE_IDE_SERVER_PORT'] &&
@@ -70,13 +71,15 @@ export function IdeIntegrationNudge({
},
];
const installText = isExtensionPreInstalled
? `If you select Yes, the CLI will have access to your open files and display diffs directly in ${
ideName ?? 'your editor'
}.`
: `If you select Yes, we'll install an extension that allows the CLI to access your open files and display diffs directly in ${
ideName ?? 'your editor'
}.`;
const installText = isInSandbox
? `Note: In sandbox environments, IDE integration requires manual setup on the host system. If you select Yes, you'll receive instructions on how to set this up.`
: isExtensionPreInstalled
? `If you select Yes, the CLI will connect to your ${
ideName ?? 'editor'
} and have access to your open files and display diffs directly.`
: `If you select Yes, we'll install an extension that allows the CLI to access your open files and display diffs directly in ${
ideName ?? 'your editor'
}.`;
return (
<Box

View File

@@ -6,7 +6,8 @@
import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
import { AuthDialog } from './AuthDialog.js';
import { LoadedSettings, SettingScope } from '../../config/settings.js';
import { LoadedSettings } from '../../config/settings.js';
import type { Config } from '@qwen-code/qwen-code-core';
import { AuthType } from '@qwen-code/qwen-code-core';
import { renderWithProviders } from '../../test-utils/render.js';
import { UIStateContext } from '../contexts/UIStateContext.js';
@@ -43,17 +44,24 @@ const renderAuthDialog = (
settings: LoadedSettings,
uiStateOverrides: Partial<UIState> = {},
uiActionsOverrides: Partial<UIActions> = {},
configAuthType: AuthType | undefined = undefined,
configApiKey: string | undefined = undefined,
) => {
const uiState = createMockUIState(uiStateOverrides);
const uiActions = createMockUIActions(uiActionsOverrides);
const mockConfig = {
getAuthType: vi.fn(() => configAuthType),
getContentGeneratorConfig: vi.fn(() => ({ apiKey: configApiKey })),
} as unknown as Config;
return renderWithProviders(
<UIStateContext.Provider value={uiState}>
<UIActionsContext.Provider value={uiActions}>
<AuthDialog />
</UIActionsContext.Provider>
</UIStateContext.Provider>,
{ settings },
{ settings, config: mockConfig },
);
};
@@ -168,7 +176,7 @@ describe('AuthDialog', () => {
it('should not show the GEMINI_API_KEY message if QWEN_DEFAULT_AUTH_TYPE is set to something else', () => {
process.env['GEMINI_API_KEY'] = 'foobar';
process.env['QWEN_DEFAULT_AUTH_TYPE'] = AuthType.LOGIN_WITH_GOOGLE;
process.env['QWEN_DEFAULT_AUTH_TYPE'] = AuthType.USE_OPENAI;
const settings: LoadedSettings = new LoadedSettings(
{
@@ -212,7 +220,7 @@ describe('AuthDialog', () => {
it('should show the GEMINI_API_KEY message if QWEN_DEFAULT_AUTH_TYPE is set to use api key', () => {
process.env['GEMINI_API_KEY'] = 'foobar';
process.env['QWEN_DEFAULT_AUTH_TYPE'] = AuthType.USE_GEMINI;
process.env['QWEN_DEFAULT_AUTH_TYPE'] = AuthType.USE_OPENAI;
const settings: LoadedSettings = new LoadedSettings(
{
@@ -421,6 +429,7 @@ describe('AuthDialog', () => {
settings,
{},
{ handleAuthSelect },
undefined, // config.getAuthType() returns undefined
);
await wait();
@@ -475,6 +484,7 @@ describe('AuthDialog', () => {
settings,
{ authError: 'Initial error' },
{ handleAuthSelect },
undefined, // config.getAuthType() returns undefined
);
await wait();
@@ -504,12 +514,12 @@ describe('AuthDialog', () => {
},
{
settings: {
security: { auth: { selectedType: AuthType.LOGIN_WITH_GOOGLE } },
security: { auth: { selectedType: AuthType.USE_OPENAI } },
ui: { customThemes: {} },
mcpServers: {},
},
originalSettings: {
security: { auth: { selectedType: AuthType.LOGIN_WITH_GOOGLE } },
security: { auth: { selectedType: AuthType.USE_OPENAI } },
ui: { customThemes: {} },
mcpServers: {},
},
@@ -528,6 +538,7 @@ describe('AuthDialog', () => {
settings,
{},
{ handleAuthSelect },
AuthType.USE_OPENAI, // config.getAuthType() returns USE_OPENAI
);
await wait();
@@ -536,7 +547,7 @@ describe('AuthDialog', () => {
await wait();
// Should call handleAuthSelect with undefined to exit
expect(handleAuthSelect).toHaveBeenCalledWith(undefined, SettingScope.User);
expect(handleAuthSelect).toHaveBeenCalledWith(undefined);
unmount();
});
});

View File

@@ -8,13 +8,12 @@ import type React from 'react';
import { useState } from 'react';
import { AuthType } from '@qwen-code/qwen-code-core';
import { Box, Text } from 'ink';
import { SettingScope } from '../../config/settings.js';
import { Colors } from '../colors.js';
import { useKeypress } from '../hooks/useKeypress.js';
import { RadioButtonSelect } from '../components/shared/RadioButtonSelect.js';
import { useUIState } from '../contexts/UIStateContext.js';
import { useUIActions } from '../contexts/UIActionsContext.js';
import { useSettings } from '../contexts/SettingsContext.js';
import { useConfig } from '../contexts/ConfigContext.js';
import { t } from '../../i18n/index.js';
function parseDefaultAuthType(
@@ -32,7 +31,7 @@ function parseDefaultAuthType(
export function AuthDialog(): React.JSX.Element {
const { pendingAuthType, authError } = useUIState();
const { handleAuthSelect: onAuthSelect } = useUIActions();
const settings = useSettings();
const config = useConfig();
const [errorMessage, setErrorMessage] = useState<string | null>(null);
const [selectedIndex, setSelectedIndex] = useState<number | null>(null);
@@ -58,9 +57,10 @@ export function AuthDialog(): React.JSX.Element {
return item.value === pendingAuthType;
}
// Priority 2: settings.merged.security?.auth?.selectedType
if (settings.merged.security?.auth?.selectedType) {
return item.value === settings.merged.security?.auth?.selectedType;
// Priority 2: config.getAuthType() - the source of truth
const currentAuthType = config.getAuthType();
if (currentAuthType) {
return item.value === currentAuthType;
}
// Priority 3: QWEN_DEFAULT_AUTH_TYPE env var
@@ -76,7 +76,7 @@ export function AuthDialog(): React.JSX.Element {
}),
);
const hasApiKey = Boolean(settings.merged.security?.auth?.apiKey);
const hasApiKey = Boolean(config.getContentGeneratorConfig()?.apiKey);
const currentSelectedAuthType =
selectedIndex !== null
? items[selectedIndex]?.value
@@ -84,7 +84,7 @@ export function AuthDialog(): React.JSX.Element {
const handleAuthSelect = async (authMethod: AuthType) => {
setErrorMessage(null);
await onAuthSelect(authMethod, SettingScope.User);
await onAuthSelect(authMethod);
};
const handleHighlight = (authMethod: AuthType) => {
@@ -100,7 +100,7 @@ export function AuthDialog(): React.JSX.Element {
if (errorMessage) {
return;
}
if (settings.merged.security?.auth?.selectedType === undefined) {
if (config.getAuthType() === undefined) {
// Prevent exiting if no auth method is set
setErrorMessage(
t(
@@ -109,7 +109,7 @@ export function AuthDialog(): React.JSX.Element {
);
return;
}
onAuthSelect(undefined, SettingScope.User);
onAuthSelect(undefined);
}
},
{ isActive: true },

View File

@@ -4,16 +4,16 @@
* SPDX-License-Identifier: Apache-2.0
*/
import type { Config } from '@qwen-code/qwen-code-core';
import type { Config, ModelProvidersConfig } from '@qwen-code/qwen-code-core';
import {
AuthEvent,
AuthType,
clearCachedCredentialFile,
getErrorMessage,
logAuth,
} from '@qwen-code/qwen-code-core';
import { useCallback, useEffect, useState } from 'react';
import type { LoadedSettings, SettingScope } from '../../config/settings.js';
import type { LoadedSettings } from '../../config/settings.js';
import { getPersistScopeForModelSelection } from '../../config/modelProvidersScope.js';
import type { OpenAICredentials } from '../components/OpenAIKeyPrompt.js';
import { useQwenAuth } from '../hooks/useQwenAuth.js';
import { AuthState, MessageType } from '../types.js';
@@ -27,8 +27,7 @@ export const useAuthCommand = (
config: Config,
addItem: (item: Omit<HistoryItem, 'id'>, timestamp: number) => void,
) => {
const unAuthenticated =
settings.merged.security?.auth?.selectedType === undefined;
const unAuthenticated = config.getAuthType() === undefined;
const [authState, setAuthState] = useState<AuthState>(
unAuthenticated ? AuthState.Updating : AuthState.Unauthenticated,
@@ -81,35 +80,35 @@ export const useAuthCommand = (
);
const handleAuthSuccess = useCallback(
async (
authType: AuthType,
scope: SettingScope,
credentials?: OpenAICredentials,
) => {
async (authType: AuthType, credentials?: OpenAICredentials) => {
try {
settings.setValue(scope, 'security.auth.selectedType', authType);
const authTypeScope = getPersistScopeForModelSelection(settings);
settings.setValue(
authTypeScope,
'security.auth.selectedType',
authType,
);
// Only update credentials if not switching to QWEN_OAUTH,
// so that OpenAI credentials are preserved when switching to QWEN_OAUTH.
if (authType !== AuthType.QWEN_OAUTH && credentials) {
if (credentials?.apiKey != null) {
settings.setValue(
scope,
authTypeScope,
'security.auth.apiKey',
credentials.apiKey,
);
}
if (credentials?.baseUrl != null) {
settings.setValue(
scope,
authTypeScope,
'security.auth.baseUrl',
credentials.baseUrl,
);
}
if (credentials?.model != null) {
settings.setValue(scope, 'model.name', credentials.model);
settings.setValue(authTypeScope, 'model.name', credentials.model);
}
await clearCachedCredentialFile();
}
} catch (error) {
handleAuthFailure(error);
@@ -141,14 +140,10 @@ export const useAuthCommand = (
);
const performAuth = useCallback(
async (
authType: AuthType,
scope: SettingScope,
credentials?: OpenAICredentials,
) => {
async (authType: AuthType, credentials?: OpenAICredentials) => {
try {
await config.refreshAuth(authType);
handleAuthSuccess(authType, scope, credentials);
handleAuthSuccess(authType, credentials);
} catch (e) {
handleAuthFailure(e);
}
@@ -156,18 +151,51 @@ export const useAuthCommand = (
[config, handleAuthSuccess, handleAuthFailure],
);
const isProviderManagedModel = useCallback(
(authType: AuthType, modelId: string | undefined) => {
if (!modelId) {
return false;
}
const modelProviders = settings.merged.modelProviders as
| ModelProvidersConfig
| undefined;
if (!modelProviders) {
return false;
}
const providerModels = modelProviders[authType];
if (!Array.isArray(providerModels)) {
return false;
}
return providerModels.some(
(providerModel) => providerModel.id === modelId,
);
},
[settings],
);
const handleAuthSelect = useCallback(
async (
authType: AuthType | undefined,
scope: SettingScope,
credentials?: OpenAICredentials,
) => {
async (authType: AuthType | undefined, credentials?: OpenAICredentials) => {
if (!authType) {
setIsAuthDialogOpen(false);
setAuthError(null);
return;
}
if (
authType === AuthType.USE_OPENAI &&
credentials?.model &&
isProviderManagedModel(authType, credentials.model)
) {
onAuthError(
t(
'Model "{{modelName}}" is managed via settings.modelProviders. Please complete the fields in settings, or use another model id.',
{ modelName: credentials.model },
),
);
return;
}
setPendingAuthType(authType);
setAuthError(null);
setIsAuthDialogOpen(false);
@@ -180,14 +208,14 @@ export const useAuthCommand = (
baseUrl: credentials.baseUrl,
model: credentials.model,
});
await performAuth(authType, scope, credentials);
await performAuth(authType, credentials);
}
return;
}
await performAuth(authType, scope);
await performAuth(authType);
},
[config, performAuth],
[config, performAuth, isProviderManagedModel, onAuthError],
);
const openAuthDialog = useCallback(() => {
@@ -225,16 +253,26 @@ export const useAuthCommand = (
const defaultAuthType = process.env['QWEN_DEFAULT_AUTH_TYPE'];
if (
defaultAuthType &&
![AuthType.QWEN_OAUTH, AuthType.USE_OPENAI].includes(
defaultAuthType as AuthType,
)
![
AuthType.QWEN_OAUTH,
AuthType.USE_OPENAI,
AuthType.USE_ANTHROPIC,
AuthType.USE_GEMINI,
AuthType.USE_VERTEX_AI,
].includes(defaultAuthType as AuthType)
) {
onAuthError(
t(
'Invalid QWEN_DEFAULT_AUTH_TYPE value: "{{value}}". Valid values are: {{validValues}}',
{
value: defaultAuthType,
validValues: [AuthType.QWEN_OAUTH, AuthType.USE_OPENAI].join(', '),
validValues: [
AuthType.QWEN_OAUTH,
AuthType.USE_OPENAI,
AuthType.USE_ANTHROPIC,
AuthType.USE_GEMINI,
AuthType.USE_VERTEX_AI,
].join(', '),
},
),
);

View File

@@ -4,31 +4,28 @@
* SPDX-License-Identifier: Apache-2.0
*/
import { describe, it, expect } from 'vitest';
import { describe, it, expect, vi, beforeEach } from 'vitest';
import { approvalModeCommand } from './approvalModeCommand.js';
import {
type CommandContext,
CommandKind,
type OpenDialogActionReturn,
type MessageActionReturn,
} from './types.js';
import { createMockCommandContext } from '../../test-utils/mockCommandContext.js';
import type { LoadedSettings } from '../../config/settings.js';
describe('approvalModeCommand', () => {
let mockContext: CommandContext;
let mockSetApprovalMode: ReturnType<typeof vi.fn>;
beforeEach(() => {
mockSetApprovalMode = vi.fn();
mockContext = createMockCommandContext({
services: {
config: {
getApprovalMode: () => 'default',
setApprovalMode: () => {},
setApprovalMode: mockSetApprovalMode,
},
settings: {
merged: {},
setValue: () => {},
forScope: () => ({}),
} as unknown as LoadedSettings,
},
});
});
@@ -41,7 +38,7 @@ describe('approvalModeCommand', () => {
expect(approvalModeCommand.kind).toBe(CommandKind.BUILT_IN);
});
it('should open approval mode dialog when invoked', async () => {
it('should open approval mode dialog when invoked without arguments', async () => {
const result = (await approvalModeCommand.action?.(
mockContext,
'',
@@ -51,16 +48,123 @@ describe('approvalModeCommand', () => {
expect(result.dialog).toBe('approval-mode');
});
it('should open approval mode dialog with arguments (ignored)', async () => {
it('should open approval mode dialog when invoked with whitespace only', async () => {
const result = (await approvalModeCommand.action?.(
mockContext,
'some arguments',
' ',
)) as OpenDialogActionReturn;
expect(result.type).toBe('dialog');
expect(result.dialog).toBe('approval-mode');
});
describe('direct mode setting (session-only)', () => {
it('should set approval mode to "plan" when argument is "plan"', async () => {
const result = (await approvalModeCommand.action?.(
mockContext,
'plan',
)) as MessageActionReturn;
expect(result.type).toBe('message');
expect(result.messageType).toBe('info');
expect(result.content).toContain('plan');
expect(mockSetApprovalMode).toHaveBeenCalledWith('plan');
});
it('should set approval mode to "yolo" when argument is "yolo"', async () => {
const result = (await approvalModeCommand.action?.(
mockContext,
'yolo',
)) as MessageActionReturn;
expect(result.type).toBe('message');
expect(result.messageType).toBe('info');
expect(result.content).toContain('yolo');
expect(mockSetApprovalMode).toHaveBeenCalledWith('yolo');
});
it('should set approval mode to "auto-edit" when argument is "auto-edit"', async () => {
const result = (await approvalModeCommand.action?.(
mockContext,
'auto-edit',
)) as MessageActionReturn;
expect(result.type).toBe('message');
expect(result.messageType).toBe('info');
expect(result.content).toContain('auto-edit');
expect(mockSetApprovalMode).toHaveBeenCalledWith('auto-edit');
});
it('should set approval mode to "default" when argument is "default"', async () => {
const result = (await approvalModeCommand.action?.(
mockContext,
'default',
)) as MessageActionReturn;
expect(result.type).toBe('message');
expect(result.messageType).toBe('info');
expect(result.content).toContain('default');
expect(mockSetApprovalMode).toHaveBeenCalledWith('default');
});
it('should be case-insensitive for mode argument', async () => {
const result = (await approvalModeCommand.action?.(
mockContext,
'YOLO',
)) as MessageActionReturn;
expect(result.type).toBe('message');
expect(result.messageType).toBe('info');
expect(mockSetApprovalMode).toHaveBeenCalledWith('yolo');
});
it('should handle argument with leading/trailing whitespace', async () => {
const result = (await approvalModeCommand.action?.(
mockContext,
' plan ',
)) as MessageActionReturn;
expect(result.type).toBe('message');
expect(result.messageType).toBe('info');
expect(mockSetApprovalMode).toHaveBeenCalledWith('plan');
});
});
describe('invalid mode argument', () => {
it('should return error for invalid mode', async () => {
const result = (await approvalModeCommand.action?.(
mockContext,
'invalid-mode',
)) as MessageActionReturn;
expect(result.type).toBe('message');
expect(result.messageType).toBe('error');
expect(result.content).toContain('invalid-mode');
expect(result.content).toContain('plan');
expect(result.content).toContain('yolo');
expect(mockSetApprovalMode).not.toHaveBeenCalled();
});
});
describe('untrusted folder handling', () => {
it('should return error when setApprovalMode throws (e.g., untrusted folder)', async () => {
const errorMessage =
'Cannot enable privileged approval modes in an untrusted folder.';
mockSetApprovalMode.mockImplementation(() => {
throw new Error(errorMessage);
});
const result = (await approvalModeCommand.action?.(
mockContext,
'yolo',
)) as MessageActionReturn;
expect(result.type).toBe('message');
expect(result.messageType).toBe('error');
expect(result.content).toBe(errorMessage);
});
});
it('should not have subcommands', () => {
expect(approvalModeCommand.subCommands).toBeUndefined();
});

View File

@@ -8,9 +8,25 @@ import type {
SlashCommand,
CommandContext,
OpenDialogActionReturn,
MessageActionReturn,
} from './types.js';
import { CommandKind } from './types.js';
import { t } from '../../i18n/index.js';
import type { ApprovalMode } from '@qwen-code/qwen-code-core';
import { APPROVAL_MODES } from '@qwen-code/qwen-code-core';
/**
* Parses the argument string and returns the corresponding ApprovalMode if valid.
* Returns undefined if the argument is empty or not a valid mode.
*/
function parseApprovalModeArg(arg: string): ApprovalMode | undefined {
const trimmed = arg.trim().toLowerCase();
if (!trimmed) {
return undefined;
}
// Match against valid approval modes (case-insensitive)
return APPROVAL_MODES.find((mode) => mode.toLowerCase() === trimmed);
}
export const approvalModeCommand: SlashCommand = {
name: 'approval-mode',
@@ -19,10 +35,49 @@ export const approvalModeCommand: SlashCommand = {
},
kind: CommandKind.BUILT_IN,
action: async (
_context: CommandContext,
_args: string,
): Promise<OpenDialogActionReturn> => ({
type: 'dialog',
dialog: 'approval-mode',
}),
context: CommandContext,
args: string,
): Promise<OpenDialogActionReturn | MessageActionReturn> => {
const mode = parseApprovalModeArg(args);
// If no argument provided, open the dialog
if (!args.trim()) {
return {
type: 'dialog',
dialog: 'approval-mode',
};
}
// If invalid argument, return error message with valid options
if (!mode) {
return {
type: 'message',
messageType: 'error',
content: t('Invalid approval mode "{{arg}}". Valid modes: {{modes}}', {
arg: args.trim(),
modes: APPROVAL_MODES.join(', '),
}),
};
}
// Set the mode for current session only (not persisted)
const { config } = context.services;
if (config) {
try {
config.setApprovalMode(mode);
} catch (e) {
return {
type: 'message',
messageType: 'error',
content: (e as Error).message,
};
}
}
return {
type: 'message',
messageType: 'info',
content: t('Approval mode set to "{{mode}}"', { mode }),
};
},
};
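A small standalone sketch of what the new argument handling amounts to for /approval-mode, using the four modes exercised by the tests above; in the real command APPROVAL_MODES comes from @qwen-code/qwen-code-core and the chosen mode is applied via config.setApprovalMode for the current session only.

// Illustrative only; mirrors parseApprovalModeArg plus the dialog/error branches.
const APPROVAL_MODES = ['default', 'auto-edit', 'yolo', 'plan'] as const; // order assumed
type ApprovalMode = (typeof APPROVAL_MODES)[number];

type ApprovalModeInput =
  | { kind: 'open-dialog' }
  | { kind: 'set-mode'; mode: ApprovalMode }
  | { kind: 'error'; message: string };

function resolveApprovalModeInput(args: string): ApprovalModeInput {
  const trimmed = args.trim().toLowerCase();
  if (!trimmed) return { kind: 'open-dialog' }; // bare `/approval-mode`
  const mode = APPROVAL_MODES.find((m) => m === trimmed);
  if (!mode) {
    return {
      kind: 'error',
      message: `Invalid approval mode "${args.trim()}". Valid modes: ${APPROVAL_MODES.join(', ')}`,
    };
  }
  return { kind: 'set-mode', mode }; // session-only, not persisted
}

// resolveApprovalModeInput('')        -> { kind: 'open-dialog' }
// resolveApprovalModeInput(' YOLO ')  -> { kind: 'set-mode', mode: 'yolo' }
// resolveApprovalModeInput('nope')    -> { kind: 'error', ... }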

View File

@@ -19,7 +19,9 @@ export const compressCommand: SlashCommand = {
kind: CommandKind.BUILT_IN,
action: async (context) => {
const { ui } = context;
if (ui.pendingItem) {
const executionMode = context.executionMode ?? 'interactive';
if (executionMode === 'interactive' && ui.pendingItem) {
ui.addItem(
{
type: MessageType.ERROR,
@@ -40,13 +42,80 @@ export const compressCommand: SlashCommand = {
},
};
try {
ui.setPendingItem(pendingMessage);
const config = context.services.config;
const geminiClient = config?.getGeminiClient();
if (!config || !geminiClient) {
return {
type: 'message',
messageType: 'error',
content: t('Config not loaded.'),
};
}
const doCompress = async () => {
const promptId = `compress-${Date.now()}`;
const compressed = await context.services.config
?.getGeminiClient()
?.tryCompressChat(promptId, true);
if (compressed) {
return await geminiClient.tryCompressChat(promptId, true);
};
if (executionMode === 'acp') {
const messages = async function* () {
try {
yield {
messageType: 'info' as const,
content: 'Compressing context...',
};
const compressed = await doCompress();
if (!compressed) {
yield {
messageType: 'error' as const,
content: t('Failed to compress chat history.'),
};
return;
}
yield {
messageType: 'info' as const,
content: `Context compressed (${compressed.originalTokenCount} -> ${compressed.newTokenCount}).`,
};
} catch (e) {
yield {
messageType: 'error' as const,
content: t('Failed to compress chat history: {{error}}', {
error: e instanceof Error ? e.message : String(e),
}),
};
}
};
return { type: 'stream_messages', messages: messages() };
}
try {
if (executionMode === 'interactive') {
ui.setPendingItem(pendingMessage);
}
const compressed = await doCompress();
if (!compressed) {
if (executionMode === 'interactive') {
ui.addItem(
{
type: MessageType.ERROR,
text: t('Failed to compress chat history.'),
},
Date.now(),
);
return;
}
return {
type: 'message',
messageType: 'error',
content: t('Failed to compress chat history.'),
};
}
if (executionMode === 'interactive') {
ui.addItem(
{
type: MessageType.COMPRESSION,
@@ -59,27 +128,39 @@ export const compressCommand: SlashCommand = {
} as HistoryItemCompression,
Date.now(),
);
} else {
return;
}
return {
type: 'message',
messageType: 'info',
content: `Context compressed (${compressed.originalTokenCount} -> ${compressed.newTokenCount}).`,
};
} catch (e) {
if (executionMode === 'interactive') {
ui.addItem(
{
type: MessageType.ERROR,
text: t('Failed to compress chat history.'),
text: t('Failed to compress chat history: {{error}}', {
error: e instanceof Error ? e.message : String(e),
}),
},
Date.now(),
);
return;
}
} catch (e) {
ui.addItem(
{
type: MessageType.ERROR,
text: t('Failed to compress chat history: {{error}}', {
error: e instanceof Error ? e.message : String(e),
}),
},
Date.now(),
);
return {
type: 'message',
messageType: 'error',
content: t('Failed to compress chat history: {{error}}', {
error: e instanceof Error ? e.message : String(e),
}),
};
} finally {
ui.setPendingItem(null);
if (executionMode === 'interactive') {
ui.setPendingItem(null);
}
}
},
};
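In ACP mode /compress now returns a 'stream_messages' result whose payload is an async generator of info/error messages. Below is a minimal sketch of how a caller might drain it; the send callback is an assumption standing in for whatever forwards output to the ACP client.

type StreamMessage = { messageType: 'info' | 'error'; content: string };

async function drainStream(
  messages: AsyncGenerator<StreamMessage, void, unknown>,
  send: (m: StreamMessage) => void, // e.g. forward to the ACP connection
): Promise<void> {
  // Yields "Compressing context...", then either the compressed-token summary or an error.
  for await (const msg of messages) {
    send(msg);
  }
}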

View File

@@ -15,7 +15,6 @@ vi.mock('@qwen-code/qwen-code-core', async (importOriginal) => {
const original = await importOriginal<typeof core>();
return {
...original,
getOauthClient: vi.fn(original.getOauthClient),
getIdeInstaller: vi.fn(original.getIdeInstaller),
IdeClient: {
getInstance: vi.fn(),

View File

@@ -191,11 +191,23 @@ export const ideCommand = async (): Promise<SlashCommand> => {
kind: CommandKind.BUILT_IN,
action: async (context) => {
const installer = getIdeInstaller(currentIDE);
const isSandBox = !!process.env['SANDBOX'];
if (isSandBox) {
context.ui.addItem(
{
type: 'info',
text: `IDE integration needs to be installed on the host. If you have already installed it, you can directly connect the ide`,
},
Date.now(),
);
return;
}
if (!installer) {
const ideName = ideClient.getDetectedIdeDisplayName();
context.ui.addItem(
{
type: 'error',
text: `No installer is available for ${ideClient.getDetectedIdeDisplayName()}. Please install the '${QWEN_CODE_COMPANION_EXTENSION_NAME}' extension manually from the marketplace.`,
text: `Automatic installation is not supported for ${ideName}. Please install the '${QWEN_CODE_COMPANION_EXTENSION_NAME}' extension manually from the marketplace.`,
},
Date.now(),
);

View File

@@ -11,9 +11,14 @@ import type { SlashCommand, CommandContext } from './types.js';
import { createMockCommandContext } from '../../test-utils/mockCommandContext.js';
import { MessageType } from '../types.js';
import type { LoadedSettings } from '../../config/settings.js';
import { readFile } from 'node:fs/promises';
import os from 'node:os';
import path from 'node:path';
import {
getErrorMessage,
loadServerHierarchicalMemory,
QWEN_DIR,
setGeminiMdFilename,
type FileDiscoveryService,
type LoadServerHierarchicalMemoryResponse,
} from '@qwen-code/qwen-code-core';
@@ -31,7 +36,18 @@ vi.mock('@qwen-code/qwen-code-core', async (importOriginal) => {
};
});
vi.mock('node:fs/promises', () => {
const readFile = vi.fn();
return {
readFile,
default: {
readFile,
},
};
});
const mockLoadServerHierarchicalMemory = loadServerHierarchicalMemory as Mock;
const mockReadFile = readFile as unknown as Mock;
describe('memoryCommand', () => {
let mockContext: CommandContext;
@@ -52,6 +68,10 @@ describe('memoryCommand', () => {
let mockGetGeminiMdFileCount: Mock;
beforeEach(() => {
setGeminiMdFilename('QWEN.md');
mockReadFile.mockReset();
vi.restoreAllMocks();
showCommand = getSubCommand('show');
mockGetUserMemory = vi.fn();
@@ -102,6 +122,52 @@ describe('memoryCommand', () => {
expect.any(Number),
);
});
it('should show project memory from the configured context file', async () => {
const projectCommand = showCommand.subCommands?.find(
(cmd) => cmd.name === '--project',
);
if (!projectCommand?.action) throw new Error('Command has no action');
setGeminiMdFilename('AGENTS.md');
vi.spyOn(process, 'cwd').mockReturnValue('/test/project');
mockReadFile.mockResolvedValue('project memory');
await projectCommand.action(mockContext, '');
const expectedProjectPath = path.join('/test/project', 'AGENTS.md');
expect(mockReadFile).toHaveBeenCalledWith(expectedProjectPath, 'utf-8');
expect(mockContext.ui.addItem).toHaveBeenCalledWith(
{
type: MessageType.INFO,
text: expect.stringContaining(expectedProjectPath),
},
expect.any(Number),
);
});
it('should show global memory from the configured context file', async () => {
const globalCommand = showCommand.subCommands?.find(
(cmd) => cmd.name === '--global',
);
if (!globalCommand?.action) throw new Error('Command has no action');
setGeminiMdFilename('AGENTS.md');
vi.spyOn(os, 'homedir').mockReturnValue('/home/user');
mockReadFile.mockResolvedValue('global memory');
await globalCommand.action(mockContext, '');
const expectedGlobalPath = path.join('/home/user', QWEN_DIR, 'AGENTS.md');
expect(mockReadFile).toHaveBeenCalledWith(expectedGlobalPath, 'utf-8');
expect(mockContext.ui.addItem).toHaveBeenCalledWith(
{
type: MessageType.INFO,
text: expect.stringContaining('Global memory content'),
},
expect.any(Number),
);
});
});
describe('/memory add', () => {

View File

@@ -6,12 +6,13 @@
import {
getErrorMessage,
getCurrentGeminiMdFilename,
loadServerHierarchicalMemory,
QWEN_DIR,
} from '@qwen-code/qwen-code-core';
import path from 'node:path';
import os from 'os';
import fs from 'fs/promises';
import os from 'node:os';
import fs from 'node:fs/promises';
import { MessageType } from '../types.js';
import type { SlashCommand, SlashCommandActionReturn } from './types.js';
import { CommandKind } from './types.js';
@@ -56,7 +57,12 @@ export const memoryCommand: SlashCommand = {
kind: CommandKind.BUILT_IN,
action: async (context) => {
try {
const projectMemoryPath = path.join(process.cwd(), 'QWEN.md');
const workingDir =
context.services.config?.getWorkingDir?.() ?? process.cwd();
const projectMemoryPath = path.join(
workingDir,
getCurrentGeminiMdFilename(),
);
const memoryContent = await fs.readFile(
projectMemoryPath,
'utf-8',
@@ -104,7 +110,7 @@ export const memoryCommand: SlashCommand = {
const globalMemoryPath = path.join(
os.homedir(),
QWEN_DIR,
'QWEN.md',
getCurrentGeminiMdFilename(),
);
const globalMemoryContent = await fs.readFile(
globalMemoryPath,
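For reference, a sketch of the paths /memory show now reads, assuming getCurrentGeminiMdFilename() returns the configured context file name (e.g. QWEN.md or AGENTS.md) and QWEN_DIR is the ~/.qwen subdirectory; the wrapper functions themselves are illustrative.

import path from 'node:path';
import os from 'node:os';
import {
  getCurrentGeminiMdFilename,
  QWEN_DIR,
} from '@qwen-code/qwen-code-core';

// Illustrative helpers mirroring the project/global memory locations.
export function projectMemoryPath(workingDir: string = process.cwd()): string {
  // e.g. <workingDir>/AGENTS.md when the context file name is AGENTS.md
  return path.join(workingDir, getCurrentGeminiMdFilename());
}

export function globalMemoryPath(): string {
  // e.g. ~/.qwen/AGENTS.md
  return path.join(os.homedir(), QWEN_DIR, getCurrentGeminiMdFilename());
}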

View File

@@ -13,12 +13,6 @@ import {
type ContentGeneratorConfig,
type Config,
} from '@qwen-code/qwen-code-core';
import * as availableModelsModule from '../models/availableModels.js';
// Mock the availableModels module
vi.mock('../models/availableModels.js', () => ({
getAvailableModelsForAuthType: vi.fn(),
}));
// Helper function to create a mock config
function createMockConfig(
@@ -31,9 +25,6 @@ function createMockConfig(
describe('modelCommand', () => {
let mockContext: CommandContext;
const mockGetAvailableModelsForAuthType = vi.mocked(
availableModelsModule.getAvailableModelsForAuthType,
);
beforeEach(() => {
mockContext = createMockCommandContext();
@@ -87,10 +78,6 @@ describe('modelCommand', () => {
});
it('should return dialog action for QWEN_OAUTH auth type', async () => {
mockGetAvailableModelsForAuthType.mockReturnValue([
{ id: 'qwen3-coder-plus', label: 'qwen3-coder-plus' },
]);
const mockConfig = createMockConfig({
model: 'test-model',
authType: AuthType.QWEN_OAUTH,
@@ -105,11 +92,7 @@ describe('modelCommand', () => {
});
});
it('should return dialog action for USE_OPENAI auth type when model is available', async () => {
mockGetAvailableModelsForAuthType.mockReturnValue([
{ id: 'gpt-4', label: 'gpt-4' },
]);
it('should return dialog action for USE_OPENAI auth type', async () => {
const mockConfig = createMockConfig({
model: 'test-model',
authType: AuthType.USE_OPENAI,
@@ -124,28 +107,7 @@ describe('modelCommand', () => {
});
});
it('should return error for USE_OPENAI auth type when no model is available', async () => {
mockGetAvailableModelsForAuthType.mockReturnValue([]);
const mockConfig = createMockConfig({
model: 'test-model',
authType: AuthType.USE_OPENAI,
});
mockContext.services.config = mockConfig as Config;
const result = await modelCommand.action!(mockContext, '');
expect(result).toEqual({
type: 'message',
messageType: 'error',
content:
'No models available for the current authentication type (openai).',
});
});
it('should return error for unsupported auth types', async () => {
mockGetAvailableModelsForAuthType.mockReturnValue([]);
it('should return dialog action for unsupported auth types', async () => {
const mockConfig = createMockConfig({
model: 'test-model',
authType: 'UNSUPPORTED_AUTH_TYPE' as AuthType,
@@ -155,10 +117,8 @@ describe('modelCommand', () => {
const result = await modelCommand.action!(mockContext, '');
expect(result).toEqual({
type: 'message',
messageType: 'error',
content:
'No models available for the current authentication type (UNSUPPORTED_AUTH_TYPE).',
type: 'dialog',
dialog: 'model',
});
});

View File

@@ -11,7 +11,6 @@ import type {
MessageActionReturn,
} from './types.js';
import { CommandKind } from './types.js';
import { getAvailableModelsForAuthType } from '../models/availableModels.js';
import { t } from '../../i18n/index.js';
export const modelCommand: SlashCommand = {
@@ -30,7 +29,7 @@ export const modelCommand: SlashCommand = {
return {
type: 'message',
messageType: 'error',
content: 'Configuration not available.',
content: t('Configuration not available.'),
};
}
@@ -52,22 +51,6 @@ export const modelCommand: SlashCommand = {
};
}
const availableModels = getAvailableModelsForAuthType(authType);
if (availableModels.length === 0) {
return {
type: 'message',
messageType: 'error',
content: t(
'No models available for the current authentication type ({{authType}}).',
{
authType,
},
),
};
}
// Trigger model selection dialog
return {
type: 'dialog',
dialog: 'model',

View File

@@ -26,6 +26,8 @@ export const summaryCommand: SlashCommand = {
action: async (context): Promise<SlashCommandActionReturn> => {
const { config } = context.services;
const { ui } = context;
const executionMode = context.executionMode ?? 'interactive';
if (!config) {
return {
type: 'message',
@@ -43,8 +45,8 @@ export const summaryCommand: SlashCommand = {
};
}
// Check if already generating summary
if (ui.pendingItem) {
// Check if already generating summary (interactive UI only)
if (executionMode === 'interactive' && ui.pendingItem) {
ui.addItem(
{
type: 'error' as const,
@@ -63,29 +65,22 @@ export const summaryCommand: SlashCommand = {
};
}
try {
// Get the current chat history
const getChatHistory = () => {
const chat = geminiClient.getChat();
const history = chat.getHistory();
return chat.getHistory();
};
const validateChatHistory = (
history: ReturnType<typeof getChatHistory>,
) => {
if (history.length <= 2) {
return {
type: 'message',
messageType: 'info',
content: t('No conversation found to summarize.'),
};
throw new Error(t('No conversation found to summarize.'));
}
};
// Show loading state
const pendingMessage: HistoryItemSummary = {
type: 'summary',
summary: {
isPending: true,
stage: 'generating',
},
};
ui.setPendingItem(pendingMessage);
const generateSummaryMarkdown = async (
history: ReturnType<typeof getChatHistory>,
): Promise<string> => {
// Build the conversation context for summary generation
const conversationContext = history.map((message) => ({
role: message.role,
@@ -121,19 +116,21 @@ export const summaryCommand: SlashCommand = {
if (!markdownSummary) {
throw new Error(
'Failed to generate summary - no text content received from LLM response',
t(
'Failed to generate summary - no text content received from LLM response',
),
);
}
// Update loading message to show saving progress
ui.setPendingItem({
type: 'summary',
summary: {
isPending: true,
stage: 'saving',
},
});
return markdownSummary;
};
const saveSummaryToDisk = async (
markdownSummary: string,
): Promise<{
filePathForDisplay: string;
fullPath: string;
}> => {
// Ensure .qwen directory exists
const projectRoot = config.getProjectRoot();
const qwenDir = path.join(projectRoot, '.qwen');
@@ -155,45 +152,163 @@ export const summaryCommand: SlashCommand = {
await fsPromises.writeFile(summaryPath, summaryContent, 'utf8');
// Clear pending item and show success message
return {
filePathForDisplay: '.qwen/PROJECT_SUMMARY.md',
fullPath: summaryPath,
};
};
const emitInteractivePending = (stage: 'generating' | 'saving') => {
if (executionMode !== 'interactive') {
return;
}
const pendingMessage: HistoryItemSummary = {
type: 'summary',
summary: {
isPending: true,
stage,
},
};
ui.setPendingItem(pendingMessage);
};
const completeInteractive = (filePathForDisplay: string) => {
if (executionMode !== 'interactive') {
return;
}
ui.setPendingItem(null);
const completedSummaryItem: HistoryItemSummary = {
type: 'summary',
summary: {
isPending: false,
stage: 'completed',
filePath: '.qwen/PROJECT_SUMMARY.md',
filePath: filePathForDisplay,
},
};
ui.addItem(completedSummaryItem, Date.now());
};
return {
type: 'message',
messageType: 'info',
content: '', // Empty content since we show the message in UI component
};
} catch (error) {
// Clear pending item on error
const formatErrorMessage = (error: unknown): string =>
t('Failed to generate project context summary: {{error}}', {
error: error instanceof Error ? error.message : String(error),
});
const failInteractive = (error: unknown) => {
if (executionMode !== 'interactive') {
return;
}
ui.setPendingItem(null);
ui.addItem(
{
type: 'error' as const,
text: `${t(
'Failed to generate project context summary: {{error}}',
{
error: error instanceof Error ? error.message : String(error),
},
)}`,
text: `${formatErrorMessage(error)}`,
},
Date.now(),
);
};
const formatSuccessMessage = (filePathForDisplay: string): string =>
t('Saved project summary to {{filePathForDisplay}}.', {
filePathForDisplay,
});
const returnNoConversationMessage = (): SlashCommandActionReturn => {
const msg = t('No conversation found to summarize.');
if (executionMode === 'acp') {
const messages = async function* () {
yield {
messageType: 'info' as const,
content: msg,
};
};
return {
type: 'stream_messages',
messages: messages(),
};
}
return {
type: 'message',
messageType: 'info',
content: msg,
};
};
const executeSummaryGeneration = async (
history: ReturnType<typeof getChatHistory>,
): Promise<{
markdownSummary: string;
filePathForDisplay: string;
}> => {
emitInteractivePending('generating');
const markdownSummary = await generateSummaryMarkdown(history);
emitInteractivePending('saving');
const { filePathForDisplay } = await saveSummaryToDisk(markdownSummary);
completeInteractive(filePathForDisplay);
return { markdownSummary, filePathForDisplay };
};
// Validate chat history once at the beginning
const history = getChatHistory();
try {
validateChatHistory(history);
} catch (_error) {
return returnNoConversationMessage();
}
if (executionMode === 'acp') {
const messages = async function* () {
try {
yield {
messageType: 'info' as const,
content: t('Generating project summary...'),
};
const { filePathForDisplay } =
await executeSummaryGeneration(history);
yield {
messageType: 'info' as const,
content: formatSuccessMessage(filePathForDisplay),
};
} catch (error) {
failInteractive(error);
yield {
messageType: 'error' as const,
content: formatErrorMessage(error),
};
}
};
return {
type: 'stream_messages',
messages: messages(),
};
}
try {
const { filePathForDisplay } = await executeSummaryGeneration(history);
if (executionMode === 'non_interactive') {
return {
type: 'message',
messageType: 'info',
content: formatSuccessMessage(filePathForDisplay),
};
}
// Interactive mode: UI components already display progress and completion.
return {
type: 'message',
messageType: 'info',
content: '',
};
} catch (error) {
failInteractive(error);
return {
type: 'message',
messageType: 'error',
content: t('Failed to generate project context summary: {{error}}', {
error: error instanceof Error ? error.message : String(error),
}),
content: formatErrorMessage(error),
};
}
},

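A minimal sketch (helper names are illustrative, not the project's) of the executionMode branching the reworked /summary action follows: ACP streams progress through an async generator, non-interactive returns one message with the saved path, and interactive relies on the Ink pending item, so its returned content stays empty.

type ExecutionMode = 'interactive' | 'non_interactive' | 'acp';
type StreamMessage = { messageType: 'info' | 'error'; content: string };

async function summarize(
  mode: ExecutionMode,
  run: () => Promise<string>, // assumed: resolves to the saved summary's display path
) {
  if (mode === 'acp') {
    const messages = async function* (): AsyncGenerator<StreamMessage, void, unknown> {
      try {
        yield { messageType: 'info', content: 'Generating project summary...' };
        const filePath = await run();
        yield { messageType: 'info', content: `Saved project summary to ${filePath}.` };
      } catch (error) {
        yield {
          messageType: 'error',
          content: error instanceof Error ? error.message : String(error),
        };
      }
    };
    return { type: 'stream_messages' as const, messages: messages() };
  }
  const filePath = await run();
  // Interactive mode: the UI's pending item already shows progress/completion,
  // so the returned content stays empty; non_interactive reports the path.
  return {
    type: 'message' as const,
    messageType: 'info' as const,
    content: mode === 'non_interactive' ? `Saved project summary to ${filePath}.` : '',
  };
}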
View File

@@ -22,6 +22,14 @@ import type {
// Grouped dependencies for clarity and easier mocking
export interface CommandContext {
/**
* Execution mode for the current invocation.
*
* - interactive: React/Ink UI mode
* - non_interactive: non-interactive CLI mode (text/json)
* - acp: ACP/Zed integration mode
*/
executionMode?: 'interactive' | 'non_interactive' | 'acp';
// Invocation properties for when commands are called.
invocation?: {
/** The raw, untrimmed input string from the user. */
@@ -108,6 +116,19 @@ export interface MessageActionReturn {
content: string;
}
/**
* The return type for a command action that streams multiple messages.
* Used for long-running operations that need to send progress updates.
*/
export interface StreamMessagesActionReturn {
type: 'stream_messages';
messages: AsyncGenerator<
{ messageType: 'info' | 'error'; content: string },
void,
unknown
>;
}
/**
* The return type for a command action that needs to open a dialog.
*/
@@ -174,6 +195,7 @@ export interface ConfirmActionReturn {
export type SlashCommandActionReturn =
| ToolActionReturn
| MessageActionReturn
| StreamMessagesActionReturn
| QuitActionReturn
| OpenDialogActionReturn
| LoadHistoryActionReturn

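For illustration only, a sketch of how a non-interactive or ACP host might consume the widened result union; as the slash-command processor diff further below shows, the interactive processor deliberately throws on stream_messages rather than draining it.

type StreamMessage = { messageType: 'info' | 'error'; content: string };
type MessageActionReturn = { type: 'message'; messageType: 'info' | 'error'; content: string };
type StreamMessagesActionReturn = {
  type: 'stream_messages';
  messages: AsyncGenerator<StreamMessage, void, unknown>;
};

async function printResult(
  result: MessageActionReturn | StreamMessagesActionReturn,
): Promise<void> {
  switch (result.type) {
    case 'message':
      console.log(`[${result.messageType}] ${result.content}`);
      break;
    case 'stream_messages':
      // Each yielded item is a progress update from a long-running command.
      for await (const msg of result.messages) {
        console.log(`[${msg.messageType}] ${msg.content}`);
      }
      break;
  }
}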
View File

@@ -54,7 +54,7 @@ export function ApprovalModeDialog({
}: ApprovalModeDialogProps): React.JSX.Element {
// Start with User scope by default
const [selectedScope, setSelectedScope] = useState<SettingScope>(
SettingScope.User,
SettingScope.Workspace,
);
// Track the currently highlighted approval mode

View File

@@ -17,7 +17,6 @@ import { AuthDialog } from '../auth/AuthDialog.js';
import { OpenAIKeyPrompt } from './OpenAIKeyPrompt.js';
import { EditorSettingsDialog } from './EditorSettingsDialog.js';
import { WorkspaceMigrationDialog } from './WorkspaceMigrationDialog.js';
import { ProQuotaDialog } from './ProQuotaDialog.js';
import { PermissionsModifyTrustDialog } from './PermissionsModifyTrustDialog.js';
import { ModelDialog } from './ModelDialog.js';
import { ApprovalModeDialog } from './ApprovalModeDialog.js';
@@ -26,7 +25,6 @@ import { useUIState } from '../contexts/UIStateContext.js';
import { useUIActions } from '../contexts/UIActionsContext.js';
import { useConfig } from '../contexts/ConfigContext.js';
import { useSettings } from '../contexts/SettingsContext.js';
import { SettingScope } from '../../config/settings.js';
import { AuthState } from '../types.js';
import { AuthType } from '@qwen-code/qwen-code-core';
import process from 'node:process';
@@ -87,15 +85,6 @@ export const DialogManager = ({
/>
);
}
if (uiState.proQuotaRequest) {
return (
<ProQuotaDialog
failedModel={uiState.proQuotaRequest.failedModel}
fallbackModel={uiState.proQuotaRequest.fallbackModel}
onChoice={uiActions.handleProQuotaChoice}
/>
);
}
if (uiState.shouldShowIdePrompt) {
return (
<IdeIntegrationNudge
@@ -212,7 +201,7 @@ export const DialogManager = ({
return (
<OpenAIKeyPrompt
onSubmit={(apiKey, baseUrl, model) => {
uiActions.handleAuthSelect(AuthType.USE_OPENAI, SettingScope.User, {
uiActions.handleAuthSelect(AuthType.USE_OPENAI, {
apiKey,
baseUrl,
model,

View File

@@ -10,7 +10,11 @@ import { ModelDialog } from './ModelDialog.js';
import { useKeypress } from '../hooks/useKeypress.js';
import { DescriptiveRadioButtonSelect } from './shared/DescriptiveRadioButtonSelect.js';
import { ConfigContext } from '../contexts/ConfigContext.js';
import { SettingsContext } from '../contexts/SettingsContext.js';
import type { Config } from '@qwen-code/qwen-code-core';
import { AuthType } from '@qwen-code/qwen-code-core';
import type { LoadedSettings } from '../../config/settings.js';
import { SettingScope } from '../../config/settings.js';
import {
AVAILABLE_MODELS_QWEN,
MAINLINE_CODER,
@@ -36,18 +40,29 @@ const renderComponent = (
};
const combinedProps = { ...defaultProps, ...props };
const mockSettings = {
isTrusted: true,
user: { settings: {} },
workspace: { settings: {} },
setValue: vi.fn(),
} as unknown as LoadedSettings;
const mockConfig = contextValue
? ({
// --- Functions used by ModelDialog ---
getModel: vi.fn(() => MAINLINE_CODER),
setModel: vi.fn(),
setModel: vi.fn().mockResolvedValue(undefined),
switchModel: vi.fn().mockResolvedValue(undefined),
getAuthType: vi.fn(() => 'qwen-oauth'),
// --- Functions used by ClearcutLogger ---
getUsageStatisticsEnabled: vi.fn(() => true),
getSessionId: vi.fn(() => 'mock-session-id'),
getDebugMode: vi.fn(() => false),
getContentGeneratorConfig: vi.fn(() => ({ authType: 'mock' })),
getContentGeneratorConfig: vi.fn(() => ({
authType: AuthType.QWEN_OAUTH,
model: MAINLINE_CODER,
})),
getUseSmartEdit: vi.fn(() => false),
getUseModelRouter: vi.fn(() => false),
getProxy: vi.fn(() => undefined),
@@ -58,21 +73,27 @@ const renderComponent = (
: undefined;
const renderResult = render(
<ConfigContext.Provider value={mockConfig}>
<ModelDialog {...combinedProps} />
</ConfigContext.Provider>,
<SettingsContext.Provider value={mockSettings}>
<ConfigContext.Provider value={mockConfig}>
<ModelDialog {...combinedProps} />
</ConfigContext.Provider>
</SettingsContext.Provider>,
);
return {
...renderResult,
props: combinedProps,
mockConfig,
mockSettings,
};
};
describe('<ModelDialog />', () => {
beforeEach(() => {
vi.clearAllMocks();
// Ensure env-based fallback models don't leak into this suite from the developer environment.
delete process.env['OPENAI_MODEL'];
delete process.env['ANTHROPIC_MODEL'];
});
afterEach(() => {
@@ -91,8 +112,12 @@ describe('<ModelDialog />', () => {
const props = mockedSelect.mock.calls[0][0];
expect(props.items).toHaveLength(AVAILABLE_MODELS_QWEN.length);
expect(props.items[0].value).toBe(MAINLINE_CODER);
expect(props.items[1].value).toBe(MAINLINE_VLM);
expect(props.items[0].value).toBe(
`${AuthType.QWEN_OAUTH}::${MAINLINE_CODER}`,
);
expect(props.items[1].value).toBe(
`${AuthType.QWEN_OAUTH}::${MAINLINE_VLM}`,
);
expect(props.showNumbers).toBe(true);
});
@@ -139,16 +164,93 @@ describe('<ModelDialog />', () => {
expect(mockedSelect).toHaveBeenCalledTimes(1);
});
it('calls config.setModel and onClose when DescriptiveRadioButtonSelect.onSelect is triggered', () => {
const { props, mockConfig } = renderComponent({}, {}); // Pass empty object for contextValue
it('calls config.switchModel and onClose when DescriptiveRadioButtonSelect.onSelect is triggered', async () => {
const { props, mockConfig, mockSettings } = renderComponent({}, {}); // Pass empty object for contextValue
const childOnSelect = mockedSelect.mock.calls[0][0].onSelect;
expect(childOnSelect).toBeDefined();
childOnSelect(MAINLINE_CODER);
await childOnSelect(`${AuthType.QWEN_OAUTH}::${MAINLINE_CODER}`);
// Assert against the default mock provided by renderComponent
expect(mockConfig?.setModel).toHaveBeenCalledWith(MAINLINE_CODER);
expect(mockConfig?.switchModel).toHaveBeenCalledWith(
AuthType.QWEN_OAUTH,
MAINLINE_CODER,
undefined,
{
reason: 'user_manual',
context: 'Model switched via /model dialog',
},
);
expect(mockSettings.setValue).toHaveBeenCalledWith(
SettingScope.User,
'model.name',
MAINLINE_CODER,
);
expect(mockSettings.setValue).toHaveBeenCalledWith(
SettingScope.User,
'security.auth.selectedType',
AuthType.QWEN_OAUTH,
);
expect(props.onClose).toHaveBeenCalledTimes(1);
});
it('calls config.switchModel and persists authType+model when selecting a different authType', async () => {
const switchModel = vi.fn().mockResolvedValue(undefined);
const getAuthType = vi.fn(() => AuthType.USE_OPENAI);
const getAvailableModelsForAuthType = vi.fn((t: AuthType) => {
if (t === AuthType.USE_OPENAI) {
return [{ id: 'gpt-4', label: 'GPT-4', authType: t }];
}
if (t === AuthType.QWEN_OAUTH) {
return AVAILABLE_MODELS_QWEN.map((m) => ({
id: m.id,
label: m.label,
authType: AuthType.QWEN_OAUTH,
}));
}
return [];
});
const mockConfigWithSwitchAuthType = {
getAuthType,
getModel: vi.fn(() => 'gpt-4'),
getContentGeneratorConfig: vi.fn(() => ({
authType: AuthType.QWEN_OAUTH,
model: MAINLINE_CODER,
})),
// Add switchModel to the mock object (not the type)
switchModel,
getAvailableModelsForAuthType,
};
const { props, mockSettings } = renderComponent(
{},
// Cast to Config to bypass type checking, matching the runtime behavior
mockConfigWithSwitchAuthType as unknown as Partial<Config>,
);
const childOnSelect = mockedSelect.mock.calls[0][0].onSelect;
await childOnSelect(`${AuthType.QWEN_OAUTH}::${MAINLINE_CODER}`);
expect(switchModel).toHaveBeenCalledWith(
AuthType.QWEN_OAUTH,
MAINLINE_CODER,
{ requireCachedCredentials: true },
{
reason: 'user_manual',
context: 'AuthType+model switched via /model dialog',
},
);
expect(mockSettings.setValue).toHaveBeenCalledWith(
SettingScope.User,
'model.name',
MAINLINE_CODER,
);
expect(mockSettings.setValue).toHaveBeenCalledWith(
SettingScope.User,
'security.auth.selectedType',
AuthType.QWEN_OAUTH,
);
expect(props.onClose).toHaveBeenCalledTimes(1);
});
@@ -193,17 +295,25 @@ describe('<ModelDialog />', () => {
it('updates initialIndex when config context changes', () => {
const mockGetModel = vi.fn(() => MAINLINE_CODER);
const mockGetAuthType = vi.fn(() => 'qwen-oauth');
const mockSettings = {
isTrusted: true,
user: { settings: {} },
workspace: { settings: {} },
setValue: vi.fn(),
} as unknown as LoadedSettings;
const { rerender } = render(
<ConfigContext.Provider
value={
{
getModel: mockGetModel,
getAuthType: mockGetAuthType,
} as unknown as Config
}
>
<ModelDialog onClose={vi.fn()} />
</ConfigContext.Provider>,
<SettingsContext.Provider value={mockSettings}>
<ConfigContext.Provider
value={
{
getModel: mockGetModel,
getAuthType: mockGetAuthType,
} as unknown as Config
}
>
<ModelDialog onClose={vi.fn()} />
</ConfigContext.Provider>
</SettingsContext.Provider>,
);
expect(mockedSelect.mock.calls[0][0].initialIndex).toBe(0);
@@ -215,9 +325,11 @@ describe('<ModelDialog />', () => {
} as unknown as Config;
rerender(
<ConfigContext.Provider value={newMockConfig}>
<ModelDialog onClose={vi.fn()} />
</ConfigContext.Provider>,
<SettingsContext.Provider value={mockSettings}>
<ConfigContext.Provider value={newMockConfig}>
<ModelDialog onClose={vi.fn()} />
</ConfigContext.Provider>
</SettingsContext.Provider>,
);
// Should be called at least twice: initial render + re-render after context change

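A small self-contained sketch of the `${authType}::${modelId}` composite select value these assertions exercise; the decode mirrors the indexOf('::') handling in the dialog's handleSelect, falling back to the current auth type for legacy model-only values.

function encodeModelKey(authType: string, modelId: string): string {
  return `${authType}::${modelId}`;
}

function decodeModelKey(
  key: string,
  fallbackAuthType: string, // used when a legacy value carries only the model id
): { authType: string; modelId: string } {
  const sep = '::';
  const idx = key.indexOf(sep);
  if (idx < 0) {
    return { authType: fallbackAuthType, modelId: key };
  }
  return { authType: key.slice(0, idx), modelId: key.slice(idx + sep.length) };
}

// decodeModelKey('qwen-oauth::qwen3-coder-plus', 'qwen-oauth')
//   -> { authType: 'qwen-oauth', modelId: 'qwen3-coder-plus' }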
View File

@@ -5,52 +5,210 @@
*/
import type React from 'react';
import { useCallback, useContext, useMemo } from 'react';
import { useCallback, useContext, useMemo, useState } from 'react';
import { Box, Text } from 'ink';
import {
AuthType,
ModelSlashCommandEvent,
logModelSlashCommand,
type ContentGeneratorConfig,
type ContentGeneratorConfigSource,
type ContentGeneratorConfigSources,
} from '@qwen-code/qwen-code-core';
import { useKeypress } from '../hooks/useKeypress.js';
import { theme } from '../semantic-colors.js';
import { DescriptiveRadioButtonSelect } from './shared/DescriptiveRadioButtonSelect.js';
import { ConfigContext } from '../contexts/ConfigContext.js';
import { UIStateContext } from '../contexts/UIStateContext.js';
import { useSettings } from '../contexts/SettingsContext.js';
import {
getAvailableModelsForAuthType,
MAINLINE_CODER,
} from '../models/availableModels.js';
import { getPersistScopeForModelSelection } from '../../config/modelProvidersScope.js';
import { t } from '../../i18n/index.js';
interface ModelDialogProps {
onClose: () => void;
}
function formatSourceBadge(
source: ContentGeneratorConfigSource | undefined,
): string | undefined {
if (!source) return undefined;
switch (source.kind) {
case 'cli':
return source.detail ? `CLI ${source.detail}` : 'CLI';
case 'env':
return source.envKey ? `ENV ${source.envKey}` : 'ENV';
case 'settings':
return source.settingsPath
? `Settings ${source.settingsPath}`
: 'Settings';
case 'modelProviders': {
const suffix =
source.authType && source.modelId
? `${source.authType}:${source.modelId}`
: source.authType
? `${source.authType}`
: source.modelId
? `${source.modelId}`
: '';
return suffix ? `ModelProviders ${suffix}` : 'ModelProviders';
}
case 'default':
return source.detail ? `Default ${source.detail}` : 'Default';
case 'computed':
return source.detail ? `Computed ${source.detail}` : 'Computed';
case 'programmatic':
return source.detail ? `Programmatic ${source.detail}` : 'Programmatic';
case 'unknown':
default:
return undefined;
}
}
function readSourcesFromConfig(config: unknown): ContentGeneratorConfigSources {
if (!config) {
return {};
}
const maybe = config as {
getContentGeneratorConfigSources?: () => ContentGeneratorConfigSources;
};
return maybe.getContentGeneratorConfigSources?.() ?? {};
}
function maskApiKey(apiKey: string | undefined): string {
if (!apiKey) return '(not set)';
const trimmed = apiKey.trim();
if (trimmed.length === 0) return '(not set)';
if (trimmed.length <= 6) return '***';
const head = trimmed.slice(0, 3);
const tail = trimmed.slice(-4);
return `${head}${tail}`;
}
function persistModelSelection(
settings: ReturnType<typeof useSettings>,
modelId: string,
): void {
const scope = getPersistScopeForModelSelection(settings);
settings.setValue(scope, 'model.name', modelId);
}
function persistAuthTypeSelection(
settings: ReturnType<typeof useSettings>,
authType: AuthType,
): void {
const scope = getPersistScopeForModelSelection(settings);
settings.setValue(scope, 'security.auth.selectedType', authType);
}
function ConfigRow({
label,
value,
badge,
}: {
label: string;
value: React.ReactNode;
badge?: string;
}): React.JSX.Element {
return (
<Box flexDirection="column">
<Box>
<Box minWidth={12} flexShrink={0}>
<Text color={theme.text.secondary}>{label}:</Text>
</Box>
<Box flexGrow={1} flexDirection="row" flexWrap="wrap">
<Text>{value}</Text>
</Box>
</Box>
{badge ? (
<Box>
<Box minWidth={12} flexShrink={0}>
<Text> </Text>
</Box>
<Box flexGrow={1}>
<Text color={theme.text.secondary}>{badge}</Text>
</Box>
</Box>
) : null}
</Box>
);
}
export function ModelDialog({ onClose }: ModelDialogProps): React.JSX.Element {
const config = useContext(ConfigContext);
const uiState = useContext(UIStateContext);
const settings = useSettings();
// Get auth type from config, default to QWEN_OAUTH if not available
const authType = config?.getAuthType() ?? AuthType.QWEN_OAUTH;
// Local error state for displaying errors within the dialog
const [errorMessage, setErrorMessage] = useState<string | null>(null);
// Get available models based on auth type
const availableModels = useMemo(
() => getAvailableModelsForAuthType(authType),
[authType],
);
const authType = config?.getAuthType();
const effectiveConfig =
(config?.getContentGeneratorConfig?.() as
| ContentGeneratorConfig
| undefined) ?? undefined;
const sources = readSourcesFromConfig(config);
const availableModelEntries = useMemo(() => {
const allAuthTypes = Object.values(AuthType) as AuthType[];
const modelsByAuthType = allAuthTypes
.map((t) => ({
authType: t,
models: getAvailableModelsForAuthType(t, config ?? undefined),
}))
.filter((x) => x.models.length > 0);
// Fixed order: qwen-oauth first, then others in a stable order
const authTypeOrder: AuthType[] = [
AuthType.QWEN_OAUTH,
AuthType.USE_OPENAI,
AuthType.USE_ANTHROPIC,
AuthType.USE_GEMINI,
AuthType.USE_VERTEX_AI,
];
// Filter to only include authTypes that have models
const availableAuthTypes = new Set(modelsByAuthType.map((x) => x.authType));
const orderedAuthTypes = authTypeOrder.filter((t) =>
availableAuthTypes.has(t),
);
return orderedAuthTypes.flatMap((t) => {
const models =
modelsByAuthType.find((x) => x.authType === t)?.models ?? [];
return models.map((m) => ({ authType: t, model: m }));
});
}, [config]);
const MODEL_OPTIONS = useMemo(
() =>
availableModels.map((model) => ({
value: model.id,
title: model.label,
description: model.description || '',
key: model.id,
})),
[availableModels],
availableModelEntries.map(({ authType: t2, model }) => {
const value = `${t2}::${model.id}`;
const title = (
<Text>
<Text bold color={theme.text.accent}>
[{t2}]
</Text>
<Text>{` ${model.label}`}</Text>
</Text>
);
const description = model.description || '';
return {
value,
title,
description,
key: value,
};
}),
[availableModelEntries],
);
// Determine the Preferred Model (read once when the dialog opens).
const preferredModel = config?.getModel() || MAINLINE_CODER;
const preferredModelId = config?.getModel() || MAINLINE_CODER;
const preferredKey = authType ? `${authType}::${preferredModelId}` : '';
useKeypress(
(key) => {
@@ -61,25 +219,83 @@ export function ModelDialog({ onClose }: ModelDialogProps): React.JSX.Element {
{ isActive: true },
);
// Calculate the initial index based on the preferred model.
const initialIndex = useMemo(
() => MODEL_OPTIONS.findIndex((option) => option.value === preferredModel),
[MODEL_OPTIONS, preferredModel],
);
const initialIndex = useMemo(() => {
const index = MODEL_OPTIONS.findIndex(
(option) => option.value === preferredKey,
);
return index === -1 ? 0 : index;
}, [MODEL_OPTIONS, preferredKey]);
// Handle selection internally (Autonomous Dialog).
const handleSelect = useCallback(
(model: string) => {
async (selected: string) => {
// Clear any previous error
setErrorMessage(null);
const sep = '::';
const idx = selected.indexOf(sep);
const selectedAuthType = (
idx >= 0 ? selected.slice(0, idx) : authType
) as AuthType;
const modelId = idx >= 0 ? selected.slice(idx + sep.length) : selected;
if (config) {
config.setModel(model);
const event = new ModelSlashCommandEvent(model);
try {
await config.switchModel(
selectedAuthType,
modelId,
selectedAuthType !== authType &&
selectedAuthType === AuthType.QWEN_OAUTH
? { requireCachedCredentials: true }
: undefined,
{
reason: 'user_manual',
context:
selectedAuthType === authType
? 'Model switched via /model dialog'
: 'AuthType+model switched via /model dialog',
},
);
} catch (e) {
const baseErrorMessage = e instanceof Error ? e.message : String(e);
setErrorMessage(
`Failed to switch model to '${modelId}'.\n\n${baseErrorMessage}`,
);
return;
}
const event = new ModelSlashCommandEvent(modelId);
logModelSlashCommand(config, event);
const after = config.getContentGeneratorConfig?.() as
| ContentGeneratorConfig
| undefined;
const effectiveAuthType =
after?.authType ?? selectedAuthType ?? authType;
const effectiveModelId = after?.model ?? modelId;
persistModelSelection(settings, effectiveModelId);
persistAuthTypeSelection(settings, effectiveAuthType);
const baseUrl = after?.baseUrl ?? '(default)';
const maskedKey = maskApiKey(after?.apiKey);
uiState?.historyManager.addItem(
{
type: 'info',
text:
`authType: ${effectiveAuthType}\n` +
`Using model: ${effectiveModelId}\n` +
`Base URL: ${baseUrl}\n` +
`API key: ${maskedKey}`,
},
Date.now(),
);
}
onClose();
},
[config, onClose],
[authType, config, onClose, settings, uiState, setErrorMessage],
);
const hasModels = MODEL_OPTIONS.length > 0;
return (
<Box
borderStyle="round"
@@ -89,14 +305,73 @@ export function ModelDialog({ onClose }: ModelDialogProps): React.JSX.Element {
width="100%"
>
<Text bold>{t('Select Model')}</Text>
<Box marginTop={1}>
<DescriptiveRadioButtonSelect
items={MODEL_OPTIONS}
onSelect={handleSelect}
initialIndex={initialIndex}
showNumbers={true}
/>
<Box marginTop={1} flexDirection="column">
<Text color={theme.text.secondary}>
{t('Current (effective) configuration')}
</Text>
<Box flexDirection="column" marginTop={1}>
<ConfigRow label="AuthType" value={authType} />
<ConfigRow
label="Model"
value={effectiveConfig?.model ?? config?.getModel?.() ?? ''}
badge={formatSourceBadge(sources['model'])}
/>
{authType !== AuthType.QWEN_OAUTH && (
<>
<ConfigRow
label="Base URL"
value={effectiveConfig?.baseUrl ?? ''}
badge={formatSourceBadge(sources['baseUrl'])}
/>
<ConfigRow
label="API Key"
value={effectiveConfig?.apiKey ? t('(set)') : t('(not set)')}
badge={formatSourceBadge(sources['apiKey'])}
/>
</>
)}
</Box>
</Box>
{!hasModels ? (
<Box marginTop={1} flexDirection="column">
<Text color={theme.status.warning}>
{t(
'No models available for the current authentication type ({{authType}}).',
{
authType: authType ? String(authType) : t('(none)'),
},
)}
</Text>
<Box marginTop={1}>
<Text color={theme.text.secondary}>
{t(
'Please configure models in settings.modelProviders or use environment variables.',
)}
</Text>
</Box>
</Box>
) : (
<Box marginTop={1}>
<DescriptiveRadioButtonSelect
items={MODEL_OPTIONS}
onSelect={handleSelect}
initialIndex={initialIndex}
showNumbers={true}
/>
</Box>
)}
{errorMessage && (
<Box marginTop={1} flexDirection="column" paddingX={1}>
<Text color={theme.status.error} wrap="wrap">
{errorMessage}
</Text>
</Box>
)}
<Box marginTop={1} flexDirection="column">
<Text color={theme.text.secondary}>{t('(Press Esc to close)')}</Text>
</Box>

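A standalone restatement of the masking rule shown in maskApiKey above, with made-up sample keys so the expected output is easy to check: short keys collapse to '***', longer keys keep the first 3 and last 4 characters.

function maskApiKey(apiKey: string | undefined): string {
  if (!apiKey) return '(not set)';
  const trimmed = apiKey.trim();
  if (trimmed.length === 0) return '(not set)';
  if (trimmed.length <= 6) return '***';
  return `${trimmed.slice(0, 3)}${trimmed.slice(-4)}`;
}

console.log(maskApiKey(undefined));            // (not set)
console.log(maskApiKey('abc123'));             // ***
console.log(maskApiKey('sk-test-1234567890')); // sk-7890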
View File

@@ -1,91 +0,0 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { render } from 'ink-testing-library';
import { describe, it, expect, vi, beforeEach, type Mock } from 'vitest';
import { ProQuotaDialog } from './ProQuotaDialog.js';
import { RadioButtonSelect } from './shared/RadioButtonSelect.js';
// Mock the child component to make it easier to test the parent
vi.mock('./shared/RadioButtonSelect.js', () => ({
RadioButtonSelect: vi.fn(),
}));
describe('ProQuotaDialog', () => {
beforeEach(() => {
vi.clearAllMocks();
});
it('should render with correct title and options', () => {
const { lastFrame } = render(
<ProQuotaDialog
failedModel="gemini-2.5-pro"
fallbackModel="gemini-2.5-flash"
onChoice={() => {}}
/>,
);
const output = lastFrame();
expect(output).toContain('Pro quota limit reached for gemini-2.5-pro.');
// Check that RadioButtonSelect was called with the correct items
expect(RadioButtonSelect).toHaveBeenCalledWith(
expect.objectContaining({
items: [
{
label: 'Change auth (executes the /auth command)',
value: 'auth',
key: 'auth',
},
{
label: `Continue with gemini-2.5-flash`,
value: 'continue',
key: 'continue',
},
],
}),
undefined,
);
});
it('should call onChoice with "auth" when "Change auth" is selected', () => {
const mockOnChoice = vi.fn();
render(
<ProQuotaDialog
failedModel="gemini-2.5-pro"
fallbackModel="gemini-2.5-flash"
onChoice={mockOnChoice}
/>,
);
// Get the onSelect function passed to RadioButtonSelect
const onSelect = (RadioButtonSelect as Mock).mock.calls[0][0].onSelect;
// Simulate the selection
onSelect('auth');
expect(mockOnChoice).toHaveBeenCalledWith('auth');
});
it('should call onChoice with "continue" when "Continue with flash" is selected', () => {
const mockOnChoice = vi.fn();
render(
<ProQuotaDialog
failedModel="gemini-2.5-pro"
fallbackModel="gemini-2.5-flash"
onChoice={mockOnChoice}
/>,
);
// Get the onSelect function passed to RadioButtonSelect
const onSelect = (RadioButtonSelect as Mock).mock.calls[0][0].onSelect;
// Simulate the selection
onSelect('continue');
expect(mockOnChoice).toHaveBeenCalledWith('continue');
});
});

View File

@@ -1,55 +0,0 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import type React from 'react';
import { Box, Text } from 'ink';
import { RadioButtonSelect } from './shared/RadioButtonSelect.js';
import { theme } from '../semantic-colors.js';
import { t } from '../../i18n/index.js';
interface ProQuotaDialogProps {
failedModel: string;
fallbackModel: string;
onChoice: (choice: 'auth' | 'continue') => void;
}
export function ProQuotaDialog({
failedModel,
fallbackModel,
onChoice,
}: ProQuotaDialogProps): React.JSX.Element {
const items = [
{
label: t('Change auth (executes the /auth command)'),
value: 'auth' as const,
key: 'auth',
},
{
label: t('Continue with {{model}}', { model: fallbackModel }),
value: 'continue' as const,
key: 'continue',
},
];
const handleSelect = (choice: 'auth' | 'continue') => {
onChoice(choice);
};
return (
<Box borderStyle="round" flexDirection="column" paddingX={1}>
<Text bold color={theme.status.warning}>
{t('Pro quota limit reached for {{model}}.', { model: failedModel })}
</Text>
<Box marginTop={1}>
<RadioButtonSelect
items={items}
initialIndex={1}
onSelect={handleSelect}
/>
</Box>
</Box>
);
}

View File

@@ -87,7 +87,13 @@ export async function showResumeSessionPicker(
let selectedId: string | undefined;
const { unmount, waitUntilExit } = render(
<KeypressProvider kittyProtocolEnabled={false}>
<KeypressProvider
kittyProtocolEnabled={false}
pasteWorkaround={
process.platform === 'win32' ||
parseInt(process.versions.node.split('.')[0], 10) < 20
}
>
<StandalonePickerScreen
sessionService={sessionService}
onSelect={(id) => {

View File

@@ -11,7 +11,7 @@ import { BaseSelectionList } from './BaseSelectionList.js';
import type { SelectionListItem } from '../../hooks/useSelectionList.js';
export interface DescriptiveRadioSelectItem<T> extends SelectionListItem<T> {
title: string;
title: React.ReactNode;
description: string;
}

View File

@@ -30,7 +30,6 @@ export interface UIActions {
) => void;
handleAuthSelect: (
authType: AuthType | undefined,
scope: SettingScope,
credentials?: OpenAICredentials,
) => Promise<void>;
setAuthState: (state: AuthState) => void;
@@ -55,7 +54,6 @@ export interface UIActions {
handleClearScreen: () => void;
onWorkspaceMigrationDialogOpen: () => void;
onWorkspaceMigrationDialogClose: () => void;
handleProQuotaChoice: (choice: 'auth' | 'continue') => void;
// Vision switch dialog
handleVisionSwitchSelect: (outcome: VisionSwitchOutcome) => void;
// Welcome back dialog

View File

@@ -22,21 +22,13 @@ import type {
AuthType,
IdeContext,
ApprovalMode,
UserTierId,
IdeInfo,
FallbackIntent,
} from '@qwen-code/qwen-code-core';
import type { DOMElement } from 'ink';
import type { SessionStatsState } from '../contexts/SessionContext.js';
import type { ExtensionUpdateState } from '../state/extensions.js';
import type { UpdateObject } from '../utils/updateCheck.js';
export interface ProQuotaDialogRequest {
failedModel: string;
fallbackModel: string;
resolve: (intent: FallbackIntent) => void;
}
import { type UseHistoryManagerReturn } from '../hooks/useHistoryManager.js';
import { type RestartReason } from '../hooks/useIdeTrustListener.js';
@@ -99,8 +91,6 @@ export interface UIState {
// eslint-disable-next-line @typescript-eslint/no-explicit-any
workspaceExtensions: any[]; // Extension[]
// Quota-related state
userTier: UserTierId | undefined;
proQuotaRequest: ProQuotaDialogRequest | null;
currentModel: string;
contextFileNames: string[];
errorCount: number;

View File

@@ -520,6 +520,13 @@ export const useSlashCommandProcessor = (
true,
);
}
case 'stream_messages': {
// stream_messages is only used in ACP/Zed integration mode
// and should not be returned in interactive UI mode
throw new Error(
'stream_messages result type is not supported in interactive mode',
);
}
default: {
const unhandled: never = result;
throw new Error(

View File

@@ -25,7 +25,6 @@ export interface DialogCloseOptions {
isAuthDialogOpen: boolean;
handleAuthSelect: (
authType: AuthType | undefined,
scope: SettingScope,
credentials?: OpenAICredentials,
) => Promise<void>;
pendingAuthType: AuthType | undefined;

View File

@@ -1323,7 +1323,7 @@ describe('useGeminiStream', () => {
it('should call parseAndFormatApiError with the correct authType on stream initialization failure', async () => {
// 1. Setup
const mockError = new Error('Rate limit exceeded');
const mockAuthType = AuthType.LOGIN_WITH_GOOGLE;
const mockAuthType = AuthType.USE_VERTEX_AI;
mockParseAndFormatApiError.mockClear();
mockSendMessageStream.mockReturnValue(
(async function* () {
@@ -1374,9 +1374,6 @@ describe('useGeminiStream', () => {
expect(mockParseAndFormatApiError).toHaveBeenCalledWith(
'Rate limit exceeded',
mockAuthType,
undefined,
'gemini-2.5-pro',
'gemini-2.5-flash',
);
});
});
@@ -2493,9 +2490,6 @@ describe('useGeminiStream', () => {
expect(mockParseAndFormatApiError).toHaveBeenCalledWith(
{ message: 'Test error' },
expect.any(String),
undefined,
'gemini-2.5-pro',
'gemini-2.5-flash',
);
});
});

View File

@@ -26,7 +26,6 @@ import {
GitService,
UnauthorizedError,
UserPromptEvent,
DEFAULT_GEMINI_FLASH_MODEL,
logConversationFinishedEvent,
ConversationFinishedEvent,
ApprovalMode,
@@ -527,10 +526,15 @@ export const useGeminiStream = (
return currentThoughtBuffer;
}
const newThoughtBuffer = currentThoughtBuffer + thoughtText;
let newThoughtBuffer = currentThoughtBuffer + thoughtText;
const pendingType = pendingHistoryItemRef.current?.type;
const isPendingThought =
pendingType === 'gemini_thought' ||
pendingType === 'gemini_thought_content';
// If we're not already showing a thought, start a new one
if (pendingHistoryItemRef.current?.type !== 'gemini_thought') {
if (!isPendingThought) {
// If there's a pending non-thought item, finalize it first
if (pendingHistoryItemRef.current) {
addItem(pendingHistoryItemRef.current, userMessageTimestamp);
@@ -538,11 +542,37 @@ export const useGeminiStream = (
setPendingHistoryItem({ type: 'gemini_thought', text: '' });
}
// Update the existing thought message with accumulated content
setPendingHistoryItem({
type: 'gemini_thought',
text: newThoughtBuffer,
});
// Split large thought messages for better rendering performance (same rationale
// as regular content streaming). This helps avoid terminal flicker caused by
// constantly re-rendering an ever-growing "pending" block.
const splitPoint = findLastSafeSplitPoint(newThoughtBuffer);
const nextPendingType: 'gemini_thought' | 'gemini_thought_content' =
isPendingThought && pendingType === 'gemini_thought_content'
? 'gemini_thought_content'
: 'gemini_thought';
if (splitPoint === newThoughtBuffer.length) {
// Update the existing thought message with accumulated content
setPendingHistoryItem({
type: nextPendingType,
text: newThoughtBuffer,
});
} else {
const beforeText = newThoughtBuffer.substring(0, splitPoint);
const afterText = newThoughtBuffer.substring(splitPoint);
addItem(
{
type: nextPendingType,
text: beforeText,
},
userMessageTimestamp,
);
setPendingHistoryItem({
type: 'gemini_thought_content',
text: afterText,
});
newThoughtBuffer = afterText;
}
// Also update the thought state for the loading indicator
mergeThought(eventValue);
@@ -600,9 +630,6 @@ export const useGeminiStream = (
text: parseAndFormatApiError(
eventValue.error,
config.getContentGeneratorConfig()?.authType,
undefined,
config.getModel(),
DEFAULT_GEMINI_FLASH_MODEL,
),
},
userMessageTimestamp,
@@ -654,6 +681,9 @@ export const useGeminiStream = (
'Response stopped due to image safety violations.',
[FinishReason.UNEXPECTED_TOOL_CALL]:
'Response stopped due to unexpected tool call.',
[FinishReason.IMAGE_PROHIBITED_CONTENT]:
'Response stopped due to image prohibited content.',
[FinishReason.NO_IMAGE]: 'Response stopped due to no image.',
};
const message = finishReasonMessages[finishReason];
@@ -770,11 +800,17 @@ export const useGeminiStream = (
for await (const event of stream) {
switch (event.type) {
case ServerGeminiEventType.Thought:
thoughtBuffer = handleThoughtEvent(
event.value,
thoughtBuffer,
userMessageTimestamp,
);
// If the thought has a subject, it's a discrete status update rather than
// a streamed textual thought, so we update the thought state directly.
if (event.value.subject) {
setThought(event.value);
} else {
thoughtBuffer = handleThoughtEvent(
event.value,
thoughtBuffer,
userMessageTimestamp,
);
}
break;
case ServerGeminiEventType.Content:
geminiMessageBuffer = handleContentEvent(
@@ -845,6 +881,7 @@ export const useGeminiStream = (
handleMaxSessionTurnsEvent,
handleSessionTokenLimitExceededEvent,
handleCitationEvent,
setThought,
],
);
@@ -875,7 +912,7 @@ export const useGeminiStream = (
// Reset quota error flag when starting a new query (not a continuation)
if (!options?.isContinuation) {
setModelSwitchedFromQuotaError(false);
config.setQuotaErrorOccurred(false);
// No quota-error / fallback routing mechanism currently; keep state minimal.
}
abortControllerRef.current = new AbortController();
@@ -987,9 +1024,6 @@ export const useGeminiStream = (
text: parseAndFormatApiError(
getErrorMessage(error) || 'Unknown error',
config.getContentGeneratorConfig()?.authType,
undefined,
config.getModel(),
DEFAULT_GEMINI_FLASH_MODEL,
),
},
userMessageTimestamp,

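A minimal sketch of the split-and-carry pattern the thought handler now shares with content streaming; the parameters stand in for the hook's real collaborators (findLastSafeSplitPoint, addItem, setPendingHistoryItem) and are injected here only to keep the sketch self-contained.

function splitPendingBuffer(
  buffer: string,
  findLastSafeSplitPoint: (text: string) => number,
  commit: (head: string) => void, // finalize the head as a history item
  setPending: (tail: string) => void, // keep only the tail as the pending item
): string {
  const splitPoint = findLastSafeSplitPoint(buffer);
  if (splitPoint === buffer.length) {
    // Nothing safe to split off yet; keep streaming into the pending item.
    setPending(buffer);
    return buffer;
  }
  const head = buffer.substring(0, splitPoint);
  const tail = buffer.substring(splitPoint);
  commit(head);
  setPending(tail);
  return tail; // the caller carries the tail forward as the new buffer
}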
View File

@@ -1,21 +1,58 @@
/**
* @license
* Copyright 2025 Qwen
* SPDX-License-Identifier: Apache-2.0
*/
import { useCallback } from 'react';
import { useStdin } from 'ink';
import type { EditorType } from '@qwen-code/qwen-code-core';
import {
editorCommands,
commandExists as coreCommandExists,
} from '@qwen-code/qwen-code-core';
import { spawnSync } from 'child_process';
import { useSettings } from '../contexts/SettingsContext.js';
/**
* Cache for command existence checks to avoid repeated execSync calls.
*/
const commandExistsCache = new Map<string, boolean>();
/**
* Check if a command exists in the system with caching.
* Results are cached to improve performance in test environments.
*/
function commandExists(cmd: string): boolean {
if (commandExistsCache.has(cmd)) {
return commandExistsCache.get(cmd)!;
}
const exists = coreCommandExists(cmd);
commandExistsCache.set(cmd, exists);
return exists;
}
/**
* Get the actual executable command for an editor type.
*/
function getExecutableCommand(editorType: EditorType): string {
const commandConfig = editorCommands[editorType];
const commands =
process.platform === 'win32' ? commandConfig.win32 : commandConfig.default;
const availableCommand = commands.find((cmd) => commandExists(cmd));
if (!availableCommand) {
throw new Error(
`No available editor command found for ${editorType}. ` +
`Tried: ${commands.join(', ')}. ` +
`Please install one of these editors or set a different preferredEditor in settings.`,
);
}
return availableCommand;
}
/**
* Determines the editor command to use based on user preferences and platform.
*/
function getEditorCommand(preferredEditor?: EditorType): string {
if (preferredEditor) {
return preferredEditor;
return getExecutableCommand(preferredEditor);
}
// Platform-specific defaults with UI preference for macOS
@@ -63,8 +100,14 @@ export function useLaunchEditor() {
try {
setRawMode?.(false);
// On Windows, .cmd and .bat files need shell: true
const needsShell =
process.platform === 'win32' &&
(editorCommand.endsWith('.cmd') || editorCommand.endsWith('.bat'));
const { status, error } = spawnSync(editorCommand, editorArgs, {
stdio: 'inherit',
shell: needsShell,
});
if (error) throw error;

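A minimal sketch of the Windows spawn rule noted in this diff: .cmd/.bat launchers (for example VS Code's code.cmd) only run through a shell, so shell is enabled just for them; anything beyond the shown error check is an assumption, since the rest of the hook is not in this hunk.

import { spawnSync } from 'node:child_process';

function launchEditorSketch(editorCommand: string, editorArgs: string[]): number | null {
  const needsShell =
    process.platform === 'win32' &&
    (editorCommand.endsWith('.cmd') || editorCommand.endsWith('.bat'));
  const { status, error } = spawnSync(editorCommand, editorArgs, {
    stdio: 'inherit', // hand the terminal to the editor
    shell: needsShell,
  });
  if (error) throw error;
  return status; // exit-status handling in the real hook is not shown in this hunk
}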
View File

@@ -1,391 +0,0 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import {
vi,
describe,
it,
expect,
beforeEach,
afterEach,
type Mock,
} from 'vitest';
import { act, renderHook } from '@testing-library/react';
import {
type Config,
type FallbackModelHandler,
UserTierId,
AuthType,
isGenericQuotaExceededError,
isProQuotaExceededError,
makeFakeConfig,
} from '@qwen-code/qwen-code-core';
import { useQuotaAndFallback } from './useQuotaAndFallback.js';
import type { UseHistoryManagerReturn } from './useHistoryManager.js';
import { AuthState, MessageType } from '../types.js';
// Mock the error checking functions from the core package to control test scenarios
vi.mock('@qwen-code/qwen-code-core', async (importOriginal) => {
const original =
await importOriginal<typeof import('@qwen-code/qwen-code-core')>();
return {
...original,
isGenericQuotaExceededError: vi.fn(),
isProQuotaExceededError: vi.fn(),
};
});
// Use a type alias for SpyInstance as it's not directly exported
type SpyInstance = ReturnType<typeof vi.spyOn>;
describe('useQuotaAndFallback', () => {
let mockConfig: Config;
let mockHistoryManager: UseHistoryManagerReturn;
let mockSetAuthState: Mock;
let mockSetModelSwitchedFromQuotaError: Mock;
let setFallbackHandlerSpy: SpyInstance;
const mockedIsGenericQuotaExceededError = isGenericQuotaExceededError as Mock;
const mockedIsProQuotaExceededError = isProQuotaExceededError as Mock;
beforeEach(() => {
mockConfig = makeFakeConfig();
// Spy on the method that requires the private field and mock its return.
// This is cleaner than modifying the config class for tests.
vi.spyOn(mockConfig, 'getContentGeneratorConfig').mockReturnValue({
model: 'test-model',
authType: AuthType.LOGIN_WITH_GOOGLE,
});
mockHistoryManager = {
addItem: vi.fn(),
history: [],
updateItem: vi.fn(),
clearItems: vi.fn(),
loadHistory: vi.fn(),
};
mockSetAuthState = vi.fn();
mockSetModelSwitchedFromQuotaError = vi.fn();
setFallbackHandlerSpy = vi.spyOn(mockConfig, 'setFallbackModelHandler');
vi.spyOn(mockConfig, 'setQuotaErrorOccurred');
mockedIsGenericQuotaExceededError.mockReturnValue(false);
mockedIsProQuotaExceededError.mockReturnValue(false);
});
afterEach(() => {
vi.clearAllMocks();
});
it('should register a fallback handler on initialization', () => {
renderHook(() =>
useQuotaAndFallback({
config: mockConfig,
historyManager: mockHistoryManager,
userTier: UserTierId.FREE,
setAuthState: mockSetAuthState,
setModelSwitchedFromQuotaError: mockSetModelSwitchedFromQuotaError,
}),
);
expect(setFallbackHandlerSpy).toHaveBeenCalledTimes(1);
expect(setFallbackHandlerSpy.mock.calls[0][0]).toBeInstanceOf(Function);
});
describe('Fallback Handler Logic', () => {
// Helper function to render the hook and extract the registered handler
const getRegisteredHandler = (
userTier: UserTierId = UserTierId.FREE,
): FallbackModelHandler => {
renderHook(
(props) =>
useQuotaAndFallback({
config: mockConfig,
historyManager: mockHistoryManager,
userTier: props.userTier,
setAuthState: mockSetAuthState,
setModelSwitchedFromQuotaError: mockSetModelSwitchedFromQuotaError,
}),
{ initialProps: { userTier } },
);
return setFallbackHandlerSpy.mock.calls[0][0] as FallbackModelHandler;
};
it('should return null and take no action if already in fallback mode', async () => {
vi.spyOn(mockConfig, 'isInFallbackMode').mockReturnValue(true);
const handler = getRegisteredHandler();
const result = await handler('gemini-pro', 'gemini-flash', new Error());
expect(result).toBeNull();
expect(mockHistoryManager.addItem).not.toHaveBeenCalled();
});
it('should return null and take no action if authType is not LOGIN_WITH_GOOGLE', async () => {
// Override the default mock from beforeEach for this specific test
vi.spyOn(mockConfig, 'getContentGeneratorConfig').mockReturnValue({
model: 'test-model',
authType: AuthType.USE_GEMINI,
});
const handler = getRegisteredHandler();
const result = await handler('gemini-pro', 'gemini-flash', new Error());
expect(result).toBeNull();
expect(mockHistoryManager.addItem).not.toHaveBeenCalled();
});
describe('Automatic Fallback Scenarios', () => {
const testCases = [
{
errorType: 'generic',
tier: UserTierId.FREE,
expectedMessageSnippets: [
'Automatically switching from model-A to model-B',
'upgrade to a Gemini Code Assist Standard or Enterprise plan',
],
},
{
errorType: 'generic',
tier: UserTierId.STANDARD, // Paid tier
expectedMessageSnippets: [
'Automatically switching from model-A to model-B',
'switch to using a paid API key from AI Studio',
],
},
{
errorType: 'other',
tier: UserTierId.FREE,
expectedMessageSnippets: [
'Automatically switching from model-A to model-B for faster responses',
'upgrade to a Gemini Code Assist Standard or Enterprise plan',
],
},
{
errorType: 'other',
tier: UserTierId.LEGACY, // Paid tier
expectedMessageSnippets: [
'Automatically switching from model-A to model-B for faster responses',
'switch to using a paid API key from AI Studio',
],
},
];
for (const { errorType, tier, expectedMessageSnippets } of testCases) {
it(`should handle ${errorType} error for ${tier} tier correctly`, async () => {
mockedIsGenericQuotaExceededError.mockReturnValue(
errorType === 'generic',
);
const handler = getRegisteredHandler(tier);
const result = await handler(
'model-A',
'model-B',
new Error('quota exceeded'),
);
// Automatic fallbacks should return 'stop'
expect(result).toBe('stop');
expect(mockHistoryManager.addItem).toHaveBeenCalledWith(
expect.objectContaining({ type: MessageType.INFO }),
expect.any(Number),
);
const message = (mockHistoryManager.addItem as Mock).mock.calls[0][0]
.text;
for (const snippet of expectedMessageSnippets) {
expect(message).toContain(snippet);
}
expect(mockSetModelSwitchedFromQuotaError).toHaveBeenCalledWith(true);
expect(mockConfig.setQuotaErrorOccurred).toHaveBeenCalledWith(true);
});
}
});
describe('Interactive Fallback (Pro Quota Error)', () => {
beforeEach(() => {
mockedIsProQuotaExceededError.mockReturnValue(true);
});
it('should set an interactive request and wait for user choice', async () => {
const { result } = renderHook(() =>
useQuotaAndFallback({
config: mockConfig,
historyManager: mockHistoryManager,
userTier: UserTierId.FREE,
setAuthState: mockSetAuthState,
setModelSwitchedFromQuotaError: mockSetModelSwitchedFromQuotaError,
}),
);
const handler = setFallbackHandlerSpy.mock
.calls[0][0] as FallbackModelHandler;
// Call the handler but do not await it, to check the intermediate state
const promise = handler(
'gemini-pro',
'gemini-flash',
new Error('pro quota'),
);
await act(async () => {});
// The hook should now have a pending request for the UI to handle
expect(result.current.proQuotaRequest).not.toBeNull();
expect(result.current.proQuotaRequest?.failedModel).toBe('gemini-pro');
// Simulate the user choosing to continue with the fallback model
act(() => {
result.current.handleProQuotaChoice('continue');
});
// The original promise from the handler should now resolve
const intent = await promise;
expect(intent).toBe('retry');
// The pending request should be cleared from the state
expect(result.current.proQuotaRequest).toBeNull();
});
it('should handle race conditions by stopping subsequent requests', async () => {
const { result } = renderHook(() =>
useQuotaAndFallback({
config: mockConfig,
historyManager: mockHistoryManager,
userTier: UserTierId.FREE,
setAuthState: mockSetAuthState,
setModelSwitchedFromQuotaError: mockSetModelSwitchedFromQuotaError,
}),
);
const handler = setFallbackHandlerSpy.mock
.calls[0][0] as FallbackModelHandler;
const promise1 = handler(
'gemini-pro',
'gemini-flash',
new Error('pro quota 1'),
);
await act(async () => {});
const firstRequest = result.current.proQuotaRequest;
expect(firstRequest).not.toBeNull();
const result2 = await handler(
'gemini-pro',
'gemini-flash',
new Error('pro quota 2'),
);
// The lock should have stopped the second request
expect(result2).toBe('stop');
expect(result.current.proQuotaRequest).toBe(firstRequest);
act(() => {
result.current.handleProQuotaChoice('continue');
});
const intent1 = await promise1;
expect(intent1).toBe('retry');
expect(result.current.proQuotaRequest).toBeNull();
});
});
});
describe('handleProQuotaChoice', () => {
beforeEach(() => {
mockedIsProQuotaExceededError.mockReturnValue(true);
});
it('should do nothing if there is no pending pro quota request', () => {
const { result } = renderHook(() =>
useQuotaAndFallback({
config: mockConfig,
historyManager: mockHistoryManager,
userTier: UserTierId.FREE,
setAuthState: mockSetAuthState,
setModelSwitchedFromQuotaError: mockSetModelSwitchedFromQuotaError,
}),
);
act(() => {
result.current.handleProQuotaChoice('auth');
});
expect(mockSetAuthState).not.toHaveBeenCalled();
expect(mockHistoryManager.addItem).not.toHaveBeenCalled();
});
it('should resolve intent to "auth" and trigger auth state update', async () => {
const { result } = renderHook(() =>
useQuotaAndFallback({
config: mockConfig,
historyManager: mockHistoryManager,
userTier: UserTierId.FREE,
setAuthState: mockSetAuthState,
setModelSwitchedFromQuotaError: mockSetModelSwitchedFromQuotaError,
}),
);
const handler = setFallbackHandlerSpy.mock
.calls[0][0] as FallbackModelHandler;
const promise = handler(
'gemini-pro',
'gemini-flash',
new Error('pro quota'),
);
await act(async () => {}); // Allow state to update
act(() => {
result.current.handleProQuotaChoice('auth');
});
const intent = await promise;
expect(intent).toBe('auth');
expect(mockSetAuthState).toHaveBeenCalledWith(AuthState.Updating);
expect(result.current.proQuotaRequest).toBeNull();
});
it('should resolve intent to "retry" and add info message on continue', async () => {
const { result } = renderHook(() =>
useQuotaAndFallback({
config: mockConfig,
historyManager: mockHistoryManager,
userTier: UserTierId.FREE,
setAuthState: mockSetAuthState,
setModelSwitchedFromQuotaError: mockSetModelSwitchedFromQuotaError,
}),
);
const handler = setFallbackHandlerSpy.mock
.calls[0][0] as FallbackModelHandler;
// The first `addItem` call is for the initial quota error message
const promise = handler(
'gemini-pro',
'gemini-flash',
new Error('pro quota'),
);
await act(async () => {}); // Allow state to update
act(() => {
result.current.handleProQuotaChoice('continue');
});
const intent = await promise;
expect(intent).toBe('retry');
expect(result.current.proQuotaRequest).toBeNull();
// Check for the second "Switched to fallback model" message
expect(mockHistoryManager.addItem).toHaveBeenCalledTimes(2);
const lastCall = (mockHistoryManager.addItem as Mock).mock.calls[1][0];
expect(lastCall.type).toBe(MessageType.INFO);
expect(lastCall.text).toContain('Switched to fallback model.');
});
});
});

View File

@@ -1,175 +0,0 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import {
AuthType,
type Config,
type FallbackModelHandler,
type FallbackIntent,
isGenericQuotaExceededError,
isProQuotaExceededError,
UserTierId,
} from '@qwen-code/qwen-code-core';
import { useCallback, useEffect, useRef, useState } from 'react';
import { type UseHistoryManagerReturn } from './useHistoryManager.js';
import { AuthState, MessageType } from '../types.js';
import { type ProQuotaDialogRequest } from '../contexts/UIStateContext.js';
interface UseQuotaAndFallbackArgs {
config: Config;
historyManager: UseHistoryManagerReturn;
userTier: UserTierId | undefined;
setAuthState: (state: AuthState) => void;
setModelSwitchedFromQuotaError: (value: boolean) => void;
}
export function useQuotaAndFallback({
config,
historyManager,
userTier,
setAuthState,
setModelSwitchedFromQuotaError,
}: UseQuotaAndFallbackArgs) {
const [proQuotaRequest, setProQuotaRequest] =
useState<ProQuotaDialogRequest | null>(null);
const isDialogPending = useRef(false);
// Set up Flash fallback handler
useEffect(() => {
const fallbackHandler: FallbackModelHandler = async (
failedModel,
fallbackModel,
error,
): Promise<FallbackIntent | null> => {
if (config.isInFallbackMode()) {
return null;
}
// Fallbacks are currently only handled for OAuth users.
const contentGeneratorConfig = config.getContentGeneratorConfig();
if (
!contentGeneratorConfig ||
contentGeneratorConfig.authType !== AuthType.LOGIN_WITH_GOOGLE
) {
return null;
}
// Use actual user tier if available; otherwise, default to FREE tier behavior (safe default)
const isPaidTier =
userTier === UserTierId.LEGACY || userTier === UserTierId.STANDARD;
let message: string;
if (error && isProQuotaExceededError(error)) {
// Pro Quota specific messages (Interactive)
if (isPaidTier) {
message = `⚡ You have reached your daily ${failedModel} quota limit.
⚡ You can choose to authenticate with a paid API key or continue with the fallback model.
⚡ To continue accessing the ${failedModel} model today, consider using /auth to switch to using a paid API key from AI Studio at https://aistudio.google.com/apikey`;
} else {
message = `⚡ You have reached your daily ${failedModel} quota limit.
⚡ You can choose to authenticate with a paid API key or continue with the fallback model.
⚡ To increase your limits, upgrade to a Gemini Code Assist Standard or Enterprise plan with higher limits at https://goo.gle/set-up-gemini-code-assist
⚡ Or you can utilize a Gemini API Key. See: https://goo.gle/gemini-cli-docs-auth#gemini-api-key
⚡ You can switch authentication methods by typing /auth`;
}
} else if (error && isGenericQuotaExceededError(error)) {
// Generic Quota (Automatic fallback)
const actionMessage = `⚡ You have reached your daily quota limit.\n⚡ Automatically switching from ${failedModel} to ${fallbackModel} for the remainder of this session.`;
if (isPaidTier) {
message = `${actionMessage}
⚡ To continue accessing the ${failedModel} model today, consider using /auth to switch to using a paid API key from AI Studio at https://aistudio.google.com/apikey`;
} else {
message = `${actionMessage}
⚡ To increase your limits, upgrade to a Gemini Code Assist Standard or Enterprise plan with higher limits at https://goo.gle/set-up-gemini-code-assist
⚡ Or you can utilize a Gemini API Key. See: https://goo.gle/gemini-cli-docs-auth#gemini-api-key
⚡ You can switch authentication methods by typing /auth`;
}
} else {
// Consecutive 429s or other errors (Automatic fallback)
const actionMessage = `⚡ Automatically switching from ${failedModel} to ${fallbackModel} for faster responses for the remainder of this session.`;
if (isPaidTier) {
message = `${actionMessage}
⚡ Possible reasons for this are that you have received multiple consecutive capacity errors or you have reached your daily ${failedModel} quota limit
⚡ To continue accessing the ${failedModel} model today, consider using /auth to switch to using a paid API key from AI Studio at https://aistudio.google.com/apikey`;
} else {
message = `${actionMessage}
⚡ Possible reasons for this are that you have received multiple consecutive capacity errors or you have reached your daily ${failedModel} quota limit
⚡ To increase your limits, upgrade to a Gemini Code Assist Standard or Enterprise plan with higher limits at https://goo.gle/set-up-gemini-code-assist
⚡ Or you can utilize a Gemini API Key. See: https://goo.gle/gemini-cli-docs-auth#gemini-api-key
⚡ You can switch authentication methods by typing /auth`;
}
}
// Add message to UI history
historyManager.addItem(
{
type: MessageType.INFO,
text: message,
},
Date.now(),
);
setModelSwitchedFromQuotaError(true);
config.setQuotaErrorOccurred(true);
// Interactive Fallback for Pro quota
if (error && isProQuotaExceededError(error)) {
if (isDialogPending.current) {
return 'stop'; // A dialog is already active, so just stop this request.
}
isDialogPending.current = true;
const intent: FallbackIntent = await new Promise<FallbackIntent>(
(resolve) => {
setProQuotaRequest({
failedModel,
fallbackModel,
resolve,
});
},
);
return intent;
}
return 'stop';
};
config.setFallbackModelHandler(fallbackHandler);
}, [config, historyManager, userTier, setModelSwitchedFromQuotaError]);
const handleProQuotaChoice = useCallback(
(choice: 'auth' | 'continue') => {
if (!proQuotaRequest) return;
const intent: FallbackIntent = choice === 'auth' ? 'auth' : 'retry';
proQuotaRequest.resolve(intent);
setProQuotaRequest(null);
isDialogPending.current = false; // Reset the flag here
if (choice === 'auth') {
setAuthState(AuthState.Updating);
} else {
historyManager.addItem(
{
type: MessageType.INFO,
text: 'Switched to fallback model. Tip: Press Ctrl+P (or Up Arrow) to recall your previous prompt and submit it again if you wish.',
},
Date.now(),
);
}
},
[proQuotaRequest, setAuthState, historyManager],
);
return {
proQuotaRequest,
handleProQuotaChoice,
};
}
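For context, a minimal consumption sketch (not part of this diff); the wrapper name and import path are illustrative assumptions, and only the hook's documented return shape is relied on:

// Hypothetical consumer sketch; only { proQuotaRequest, handleProQuotaChoice } is assumed.
import { useQuotaAndFallback } from './useQuotaAndFallback.js';

function useProQuotaDialogState(
  args: Parameters<typeof useQuotaAndFallback>[0],
) {
  const { proQuotaRequest, handleProQuotaChoice } = useQuotaAndFallback(args);
  // A non-null proQuotaRequest means the fallback handler is awaiting a user decision:
  // 'auth' re-opens the auth flow, 'continue' resolves the pending promise with 'retry'.
  return {
    isDialogOpen: proQuotaRequest !== null,
    failedModel: proQuotaRequest?.failedModel,
    fallbackModel: proQuotaRequest?.fallbackModel,
    onAuth: () => handleProQuotaChoice('auth'),
    onContinue: () => handleProQuotaChoice('continue'),
  };
}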

View File

@@ -411,7 +411,7 @@ describe('useQwenAuth', () => {
expect(geminiResult.current.qwenAuthState.authStatus).toBe('idle');
const { result: oauthResult } = renderHook(() =>
useQwenAuth(AuthType.LOGIN_WITH_GOOGLE, true),
useQwenAuth(AuthType.USE_OPENAI, true),
);
expect(oauthResult.current.qwenAuthState.authStatus).toBe('idle');
});

View File

@@ -62,7 +62,7 @@ const mockConfig = {
getAllowedTools: vi.fn(() => []),
getContentGeneratorConfig: () => ({
model: 'test-model',
authType: 'oauth-personal',
authType: 'gemini',
}),
getUseSmartEdit: () => false,
getUseModelRouter: () => false,

View File

@@ -0,0 +1,205 @@
/**
* @license
* Copyright 2025 Qwen Team
* SPDX-License-Identifier: Apache-2.0
*/
import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
import {
getAvailableModelsForAuthType,
getFilteredQwenModels,
getOpenAIAvailableModelFromEnv,
isVisionModel,
getDefaultVisionModel,
AVAILABLE_MODELS_QWEN,
MAINLINE_VLM,
MAINLINE_CODER,
} from './availableModels.js';
import { AuthType, type Config } from '@qwen-code/qwen-code-core';
describe('availableModels', () => {
describe('AVAILABLE_MODELS_QWEN', () => {
it('should include coder model', () => {
const coderModel = AVAILABLE_MODELS_QWEN.find(
(m) => m.id === MAINLINE_CODER,
);
expect(coderModel).toBeDefined();
expect(coderModel?.isVision).toBeFalsy();
});
it('should include vision model', () => {
const visionModel = AVAILABLE_MODELS_QWEN.find(
(m) => m.id === MAINLINE_VLM,
);
expect(visionModel).toBeDefined();
expect(visionModel?.isVision).toBe(true);
});
});
describe('getFilteredQwenModels', () => {
it('should return all models when vision preview is enabled', () => {
const models = getFilteredQwenModels(true);
expect(models.length).toBe(AVAILABLE_MODELS_QWEN.length);
});
it('should filter out vision models when preview is disabled', () => {
const models = getFilteredQwenModels(false);
expect(models.every((m) => !m.isVision)).toBe(true);
});
});
describe('getOpenAIAvailableModelFromEnv', () => {
const originalEnv = process.env;
beforeEach(() => {
process.env = { ...originalEnv };
});
afterEach(() => {
process.env = originalEnv;
});
it('should return null when OPENAI_MODEL is not set', () => {
delete process.env['OPENAI_MODEL'];
expect(getOpenAIAvailableModelFromEnv()).toBeNull();
});
it('should return model from OPENAI_MODEL env var', () => {
process.env['OPENAI_MODEL'] = 'gpt-4-turbo';
const model = getOpenAIAvailableModelFromEnv();
expect(model?.id).toBe('gpt-4-turbo');
expect(model?.label).toBe('gpt-4-turbo');
});
it('should trim whitespace from env var', () => {
process.env['OPENAI_MODEL'] = ' gpt-4 ';
const model = getOpenAIAvailableModelFromEnv();
expect(model?.id).toBe('gpt-4');
});
});
describe('getAvailableModelsForAuthType', () => {
const originalEnv = process.env;
beforeEach(() => {
process.env = { ...originalEnv };
});
afterEach(() => {
process.env = originalEnv;
});
it('should return hard-coded qwen models for qwen-oauth', () => {
const models = getAvailableModelsForAuthType(AuthType.QWEN_OAUTH);
expect(models).toEqual(AVAILABLE_MODELS_QWEN);
});
it('should return hard-coded qwen models even when config is provided', () => {
const mockConfig = {
getAvailableModels: vi
.fn()
.mockReturnValue([
{ id: 'custom', label: 'Custom', authType: AuthType.QWEN_OAUTH },
]),
} as unknown as Config;
const models = getAvailableModelsForAuthType(
AuthType.QWEN_OAUTH,
mockConfig,
);
expect(models).toEqual(AVAILABLE_MODELS_QWEN);
});
it('should use config.getAvailableModels for openai authType when available', () => {
const mockModels = [
{
id: 'gpt-4',
label: 'GPT-4',
description: 'Test',
authType: AuthType.USE_OPENAI,
isVision: false,
},
];
const getAvailableModelsForAuthTypeMock = vi.fn().mockReturnValue(mockModels);
const mockConfigWithMethod = {
// Prefer the newer API when available.
getAvailableModelsForAuthType: getAvailableModelsForAuthTypeMock,
};
const models = getAvailableModelsForAuthType(
AuthType.USE_OPENAI,
mockConfigWithMethod as unknown as Config,
);
expect(getAvailableModelsForAuthTypeMock).toHaveBeenCalled();
expect(models[0].id).toBe('gpt-4');
});
it('should not fall back to env var for openai when config returns empty', () => {
process.env['OPENAI_MODEL'] = 'fallback-model';
const mockConfig = {
getAvailableModelsForAuthType: vi.fn().mockReturnValue([]),
} as unknown as Config;
const models = getAvailableModelsForAuthType(
AuthType.USE_OPENAI,
mockConfig,
);
expect(models).toEqual([]);
});
it('should not fall back to env var for openai when config throws', () => {
process.env['OPENAI_MODEL'] = 'fallback-model';
const mockConfig = {
getAvailableModelsForAuthType: vi.fn().mockImplementation(() => {
throw new Error('Registry not initialized');
}),
} as unknown as Config;
const models = getAvailableModelsForAuthType(
AuthType.USE_OPENAI,
mockConfig,
);
expect(models).toEqual([]);
});
it('should return env model for openai without config', () => {
process.env['OPENAI_MODEL'] = 'gpt-4-turbo';
const models = getAvailableModelsForAuthType(AuthType.USE_OPENAI);
expect(models[0].id).toBe('gpt-4-turbo');
});
it('should return empty array for openai without config or env', () => {
delete process.env['OPENAI_MODEL'];
const models = getAvailableModelsForAuthType(AuthType.USE_OPENAI);
expect(models).toEqual([]);
});
it('should return empty array for other auth types', () => {
const models = getAvailableModelsForAuthType(AuthType.USE_GEMINI);
expect(models).toEqual([]);
});
});
describe('isVisionModel', () => {
it('should return true for vision model', () => {
expect(isVisionModel(MAINLINE_VLM)).toBe(true);
});
it('should return false for non-vision model', () => {
expect(isVisionModel(MAINLINE_CODER)).toBe(false);
});
it('should return false for unknown model', () => {
expect(isVisionModel('unknown-model')).toBe(false);
});
});
describe('getDefaultVisionModel', () => {
it('should return the vision model ID', () => {
expect(getDefaultVisionModel()).toBe(MAINLINE_VLM);
});
});
});

View File

@@ -4,7 +4,12 @@
* SPDX-License-Identifier: Apache-2.0
*/
import { AuthType, DEFAULT_QWEN_MODEL } from '@qwen-code/qwen-code-core';
import {
AuthType,
DEFAULT_QWEN_MODEL,
type Config,
type AvailableModel as CoreAvailableModel,
} from '@qwen-code/qwen-code-core';
import { t } from '../../i18n/index.js';
export type AvailableModel = {
@@ -57,27 +62,91 @@ export function getFilteredQwenModels(
*/
export function getOpenAIAvailableModelFromEnv(): AvailableModel | null {
const id = process.env['OPENAI_MODEL']?.trim();
return id ? { id, label: id } : null;
return id
? {
id,
label: id,
get description() {
return t('Configured via OPENAI_MODEL environment variable');
},
}
: null;
}
export function getAnthropicAvailableModelFromEnv(): AvailableModel | null {
const id = process.env['ANTHROPIC_MODEL']?.trim();
return id
? {
id,
label: id,
get description() {
return t('Configured via ANTHROPIC_MODEL environment variable');
},
}
: null;
}
/**
* Convert core AvailableModel to CLI AvailableModel format
*/
function convertCoreModelToCliModel(
coreModel: CoreAvailableModel,
): AvailableModel {
return {
id: coreModel.id,
label: coreModel.label,
description: coreModel.description,
isVision: coreModel.isVision ?? coreModel.capabilities?.vision ?? false,
};
}
/**
* Get available models for the given authType.
*
* If a Config object is provided, uses config.getAvailableModelsForAuthType().
* For qwen-oauth, always returns the hard-coded models.
* Falls back to environment variables only when no config is provided.
*/
export function getAvailableModelsForAuthType(
authType: AuthType,
config?: Config,
): AvailableModel[] {
// For qwen-oauth, always use hard-coded models; this aligns with the API gateway.
if (authType === AuthType.QWEN_OAUTH) {
return AVAILABLE_MODELS_QWEN;
}
// Use config's model registry when available
if (config) {
try {
const models = config.getAvailableModelsForAuthType(authType);
if (models.length > 0) {
return models.map(convertCoreModelToCliModel);
}
} catch {
// If config throws (e.g., not initialized), return empty array
}
// When a Config object is provided, we intentionally do NOT fall back to env-based
// "raw" models. These may reflect the currently effective config but should not be
// presented as selectable options in /model.
return [];
}
// Fall back to environment variables for specific auth types (no config provided)
switch (authType) {
case AuthType.QWEN_OAUTH:
return AVAILABLE_MODELS_QWEN;
case AuthType.USE_OPENAI: {
const openAIModel = getOpenAIAvailableModelFromEnv();
return openAIModel ? [openAIModel] : [];
}
case AuthType.USE_ANTHROPIC: {
const anthropicModel = getAnthropicAvailableModelFromEnv();
return anthropicModel ? [anthropicModel] : [];
}
default:
// For other auth types, return empty array for now
// This can be expanded later according to the design doc
return [];
}
}
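For context, a minimal usage sketch (not part of this file or diff; the import path and model id are assumed) showing the resolution behavior described in the doc comment above:

import { AuthType } from '@qwen-code/qwen-code-core';
import { getAvailableModelsForAuthType } from './availableModels.js';

// qwen-oauth always yields the hard-coded list, with or without a Config.
const qwenModels = getAvailableModelsForAuthType(AuthType.QWEN_OAUTH);

// Without a Config, OpenAI auth falls back to the OPENAI_MODEL env var (or []).
process.env['OPENAI_MODEL'] = 'my-openai-model'; // hypothetical value
const envModels = getAvailableModelsForAuthType(AuthType.USE_OPENAI);

// With a Config, only the registry's answer is used; if it is empty or throws,
// the result is [] and env vars are intentionally not consulted.
console.log(qwenModels.length, envModels[0]?.id);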
/**
* Hard code the default vision model as a string literal,
* until our coding model supports multimodal.

View File

@@ -20,6 +20,11 @@ const makeConfig = (tools: Record<string, AnyDeclarativeTool>) =>
getToolRegistry: () => ({
getTool: (name: string) => tools[name],
}),
getContentGenerator: () => ({
// Default to showing full thinking content during resume unless explicitly
// summarized; tests don't care about summarized thinking behavior.
useSummarizedThinking: () => false,
}),
}) as unknown as Config;
describe('resumeHistoryUtils', () => {

View File

@@ -204,7 +204,11 @@ function convertToHistoryItems(
const parts = record.message?.parts as Part[] | undefined;
// Extract thought content
const thoughtText = extractThoughtTextFromParts(parts);
const thoughtText = !config
.getContentGenerator()
.useSummarizedThinking()
? extractThoughtTextFromParts(parts)
: '';
// Extract text content (non-function-call, non-thought)
const text = extractTextFromParts(parts);

View File

@@ -6,7 +6,11 @@
import { vi, type Mock, type MockInstance } from 'vitest';
import type { Config } from '@qwen-code/qwen-code-core';
import { OutputFormat, FatalInputError } from '@qwen-code/qwen-code-core';
import {
OutputFormat,
FatalInputError,
ToolErrorType,
} from '@qwen-code/qwen-code-core';
import {
getErrorMessage,
handleError,
@@ -65,6 +69,7 @@ vi.mock('@qwen-code/qwen-code-core', async (importOriginal) => {
describe('errors', () => {
let mockConfig: Config;
let processExitSpy: MockInstance;
let processStderrWriteSpy: MockInstance;
let consoleErrorSpy: MockInstance;
beforeEach(() => {
@@ -74,6 +79,11 @@ describe('errors', () => {
// Mock console.error
consoleErrorSpy = vi.spyOn(console, 'error').mockImplementation(() => {});
// Mock process.stderr.write
processStderrWriteSpy = vi
.spyOn(process.stderr, 'write')
.mockImplementation(() => true);
// Mock process.exit to throw instead of actually exiting
processExitSpy = vi.spyOn(process, 'exit').mockImplementation((code) => {
throw new Error(`process.exit called with code: ${code}`);
@@ -84,11 +94,13 @@ describe('errors', () => {
getOutputFormat: vi.fn().mockReturnValue(OutputFormat.TEXT),
getContentGeneratorConfig: vi.fn().mockReturnValue({ authType: 'test' }),
getDebugMode: vi.fn().mockReturnValue(true),
isInteractive: vi.fn().mockReturnValue(false),
} as unknown as Config;
});
afterEach(() => {
consoleErrorSpy.mockRestore();
processStderrWriteSpy.mockRestore();
processExitSpy.mockRestore();
});
@@ -432,6 +444,87 @@ describe('errors', () => {
expect(processExitSpy).not.toHaveBeenCalled();
});
});
describe('permission denied warnings', () => {
it('should show warning when EXECUTION_DENIED in non-interactive text mode', () => {
(mockConfig.getDebugMode as Mock).mockReturnValue(false);
(mockConfig.isInteractive as Mock).mockReturnValue(false);
(
mockConfig.getOutputFormat as ReturnType<typeof vi.fn>
).mockReturnValue(OutputFormat.TEXT);
handleToolError(
toolName,
toolError,
mockConfig,
ToolErrorType.EXECUTION_DENIED,
);
expect(processStderrWriteSpy).toHaveBeenCalledWith(
expect.stringContaining(
'Warning: Tool "test-tool" requires user approval',
),
);
expect(processStderrWriteSpy).toHaveBeenCalledWith(
expect.stringContaining('use the -y flag (YOLO mode)'),
);
expect(processExitSpy).not.toHaveBeenCalled();
});
it('should not show warning when EXECUTION_DENIED in interactive mode', () => {
(mockConfig.getDebugMode as Mock).mockReturnValue(false);
(mockConfig.isInteractive as Mock).mockReturnValue(true);
(
mockConfig.getOutputFormat as ReturnType<typeof vi.fn>
).mockReturnValue(OutputFormat.TEXT);
handleToolError(
toolName,
toolError,
mockConfig,
ToolErrorType.EXECUTION_DENIED,
);
expect(processStderrWriteSpy).not.toHaveBeenCalled();
expect(processExitSpy).not.toHaveBeenCalled();
});
it('should not show warning when EXECUTION_DENIED in JSON mode', () => {
(mockConfig.getDebugMode as Mock).mockReturnValue(false);
(mockConfig.isInteractive as Mock).mockReturnValue(false);
(
mockConfig.getOutputFormat as ReturnType<typeof vi.fn>
).mockReturnValue(OutputFormat.JSON);
handleToolError(
toolName,
toolError,
mockConfig,
ToolErrorType.EXECUTION_DENIED,
);
expect(processStderrWriteSpy).not.toHaveBeenCalled();
expect(processExitSpy).not.toHaveBeenCalled();
});
it('should not show warning for non-EXECUTION_DENIED errors', () => {
(mockConfig.getDebugMode as Mock).mockReturnValue(false);
(mockConfig.isInteractive as Mock).mockReturnValue(false);
(
mockConfig.getOutputFormat as ReturnType<typeof vi.fn>
).mockReturnValue(OutputFormat.TEXT);
handleToolError(
toolName,
toolError,
mockConfig,
ToolErrorType.FILE_NOT_FOUND,
);
expect(processStderrWriteSpy).not.toHaveBeenCalled();
expect(processExitSpy).not.toHaveBeenCalled();
});
});
});
describe('handleCancellationError', () => {

View File

@@ -11,6 +11,7 @@ import {
parseAndFormatApiError,
FatalTurnLimitedError,
FatalCancellationError,
ToolErrorType,
} from '@qwen-code/qwen-code-core';
export function getErrorMessage(error: unknown): string {
@@ -102,10 +103,24 @@ export function handleToolError(
toolName: string,
toolError: Error,
config: Config,
_errorCode?: string | number,
errorCode?: string | number,
resultDisplay?: string,
): void {
// Always just log to stderr; JSON/streaming formatting happens in the tool_result block elsewhere
// Check if this is a permission denied error in non-interactive mode
const isExecutionDenied = errorCode === ToolErrorType.EXECUTION_DENIED;
const isNonInteractive = !config.isInteractive();
const isTextMode = config.getOutputFormat() === OutputFormat.TEXT;
// Show warning for permission denied errors in non-interactive text mode
if (isExecutionDenied && isNonInteractive && isTextMode) {
const warningMessage =
`Warning: Tool "${toolName}" requires user approval but cannot execute in non-interactive mode.\n` +
`To enable automatic tool execution, use the -y flag (YOLO mode):\n` +
`Example: qwen -p 'your prompt' -y\n\n`;
process.stderr.write(warningMessage);
}
// Always log detailed error in debug mode
if (config.getDebugMode()) {
console.error(
`Error executing tool ${toolName}: ${resultDisplay || toolError.message}`,

View File

@@ -0,0 +1,133 @@
/**
* @license
* Copyright 2025 Qwen Team
* SPDX-License-Identifier: Apache-2.0
*/
import {
AuthType,
type ContentGeneratorConfig,
type ContentGeneratorConfigSources,
resolveModelConfig,
type ModelConfigSourcesInput,
} from '@qwen-code/qwen-code-core';
import type { Settings } from '../config/settings.js';
export interface CliGenerationConfigInputs {
argv: {
model?: string | undefined;
openaiApiKey?: string | undefined;
openaiBaseUrl?: string | undefined;
openaiLogging?: boolean | undefined;
openaiLoggingDir?: string | undefined;
};
settings: Settings;
selectedAuthType: AuthType | undefined;
/**
* Injectable env for testability. Defaults to process.env at callsites.
*/
env?: Record<string, string | undefined>;
}
export interface ResolvedCliGenerationConfig {
/** The resolved model id (may be empty string if not resolvable at CLI layer) */
model: string;
/** API key for OpenAI-compatible auth */
apiKey: string;
/** Base URL for OpenAI-compatible auth */
baseUrl: string;
/** The full generation config to pass to core Config */
generationConfig: Partial<ContentGeneratorConfig>;
/** Source attribution for each resolved field */
sources: ContentGeneratorConfigSources;
}
export function getAuthTypeFromEnv(): AuthType | undefined {
if (process.env['OPENAI_API_KEY']) {
return AuthType.USE_OPENAI;
}
if (process.env['QWEN_OAUTH']) {
return AuthType.QWEN_OAUTH;
}
if (process.env['GEMINI_API_KEY']) {
return AuthType.USE_GEMINI;
}
if (process.env['GOOGLE_API_KEY']) {
return AuthType.USE_VERTEX_AI;
}
if (process.env['ANTHROPIC_API_KEY']) {
return AuthType.USE_ANTHROPIC;
}
return undefined;
}
/**
* Unified resolver for CLI generation config.
*
* Precedence (for OpenAI auth):
* - model: argv.model > OPENAI_MODEL > QWEN_MODEL > settings.model.name
* - apiKey: argv.openaiApiKey > OPENAI_API_KEY > settings.security.auth.apiKey
* - baseUrl: argv.openaiBaseUrl > OPENAI_BASE_URL > settings.security.auth.baseUrl
*
* For non-OpenAI auth, only argv.model override is respected at CLI layer.
*/
export function resolveCliGenerationConfig(
inputs: CliGenerationConfigInputs,
): ResolvedCliGenerationConfig {
const { argv, settings, selectedAuthType } = inputs;
const env = inputs.env ?? (process.env as Record<string, string | undefined>);
const authType = selectedAuthType;
const configSources: ModelConfigSourcesInput = {
authType,
cli: {
model: argv.model,
apiKey: argv.openaiApiKey,
baseUrl: argv.openaiBaseUrl,
},
settings: {
model: settings.model?.name,
apiKey: settings.security?.auth?.apiKey,
baseUrl: settings.security?.auth?.baseUrl,
generationConfig: settings.model?.generationConfig as
| Partial<ContentGeneratorConfig>
| undefined,
},
env,
};
const resolved = resolveModelConfig(configSources);
// Log warnings if any
for (const warning of resolved.warnings) {
console.warn(`[modelProviderUtils] ${warning}`);
}
// Resolve OpenAI logging config (CLI-specific, not part of core resolver)
const enableOpenAILogging =
(typeof argv.openaiLogging === 'undefined'
? settings.model?.enableOpenAILogging
: argv.openaiLogging) ?? false;
const openAILoggingDir =
argv.openaiLoggingDir || settings.model?.openAILoggingDir;
// Build the full generation config
// Note: we merge the resolved config with logging settings
const generationConfig: Partial<ContentGeneratorConfig> = {
...resolved.config,
enableOpenAILogging,
openAILoggingDir,
};
return {
model: resolved.config.model || '',
apiKey: resolved.config.apiKey || '',
baseUrl: resolved.config.baseUrl || '',
generationConfig,
sources: resolved.sources,
};
}
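For context, a hedged sketch (not part of this diff) of calling the resolver for OpenAI-compatible auth; the import path and the argv/settings/env values are illustrative assumptions:

import { AuthType } from '@qwen-code/qwen-code-core';
import type { Settings } from '../config/settings.js';
import { resolveCliGenerationConfig } from './modelProviderUtils.js';

const settings = { model: { name: 'settings-model' } } as unknown as Settings;

const resolved = resolveCliGenerationConfig({
  argv: {}, // no --model / --openai-* flags passed
  settings,
  selectedAuthType: AuthType.USE_OPENAI,
  // Injected env for testability; per the documented precedence,
  // OPENAI_MODEL should win over settings.model.name here.
  env: {
    OPENAI_MODEL: 'env-model',
    OPENAI_BASE_URL: 'https://example.invalid/v1',
  },
});

console.log(resolved.model, resolved.baseUrl, resolved.sources);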

View File

@@ -35,22 +35,33 @@ import {
} from './nonInteractiveHelpers.js';
// Mock dependencies
vi.mock('../services/CommandService.js', () => ({
CommandService: {
create: vi.fn().mockResolvedValue({
getCommands: vi
.fn()
.mockReturnValue([
{ name: 'help' },
{ name: 'commit' },
{ name: 'memory' },
]),
}),
},
}));
vi.mock('../nonInteractiveCliCommands.js', () => ({
getAvailableCommands: vi
.fn()
.mockImplementation(
async (
_config: unknown,
_signal: AbortSignal,
allowedBuiltinCommandNames?: string[],
) => {
const allowedSet = new Set(allowedBuiltinCommandNames ?? []);
const allCommands = [
{ name: 'help', kind: 'built-in' },
{ name: 'commit', kind: 'file' },
{ name: 'memory', kind: 'built-in' },
{ name: 'init', kind: 'built-in' },
{ name: 'summary', kind: 'built-in' },
{ name: 'compress', kind: 'built-in' },
];
vi.mock('../services/BuiltinCommandLoader.js', () => ({
BuiltinCommandLoader: vi.fn().mockImplementation(() => ({})),
// Filter commands: always include file commands, only include allowed built-in commands
return allCommands.filter(
(cmd) =>
cmd.kind === 'file' ||
(cmd.kind === 'built-in' && allowedSet.has(cmd.name)),
);
},
),
}));
vi.mock('../ui/utils/computeStats.js', () => ({
@@ -511,10 +522,12 @@ describe('buildSystemMessage', () => {
});
it('should build system message with all fields', async () => {
const allowedBuiltinCommands = ['init', 'summary', 'compress'];
const result = await buildSystemMessage(
mockConfig,
'test-session-id',
'auto' as PermissionMode,
allowedBuiltinCommands,
);
expect(result).toEqual({
@@ -530,7 +543,7 @@ describe('buildSystemMessage', () => {
],
model: 'test-model',
permission_mode: 'auto',
slash_commands: ['commit', 'help', 'memory'],
slash_commands: ['commit', 'compress', 'init', 'summary'],
qwen_code_version: '1.0.0',
agents: [],
});
@@ -546,6 +559,7 @@ describe('buildSystemMessage', () => {
config,
'test-session-id',
'auto' as PermissionMode,
['init', 'summary'],
);
expect(result.tools).toEqual([]);
@@ -561,6 +575,7 @@ describe('buildSystemMessage', () => {
config,
'test-session-id',
'auto' as PermissionMode,
['init', 'summary'],
);
expect(result.mcp_servers).toEqual([]);
@@ -576,10 +591,37 @@ describe('buildSystemMessage', () => {
config,
'test-session-id',
'auto' as PermissionMode,
['init', 'summary'],
);
expect(result.qwen_code_version).toBe('unknown');
});
it('should only include allowed built-in commands and all file commands', async () => {
const allowedBuiltinCommands = ['init', 'summary'];
const result = await buildSystemMessage(
mockConfig,
'test-session-id',
'auto' as PermissionMode,
allowedBuiltinCommands,
);
// Should include: 'commit' (FILE), 'init' (BUILT_IN, allowed), 'summary' (BUILT_IN, allowed)
// Should NOT include: 'help', 'memory', 'compress' (BUILT_IN but not in allowed set)
expect(result.slash_commands).toEqual(['commit', 'init', 'summary']);
});
it('should include only file commands when no built-in commands are allowed', async () => {
const result = await buildSystemMessage(
mockConfig,
'test-session-id',
'auto' as PermissionMode,
[], // Empty array - no built-in commands allowed
);
// Should only include 'commit' (FILE command)
expect(result.slash_commands).toEqual(['commit']);
});
});
describe('createTaskToolProgressHandler', () => {

Some files were not shown because too many files have changed in this diff.