Compare commits


112 Commits

Author SHA1 Message Date
DragonnZhang
7475ffcbeb refactor(insight): update insight page assets and styles 2026-01-16 14:57:21 +08:00
DragonnZhang
ccd51a6a00 feat: add new insight page with Vite setup 2026-01-16 14:49:40 +08:00
DragonnZhang
a7e14255c3 feat(insight): add insight command and server for personalized programming insights 2026-01-16 13:38:31 +08:00
tanzhenxin
886f914fb3 Merge pull request #1496 from QwenLM/fix/vscode-run
fix(vscode-ide-companion): simplify ELECTRON_RUN_AS_NODE detection and improve README
2026-01-15 09:00:11 +08:00
tanzhenxin
90365af2f8 Merge pull request #1499 from QwenLM/fix/1498
fix: include --acp flag in tool exclusion check
2026-01-15 08:56:58 +08:00
yiliang114
cbef5ffd89 fix: include --acp flag in tool exclusion check
Fixed #1498

The tool exclusion logic only checked --experimental-acp but not --acp,
causing edit, write_file, and run_shell_command to be incorrectly
excluded when VS Code extension uses --acp flag in ACP mode.
2026-01-14 22:49:04 +08:00
yiliang114
5e80e80387 fix(vscode-ide-companion): simplify ELECTRON_RUN_AS_NODE detection and improve README
- Bump version to 0.7.1
- Simplify macOS/Linux terminal launch by always using ELECTRON_RUN_AS_NODE=1
  (all VSCode-like IDEs are Electron-based)
- Update README with marketplace badges, cleaner docs structure
- Fix broken markdown table row

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
2026-01-14 21:10:19 +08:00
Mingholy
985f65f8fa Merge pull request #1494 from QwenLM/chore/v0.7.1
chore: bump version to 0.7.1
2026-01-14 18:29:59 +08:00
Mingholy
9b9c5fadd5 Merge pull request #1492 from QwenLM/mingholy/fix/loggingContentGenerator-timing-issue
Fix timing issue in LoggingContentGenerator initialization
2026-01-14 18:09:26 +08:00
Mingholy
372c67cad4 Merge pull request #1489 from QwenLM/fix/slow-quit
Reduce slow quit by trimming skills watchers
2026-01-14 18:07:37 +08:00
mingholy.lmh
af3864b5de chore: bump version to 0.7.1
Co-authored-by: Qwen-Coder <qwen-coder@alibabacloud.com>
2026-01-14 18:02:43 +08:00
mingholy.lmh
1e3791f30a fix: ci issue 2026-01-14 17:51:00 +08:00
mingholy.lmh
9bf626d051 refactor: streamline initialization of LoggingContentGenerator and update auth type retrieval 2026-01-14 16:44:51 +08:00
mingholy.lmh
a35af6550f fix: timing issue of initialize loggingContentGenerator 2026-01-14 16:17:35 +08:00
tanzhenxin
d6607e134e update 2026-01-14 15:40:53 +08:00
tanzhenxin
9024a41723 Conditional skill manager initialization with improved file watching
Co-authored-by: Qwen-Coder <qwen-coder@alibabacloud.com>
2026-01-14 15:22:49 +08:00
yiliang114
bde056b62e Merge branch 'main' of https://github.com/QwenLM/qwen-code into fix/vscode-run 2026-01-14 13:11:58 +08:00
pomelo
ff5ea3c6d7 Merge pull request #1485 from QwenLM/fix-docs
fix: docs
2026-01-14 10:31:55 +08:00
pomelo-nwu
0faaac8fa4 fix: docs 2026-01-14 10:30:03 +08:00
pomelo
c2e62b9122 Merge pull request #1484 from QwenLM/fix-docs
fix: docs errors and add community contacts
2026-01-14 09:20:43 +08:00
pomelo-nwu
f54b62cda3 fix: docs error 2026-01-13 22:02:55 +08:00
pomelo-nwu
9521987a09 feat: update docs 2026-01-13 21:51:34 +08:00
qwen-code-ci-bot
d20f2a41a2 Merge pull request #1483 from QwenLM/release/sdk-typescript/v0.1.3
chore(release): sdk-typescript v0.1.3
2026-01-13 21:13:07 +08:00
github-actions[bot]
e3eccb5987 chore(release): sdk-typescript v0.1.3 2026-01-13 12:59:45 +00:00
Mingholy
22916457cd Merge pull request #1482 from QwenLM/mingholy/test/skip-flaky-e2e-test
Skip flaky permission control test
2026-01-13 20:16:35 +08:00
Mingholy
28bc4e6467 Merge pull request #1480 from QwenLM/mingholy/fix/qwen-oauth-fallback
Fix: Improve qwen-oauth fallback message display
2026-01-13 20:15:25 +08:00
mingholy.lmh
50bf65b10b test: skip flaky & ambiguous sdk e2e test case 2026-01-13 20:04:19 +08:00
Mingholy
47c8bc5303 Merge pull request #1478 from QwenLM/mingholy/fix/misc-adjustments
Fix auth type switching and model persistence issues
2026-01-13 19:48:57 +08:00
mingholy.lmh
e70ecdf3a8 fix: improve qwen-oauth fallback message display 2026-01-13 19:40:41 +08:00
tanzhenxin
117af05122 Merge pull request #1386 from tt-a1i/fix/error-message-object-display
fix(cli): improve error message display for object errors
2026-01-13 19:18:16 +08:00
tanzhenxin
557e6397bb Merge pull request #1473 from BlockHand/build-sandbox
feat: Customizing the sandbox environment
2026-01-13 19:07:41 +08:00
刘伟光
f762a62a2e feat: Improve the usage documentation 2026-01-13 18:54:26 +08:00
tanzhenxin
ca12772a28 Merge pull request #1469 from QwenLM/fix/1454-shell-timeout
feat(shell): add optional timeout for foreground commands
2026-01-13 18:25:36 +08:00
tanzhenxin
cec4b831b6 Merge pull request #1447 from xuewenjie123/feature/add-defaultHeaders-support
Feature/add custom headers support
2026-01-13 17:51:10 +08:00
tanzhenxin
74bf72877d Merge branch 'main' into fix/1454-shell-timeout 2026-01-13 17:41:09 +08:00
tanzhenxin
b60ae42d10 Merge pull request #1474 from QwenLM/fix/vscode-run
fix(vscode-ide-companion): Fix cross-platform CLI terminal execution
2026-01-13 17:38:28 +08:00
tanzhenxin
54fd4c22a9 Merge pull request #1460 from QwenLM/feat/support-ipynb-select-code
Support Jupyter Notebook (.ipynb) File Code Selection
2026-01-13 17:37:02 +08:00
tanzhenxin
f3b7c63cd1 Merge pull request #1436 from QwenLM/feat/skills-enhancement
feat(skills): add experimental /skills command + hot reload
2026-01-13 17:36:21 +08:00
tanzhenxin
e4dee3a2b2 Implement proper header merging: customHeaders now merge with default headers instead of replacing them in all content generators
Co-authored-by: Qwen-Coder <qwen-coder@alibabacloud.com>
2026-01-13 17:30:54 +08:00
mingholy.lmh
996b9df947 fix: switch auth won't persist fallback default models for qwen-oauth 2026-01-13 17:19:15 +08:00
mingholy.lmh
64291db926 fix: misc issues in qwen-oauth models, sdk cli path resolving.
1. remove `generationConfig` of qwen-oauth models
2. fix esm issues when sdk trying to spawn cli
2026-01-13 17:19:15 +08:00
tanzhenxin
a8e3b9ebe7 Merge pull request #1411 from niklas-wortmann/jetbrains-docs
docs: add integration guide for JetBrains IDEs
2026-01-13 16:52:10 +08:00
tanzhenxin
5cfc9f4686 Update skill manager and package dependencies
Co-authored-by: Qwen-Coder <qwen-coder@alibabacloud.com>
2026-01-13 16:51:36 +08:00
yiliang114
97497457a8 Merge branch 'main' of https://github.com/QwenLM/qwen-code into fix/vscode-run 2026-01-13 14:21:26 +08:00
刘伟光
85473210e5 feat: Customizing the sandbox environment 2026-01-13 10:47:08 +08:00
刘伟光
c0c94bd4fc feat: Customizing the sandbox environment 2026-01-13 10:39:32 +08:00
yiliang114
8111511a89 chore(vscode-ide-companion): add comments under window 2026-01-13 10:19:43 +08:00
xwj02155382
a8eb858f99 refactor: rename defaultHeaders to customHeaders
- Rename defaultHeaders field to customHeaders in ContentGeneratorConfig
- Update MODEL_GENERATION_CONFIG_FIELDS constant
- Update ModelGenerationConfig type definition
- Align naming with documentation and usage across the codebase
2026-01-13 10:14:55 +08:00
pomelo
52d6d1ff13 Merge pull request #1472 from QwenLM/update-vscode-extension-docs
docs(vscode-ide-companion): update vscode extension readme
2026-01-13 09:11:04 +08:00
yiliang114
c845049d26 Merge branch 'main' into fix/vscode-run 2026-01-13 00:27:44 +08:00
Jan-Niklas W.
299b7de030 add image for jetbrains acp configuration 2026-01-12 10:17:25 -06:00
yiliang114
b93bb8bff6 docs(vscode-ide-companion): update vscode extension readme 2026-01-13 00:14:57 +08:00
xwj02155382
adb53a6dc6 refactor: change customHeaders to use priority override instead of merge
- Remove special merge handling for customHeaders in modelConfigResolver
- Update all content generators to use priority override logic
- If customHeaders is defined in modelProvider, use it directly
- Otherwise, use customHeaders from global config or default headers
- Update documentation to reflect the new behavior
- Align customHeaders behavior with other config fields (timeout, maxRetries, etc.)
2026-01-12 18:03:02 +08:00
qwen-code-ci-bot
09196c6e19 Merge pull request #1470 from QwenLM/release/sdk-typescript/v0.1.2
chore(release): sdk-typescript v0.1.2
2026-01-12 16:26:57 +08:00
github-actions[bot]
4bd01d592b chore(release): sdk-typescript v0.1.2 2026-01-12 08:25:25 +00:00
tanzhenxin
6917031128 feat(shell): add optional timeout for foreground commands
Adds a timeout parameter (validated and schema-exposed) and improves abort messaging by distinguishing user cancellation from timeout.

Resolves #1454
2026-01-12 16:23:11 +08:00
xwj02155382
b33525183f Merge branch 'main' of github.com:QwenLM/qwen-code into feature/add-defaultHeaders-support 2026-01-12 15:52:23 +08:00
Mingholy
1aed5ce858 Merge pull request #1462 from QwenLM/mingholy/feat/sdk-skills
fix: SDK release workflow and stability improvements
2026-01-12 15:06:55 +08:00
Mingholy
bad5b0485d Merge pull request #1457 from liqiongyu/fix/1118-auth-fetch-failed-diagnostics
fix(core): improve OAuth fetch-failed diagnostics
2026-01-12 15:06:16 +08:00
tanzhenxin
5a6e5bb452 Merge pull request #1427 from liqiongyu/fix/1333-legacy-settings-alias
fix(cli): warn on deprecated/unknown settings keys
2026-01-12 14:43:27 +08:00
tanzhenxin
5f8e1ebc94 chore(settings): update legacy settings alias implementation and tests
Co-authored-by: Qwen-Coder <qwen-coder@alibabacloud.com>
2026-01-12 14:29:40 +08:00
mingholy.lmh
9670456a56 fix: simplify JavaScript runtime detection to fix powershell spawning process issue 2026-01-12 13:42:24 +08:00
liqoingyu
4c186e7c92 refactor(core): extract fetch error troubleshooting 2026-01-12 12:00:01 +08:00
tanzhenxin
2f6b0b233a Merge pull request #1464 from xuewenjie123/fix/windows-background-terminal-execute-x
fix(shell): prevent console window flash on Windows for foreground tasks
2026-01-12 11:48:10 +08:00
xuewenjie
9a8ce605c5 test: update shellExecutionService test for Windows spawn config changes 2026-01-12 11:22:54 +08:00
tanzhenxin
afc693a4ab Merge pull request #1453 from liqiongyu/fix/1359-sandbox-uidgid-default-linux
fix(cli): default sandbox UID/GID mapping on Linux
2026-01-12 11:08:47 +08:00
xuewenjie
7173cba844 fix(shell): prevent console window flash on Windows for foreground tasks 2026-01-12 11:04:05 +08:00
yiliang114
ec8cccafd7 Merge branch 'main' of https://github.com/QwenLM/qwen-code into fix/vscode-run 2026-01-12 10:57:56 +08:00
liqoingyu
8c56b612fb fix(cli): warn on deprecated/unknown settings keys 2026-01-12 10:49:37 +08:00
mingholy.lmh
7d40e1470c chore: add CODEOWNERS for SDK TypeScript package and remove legacy CLI path alias 2026-01-11 21:24:45 +08:00
yiliang114
b0e561ca73 chore(vscode-ide-companion/open-files-manager): update copyright headers to Qwen Team 2026-01-11 00:25:31 +08:00
yiliang114
563d68ad5b feat(vscode-ide-companion/services): add IPYNB code selection support and refactor OpenFilesManager 2026-01-10 23:51:51 +08:00
yiliang114
82c524f87d fix(vscode-ide-companion): window qwen code run command 2026-01-10 22:30:10 +08:00
yiliang114
df75aa06b6 fix(vscode-ide-companion): window qwen code run command 2026-01-10 22:08:14 +08:00
yiliang114
8ea9871d23 fix(vscode-ide-companion): fix positional argument problem due to special handling for Electron app of yargs
- Remove isNodeAvailable function and related child_process import
- Update command execution logic to properly handle ELECTRON_RUN_AS_NODE
- Add proper quoting mechanisms for different platforms (PowerShell vs POSIX)
- Bump version from 0.6.1 to 0.6.2
2026-01-10 19:52:25 +08:00
liqoingyu
097482910e fix(core): improve OAuth fetch-failed diagnostics 2026-01-10 16:49:56 +08:00
liqoingyu
9b78c17638 fix(cli): default sandbox UID/GID mapping on Linux
Fixes #1359.

Default container sandboxing on Linux to use host UID/GID so qwen runs under a user that matches the mounted home directory and persists auth/settings in ~/.qwen.

Also gate the informational log behind DEBUG/DEBUG_MODE and clarify docs about Linux UID/GID mapping and ~/.qwen persistence.
2026-01-10 14:31:08 +08:00
tanzhenxin
bde31d1261 Merge pull request #1448 from QwenLM/fix/openai-compatible
fix(core): handle missing delta in OpenAI stream chunks
2026-01-09 21:38:31 +08:00
xwj02155382
2d1934bf2f docs: add defaultHeaders documentation to settings.md
- Add defaultHeaders to model.generationConfig description
- Add defaultHeaders example in model.generationConfig
- Add defaultHeaders example in modelProviders configuration
- Document defaultHeaders merge strategy in generation config layering
- Explain use cases: request tracing, monitoring, API gateway routing
2026-01-09 18:15:21 +08:00
mingholy.lmh
7f15256eba fix: improve release workflow 2026-01-09 18:00:01 +08:00
mingholy.lmh
587fc82fbc chore: update version to 0.1.1 in package.json 2026-01-09 17:54:59 +08:00
tanzhenxin
cba9c424eb fix(core): handle missing delta in OpenAI stream chunks
Some OpenAI-compatible providers occasionally emit chat.completion.chunk choices
without a delta object. Guard optional reasoning_content access and add a
regression test to ensure chunk conversion does not throw.
2026-01-09 17:41:07 +08:00
xwj02155382
1b7418f91f docs: add complete implementation documentation for the defaultHeaders feature
- Consolidate all changes on the current branch relative to main (10 files)
- Include the full change details of the two commits
- Remove the test files test-defaultHeaders.cjs and verify-defaultHeaders.cjs
- Remove the old, incomplete documentation
- Add complete feature documentation, including code change descriptions, configuration examples, and a usage guide
2026-01-09 17:31:01 +08:00
yiliang114
b7828ac765 Merge branch 'main' into fix/vscode-run 2026-01-09 16:39:12 +08:00
mingholy.lmh
8705f734d0 fix: improve bundled CLI path finding and support --experimental-skills 2026-01-09 16:32:55 +08:00
xwj02155382
0bd17a2406 feat: support reading defaultHeaders from the modelProviders configuration
- Change the ModelConfigSourcesInput interface so the modelProvider type is ModelProviderConfig instead of ResolvedModelConfig
- Add logic in resolveCliGenerationConfig to look up the modelProvider from settings.modelProviders
- Use a type alias to avoid a conflict with ModelConfig in subagents/types.ts
- Fix type errors in the test files
- defaultHeaders can now be set for specific models via the modelProviders configuration
2026-01-09 16:08:59 +08:00
xwj02155382
59be5163fd feat: add defaultHeaders support for all content generators
- Add defaultHeaders field to ContentGeneratorConfig and ModelGenerationConfig
- Implement defaultHeaders merging logic in resolveGenerationConfig
- Support defaultHeaders in OpenAI providers (DefaultOpenAICompatibleProvider, DashScopeOpenAICompatibleProvider)
- Support defaultHeaders in Gemini and Anthropic content generators
- Add defaultHeaders to MODEL_GENERATION_CONFIG_FIELDS
- Update resolveQwenOAuthConfig to support modelProvider.generationConfig

Configuration hierarchy:
- L1: modelProvider.generationConfig.defaultHeaders (high priority)
- L2: settings.model.generationConfig.defaultHeaders (low priority)
- Merge strategy: high priority headers override low priority headers with same name
2026-01-09 15:56:32 +08:00
tanzhenxin
95efe89ac0 fix positional argument problem due to special handling for Electron app of yargs 2026-01-09 14:49:57 +08:00
tanzhenxin
6714f9ce3c Merge pull request #1351 from xuewenjie123/fix/editor-launch-issues
fix: resolve external editor launch failure on macOS and Windows
2026-01-09 11:07:43 +08:00
tanzhenxin
155d1f9518 Merge pull request #1428 from liqiongyu/fix/727-memory-show-respects-context-file
fix(cli): /memory show respects context.fileName
2026-01-09 10:29:27 +08:00
Mingholy
f776075aa8 Merge pull request #1439 from QwenLM/mingholy/fix/multi-provider-cold-start
fix: multi provider cold start issue
2026-01-08 18:54:42 +08:00
tanzhenxin
d86903ced5 Update skill tool descriptions 2026-01-08 16:43:04 +08:00
tanzhenxin
a47bdc0b06 fix(cli): guard experimental skills config lookup 2026-01-08 15:54:43 +08:00
tanzhenxin
0e769e100b Added automatic skill hot-reload 2026-01-08 15:43:46 +08:00
tanzhenxin
b5bcc07223 Add skills list display to CLI interface 2026-01-08 14:45:48 +08:00
tanzhenxin
9653dc90d5 Add skills command with completion support 2026-01-08 14:23:13 +08:00
yiliang114
052337861b Fix #1416 2026-01-07 21:05:49 +08:00
liqoingyu
0a0ab64da0 test(cli): make memoryCommand path assertions cross-platform 2026-01-07 20:28:28 +08:00
liqoingyu
8a15017593 fix(cli): /memory show respects context.fileName 2026-01-07 20:07:41 +08:00
tanzhenxin
f8aecb2631 only allow shell execution in current working directory for skills 2026-01-07 19:29:49 +08:00
xwj02155382
3d059b71de refactor: improve IDE context format and editor command error handling
- Change IDE context from JSON to plain text format for better LLM comprehension
  - Remove JSON.stringify() and code fences from getIdeContextParts()
  - Use human-readable format: 'Active file:', 'Cursor: line X, character Y'
  - Apply same format to delta updates: 'Files opened:', 'Files closed:', etc.
  - Update all related tests to match new plain text format

- Fix editor command fallback logic in useLaunchEditor
  - Throw clear error when no editor command is available
  - Remove meaningless fallback to last command in list
  - Provide helpful error message with tried commands and solution
2026-01-07 16:34:12 +08:00
yiliang114
361492247e fix(vscode-ide-companion): fix cross-platform CLI execution in terminal
- Add platform.ts utility with isWindows constant
- Fix Windows PowerShell execution with & call operator
- Fix macOS Electron helper with ELECTRON_RUN_AS_NODE=1
- Prefer system Node.js, fallback to VS Code runtime
- Refactor platform detection in acpMessageHandler and acpSessionManager

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
2026-01-07 01:35:05 +08:00
xwj02155382
87dc618a21 revert: restore original editor command fallback logic for zed support
- Revert getExecutableCommand to use original fallback logic
- Revert getDiffCommand to use slice(0, -1) pattern
- Maintain proper support for zed editor with multiple command options ['zed', 'zeditor']
- Keep the caching optimization for commandExists
2026-01-06 11:09:29 +08:00
xwj02155382
94a5d828bd refactor: optimize commandExists with caching and simplify editor command logic
- Add caching layer for commandExists in useLaunchEditor.ts to avoid repeated execSync calls
- Import commandExists from core and wrap it with cache in CLI layer
- Simplify getExecutableCommand and getDiffCommand logic to remove redundant fallback
- For editors with single command, directly use first command instead of meaningless self-fallback
- Maintain support for editors with multiple commands (e.g., zed with 'zed' and 'zeditor')
2026-01-06 11:05:03 +08:00
Jan-Niklas W.
824ca056a4 docs: add integration guide for JetBrains IDEs 2026-01-05 14:07:37 -06:00
Tu Shaokun
4f664d00ac fix: handle edge case where JSON.stringify returns undefined
Add fallback to String() when JSON.stringify returns undefined,
which can happen with objects that have toJSON() returning undefined.
2026-01-01 10:10:24 +08:00
Tu Shaokun
7fdebe8fe6 fix(cli): improve error message display for object errors
Previously, when a tool execution failed with an error object (not an
Error instance), getErrorMessage() would return '[object Object]',
hiding useful error information from users.

This change improves getErrorMessage() to:
1. Extract the 'message' property from error-like objects
2. JSON.stringify plain objects to show their full content
3. Fall back to String() only when JSON.stringify fails

Fixes #1338
2026-01-01 09:56:27 +08:00
xwj02155382
fd41309ed2 refactor: share editorCommands between core and cli packages
- Export editorCommands from @qwen-code/qwen-code-core
- Remove duplicate editorCommands definition in useLaunchEditor
- Import shared editorCommands configuration in CLI package
- Reduces code duplication and ensures consistency

This change makes the editor configuration a single source of truth,
making it easier to maintain and add new editors in the future.
2025-12-26 16:03:05 +08:00
xwj02155382
48bc0f35d7 perf: add cache for commandExists to fix CI timeout
- Add commandExistsCache Map to avoid repeated execSync calls
- Cache command existence check results to improve test performance
- Fix CI test timeout issue (was timing out after 7m)

The commandExists() function was being called frequently during tests,
causing slow test execution due to repeated system command calls.
By caching the results, we significantly improve performance in test
environments while maintaining the same functionality.
2025-12-26 13:52:37 +08:00
xwj02155382
e30c2dbe23 Merge branch 'fix/editor-launch-issues' of https://github.com/xuewenjie123/qwen-code into fix/editor-launch-issues 2025-12-26 11:22:22 +08:00
xwj02155382
e9204ecba9 fix: resolve editor launch issue on macOS for subagent editing
- Fixed ENOENT error when launching external editors (VS Code, etc.)
- Added proper editor command mapping (vscode -> code, neovim -> nvim, etc.)
- Implemented command existence check to find available editor executable
- Supports both macOS and Windows platform-specific commands

Fixes #1180
2025-12-26 11:11:24 +08:00
xwj02155382
f24bda3d7b fix: resolve editor launch issue on macOS for subagent editing
- Fixed ENOENT error when launching external editors (VS Code, etc.)
- Added proper editor command mapping (vscode -> code, neovim -> nvim, etc.)
- Implemented command existence check to find available editor executable
- Supports both macOS and Windows platform-specific commands

Fixes #1180
2025-12-26 10:17:52 +08:00
116 changed files with 37834 additions and 1368 deletions

.github/CODEOWNERS (new file, vendored)
View File

@@ -0,0 +1,3 @@
* @tanzhenxin @DennisYu07 @gwinthis @LaZzyMan @pomelo-nwu @Mingholy
# SDK TypeScript package changes require review from Mingholy
packages/sdk-typescript/** @Mingholy

View File

@@ -241,7 +241,7 @@ jobs:
${{ steps.vars.outputs.is_dry_run == 'false' && steps.vars.outputs.is_nightly == 'false' && steps.vars.outputs.is_preview == 'false' }}
id: 'pr'
env:
GITHUB_TOKEN: '${{ secrets.GITHUB_TOKEN }}'
GITHUB_TOKEN: '${{ secrets.CI_BOT_PAT }}'
RELEASE_BRANCH: '${{ steps.release_branch.outputs.BRANCH_NAME }}'
RELEASE_TAG: '${{ steps.version.outputs.RELEASE_TAG }}'
run: |-
@@ -258,26 +258,15 @@ jobs:
echo "PR_URL=${pr_url}" >> "${GITHUB_OUTPUT}"
- name: 'Wait for CI checks to complete'
if: |-
${{ steps.vars.outputs.is_dry_run == 'false' && steps.vars.outputs.is_nightly == 'false' && steps.vars.outputs.is_preview == 'false' }}
env:
GITHUB_TOKEN: '${{ secrets.GITHUB_TOKEN }}'
PR_URL: '${{ steps.pr.outputs.PR_URL }}'
run: |-
set -euo pipefail
echo "Waiting for CI checks to complete..."
gh pr checks "${PR_URL}" --watch --interval 30
- name: 'Enable auto-merge for release PR'
if: |-
${{ steps.vars.outputs.is_dry_run == 'false' && steps.vars.outputs.is_nightly == 'false' && steps.vars.outputs.is_preview == 'false' }}
env:
GITHUB_TOKEN: '${{ secrets.GITHUB_TOKEN }}'
GITHUB_TOKEN: '${{ secrets.CI_BOT_PAT }}'
PR_URL: '${{ steps.pr.outputs.PR_URL }}'
run: |-
set -euo pipefail
gh pr merge "${PR_URL}" --merge --auto
gh pr merge "${PR_URL}" --merge --auto --delete-branch
- name: 'Create Issue on Failure'
if: |-

View File

@@ -25,7 +25,7 @@ Qwen Code is an open-source AI agent for the terminal, optimized for [Qwen3-Code
- **OpenAI-compatible, OAuth free tier**: use an OpenAI-compatible API, or sign in with Qwen OAuth to get 2,000 free requests/day.
- **Open-source, co-evolving**: both the framework and the Qwen3-Coder model are open-source—and they ship and evolve together.
- **Agentic workflow, feature-rich**: rich built-in tools (Skills, SubAgents, Plan Mode) for a full agentic workflow and a Claude Code-like experience.
- **Terminal-first, IDE-friendly**: built for developers who live in the command line, with optional integration for VS Code and Zed.
- **Terminal-first, IDE-friendly**: built for developers who live in the command line, with optional integration for VS Code, Zed, and JetBrains IDEs.
## Installation
@@ -137,10 +137,11 @@ Use `-p` to run Qwen Code without the interactive UI—ideal for scripts, automa
#### IDE integration
Use Qwen Code inside your editor (VS Code and Zed):
Use Qwen Code inside your editor (VS Code, Zed, and JetBrains IDEs):
- [Use in VS Code](https://qwenlm.github.io/qwen-code-docs/en/users/integration-vscode/)
- [Use in Zed](https://qwenlm.github.io/qwen-code-docs/en/users/integration-zed/)
- [Use in JetBrains IDEs](https://qwenlm.github.io/qwen-code-docs/en/users/integration-jetbrains/)
#### TypeScript SDK
@@ -200,6 +201,11 @@ If you encounter issues, check the [troubleshooting guide](https://qwenlm.github
To report a bug from within the CLI, run `/bug` and include a short title and repro steps.
## Connect with Us
- Discord: https://discord.gg/ycKBjdNd
- Dingtalk: https://qr.dingtalk.com/action/joingroup?code=v1,k1,+FX6Gf/ZDlTahTIRi8AEQhIaBlqykA0j+eBKKdhLeAE=&_dt_no_comment=1&origin=1
## Acknowledgments
This project is based on [Google Gemini CLI](https://github.com/google-gemini/gemini-cli). We acknowledge and appreciate the excellent work of the Gemini CLI team. Our main contribution focuses on parser-level adaptations to better support Qwen-Coder models.

View File

@@ -10,4 +10,5 @@ export default {
'web-search': 'Web Search',
memory: 'Memory',
'mcp-server': 'MCP Servers',
sandbox: 'Sandboxing',
};

View File

@@ -0,0 +1,90 @@
## Customizing the sandbox environment (Docker/Podman)
### BUILD_SANDBOX is not supported when Qwen Code is installed from the npm package
1. To build a custom sandbox, you need access to the build scripts (scripts/build_sandbox.js) in the source code repository.
2. These build scripts are not included in the packages published to npm.
3. The code contains hard-coded path checks that explicitly reject build requests from non-source environments.
If you need extra tools inside the container (e.g., `git`, `python`, `rg`), create a custom Dockerfile. The specific steps are as follows:
#### 1. Clone the qwen-code project first: https://github.com/QwenLM/qwen-code.git
#### 2. Make sure you perform the following operations in the source code repository directory
```bash
# 1. Install the project dependencies
npm install
# 2. Build the Qwen Code project
npm run build
# 3. Verify that the dist directory has been generated
ls -la packages/cli/dist/
# 4. Create a global link from the CLI package directory
cd packages/cli
npm link
# 5. Verify the link (it should now point to the source code)
which qwen
# Expected output: /xxx/xxx/.nvm/versions/node/v24.11.1/bin/qwen
# or a similar path; it should resolve to a symbolic link
# 6. Inspect the symbolic link to see the actual source code path
ls -la $(dirname $(which qwen))/../lib/node_modules/@qwen-code/qwen-code
# It should show a symbolic link pointing to your source code directory
# 7. Check the qwen version
qwen -v
# npm link overrides the global qwen. Since both report the same version number, you can uninstall the global CLI first to avoid confusion
```
#### 3. Create your sandbox Dockerfile in the root directory of your own project
- Path: `.qwen/sandbox.Dockerfile`
- Official image: https://github.com/QwenLM/qwen-code/pkgs/container/qwen-code
```dockerfile
# Based on the official Qwen sandbox image (pinning an explicit version is recommended)
FROM ghcr.io/qwenlm/qwen-code:sha-570ec43
# Add your extra tools here
RUN apt-get update && apt-get install -y \
git \
python3 \
ripgrep
```
#### 4. Build the sandbox image from the root directory of your project
```bash
GEMINI_SANDBOX=docker BUILD_SANDBOX=1 qwen -s
# Check that the sandbox version reported at startup matches your custom image version; if it does, the build was picked up successfully
```
This builds a project-specific image based on the default sandbox image.
#### Remove npm link
- To restore the official qwen CLI, remove the npm link
```bash
# Method 1: Unlink globally
npm unlink -g @qwen-code/qwen-code
# Method 2: Remove it in the packages/cli directory
cd packages/cli
npm unlink
# Verify that the link has been removed
which qwen
# It should display "qwen not found"
# Reinstall the global version if necessary
npm install -g @qwen-code/qwen-code
# Verify the recovery
which qwen
qwen --version
```

View File

@@ -12,6 +12,7 @@ export default {
},
'integration-vscode': 'Visual Studio Code',
'integration-zed': 'Zed IDE',
'integration-jetbrains': 'JetBrains IDEs',
'integration-github-action': 'Github Actions',
'Code with Qwen Code': {
type: 'separator',

View File

@@ -104,7 +104,7 @@ Settings are organized into categories. All settings should be placed within the
| `model.name` | string | The Qwen model to use for conversations. | `undefined` |
| `model.maxSessionTurns` | number | Maximum number of user/model/tool turns to keep in a session. -1 means unlimited. | `-1` |
| `model.summarizeToolOutput` | object | Enables or disables the summarization of tool output. You can specify the token budget for the summarization using the `tokenBudget` setting. Note: Currently only the `run_shell_command` tool is supported. For example `{"run_shell_command": {"tokenBudget": 2000}}` | `undefined` |
| `model.generationConfig` | object | Advanced overrides passed to the underlying content generator. Supports request controls such as `timeout`, `maxRetries`, and `disableCacheControl`, along with fine-tuning knobs under `samplingParams` (for example `temperature`, `top_p`, `max_tokens`). Leave unset to rely on provider defaults. | `undefined` |
| `model.generationConfig` | object | Advanced overrides passed to the underlying content generator. Supports request controls such as `timeout`, `maxRetries`, `disableCacheControl`, and `customHeaders` (custom HTTP headers for API requests), along with fine-tuning knobs under `samplingParams` (for example `temperature`, `top_p`, `max_tokens`). Leave unset to rely on provider defaults. | `undefined` |
| `model.chatCompression.contextPercentageThreshold` | number | Sets the threshold for chat history compression as a percentage of the model's total token limit. This is a value between 0 and 1 that applies to both automatic compression and the manual `/compress` command. For example, a value of `0.6` will trigger compression when the chat history exceeds 60% of the token limit. Use `0` to disable compression entirely. | `0.7` |
| `model.skipNextSpeakerCheck` | boolean | Skip the next speaker check. | `false` |
| `model.skipLoopDetection` | boolean | Disables loop detection checks. Loop detection prevents infinite loops in AI responses but can generate false positives that interrupt legitimate workflows. Enable this option if you experience frequent false positive loop detection interruptions. | `false` |
@@ -114,12 +114,16 @@ Settings are organized into categories. All settings should be placed within the
**Example model.generationConfig:**
```
```json
{
"model": {
"generationConfig": {
"timeout": 60000,
"disableCacheControl": false,
"customHeaders": {
"X-Request-ID": "req-123",
"X-User-ID": "user-456"
},
"samplingParams": {
"temperature": 0.2,
"top_p": 0.8,
@@ -130,6 +134,8 @@ Settings are organized into categories. All settings should be placed within the
}
```
The `customHeaders` field allows you to add custom HTTP headers to all API requests. This is useful for request tracing, monitoring, API gateway routing, or when different models require different headers. If `customHeaders` is defined in `modelProviders[].generationConfig.customHeaders`, it will be used directly; otherwise, headers from `model.generationConfig.customHeaders` will be used. No merging occurs between the two levels.
**model.openAILoggingDir examples:**
- `"~/qwen-logs"` - Logs to `~/qwen-logs` directory
@@ -154,6 +160,10 @@ Use `modelProviders` to declare curated model lists per auth type that the `/mod
"generationConfig": {
"timeout": 60000,
"maxRetries": 3,
"customHeaders": {
"X-Model-Version": "v1.0",
"X-Request-Priority": "high"
},
"samplingParams": { "temperature": 0.2 }
}
}
@@ -215,7 +225,7 @@ Per-field precedence for `generationConfig`:
3. `settings.model.generationConfig`
4. Content-generator defaults (`getDefaultGenerationConfig` for OpenAI, `getParameterValue` for Gemini, etc.)
`samplingParams` is treated atomically; provider values replace the entire object. Defaults from the content generator apply last so each provider retains its tuned baseline.
`samplingParams` and `customHeaders` are both treated atomically; provider values replace the entire object. If `modelProviders[].generationConfig` defines these fields, they are used directly; otherwise, values from `model.generationConfig` are used. No merging occurs between provider and global configuration levels. Defaults from the content generator apply last so each provider retains its tuned baseline.
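To make the override rule above concrete, here is a minimal, illustrative TypeScript sketch (hypothetical names; this is not the actual resolver code from the changeset):
```ts
// Hypothetical sketch of the priority-override rule described above:
// provider-level customHeaders win outright; there is no per-key merge.
interface GenerationConfigLike {
  customHeaders?: Record<string, string>;
}

function resolveCustomHeaders(
  provider?: GenerationConfigLike, // modelProviders[].generationConfig
  global?: GenerationConfigLike, // settings.model.generationConfig
): Record<string, string> | undefined {
  return provider?.customHeaders ?? global?.customHeaders;
}

// Example: the global X-Request-ID header is dropped, not merged, once the
// provider defines its own customHeaders object.
resolveCustomHeaders(
  { customHeaders: { 'X-Model-Version': 'v1.0' } },
  { customHeaders: { 'X-Request-ID': 'req-123' } },
); // => { 'X-Model-Version': 'v1.0' }
```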
##### Selection persistence and recommendations
@@ -470,7 +480,7 @@ Arguments passed directly when running the CLI can override other configurations
| `--telemetry-otlp-protocol` | | Sets the OTLP protocol for telemetry (`grpc` or `http`). | | Defaults to `grpc`. See [telemetry](../../developers/development/telemetry) for more information. |
| `--telemetry-log-prompts` | | Enables logging of prompts for telemetry. | | See [telemetry](../../developers/development/telemetry) for more information. |
| `--checkpointing` | | Enables [checkpointing](../features/checkpointing). | | |
| `--acp` | | Enables ACP mode (Agent Control Protocol). Useful for IDE/editor integrations like [Zed](../integration-zed). | | Stable. Replaces the deprecated `--experimental-acp` flag. |
| `--acp` | | Enables ACP mode (Agent Client Protocol). Useful for IDE/editor integrations like [Zed](../integration-zed). | | Stable. Replaces the deprecated `--experimental-acp` flag. |
| `--experimental-skills` | | Enables experimental [Agent Skills](../features/skills) (registers the `skill` tool and loads Skills from `.qwen/skills/` and `~/.qwen/skills/`). | | Experimental. |
| `--extensions` | `-e` | Specifies a list of extensions to use for the session. | Extension names | If not provided, all available extensions are used. Use the special term `qwen -e none` to disable all extensions. Example: `qwen -e my-extension -e my-other-extension` |
| `--list-extensions` | `-l` | Lists all available extensions and exits. | | |

View File

@@ -59,6 +59,7 @@ Commands for managing AI tools and models.
| ---------------- | --------------------------------------------- | --------------------------------------------- |
| `/mcp` | List configured MCP servers and tools | `/mcp`, `/mcp desc` |
| `/tools` | Display currently available tool list | `/tools`, `/tools desc` |
| `/skills` | List and run available skills (experimental) | `/skills`, `/skills <name>` |
| `/approval-mode` | Change approval mode for tool usage | `/approval-mode <mode (auto-edit)> --project` |
| →`plan` | Analysis only, no execution | Secure review |
| →`default` | Require approval for edits | Daily use |

View File

@@ -49,6 +49,8 @@ Cross-platform sandboxing with complete process isolation.
By default, Qwen Code uses a published sandbox image (configured in the CLI package) and will pull it as needed.
The container sandbox mounts your workspace and your `~/.qwen` directory into the container so auth and settings persist between runs.
**Best for**: Strong isolation on any OS, consistent tooling inside a known image.
### Choosing a method
@@ -157,22 +159,13 @@ For a working allowlist-style proxy example, see: [Example Proxy Script](/develo
## Linux UID/GID handling
The sandbox automatically handles user permissions on Linux. Override these permissions with:
On Linux, Qwen Code defaults to enabling UID/GID mapping so the sandbox runs as your user (and reuses the mounted `~/.qwen`). Override with:
```bash
export SANDBOX_SET_UID_GID=true # Force host UID/GID
export SANDBOX_SET_UID_GID=false # Disable UID/GID mapping
```
## Customizing the sandbox environment (Docker/Podman)
If you need extra tools inside the container (e.g., `git`, `python`, `rg`), create a custom Dockerfile:
- Path: `.qwen/sandbox.Dockerfile`
- Then run with: `BUILD_SANDBOX=1 qwen -s ...`
This builds a project-specific image based on the default sandbox image.
## Troubleshooting
### Common issues

View File

@@ -27,6 +27,14 @@ Agent Skills package expertise into discoverable capabilities. Each Skill consis
Skills are **model-invoked** — the model autonomously decides when to use them based on your request and the Skills description. This is different from slash commands, which are **user-invoked** (you explicitly type `/command`).
If you want to invoke a Skill explicitly, use the `/skills` slash command:
```bash
/skills <skill-name>
```
The `/skills` command is only available when you run with `--experimental-skills`. Use autocomplete to browse available Skills and descriptions.
### Benefits
- Extend Qwen Code for your workflows

View File

@@ -0,0 +1,57 @@
# JetBrains IDEs
> JetBrains IDEs provide native support for AI coding assistants through the Agent Client Protocol (ACP). This integration allows you to use Qwen Code directly within your JetBrains IDE with real-time code suggestions.
### Features
- **Native agent experience**: Integrated AI assistant panel within your JetBrains IDE
- **Agent Client Protocol**: Full support for ACP enabling advanced IDE interactions
- **Symbol management**: #-mention files to add them to the conversation context
- **Conversation history**: Access to past conversations within the IDE
### Requirements
- JetBrains IDE with ACP support (IntelliJ IDEA, WebStorm, PyCharm, etc.)
- Qwen Code CLI installed
### Installation
1. Install Qwen Code CLI:
```bash
npm install -g @qwen-code/qwen-code
```
2. Open your JetBrains IDE and navigate to AI Chat tool window.
3. Click the 3-dot menu in the upper-right corner, select **Configure ACP Agent**, and configure Qwen Code with the following settings:
```json
{
"agent_servers": {
"qwen": {
"command": "/path/to/qwen",
"args": ["--acp"],
"env": {}
}
}
}
```
4. The Qwen Code agent should now be available in the AI Assistant panel
![Qwen Code in JetBrains AI Chat](https://img.alicdn.com/imgextra/i3/O1CN01ZxYel21y433Ci6eg0_!!6000000006524-2-tps-2774-1494.png)
## Troubleshooting
### Agent not appearing
- Run `qwen --version` in terminal to verify installation
- Ensure your JetBrains IDE version supports ACP
- Restart your JetBrains IDE
### Qwen Code not responding
- Check your internet connection
- Verify CLI works by running `qwen` in terminal
- [File an issue on GitHub](https://github.com/qwenlm/qwen-code/issues) if the problem persists

View File

@@ -18,23 +18,17 @@
### Requirements
- VS Code 1.98.0 or higher
- VS Code 1.85.0 or higher
### Installation
1. Install Qwen Code CLI:
```bash
npm install -g qwen-code
```
2. Download and install the extension from the [Visual Studio Code Extension Marketplace](https://marketplace.visualstudio.com/items?itemName=qwenlm.qwen-code-vscode-ide-companion).
Download and install the extension from the [Visual Studio Code Extension Marketplace](https://marketplace.visualstudio.com/items?itemName=qwenlm.qwen-code-vscode-ide-companion).
## Troubleshooting
### Extension not installing
- Ensure you have VS Code 1.98.0 or higher
- Ensure you have VS Code 1.85.0 or higher
- Check that VS Code has permission to install extensions
- Try installing directly from the Marketplace website

View File

@@ -1,6 +1,6 @@
# Zed Editor
> Zed Editor provides native support for AI coding assistants through the Agent Control Protocol (ACP). This integration allows you to use Qwen Code directly within Zed's interface with real-time code suggestions.
> Zed Editor provides native support for AI coding assistants through the Agent Client Protocol (ACP). This integration allows you to use Qwen Code directly within Zed's interface with real-time code suggestions.
![Zed Editor Overview](https://img.alicdn.com/imgextra/i1/O1CN01aAhU311GwEoNh27FP_!!6000000000686-2-tps-3024-1898.png)
@@ -20,9 +20,9 @@
1. Install Qwen Code CLI:
```bash
npm install -g qwen-code
```
```bash
npm install -g @qwen-code/qwen-code
```
2. Download and install [Zed Editor](https://zed.dev/)

View File

@@ -159,7 +159,7 @@ Qwen Code will:
### Test out other common workflows
There are a number of ways to work with Claude:
There are a number of ways to work with Qwen Code:
**Refactor code**

View File

@@ -9,11 +9,18 @@ This guide provides solutions to common issues and debugging tips, including top
## Authentication or login errors
- **Error: `UNABLE_TO_GET_ISSUER_CERT_LOCALLY` or `unable to get local issuer certificate`**
- **Error: `UNABLE_TO_GET_ISSUER_CERT_LOCALLY`, `UNABLE_TO_VERIFY_LEAF_SIGNATURE`, or `unable to get local issuer certificate`**
- **Cause:** You may be on a corporate network with a firewall that intercepts and inspects SSL/TLS traffic. This often requires a custom root CA certificate to be trusted by Node.js.
- **Solution:** Set the `NODE_EXTRA_CA_CERTS` environment variable to the absolute path of your corporate root CA certificate file.
- Example: `export NODE_EXTRA_CA_CERTS=/path/to/your/corporate-ca.crt`
- **Error: `Device authorization flow failed: fetch failed`**
- **Cause:** Node.js could not reach Qwen OAuth endpoints (often a proxy or SSL/TLS trust issue). When available, Qwen Code will also print the underlying error cause (for example: `UNABLE_TO_VERIFY_LEAF_SIGNATURE`).
- **Solution:**
- Confirm you can access `https://chat.qwen.ai` from the same machine/network.
- If you are behind a proxy, set it via `qwen --proxy <url>` (or the `proxy` setting in `settings.json`).
- If your network uses a corporate TLS inspection CA, set `NODE_EXTRA_CA_CERTS` as described above.
- **Issue: Unable to display UI after authentication failure**
- **Cause:** If authentication fails after selecting an authentication type, the `security.auth.selectedType` setting may be persisted in `settings.json`. On restart, the CLI may get stuck trying to authenticate with the failed auth type and fail to display the UI.
- **Solution:** Clear the `security.auth.selectedType` configuration item in your `settings.json` file:

View File

@@ -26,6 +26,7 @@ export default tseslint.config(
'dist/**',
'docs-site/.next/**',
'docs-site/out/**',
'packages/cli/src/services/insight-page/**',
],
},
eslint.configs.recommended,

View File

@@ -831,7 +831,7 @@ describe('Permission Control (E2E)', () => {
TEST_TIMEOUT,
);
it(
it.skip(
'should execute dangerous commands without confirmation',
async () => {
const q = query({

package-lock.json (generated)
View File

@@ -1,12 +1,12 @@
{
"name": "@qwen-code/qwen-code",
"version": "0.7.0",
"version": "0.7.1",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "@qwen-code/qwen-code",
"version": "0.7.0",
"version": "0.7.1",
"workspaces": [
"packages/*"
],
@@ -6216,10 +6216,7 @@
"version": "4.0.3",
"resolved": "https://registry.npmjs.org/chokidar/-/chokidar-4.0.3.tgz",
"integrity": "sha512-Qgzu8kfBvo+cA4962jnP1KkS6Dop5NS6g7R5LFYJr4b8Ub94PPQXUksCw9PvXoeXPRRddRNC5C1JQUR2SMGtnA==",
"dev": true,
"license": "MIT",
"optional": true,
"peer": true,
"dependencies": {
"readdirp": "^4.0.1"
},
@@ -13882,10 +13879,7 @@
"version": "4.1.2",
"resolved": "https://registry.npmjs.org/readdirp/-/readdirp-4.1.2.tgz",
"integrity": "sha512-GDhwkLfywWL2s6vEjyhri+eXmfH6j1L7JE27WhqLeYzoh/A3DBaYGEj2H/HFZCn/kMfim73FXxEJTw06WtxQwg==",
"dev": true,
"license": "MIT",
"optional": true,
"peer": true,
"engines": {
"node": ">= 14.18.0"
},
@@ -17316,7 +17310,7 @@
},
"packages/cli": {
"name": "@qwen-code/qwen-code",
"version": "0.7.0",
"version": "0.7.1",
"dependencies": {
"@google/genai": "1.30.0",
"@iarna/toml": "^2.2.5",
@@ -17953,7 +17947,7 @@
},
"packages/core": {
"name": "@qwen-code/qwen-code-core",
"version": "0.7.0",
"version": "0.7.1",
"hasInstallScript": true,
"dependencies": {
"@anthropic-ai/sdk": "^0.36.1",
@@ -17974,6 +17968,7 @@
"ajv-formats": "^3.0.0",
"async-mutex": "^0.5.0",
"chardet": "^2.1.0",
"chokidar": "^4.0.3",
"diff": "^7.0.0",
"dotenv": "^17.1.0",
"fast-levenshtein": "^2.0.6",
@@ -18593,7 +18588,7 @@
},
"packages/sdk-typescript": {
"name": "@qwen-code/sdk",
"version": "0.1.0",
"version": "0.1.3",
"license": "Apache-2.0",
"dependencies": {
"@modelcontextprotocol/sdk": "^1.25.1",
@@ -21413,7 +21408,7 @@
},
"packages/test-utils": {
"name": "@qwen-code/qwen-code-test-utils",
"version": "0.7.0",
"version": "0.7.1",
"dev": true,
"license": "Apache-2.0",
"devDependencies": {
@@ -21425,7 +21420,7 @@
},
"packages/vscode-ide-companion": {
"name": "qwen-code-vscode-ide-companion",
"version": "0.7.0",
"version": "0.7.1",
"license": "LICENSE",
"dependencies": {
"@modelcontextprotocol/sdk": "^1.25.1",

View File

@@ -1,6 +1,6 @@
{
"name": "@qwen-code/qwen-code",
"version": "0.7.0",
"version": "0.7.1",
"engines": {
"node": ">=20.0.0"
},
@@ -13,7 +13,7 @@
"url": "git+https://github.com/QwenLM/qwen-code.git"
},
"config": {
"sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.7.0"
"sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.7.1"
},
"scripts": {
"start": "cross-env node scripts/start.js",
@@ -125,7 +125,7 @@
"lint-staged": {
"*.{js,jsx,ts,tsx}": [
"prettier --write",
"eslint --fix --max-warnings 0"
"eslint --fix --max-warnings 0 --no-warn-ignored"
],
"*.{json,md}": [
"prettier --write"

View File

@@ -1,6 +1,6 @@
{
"name": "@qwen-code/qwen-code",
"version": "0.7.0",
"version": "0.7.1",
"description": "Qwen Code",
"repository": {
"type": "git",
@@ -33,7 +33,7 @@
"dist"
],
"config": {
"sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.7.0"
"sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.7.1"
},
"dependencies": {
"@google/genai": "1.30.0",

View File

@@ -170,7 +170,17 @@ function normalizeOutputFormat(
}
export async function parseArguments(settings: Settings): Promise<CliArgs> {
const rawArgv = hideBin(process.argv);
let rawArgv = hideBin(process.argv);
// hack: if the first argument is the CLI entry point, remove it
if (
rawArgv.length > 0 &&
(rawArgv[0].endsWith('/dist/qwen-cli/cli.js') ||
rawArgv[0].endsWith('/dist/cli.js'))
) {
rawArgv = rawArgv.slice(1);
}
const yargsInstance = yargs(rawArgv)
.locale('en')
.scriptName('qwen')
@@ -864,11 +874,10 @@ export async function loadCliConfig(
}
};
if (
!interactive &&
!argv.experimentalAcp &&
inputFormat !== InputFormat.STREAM_JSON
) {
// ACP mode check: must include both --acp (current) and --experimental-acp (deprecated).
// Without this check, edit, write_file, run_shell_command would be excluded in ACP mode.
const isAcpMode = argv.acp || argv.experimentalAcp;
if (!interactive && !isAcpMode && inputFormat !== InputFormat.STREAM_JSON) {
switch (approvalMode) {
case ApprovalMode.PLAN:
case ApprovalMode.DEFAULT:

View File

@@ -55,6 +55,7 @@ import { disableExtension } from './extension.js';
// These imports will get the versions from the vi.mock('./settings.js', ...) factory.
import {
getSettingsWarnings,
loadSettings,
USER_SETTINGS_PATH, // This IS the mocked path.
getSystemSettingsPath,
@@ -418,6 +419,86 @@ describe('Settings Loading and Merging', () => {
});
});
it('should warn about ignored legacy keys in a v2 settings file', () => {
(mockFsExistsSync as Mock).mockImplementation(
(p: fs.PathLike) => p === USER_SETTINGS_PATH,
);
const userSettingsContent = {
[SETTINGS_VERSION_KEY]: SETTINGS_VERSION,
usageStatisticsEnabled: false,
};
(fs.readFileSync as Mock).mockImplementation(
(p: fs.PathOrFileDescriptor) => {
if (p === USER_SETTINGS_PATH)
return JSON.stringify(userSettingsContent);
return '{}';
},
);
const settings = loadSettings(MOCK_WORKSPACE_DIR);
expect(getSettingsWarnings(settings)).toEqual(
expect.arrayContaining([
expect.stringContaining(
"Legacy setting 'usageStatisticsEnabled' will be ignored",
),
]),
);
expect(getSettingsWarnings(settings)).toEqual(
expect.arrayContaining([
expect.stringContaining("'privacy.usageStatisticsEnabled'"),
]),
);
});
it('should warn about unknown top-level keys in a v2 settings file', () => {
(mockFsExistsSync as Mock).mockImplementation(
(p: fs.PathLike) => p === USER_SETTINGS_PATH,
);
const userSettingsContent = {
[SETTINGS_VERSION_KEY]: SETTINGS_VERSION,
someUnknownKey: 'value',
};
(fs.readFileSync as Mock).mockImplementation(
(p: fs.PathOrFileDescriptor) => {
if (p === USER_SETTINGS_PATH)
return JSON.stringify(userSettingsContent);
return '{}';
},
);
const settings = loadSettings(MOCK_WORKSPACE_DIR);
expect(getSettingsWarnings(settings)).toEqual(
expect.arrayContaining([
expect.stringContaining(
"Unknown setting 'someUnknownKey' will be ignored",
),
]),
);
});
it('should not warn for valid v2 container keys', () => {
(mockFsExistsSync as Mock).mockImplementation(
(p: fs.PathLike) => p === USER_SETTINGS_PATH,
);
const userSettingsContent = {
[SETTINGS_VERSION_KEY]: SETTINGS_VERSION,
model: { name: 'qwen-coder' },
};
(fs.readFileSync as Mock).mockImplementation(
(p: fs.PathOrFileDescriptor) => {
if (p === USER_SETTINGS_PATH)
return JSON.stringify(userSettingsContent);
return '{}';
},
);
const settings = loadSettings(MOCK_WORKSPACE_DIR);
expect(getSettingsWarnings(settings)).toEqual([]);
});
it('should rewrite allowedTools to tools.allowed during migration', () => {
(mockFsExistsSync as Mock).mockImplementation(
(p: fs.PathLike) => p === USER_SETTINGS_PATH,

View File

@@ -344,6 +344,97 @@ const KNOWN_V2_CONTAINERS = new Set(
Object.values(MIGRATION_MAP).map((path) => path.split('.')[0]),
);
function getSettingsFileKeyWarnings(
settings: Record<string, unknown>,
settingsFilePath: string,
): string[] {
const version = settings[SETTINGS_VERSION_KEY];
if (typeof version !== 'number' || version < SETTINGS_VERSION) {
return [];
}
const warnings: string[] = [];
const ignoredLegacyKeys = new Set<string>();
// Ignored legacy keys (V1 top-level keys that moved to a nested V2 path).
for (const [oldKey, newPath] of Object.entries(MIGRATION_MAP)) {
if (oldKey === newPath) {
continue;
}
if (!(oldKey in settings)) {
continue;
}
const oldValue = settings[oldKey];
// If this key is a V2 container (like 'model') and it's already an object,
// it's likely already in V2 format. Don't warn.
if (
KNOWN_V2_CONTAINERS.has(oldKey) &&
typeof oldValue === 'object' &&
oldValue !== null &&
!Array.isArray(oldValue)
) {
continue;
}
ignoredLegacyKeys.add(oldKey);
warnings.push(
`⚠️ Legacy setting '${oldKey}' will be ignored in ${settingsFilePath}. Please use '${newPath}' instead.`,
);
}
// Unknown top-level keys.
const schemaKeys = new Set(Object.keys(getSettingsSchema()));
for (const key of Object.keys(settings)) {
if (key === SETTINGS_VERSION_KEY) {
continue;
}
if (ignoredLegacyKeys.has(key)) {
continue;
}
if (schemaKeys.has(key)) {
continue;
}
warnings.push(
`⚠️ Unknown setting '${key}' will be ignored in ${settingsFilePath}.`,
);
}
return warnings;
}
/**
* Collects warnings for ignored legacy and unknown settings keys.
*
* For `$version: 2` settings files, we do not apply implicit migrations.
* Instead, we surface actionable, de-duplicated warnings in the terminal UI.
*/
export function getSettingsWarnings(loadedSettings: LoadedSettings): string[] {
const warningSet = new Set<string>();
for (const scope of [SettingScope.User, SettingScope.Workspace]) {
const settingsFile = loadedSettings.forScope(scope);
if (settingsFile.rawJson === undefined) {
continue; // File not present / not loaded.
}
const settingsObject = settingsFile.originalSettings as unknown as Record<
string,
unknown
>;
for (const warning of getSettingsFileKeyWarnings(
settingsObject,
settingsFile.path,
)) {
warningSet.add(warning);
}
}
return [...warningSet];
}
export function migrateSettingsToV1(
v2Settings: Record<string, unknown>,
): Record<string, unknown> {

View File

@@ -17,7 +17,11 @@ import * as cliConfig from './config/config.js';
import { loadCliConfig, parseArguments } from './config/config.js';
import { ExtensionStorage, loadExtensions } from './config/extension.js';
import type { DnsResolutionOrder, LoadedSettings } from './config/settings.js';
import { loadSettings, migrateDeprecatedSettings } from './config/settings.js';
import {
getSettingsWarnings,
loadSettings,
migrateDeprecatedSettings,
} from './config/settings.js';
import {
initializeApp,
type InitializationResult,
@@ -342,6 +346,7 @@ export async function main() {
extensionEnablementManager,
argv,
);
registerCleanup(() => config.shutdown());
if (config.getListExtensions()) {
console.log('Installed extensions:');
@@ -400,12 +405,15 @@ export async function main() {
let input = config.getQuestion();
const startupWarnings = [
...(await getStartupWarnings()),
...(await getUserStartupWarnings({
workspaceRoot: process.cwd(),
useRipgrep: settings.merged.tools?.useRipgrep ?? true,
useBuiltinRipgrep: settings.merged.tools?.useBuiltinRipgrep ?? true,
})),
...new Set([
...(await getStartupWarnings()),
...(await getUserStartupWarnings({
workspaceRoot: process.cwd(),
useRipgrep: settings.merged.tools?.useRipgrep ?? true,
useBuiltinRipgrep: settings.merged.tools?.useBuiltinRipgrep ?? true,
})),
...getSettingsWarnings(settings),
]),
];
// Render UI, passing necessary config values. Check that there is no command line question.

View File

@@ -31,6 +31,7 @@ import { quitCommand } from '../ui/commands/quitCommand.js';
import { restoreCommand } from '../ui/commands/restoreCommand.js';
import { resumeCommand } from '../ui/commands/resumeCommand.js';
import { settingsCommand } from '../ui/commands/settingsCommand.js';
import { skillsCommand } from '../ui/commands/skillsCommand.js';
import { statsCommand } from '../ui/commands/statsCommand.js';
import { summaryCommand } from '../ui/commands/summaryCommand.js';
import { terminalSetupCommand } from '../ui/commands/terminalSetupCommand.js';
@@ -38,6 +39,7 @@ import { themeCommand } from '../ui/commands/themeCommand.js';
import { toolsCommand } from '../ui/commands/toolsCommand.js';
import { vimCommand } from '../ui/commands/vimCommand.js';
import { setupGithubCommand } from '../ui/commands/setupGithubCommand.js';
import { insightCommand } from '../ui/commands/insightCommand.js';
/**
* Loads the core, hard-coded slash commands that are an integral part
@@ -78,6 +80,7 @@ export class BuiltinCommandLoader implements ICommandLoader {
quitCommand,
restoreCommand(this.config),
resumeCommand,
...(this.config?.getExperimentalSkills?.() ? [skillsCommand] : []),
statsCommand,
summaryCommand,
themeCommand,
@@ -86,6 +89,7 @@ export class BuiltinCommandLoader implements ICommandLoader {
vimCommand,
setupGithubCommand,
terminalSetupCommand,
insightCommand,
];
return allDefinitions.filter((cmd): cmd is SlashCommand => cmd !== null);

View File

@@ -0,0 +1,120 @@
# Qwen Code Insights Page
A React-based visualization dashboard for displaying coding activity insights and statistics.
## Development
This application consists of two parts:
1. **Backend (Express Server)**: Serves API endpoints and processes chat history data
2. **Frontend (Vite + React)**: Development server with HMR
### Running in Development Mode
You need to run both the backend and frontend servers:
**Terminal 1 - Backend Server (Port 3001):**
```bash
pnpm dev:server
```
**Terminal 2 - Frontend Dev Server (Port 3000):**
```bash
pnpm dev
```
Then open <http://localhost:3000> in your browser.
The Vite dev server will proxy `/api` requests to the backend server at port 3001.
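For reference, such a proxy is usually declared in the Vite config; the sketch below is an assumption of what it might look like, not the actual `vite.config.ts` from this changeset:
```ts
// vite.config.ts — assumed sketch, not part of this diff
import { defineConfig } from 'vite';
import react from '@vitejs/plugin-react';

export default defineConfig({
  plugins: [react()],
  server: {
    port: 3000,
    proxy: {
      // Forward /api/* requests to the Express backend started via `pnpm dev:server`
      '/api': 'http://localhost:3001',
    },
  },
});
```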
### Building for Production
```bash
pnpm build
```
This compiles TypeScript and builds the React application. The output will be in the `dist/` directory.
In production, the Express server serves both the static files and API endpoints from a single port.
## Architecture
- **Frontend**: React + TypeScript + Vite + Chart.js
- **Backend**: Express + Node.js
- **Data Source**: JSONL chat history files from `~/.qwen/projects/*/chats/`
## Original Vite Template Info
This template provides a minimal setup to get React working in Vite with HMR and some ESLint rules.
Currently, two official plugins are available:
- [@vitejs/plugin-react](https://github.com/vitejs/vite-plugin-react/blob/main/packages/plugin-react) uses [Babel](https://babeljs.io/) (or [oxc](https://oxc.rs) when used in [rolldown-vite](https://vite.dev/guide/rolldown)) for Fast Refresh
- [@vitejs/plugin-react-swc](https://github.com/vitejs/vite-plugin-react/blob/main/packages/plugin-react-swc) uses [SWC](https://swc.rs/) for Fast Refresh
## React Compiler
The React Compiler is not enabled on this template because of its impact on dev & build performance. To add it, see [this documentation](https://react.dev/learn/react-compiler/installation).
## Expanding the ESLint configuration
If you are developing a production application, we recommend updating the configuration to enable type-aware lint rules:
```js
export default defineConfig([
globalIgnores(['dist']),
{
files: ['**/*.{ts,tsx}'],
extends: [
// Other configs...
// Remove tseslint.configs.recommended and replace with this
tseslint.configs.recommendedTypeChecked,
// Alternatively, use this for stricter rules
tseslint.configs.strictTypeChecked,
// Optionally, add this for stylistic rules
tseslint.configs.stylisticTypeChecked,
// Other configs...
],
languageOptions: {
parserOptions: {
project: ['./tsconfig.node.json', './tsconfig.app.json'],
tsconfigRootDir: import.meta.dirname,
},
// other options...
},
},
]);
```
You can also install [eslint-plugin-react-x](https://github.com/Rel1cx/eslint-react/tree/main/packages/plugins/eslint-plugin-react-x) and [eslint-plugin-react-dom](https://github.com/Rel1cx/eslint-react/tree/main/packages/plugins/eslint-plugin-react-dom) for React-specific lint rules:
```js
// eslint.config.js
import reactX from 'eslint-plugin-react-x';
import reactDom from 'eslint-plugin-react-dom';
export default defineConfig([
globalIgnores(['dist']),
{
files: ['**/*.{ts,tsx}'],
extends: [
// Other configs...
// Enable lint rules for React
reactX.configs['recommended-typescript'],
// Enable lint rules for React DOM
reactDom.configs.recommended,
],
languageOptions: {
parserOptions: {
project: ['./tsconfig.node.json', './tsconfig.app.json'],
tsconfigRootDir: import.meta.dirname,
},
// other options...
},
},
]);
```

View File

@@ -0,0 +1,13 @@
<!doctype html>
<html lang="en">
<head>
<meta charset="UTF-8" />
<link rel="icon" type="image/svg+xml" href="/qwen.png" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<title>Qwen Code Insight</title>
</head>
<body>
<div id="root"></div>
<script type="module" src="/src/main.tsx"></script>
</body>
</html>

View File

@@ -0,0 +1,42 @@
{
"name": "insight-page",
"private": true,
"version": "0.0.0",
"type": "module",
"scripts": {
"dev": "vite",
"dev:server": "BASE_DIR=$HOME/.qwen/projects PORT=3001 tsx ../insightServer.ts",
"build": "tsc -b && vite build",
"lint": "eslint .",
"preview": "vite preview"
},
"dependencies": {
"@uiw/react-heat-map": "^2.3.3",
"chart.js": "^4.5.1",
"html2canvas": "^1.4.1",
"react": "^19.2.0",
"react-dom": "^19.2.0"
},
"devDependencies": {
"@eslint/js": "^9.39.1",
"@types/node": "^24.10.1",
"@types/react": "^19.2.5",
"@types/react-dom": "^19.2.3",
"@vitejs/plugin-react": "^5.1.1",
"autoprefixer": "^10.4.20",
"eslint": "^9.39.1",
"eslint-plugin-react-hooks": "^7.0.1",
"eslint-plugin-react-refresh": "^0.4.24",
"globals": "^16.5.0",
"postcss": "^8.4.49",
"tailwindcss": "^3.4.17",
"typescript": "~5.9.3",
"typescript-eslint": "^8.46.4",
"vite": "npm:rolldown-vite@7.2.5"
},
"pnpm": {
"overrides": {
"vite": "npm:rolldown-vite@7.2.5"
}
}
}

File diff suppressed because it is too large

View File

@@ -0,0 +1,6 @@
export default {
plugins: {
tailwindcss: {},
autoprefixer: {},
},
};

Binary file not shown (new image, 79 KiB).

View File

@@ -0,0 +1,395 @@
import { useEffect, useRef, useState, type CSSProperties } from 'react';
import {
Chart,
LineController,
LineElement,
BarController,
BarElement,
CategoryScale,
LinearScale,
PointElement,
Legend,
Title,
Tooltip,
} from 'chart.js';
import type { ChartConfiguration } from 'chart.js';
import HeatMap from '@uiw/react-heat-map';
import html2canvas from 'html2canvas';
// Register Chart.js components
Chart.register(
LineController,
LineElement,
BarController,
BarElement,
CategoryScale,
LinearScale,
PointElement,
Legend,
Title,
Tooltip,
);
interface UsageMetadata {
input: number;
output: number;
total: number;
}
interface InsightData {
heatmap: { [date: string]: number };
tokenUsage: { [date: string]: UsageMetadata };
currentStreak: number;
longestStreak: number;
longestWorkDate: string | null;
longestWorkDuration: number;
activeHours: { [hour: number]: number };
latestActiveTime: string | null;
achievements: Array<{
id: string;
name: string;
description: string;
}>;
}
function App() {
const [insights, setInsights] = useState<InsightData | null>(null);
const [loading, setLoading] = useState(true);
const [error, setError] = useState<string | null>(null);
const hourChartRef = useRef<HTMLCanvasElement>(null);
const hourChartInstance = useRef<Chart | null>(null);
const containerRef = useRef<HTMLDivElement>(null);
// Load insights data
useEffect(() => {
const loadInsights = async () => {
try {
setLoading(true);
const response = await fetch('/api/insights');
if (!response.ok) {
throw new Error('Failed to fetch insights');
}
const data: InsightData = await response.json();
setInsights(data);
setError(null);
} catch (err) {
setError((err as Error).message);
setInsights(null);
} finally {
setLoading(false);
}
};
loadInsights();
}, []);
// Create hour chart when insights change
useEffect(() => {
if (!insights || !hourChartRef.current) return;
// Destroy existing chart if it exists
if (hourChartInstance.current) {
hourChartInstance.current.destroy();
}
const labels = Array.from({ length: 24 }, (_, i) => `${i}:00`);
const data = labels.map((_, i) => insights.activeHours[i] || 0);
const ctx = hourChartRef.current.getContext('2d');
if (!ctx) return;
hourChartInstance.current = new Chart(ctx, {
type: 'bar',
data: {
labels,
datasets: [
{
label: 'Activity per Hour',
data,
backgroundColor: 'rgba(52, 152, 219, 0.7)',
borderColor: 'rgba(52, 152, 219, 1)',
borderWidth: 1,
},
],
},
options: {
indexAxis: 'y',
responsive: true,
maintainAspectRatio: false,
scales: {
x: {
beginAtZero: true,
},
},
plugins: {
legend: {
display: false,
},
},
} as ChartConfiguration['options'],
});
}, [insights]);
  const handleExport = async () => {
    if (!containerRef.current) return;
    const button = document.getElementById('export-btn') as HTMLButtonElement;
    try {
      // Hide the export button so it is not captured in the screenshot.
      button.style.display = 'none';
      const canvas = await html2canvas(containerRef.current, {
        scale: 2,
        useCORS: true,
        logging: false,
      });
      const imgData = canvas.toDataURL('image/png');
      const link = document.createElement('a');
      link.href = imgData;
      link.download = `qwen-insights-${new Date().toISOString().slice(0, 10)}.png`;
      link.click();
    } catch (err) {
      console.error('Error capturing image:', err);
      alert('Failed to export image. Please try again.');
    } finally {
      // Restore the button even if the capture fails.
      button.style.display = 'block';
    }
  };
if (loading) {
return (
<div className="flex min-h-screen items-center justify-center bg-gradient-to-br from-slate-50 via-white to-slate-100">
<div className="glass-card px-8 py-6 text-center">
<h2 className="text-xl font-semibold text-slate-900">
Loading insights...
</h2>
<p className="mt-2 text-sm text-slate-600">
Fetching your coding patterns
</p>
</div>
</div>
);
}
if (error || !insights) {
return (
<div className="flex min-h-screen items-center justify-center bg-gradient-to-br from-slate-50 via-white to-slate-100">
<div className="glass-card px-8 py-6 text-center">
<h2 className="text-xl font-semibold text-rose-700">
Error loading insights
</h2>
<p className="mt-2 text-sm text-slate-600">
{error || 'Please try again later.'}
</p>
</div>
</div>
);
}
// Prepare heatmap data for react-heat-map
const heatmapData = Object.entries(insights.heatmap).map(([date, count]) => ({
date,
count,
}));
const cardClass = 'glass-card p-6';
const sectionTitleClass =
'text-lg font-semibold tracking-tight text-slate-900';
const captionClass = 'text-sm font-medium text-slate-500';
return (
<div className="min-h-screen" ref={containerRef}>
<div className="mx-auto max-w-6xl px-6 py-10 md:py-12">
<header className="mb-8 space-y-3 text-center">
<p className="text-xs font-semibold uppercase tracking-[0.2em] text-slate-500">
Insights
</p>
<h1 className="text-3xl font-semibold text-slate-900 md:text-4xl">
Qwen Code Insights
</h1>
<p className="text-sm text-slate-600">
Your personalized coding journey and patterns
</p>
</header>
<div className="grid gap-4 md:grid-cols-3 md:gap-6">
<div className={`${cardClass} h-full`}>
<div className="flex items-start justify-between">
<div>
<p className={captionClass}>Current Streak</p>
<p className="mt-1 text-4xl font-bold text-slate-900">
{insights.currentStreak}
<span className="ml-2 text-base font-semibold text-slate-500">
days
</span>
</p>
</div>
<span className="rounded-full bg-emerald-50 px-4 py-2 text-sm font-semibold text-emerald-700">
Longest {insights.longestStreak}d
</span>
</div>
</div>
<div className={`${cardClass} h-full`}>
<div className="flex items-center justify-between">
<h3 className={sectionTitleClass}>Active Hours</h3>
<span className="rounded-full bg-slate-100 px-3 py-1 text-xs font-semibold text-slate-600">
24h
</span>
</div>
<div className="mt-4 h-56 w-full">
<canvas ref={hourChartRef}></canvas>
</div>
</div>
<div className={`${cardClass} h-full space-y-3`}>
<h3 className={sectionTitleClass}>Work Session</h3>
<div className="grid grid-cols-2 gap-3 text-sm text-slate-700">
<div className="rounded-xl bg-slate-50 px-3 py-2">
<p className="text-xs font-semibold uppercase tracking-wide text-slate-500">
Longest
</p>
<p className="mt-1 text-lg font-semibold text-slate-900">
{insights.longestWorkDuration}m
</p>
</div>
<div className="rounded-xl bg-slate-50 px-3 py-2">
<p className="text-xs font-semibold uppercase tracking-wide text-slate-500">
Date
</p>
<p className="mt-1 text-lg font-semibold text-slate-900">
{insights.longestWorkDate || '-'}
</p>
</div>
<div className="col-span-2 rounded-xl bg-slate-50 px-3 py-2">
<p className="text-xs font-semibold uppercase tracking-wide text-slate-500">
Last Active
</p>
<p className="mt-1 text-lg font-semibold text-slate-900">
{insights.latestActiveTime || '-'}
</p>
</div>
</div>
</div>
</div>
<div className={`${cardClass} mt-4 space-y-4 md:mt-6`}>
<div className="flex items-center justify-between">
<h3 className={sectionTitleClass}>Activity Heatmap</h3>
<span className="text-xs font-semibold text-slate-500">
Past year
</span>
</div>
<div className="overflow-x-auto">
<div className="min-w-[720px] rounded-xl border border-slate-100 bg-white/70 p-4 shadow-inner shadow-slate-100">
<HeatMap
value={heatmapData}
width={1000}
style={{ color: '#0f172a' } satisfies CSSProperties}
startDate={
new Date(new Date().setFullYear(new Date().getFullYear() - 1))
}
endDate={new Date()}
rectSize={14}
legendCellSize={12}
rectProps={{
rx: 2,
}}
panelColors={{
0: '#e2e8f0',
2: '#a5d8ff',
4: '#74c0fc',
10: '#339af0',
20: '#1c7ed6',
}}
/>
</div>
</div>
</div>
<div className={`${cardClass} mt-4 md:mt-6`}>
<div className="space-y-3">
<h3 className={sectionTitleClass}>Token Usage</h3>
<div className="grid grid-cols-3 gap-3">
<div className="rounded-xl bg-slate-50 px-4 py-3">
<p className="text-xs font-semibold uppercase tracking-wide text-slate-500">
Input
</p>
<p className="mt-1 text-2xl font-bold text-slate-900">
{Object.values(insights.tokenUsage)
.reduce((acc, usage) => acc + usage.input, 0)
.toLocaleString()}
</p>
</div>
<div className="rounded-xl bg-slate-50 px-4 py-3">
<p className="text-xs font-semibold uppercase tracking-wide text-slate-500">
Output
</p>
<p className="mt-1 text-2xl font-bold text-slate-900">
{Object.values(insights.tokenUsage)
.reduce((acc, usage) => acc + usage.output, 0)
.toLocaleString()}
</p>
</div>
<div className="rounded-xl bg-slate-50 px-4 py-3">
<p className="text-xs font-semibold uppercase tracking-wide text-slate-500">
Total
</p>
<p className="mt-1 text-2xl font-bold text-slate-900">
{Object.values(insights.tokenUsage)
.reduce((acc, usage) => acc + usage.total, 0)
.toLocaleString()}
</p>
</div>
</div>
</div>
</div>
<div className={`${cardClass} mt-4 space-y-4 md:mt-6`}>
<div className="flex items-center justify-between">
<h3 className={sectionTitleClass}>Achievements</h3>
<span className="text-xs font-semibold text-slate-500">
{insights.achievements.length} total
</span>
</div>
{insights.achievements.length === 0 ? (
<p className="text-sm text-slate-600">
No achievements yet. Keep coding!
</p>
) : (
<div className="divide-y divide-slate-200">
{insights.achievements.map((achievement) => (
<div
key={achievement.id}
className="flex flex-col gap-1 py-3 text-left"
>
<span className="text-base font-semibold text-slate-900">
{achievement.name}
</span>
<p className="text-sm text-slate-600">
{achievement.description}
</p>
</div>
))}
</div>
)}
</div>
<div className="mt-6 flex justify-center">
<button
id="export-btn"
className="group inline-flex items-center gap-2 rounded-full bg-slate-900 px-5 py-3 text-sm font-semibold text-white shadow-soft transition focus-visible:outline focus-visible:outline-2 focus-visible:outline-offset-2 focus-visible:outline-slate-400 hover:-translate-y-[1px] hover:shadow-lg active:translate-y-[1px]"
onClick={handleExport}
>
Export as Image
<span className="text-slate-200 transition group-hover:translate-x-0.5">
</span>
</button>
</div>
</div>
</div>
);
}
export default App;

View File

@@ -0,0 +1,15 @@
@tailwind base;
@tailwind components;
@tailwind utilities;
@layer base {
body {
@apply min-h-screen bg-gradient-to-br from-slate-50 via-white to-slate-100 text-slate-900 antialiased;
}
}
@layer components {
.glass-card {
@apply rounded-2xl border border-slate-200 bg-white/80 shadow-soft backdrop-blur;
}
}

View File

@@ -0,0 +1,10 @@
import { StrictMode } from 'react';
import { createRoot } from 'react-dom/client';
import './index.css';
import App from './App.tsx';
createRoot(document.getElementById('root')!).render(
<StrictMode>
<App />
</StrictMode>,
);

View File

@@ -0,0 +1,18 @@
import type { Config } from 'tailwindcss';
const config: Config = {
content: ['./index.html', './src/**/*.{ts,tsx}'],
theme: {
extend: {
boxShadow: {
soft: '0 10px 40px rgba(15, 23, 42, 0.08)',
},
borderRadius: {
xl: '1.25rem',
},
},
},
plugins: [],
};
export default config;

View File

@@ -0,0 +1,28 @@
{
"compilerOptions": {
"tsBuildInfoFile": "./node_modules/.tmp/tsconfig.app.tsbuildinfo",
"target": "ES2022",
"useDefineForClassFields": true,
"lib": ["ES2022", "DOM", "DOM.Iterable"],
"module": "ESNext",
"types": ["vite/client"],
"skipLibCheck": true,
/* Bundler mode */
"moduleResolution": "bundler",
"allowImportingTsExtensions": true,
"verbatimModuleSyntax": true,
"moduleDetection": "force",
"noEmit": true,
"jsx": "react-jsx",
/* Linting */
"strict": true,
"noUnusedLocals": true,
"noUnusedParameters": true,
"erasableSyntaxOnly": true,
"noFallthroughCasesInSwitch": true,
"noUncheckedSideEffectImports": true
},
"include": ["src"]
}

View File

@@ -0,0 +1,7 @@
{
"files": [],
"references": [
{ "path": "./tsconfig.app.json" },
{ "path": "./tsconfig.node.json" }
]
}

View File

@@ -0,0 +1,26 @@
{
"compilerOptions": {
"tsBuildInfoFile": "./node_modules/.tmp/tsconfig.node.tsbuildinfo",
"target": "ES2023",
"lib": ["ES2023"],
"module": "ESNext",
"types": ["node"],
"skipLibCheck": true,
/* Bundler mode */
"moduleResolution": "bundler",
"allowImportingTsExtensions": true,
"verbatimModuleSyntax": true,
"moduleDetection": "force",
"noEmit": true,
/* Linting */
"strict": true,
"noUnusedLocals": true,
"noUnusedParameters": true,
"erasableSyntaxOnly": true,
"noFallthroughCasesInSwitch": true,
"noUncheckedSideEffectImports": true
},
"include": ["vite.config.ts"]
}

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,14 @@
<!doctype html>
<html lang="en">
<head>
<meta charset="UTF-8" />
<link rel="icon" type="image/svg+xml" href="/qwen.png" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<title>insight-page</title>
<script type="module" crossorigin src="/assets/index-D7obW1Jn.js"></script>
<link rel="stylesheet" crossorigin href="/assets/index-CV6J1oXz.css">
</head>
<body>
<div id="root"></div>
</body>
</html>

Binary file not shown (new image, 79 KiB).

View File

@@ -0,0 +1,19 @@
import { defineConfig } from 'vite';
import react from '@vitejs/plugin-react';
// https://vite.dev/config/
export default defineConfig({
plugins: [react()],
build: {
outDir: 'views',
},
server: {
port: 3000,
proxy: {
'/api': {
target: 'http://localhost:3001',
changeOrigin: true,
},
},
},
});

View File

@@ -0,0 +1,404 @@
/**
* @license
* Copyright 2025 Qwen Code
* SPDX-License-Identifier: Apache-2.0
*/
import express from 'express';
import fs from 'fs/promises';
import path, { dirname } from 'path';
import { fileURLToPath } from 'url';
import type { ChatRecord } from '@qwen-code/qwen-code-core';
import { read } from '@qwen-code/qwen-code-core/src/utils/jsonl-utils.js';
interface StreakData {
currentStreak: number;
longestStreak: number;
dates: string[];
}
// For heat map data
interface HeatMapData {
[date: string]: number;
}
// For token usage data
interface TokenUsageData {
[date: string]: {
input: number;
output: number;
total: number;
};
}
// For achievement data
interface AchievementData {
id: string;
name: string;
description: string;
}
// For the final insight data
interface InsightData {
heatmap: HeatMapData;
tokenUsage: TokenUsageData;
currentStreak: number;
longestStreak: number;
longestWorkDate: string | null;
longestWorkDuration: number; // in minutes
activeHours: { [hour: number]: number };
latestActiveTime: string | null;
achievements: AchievementData[];
}
function debugLog(message: string) {
const timestamp = new Date().toISOString();
const logMessage = `[${timestamp}] ${message}\n`;
console.log(logMessage);
}
debugLog('Insight server starting...');
const __filename = fileURLToPath(import.meta.url);
const __dirname = dirname(__filename);
const app = express();
const PORT = process.env['PORT'];
const BASE_DIR = process.env['BASE_DIR'];
if (!BASE_DIR) {
debugLog('BASE_DIR environment variable is required');
process.exit(1);
}
// Serve static assets from the views/assets directory
app.use(
'/assets',
express.static(path.join(__dirname, 'insight-page', 'views', 'assets')),
);
app.get('/', (_req, res) => {
res.sendFile(path.join(__dirname, 'insight-page', 'views', 'index.html'));
});
// API endpoint to get insight data
app.get('/api/insights', async (_req, res) => {
try {
debugLog('Received request for insights data');
const insights = await generateInsights(BASE_DIR);
res.json(insights);
} catch (error) {
debugLog(`Error generating insights: ${error}`);
res.status(500).json({ error: 'Failed to generate insights' });
}
});
// Process chat files from all projects in the base directory and generate insights
async function generateInsights(baseDir: string): Promise<InsightData> {
// Initialize data structures
const heatmap: HeatMapData = {};
const tokenUsage: TokenUsageData = {};
const activeHours: { [hour: number]: number } = {};
const sessionStartTimes: { [sessionId: string]: Date } = {};
const sessionEndTimes: { [sessionId: string]: Date } = {};
try {
// Get all project directories in the base directory
const projectDirs = await fs.readdir(baseDir);
// Process each project directory
for (const projectDir of projectDirs) {
const projectPath = path.join(baseDir, projectDir);
const stats = await fs.stat(projectPath);
// Only process if it's a directory
if (stats.isDirectory()) {
const chatsDir = path.join(projectPath, 'chats');
let chatFiles: string[] = [];
try {
// Get all chat files in the chats directory
const files = await fs.readdir(chatsDir);
chatFiles = files.filter((file) => file.endsWith('.jsonl'));
} catch (error) {
if ((error as NodeJS.ErrnoException).code !== 'ENOENT') {
debugLog(
`Error reading chats directory for project ${projectDir}: ${error}`,
);
}
// Continue to next project if chats directory doesn't exist
continue;
}
// Process each chat file in this project
for (const file of chatFiles) {
const filePath = path.join(chatsDir, file);
const records = await read<ChatRecord>(filePath);
// Process each record
for (const record of records) {
const timestamp = new Date(record.timestamp);
const dateKey = formatDate(timestamp);
const hour = timestamp.getHours();
// Update heatmap (count of interactions per day)
heatmap[dateKey] = (heatmap[dateKey] || 0) + 1;
// Update active hours
activeHours[hour] = (activeHours[hour] || 0) + 1;
// Update token usage
if (record.usageMetadata) {
const usage = tokenUsage[dateKey] || {
input: 0,
output: 0,
total: 0,
};
usage.input += record.usageMetadata.promptTokenCount || 0;
usage.output += record.usageMetadata.candidatesTokenCount || 0;
usage.total += record.usageMetadata.totalTokenCount || 0;
tokenUsage[dateKey] = usage;
}
// Track session times
if (!sessionStartTimes[record.sessionId]) {
sessionStartTimes[record.sessionId] = timestamp;
}
sessionEndTimes[record.sessionId] = timestamp;
}
}
}
}
} catch (error) {
if ((error as NodeJS.ErrnoException).code === 'ENOENT') {
// Base directory doesn't exist, return empty insights
debugLog(`Base directory does not exist: ${baseDir}`);
} else {
debugLog(`Error reading base directory: ${error}`);
}
}
// Calculate streak data
const streakData = calculateStreaks(Object.keys(heatmap));
// Calculate longest work session
let longestWorkDuration = 0;
let longestWorkDate: string | null = null;
for (const sessionId in sessionStartTimes) {
const start = sessionStartTimes[sessionId];
const end = sessionEndTimes[sessionId];
const durationMinutes = Math.round(
(end.getTime() - start.getTime()) / (1000 * 60),
);
if (durationMinutes > longestWorkDuration) {
longestWorkDuration = durationMinutes;
longestWorkDate = formatDate(start);
}
}
  // Calculate latest active time from the most recent session activity.
  // (Heatmap keys are date-only strings, so they carry no time-of-day information.)
  let latestActiveTime: string | null = null;
  let latestTimestamp = new Date(0);
  for (const sessionId in sessionEndTimes) {
    const end = sessionEndTimes[sessionId];
    if (end > latestTimestamp) {
      latestTimestamp = end;
      latestActiveTime = end.toLocaleTimeString([], {
        hour: '2-digit',
        minute: '2-digit',
      });
    }
  }
// Calculate achievements
const achievements = calculateAchievements(activeHours, heatmap, tokenUsage);
return {
heatmap,
tokenUsage,
currentStreak: streakData.currentStreak,
longestStreak: streakData.longestStreak,
longestWorkDate,
longestWorkDuration,
activeHours,
latestActiveTime,
achievements,
};
}
// Helper function to format date as YYYY-MM-DD
function formatDate(date: Date): string {
return date.toISOString().split('T')[0];
}
// Calculate streaks from activity dates
function calculateStreaks(dates: string[]): StreakData {
if (dates.length === 0) {
return { currentStreak: 0, longestStreak: 0, dates: [] };
}
// Convert string dates to Date objects and sort them
const dateObjects = dates.map((dateStr) => new Date(dateStr));
dateObjects.sort((a, b) => a.getTime() - b.getTime());
let currentStreak = 1;
let maxStreak = 1;
let currentDate = new Date(dateObjects[0]);
currentDate.setHours(0, 0, 0, 0); // Normalize to start of day
for (let i = 1; i < dateObjects.length; i++) {
const nextDate = new Date(dateObjects[i]);
nextDate.setHours(0, 0, 0, 0); // Normalize to start of day
// Calculate difference in days
const diffDays = Math.floor(
(nextDate.getTime() - currentDate.getTime()) / (1000 * 60 * 60 * 24),
);
if (diffDays === 1) {
// Consecutive day
currentStreak++;
maxStreak = Math.max(maxStreak, currentStreak);
} else if (diffDays > 1) {
// Gap in streak
currentStreak = 1;
}
// If diffDays === 0, same day, so streak continues
currentDate = nextDate;
}
  // The current streak only counts if the last activity was today or yesterday;
  // otherwise it has already been broken.
  const today = new Date();
  today.setHours(0, 0, 0, 0);
  const yesterday = new Date(today);
  yesterday.setDate(yesterday.getDate() - 1);
  if (
    currentDate.getTime() !== today.getTime() &&
    currentDate.getTime() !== yesterday.getTime()
  ) {
    currentStreak = 0;
  }
return {
currentStreak,
longestStreak: maxStreak,
dates,
};
}
// Calculate achievements based on user behavior
function calculateAchievements(
activeHours: { [hour: number]: number },
heatmap: HeatMapData,
_tokenUsage: TokenUsageData,
): AchievementData[] {
const achievements: AchievementData[] = [];
// Total activities
const totalActivities = Object.values(heatmap).reduce(
(sum, count) => sum + count,
0,
);
// Total tokens used - commented out since it's not currently used
// const totalTokens = Object.values(tokenUsage).reduce((sum, usage) => sum + usage.total, 0);
// Total sessions
const totalSessions = Object.keys(heatmap).length;
// Calculate percentage of activity per hour
const totalHourlyActivity = Object.values(activeHours).reduce(
(sum, count) => sum + count,
0,
);
if (totalHourlyActivity > 0) {
// Midnight debugger: 20% of sessions happen between 12AM-5AM
const midnightActivity =
(activeHours[0] || 0) +
(activeHours[1] || 0) +
(activeHours[2] || 0) +
(activeHours[3] || 0) +
(activeHours[4] || 0) +
(activeHours[5] || 0);
if (midnightActivity / totalHourlyActivity >= 0.2) {
achievements.push({
id: 'midnight-debugger',
name: 'Midnight Debugger',
description: '20% of your sessions happen between 12AM-5AM',
});
}
// Morning coder: 20% of sessions happen between 6AM-9AM
const morningActivity =
(activeHours[6] || 0) +
(activeHours[7] || 0) +
(activeHours[8] || 0) +
(activeHours[9] || 0);
if (morningActivity / totalHourlyActivity >= 0.2) {
achievements.push({
id: 'morning-coder',
name: 'Morning Coder',
description: '20% of your sessions happen between 6AM-9AM',
});
}
}
// Patient king: average conversation length >= 10 exchanges
if (totalSessions > 0) {
const avgExchanges = totalActivities / totalSessions;
if (avgExchanges >= 10) {
achievements.push({
id: 'patient-king',
name: 'Patient King',
description: 'Your average conversation length is 10+ exchanges',
});
}
}
  // Quick finisher: sessions typically end within 2 exchanges.
  // Per-session exchange counts are not readily available here, so this is a
  // simplified estimate based on the average number of exchanges per session.
  if (totalSessions > 0) {
    const avgPerSession = totalActivities / totalSessions;
    if (avgPerSession <= 2) {
      achievements.push({
        id: 'quick-finisher',
        name: 'Quick Finisher',
        description: '70% of your sessions end in 2 exchanges or fewer',
      });
    }
  }
// Explorer: for users with insufficient data or default
if (achievements.length === 0) {
achievements.push({
id: 'explorer',
name: 'Explorer',
description: 'Getting started with Qwen Code',
});
}
return achievements;
}
// Start the server
app.listen(PORT, () => {
debugLog(`Server running at http://localhost:${PORT}/`);
debugLog(`Analyzing projects in: ${BASE_DIR}`);
debugLog('Server is running. Press Ctrl+C to stop.');
});

View File

@@ -83,12 +83,26 @@ export const useAuthCommand = (
async (authType: AuthType, credentials?: OpenAICredentials) => {
try {
const authTypeScope = getPersistScopeForModelSelection(settings);
// Persist authType
settings.setValue(
authTypeScope,
'security.auth.selectedType',
authType,
);
// Persist model from ContentGenerator config (handles fallback cases)
// This ensures that when syncAfterAuthRefresh falls back to default model,
// it gets persisted to settings.json
const contentGeneratorConfig = config.getContentGeneratorConfig();
if (contentGeneratorConfig?.model) {
settings.setValue(
authTypeScope,
'model.name',
contentGeneratorConfig.model,
);
}
// Only update credentials if not switching to QWEN_OAUTH,
// so that OpenAI credentials are preserved when switching to QWEN_OAUTH.
if (authType !== AuthType.QWEN_OAUTH && credentials) {
@@ -106,9 +120,6 @@ export const useAuthCommand = (
credentials.baseUrl,
);
}
if (credentials?.model != null) {
settings.setValue(authTypeScope, 'model.name', credentials.model);
}
}
} catch (error) {
handleAuthFailure(error);

View File

@@ -0,0 +1,190 @@
/**
* @license
* Copyright 2025 Qwen Code
* SPDX-License-Identifier: Apache-2.0
*/
import type { CommandContext, SlashCommand } from './types.js';
import { CommandKind } from './types.js';
import { MessageType } from '../types.js';
import { t } from '../../i18n/index.js';
import { spawn } from 'child_process';
import { join } from 'path';
import os from 'os';
import { registerCleanup } from '../../utils/cleanup.js';
import net from 'net';
// Track the insight server subprocess so we can terminate it on quit
let insightServerProcess: import('child_process').ChildProcess | null = null;
// Find an available port starting from a default port
async function findAvailablePort(startingPort: number = 3000): Promise<number> {
return new Promise((resolve, reject) => {
let port = startingPort;
const checkPort = () => {
const server = net.createServer();
server.listen(port, () => {
server.once('close', () => {
resolve(port);
});
server.close();
});
server.on('error', (err: NodeJS.ErrnoException) => {
if (err.code === 'EADDRINUSE') {
port++; // Try next port
checkPort();
} else {
reject(err);
}
});
};
checkPort();
});
}
export const insightCommand: SlashCommand = {
name: 'insight',
get description() {
return t(
'generate personalized programming insights from your chat history',
);
},
kind: CommandKind.BUILT_IN,
action: async (context: CommandContext) => {
try {
context.ui.setDebugMessage(t('Starting insight server...'));
// If there's an existing insight server process, terminate it first
if (insightServerProcess && !insightServerProcess.killed) {
insightServerProcess.kill();
insightServerProcess = null;
}
// Find an available port
const availablePort = await findAvailablePort(3000);
const projectsDir = join(os.homedir(), '.qwen', 'projects');
// Path to the insight server script
const insightScriptPath = join(
process.cwd(),
'packages',
'cli',
'src',
'services',
'insightServer.ts',
);
// Spawn the insight server process
const serverProcess = spawn('npx', ['tsx', insightScriptPath], {
stdio: 'pipe',
env: {
...process.env,
NODE_ENV: 'production',
BASE_DIR: projectsDir,
PORT: String(availablePort),
},
});
// Store the server process for cleanup
insightServerProcess = serverProcess;
// Register cleanup function to terminate the server process on quit
registerCleanup(() => {
if (insightServerProcess && !insightServerProcess.killed) {
insightServerProcess.kill();
insightServerProcess = null;
}
});
serverProcess.stderr.on('data', (data) => {
// Forward error output to parent process stderr
process.stderr.write(`Insight server error: ${data}`);
context.ui.addItem(
{
type: MessageType.ERROR,
text: `Insight server error: ${data.toString()}`,
},
Date.now(),
);
});
serverProcess.on('close', (code) => {
console.log(`Insight server process exited with code ${code}`);
context.ui.setDebugMessage(t('Insight server stopped.'));
// Reset the reference when the process closes
if (insightServerProcess === serverProcess) {
insightServerProcess = null;
}
});
const url = `http://localhost:${availablePort}`;
// Open browser automatically
const openBrowser = async () => {
try {
const { exec } = await import('child_process');
const { promisify } = await import('util');
const execAsync = promisify(exec);
switch (process.platform) {
case 'darwin': // macOS
await execAsync(`open ${url}`);
break;
case 'win32': // Windows
await execAsync(`start ${url}`);
break;
default: // Linux and others
await execAsync(`xdg-open ${url}`);
}
context.ui.addItem(
{
type: MessageType.INFO,
text: `Insight server started. Visit: ${url}`,
},
Date.now(),
);
} catch (err) {
console.error('Failed to open browser automatically:', err);
context.ui.addItem(
{
type: MessageType.INFO,
text: `Insight server started. Please visit: ${url}`,
},
Date.now(),
);
}
};
// Wait for the server to start (give it some time to bind to the port)
setTimeout(openBrowser, 1000);
// Inform the user that the server is running
context.ui.addItem(
{
type: MessageType.INFO,
text: t(
'Insight server started. Check your browser for the visualization.',
),
},
Date.now(),
);
} catch (error) {
context.ui.addItem(
{
type: MessageType.ERROR,
text: t('Failed to start insight server: {{error}}', {
error: (error as Error).message,
}),
},
Date.now(),
);
}
},
};

View File

@@ -11,9 +11,14 @@ import type { SlashCommand, type CommandContext } from './types.js';
import { createMockCommandContext } from '../../test-utils/mockCommandContext.js';
import { MessageType } from '../types.js';
import type { LoadedSettings } from '../../config/settings.js';
import { readFile } from 'node:fs/promises';
import os from 'node:os';
import path from 'node:path';
import {
getErrorMessage,
loadServerHierarchicalMemory,
QWEN_DIR,
setGeminiMdFilename,
type FileDiscoveryService,
type LoadServerHierarchicalMemoryResponse,
} from '@qwen-code/qwen-code-core';
@@ -31,7 +36,18 @@ vi.mock('@qwen-code/qwen-code-core', async (importOriginal) => {
};
});
vi.mock('node:fs/promises', () => {
const readFile = vi.fn();
return {
readFile,
default: {
readFile,
},
};
});
const mockLoadServerHierarchicalMemory = loadServerHierarchicalMemory as Mock;
const mockReadFile = readFile as unknown as Mock;
describe('memoryCommand', () => {
let mockContext: CommandContext;
@@ -52,6 +68,10 @@ describe('memoryCommand', () => {
let mockGetGeminiMdFileCount: Mock;
beforeEach(() => {
setGeminiMdFilename('QWEN.md');
mockReadFile.mockReset();
vi.restoreAllMocks();
showCommand = getSubCommand('show');
mockGetUserMemory = vi.fn();
@@ -102,6 +122,52 @@ describe('memoryCommand', () => {
expect.any(Number),
);
});
it('should show project memory from the configured context file', async () => {
const projectCommand = showCommand.subCommands?.find(
(cmd) => cmd.name === '--project',
);
if (!projectCommand?.action) throw new Error('Command has no action');
setGeminiMdFilename('AGENTS.md');
vi.spyOn(process, 'cwd').mockReturnValue('/test/project');
mockReadFile.mockResolvedValue('project memory');
await projectCommand.action(mockContext, '');
const expectedProjectPath = path.join('/test/project', 'AGENTS.md');
expect(mockReadFile).toHaveBeenCalledWith(expectedProjectPath, 'utf-8');
expect(mockContext.ui.addItem).toHaveBeenCalledWith(
{
type: MessageType.INFO,
text: expect.stringContaining(expectedProjectPath),
},
expect.any(Number),
);
});
it('should show global memory from the configured context file', async () => {
const globalCommand = showCommand.subCommands?.find(
(cmd) => cmd.name === '--global',
);
if (!globalCommand?.action) throw new Error('Command has no action');
setGeminiMdFilename('AGENTS.md');
vi.spyOn(os, 'homedir').mockReturnValue('/home/user');
mockReadFile.mockResolvedValue('global memory');
await globalCommand.action(mockContext, '');
const expectedGlobalPath = path.join('/home/user', QWEN_DIR, 'AGENTS.md');
expect(mockReadFile).toHaveBeenCalledWith(expectedGlobalPath, 'utf-8');
expect(mockContext.ui.addItem).toHaveBeenCalledWith(
{
type: MessageType.INFO,
text: expect.stringContaining('Global memory content'),
},
expect.any(Number),
);
});
});
describe('/memory add', () => {

View File

@@ -6,12 +6,13 @@
import {
getErrorMessage,
getCurrentGeminiMdFilename,
loadServerHierarchicalMemory,
QWEN_DIR,
} from '@qwen-code/qwen-code-core';
import path from 'node:path';
import os from 'os';
import fs from 'fs/promises';
import os from 'node:os';
import fs from 'node:fs/promises';
import { MessageType } from '../types.js';
import type { SlashCommand, SlashCommandActionReturn } from './types.js';
import { CommandKind } from './types.js';
@@ -56,7 +57,12 @@ export const memoryCommand: SlashCommand = {
kind: CommandKind.BUILT_IN,
action: async (context) => {
try {
const projectMemoryPath = path.join(process.cwd(), 'QWEN.md');
const workingDir =
context.services.config?.getWorkingDir?.() ?? process.cwd();
const projectMemoryPath = path.join(
workingDir,
getCurrentGeminiMdFilename(),
);
const memoryContent = await fs.readFile(
projectMemoryPath,
'utf-8',
@@ -104,7 +110,7 @@ export const memoryCommand: SlashCommand = {
const globalMemoryPath = path.join(
os.homedir(),
QWEN_DIR,
'QWEN.md',
getCurrentGeminiMdFilename(),
);
const globalMemoryContent = await fs.readFile(
globalMemoryPath,

View File

@@ -0,0 +1,132 @@
/**
* @license
* Copyright 2025 Qwen
* SPDX-License-Identifier: Apache-2.0
*/
import {
CommandKind,
type CommandCompletionItem,
type CommandContext,
type SlashCommand,
} from './types.js';
import { MessageType, type HistoryItemSkillsList } from '../types.js';
import { t } from '../../i18n/index.js';
import { AsyncFzf } from 'fzf';
import type { SkillConfig } from '@qwen-code/qwen-code-core';
export const skillsCommand: SlashCommand = {
name: 'skills',
get description() {
return t('List available skills.');
},
kind: CommandKind.BUILT_IN,
action: async (context: CommandContext, args?: string) => {
const rawArgs = args?.trim() ?? '';
const [skillName = ''] = rawArgs.split(/\s+/);
const skillManager = context.services.config?.getSkillManager();
if (!skillManager) {
context.ui.addItem(
{
type: MessageType.ERROR,
text: t('Could not retrieve skill manager.'),
},
Date.now(),
);
return;
}
const skills = await skillManager.listSkills();
if (skills.length === 0) {
context.ui.addItem(
{
type: MessageType.INFO,
text: t('No skills are currently available.'),
},
Date.now(),
);
return;
}
if (!skillName) {
const sortedSkills = [...skills].sort((left, right) =>
left.name.localeCompare(right.name),
);
const skillsListItem: HistoryItemSkillsList = {
type: MessageType.SKILLS_LIST,
skills: sortedSkills.map((skill) => ({ name: skill.name })),
};
context.ui.addItem(skillsListItem, Date.now());
return;
}
const normalizedName = skillName.toLowerCase();
const hasSkill = skills.some(
(skill) => skill.name.toLowerCase() === normalizedName,
);
if (!hasSkill) {
context.ui.addItem(
{
type: MessageType.ERROR,
text: t('Unknown skill: {{name}}', { name: skillName }),
},
Date.now(),
);
return;
}
const rawInput = context.invocation?.raw ?? `/skills ${rawArgs}`;
return {
type: 'submit_prompt',
content: [{ text: rawInput }],
};
},
completion: async (
context: CommandContext,
partialArg: string,
): Promise<CommandCompletionItem[]> => {
const skillManager = context.services.config?.getSkillManager();
if (!skillManager) {
return [];
}
const skills = await skillManager.listSkills();
const normalizedPartial = partialArg.trim();
const matches = await getSkillMatches(skills, normalizedPartial);
return matches.map((skill) => ({
value: skill.name,
description: skill.description,
}));
},
};
async function getSkillMatches(
skills: SkillConfig[],
query: string,
): Promise<SkillConfig[]> {
if (!query) {
return skills;
}
const names = skills.map((skill) => skill.name);
const skillMap = new Map(skills.map((skill) => [skill.name, skill]));
try {
const fzf = new AsyncFzf(names, {
fuzzy: 'v2',
casing: 'case-insensitive',
});
const results = (await fzf.find(query)) as Array<{ item: string }>;
return results
.map((result) => skillMap.get(result.item))
.filter((skill): skill is SkillConfig => !!skill);
} catch (error) {
console.error('[skillsCommand] Fuzzy match failed:', error);
const lowerQuery = query.toLowerCase();
return skills.filter((skill) =>
skill.name.toLowerCase().startsWith(lowerQuery),
);
}
}

View File

@@ -209,6 +209,12 @@ export enum CommandKind {
MCP_PROMPT = 'mcp-prompt',
}
export interface CommandCompletionItem {
value: string;
label?: string;
description?: string;
}
// The standardized contract for any command in the system.
export interface SlashCommand {
name: string;
@@ -234,7 +240,7 @@ export interface SlashCommand {
completion?: (
context: CommandContext,
partialArg: string,
) => Promise<string[]>;
) => Promise<Array<string | CommandCompletionItem> | null>;
subCommands?: SlashCommand[];
}

View File

@@ -30,6 +30,7 @@ import { Help } from './Help.js';
import type { SlashCommand } from '../commands/types.js';
import { ExtensionsList } from './views/ExtensionsList.js';
import { getMCPServerStatus } from '@qwen-code/qwen-code-core';
import { SkillsList } from './views/SkillsList.js';
import { ToolsList } from './views/ToolsList.js';
import { McpStatus } from './views/McpStatus.js';
@@ -153,6 +154,9 @@ const HistoryItemDisplayComponent: React.FC<HistoryItemDisplayProps> = ({
showDescriptions={itemForDisplay.showDescriptions}
/>
)}
{itemForDisplay.type === 'skills_list' && (
<SkillsList skills={itemForDisplay.skills} />
)}
{itemForDisplay.type === 'mcp_status' && (
<McpStatus {...itemForDisplay} serverStatus={getMCPServerStatus} />
)}

View File

@@ -106,7 +106,7 @@ export function SuggestionsDisplay({
</Box>
{suggestion.description && (
<Box flexGrow={1} paddingLeft={3}>
<Box flexGrow={1} paddingLeft={2}>
<Text color={textColor} wrap="truncate">
{suggestion.description}
</Text>

View File

@@ -23,7 +23,7 @@ export const InfoMessage: React.FC<InfoMessageProps> = ({ text }) => {
const prefixWidth = prefix.length;
return (
<Box flexDirection="row" marginTop={1}>
<Box flexDirection="row" marginBottom={1}>
<Box width={prefixWidth}>
<Text color={theme.status.warning}>{prefix}</Text>
</Box>

View File

@@ -18,7 +18,7 @@ export const WarningMessage: React.FC<WarningMessageProps> = ({ text }) => {
const prefixWidth = 3;
return (
<Box flexDirection="row" marginTop={1}>
<Box flexDirection="row" marginBottom={1}>
<Box width={prefixWidth}>
<Text color={Colors.AccentYellow}>{prefix}</Text>
</Box>

View File

@@ -0,0 +1,36 @@
/**
* @license
* Copyright 2025 Qwen
* SPDX-License-Identifier: Apache-2.0
*/
import type React from 'react';
import { Box, Text } from 'ink';
import { theme } from '../../semantic-colors.js';
import { type SkillDefinition } from '../../types.js';
import { t } from '../../../i18n/index.js';
interface SkillsListProps {
skills: readonly SkillDefinition[];
}
export const SkillsList: React.FC<SkillsListProps> = ({ skills }) => (
<Box flexDirection="column" marginBottom={1}>
<Text bold color={theme.text.primary}>
{t('Available skills:')}
</Text>
<Box height={1} />
{skills.length > 0 ? (
skills.map((skill) => (
<Box key={skill.name} flexDirection="row">
<Text color={theme.text.primary}>{' '}- </Text>
<Text bold color={theme.text.accent}>
{skill.name}
</Text>
</Box>
))
) : (
<Text color={theme.text.primary}> {t('No skills available')}</Text>
)}
</Box>
);

View File

@@ -1,21 +1,58 @@
/**
* @license
* Copyright 2025 Qwen
* SPDX-License-Identifier: Apache-2.0
*/
import { useCallback } from 'react';
import { useStdin } from 'ink';
import type { EditorType } from '@qwen-code/qwen-code-core';
import {
editorCommands,
commandExists as coreCommandExists,
} from '@qwen-code/qwen-code-core';
import { spawnSync } from 'child_process';
import { useSettings } from '../contexts/SettingsContext.js';
/**
* Cache for command existence checks to avoid repeated execSync calls.
*/
const commandExistsCache = new Map<string, boolean>();
/**
* Check if a command exists in the system with caching.
* Results are cached to improve performance in test environments.
*/
function commandExists(cmd: string): boolean {
if (commandExistsCache.has(cmd)) {
return commandExistsCache.get(cmd)!;
}
const exists = coreCommandExists(cmd);
commandExistsCache.set(cmd, exists);
return exists;
}
/**
* Get the actual executable command for an editor type.
*/
function getExecutableCommand(editorType: EditorType): string {
const commandConfig = editorCommands[editorType];
const commands =
process.platform === 'win32' ? commandConfig.win32 : commandConfig.default;
const availableCommand = commands.find((cmd) => commandExists(cmd));
if (!availableCommand) {
throw new Error(
`No available editor command found for ${editorType}. ` +
`Tried: ${commands.join(', ')}. ` +
`Please install one of these editors or set a different preferredEditor in settings.`,
);
}
return availableCommand;
}
/**
* Determines the editor command to use based on user preferences and platform.
*/
function getEditorCommand(preferredEditor?: EditorType): string {
if (preferredEditor) {
return preferredEditor;
return getExecutableCommand(preferredEditor);
}
// Platform-specific defaults with UI preference for macOS
@@ -63,8 +100,14 @@ export function useLaunchEditor() {
try {
setRawMode?.(false);
// On Windows, .cmd and .bat files need shell: true
const needsShell =
process.platform === 'win32' &&
(editorCommand.endsWith('.cmd') || editorCommand.endsWith('.bat'));
const { status, error } = spawnSync(editorCommand, editorArgs, {
stdio: 'inherit',
shell: needsShell,
});
if (error) throw error;

View File

@@ -573,6 +573,45 @@ describe('useSlashCompletion', () => {
});
});
it('should map completion items with descriptions for argument suggestions', async () => {
const mockCompletionFn = vi.fn().mockResolvedValue([
{ value: 'pdf', description: 'Create PDF documents' },
{ value: 'xlsx', description: 'Work with spreadsheets' },
]);
const slashCommands = [
createTestCommand({
name: 'skills',
description: 'List available skills',
completion: mockCompletionFn,
}),
];
const { result } = renderHook(() =>
useTestHarnessForSlashCompletion(
true,
'/skills ',
slashCommands,
mockCommandContext,
),
);
await waitFor(() => {
expect(result.current.suggestions).toEqual([
{
label: 'pdf',
value: 'pdf',
description: 'Create PDF documents',
},
{
label: 'xlsx',
value: 'xlsx',
description: 'Work with spreadsheets',
},
]);
});
});
it('should call command.completion with an empty string when args start with a space', async () => {
const mockCompletionFn = vi
.fn()

View File

@@ -9,6 +9,7 @@ import { AsyncFzf } from 'fzf';
import type { Suggestion } from '../components/SuggestionsDisplay.js';
import {
CommandKind,
type CommandCompletionItem,
type CommandContext,
type SlashCommand,
} from '../commands/types.js';
@@ -215,10 +216,9 @@ function useCommandSuggestions(
)) || [];
if (!signal.aborted) {
const finalSuggestions = results.map((s) => ({
label: s,
value: s,
}));
const finalSuggestions = results
.map((item) => toSuggestion(item))
.filter((suggestion): suggestion is Suggestion => !!suggestion);
setSuggestions(finalSuggestions);
setIsLoading(false);
}
@@ -310,6 +310,20 @@ function useCommandSuggestions(
return { suggestions, isLoading };
}
function toSuggestion(item: string | CommandCompletionItem): Suggestion | null {
if (typeof item === 'string') {
return { label: item, value: item };
}
if (!item.value) {
return null;
}
return {
label: item.label ?? item.value,
value: item.value,
description: item.description,
};
}
function useCompletionPositions(
query: string | null,
parserResult: CommandParserResult,

View File

@@ -201,12 +201,21 @@ export interface ToolDefinition {
description?: string;
}
export interface SkillDefinition {
name: string;
}
export type HistoryItemToolsList = HistoryItemBase & {
type: 'tools_list';
tools: ToolDefinition[];
showDescriptions: boolean;
};
export type HistoryItemSkillsList = HistoryItemBase & {
type: 'skills_list';
skills: SkillDefinition[];
};
// JSON-friendly types for using as a simple data model showing info about an
// MCP Server.
export interface JsonMcpTool {
@@ -268,6 +277,7 @@ export type HistoryItemWithoutId =
| HistoryItemCompression
| HistoryItemExtensionsList
| HistoryItemToolsList
| HistoryItemSkillsList
| HistoryItemMcpStatus;
export type HistoryItem = HistoryItemWithoutId & { id: number };
@@ -289,6 +299,7 @@ export enum MessageType {
SUMMARY = 'summary',
EXTENSIONS_LIST = 'extensions_list',
TOOLS_LIST = 'tools_list',
SKILLS_LIST = 'skills_list',
MCP_STATUS = 'mcp_status',
}

View File

@@ -117,8 +117,33 @@ describe('errors', () => {
expect(getErrorMessage(undefined)).toBe('undefined');
});
it('should handle objects', () => {
const obj = { message: 'test' };
it('should extract message from error-like objects', () => {
const obj = { message: 'test error message' };
expect(getErrorMessage(obj)).toBe('test error message');
});
it('should stringify plain objects without message property', () => {
const obj = { code: 500, details: 'internal error' };
expect(getErrorMessage(obj)).toBe(
'{"code":500,"details":"internal error"}',
);
});
it('should handle empty objects', () => {
expect(getErrorMessage({})).toBe('{}');
});
it('should handle objects with non-string message property', () => {
const obj = { message: 123 };
expect(getErrorMessage(obj)).toBe('{"message":123}');
});
it('should fallback to String() when toJSON returns undefined', () => {
const obj = {
toJSON() {
return undefined;
},
};
expect(getErrorMessage(obj)).toBe('[object Object]');
});
});

View File

@@ -18,6 +18,29 @@ export function getErrorMessage(error: unknown): string {
if (error instanceof Error) {
return error.message;
}
// Handle objects with message property (error-like objects)
if (
error !== null &&
typeof error === 'object' &&
'message' in error &&
typeof (error as { message: unknown }).message === 'string'
) {
return (error as { message: string }).message;
}
// Handle plain objects by stringifying them
if (error !== null && typeof error === 'object') {
try {
const stringified = JSON.stringify(error);
// JSON.stringify can return undefined for objects with toJSON() returning undefined
return stringified ?? String(error);
} catch {
// If JSON.stringify fails (circular reference, etc.), fall back to String
return String(error);
}
}
return String(error);
}

View File

@@ -10,6 +10,7 @@ import {
type ContentGeneratorConfigSources,
resolveModelConfig,
type ModelConfigSourcesInput,
type ProviderModelConfig,
} from '@qwen-code/qwen-code-core';
import type { Settings } from '../config/settings.js';
@@ -81,6 +82,21 @@ export function resolveCliGenerationConfig(
const authType = selectedAuthType;
// Find modelProvider from settings.modelProviders based on authType and model
let modelProvider: ProviderModelConfig | undefined;
if (authType && settings.modelProviders) {
const providers = settings.modelProviders[authType];
if (providers && Array.isArray(providers)) {
// Try to find by requested model (from CLI or settings)
const requestedModel = argv.model || settings.model?.name;
if (requestedModel) {
modelProvider = providers.find((p) => p.id === requestedModel) as
| ProviderModelConfig
| undefined;
}
}
}
const configSources: ModelConfigSourcesInput = {
authType,
cli: {
@@ -96,6 +112,7 @@ export function resolveCliGenerationConfig(
| Partial<ContentGeneratorConfig>
| undefined,
},
modelProvider,
env,
};
@@ -103,7 +120,7 @@ export function resolveCliGenerationConfig(
// Log warnings if any
for (const warning of resolved.warnings) {
console.warn(`[modelProviderUtils] ${warning}`);
console.warn(warning);
}
// Resolve OpenAI logging config (CLI-specific, not part of core resolver)

View File

@@ -8,7 +8,6 @@ import { exec, execSync, spawn, type ChildProcess } from 'node:child_process';
import os from 'node:os';
import path from 'node:path';
import fs from 'node:fs';
import { readFile } from 'node:fs/promises';
import { fileURLToPath } from 'node:url';
import { quote, parse } from 'shell-quote';
import {
@@ -50,16 +49,16 @@ const BUILTIN_SEATBELT_PROFILES = [
/**
* Determines whether the sandbox container should be run with the current user's UID and GID.
* This is often necessary on Linux systems (especially Debian/Ubuntu based) when using
* rootful Docker without userns-remap configured, to avoid permission issues with
* This is often necessary on Linux systems when using rootful Docker without userns-remap
* configured, to avoid permission issues with
* mounted volumes.
*
* The behavior is controlled by the `SANDBOX_SET_UID_GID` environment variable:
* - If `SANDBOX_SET_UID_GID` is "1" or "true", this function returns `true`.
* - If `SANDBOX_SET_UID_GID` is "0" or "false", this function returns `false`.
* - If `SANDBOX_SET_UID_GID` is not set:
* - On Debian/Ubuntu Linux, it defaults to `true`.
* - On other OSes, or if OS detection fails, it defaults to `false`.
* - On Linux, it defaults to `true`.
* - On other OSes, it defaults to `false`.
*
* For more context on running Docker containers as non-root, see:
* https://medium.com/redbubble/running-a-docker-container-as-a-non-root-user-7d2e00f8ee15
@@ -76,31 +75,20 @@ async function shouldUseCurrentUserInSandbox(): Promise<boolean> {
return false;
}
// If the environment variable is not explicitly set, default based on the platform
if (os.platform() === 'linux') {
try {
const osReleaseContent = await readFile('/etc/os-release', 'utf8');
if (
osReleaseContent.includes('ID=debian') ||
osReleaseContent.includes('ID=ubuntu') ||
osReleaseContent.match(/^ID_LIKE=.*debian.*/m) || // Covers derivatives
osReleaseContent.match(/^ID_LIKE=.*ubuntu.*/m) // Covers derivatives
) {
// note here and below we use console.error for informational messages on stderr
console.error(
'INFO: Defaulting to use current user UID/GID for Debian/Ubuntu-based Linux.',
);
return true;
}
} catch (_err) {
// Silently ignore if /etc/os-release is not found or unreadable.
// The default (false) will be applied in this case.
console.warn(
'Warning: Could not read /etc/os-release to auto-detect Debian/Ubuntu for UID/GID default.',
const debugEnv = [process.env['DEBUG'], process.env['DEBUG_MODE']].some(
(v) => v === 'true' || v === '1',
);
if (debugEnv) {
// Use stderr so it doesn't clutter normal STDOUT output (e.g. in `--prompt` runs).
console.error(
'INFO: Using current user UID/GID in Linux sandbox. Set SANDBOX_SET_UID_GID=false to disable.',
);
}
return true;
}
return false; // Default to false if no other condition is met
return false;
}
// docker does not allow container names to contain ':' or '/', so we
@@ -372,10 +360,10 @@ export async function start_sandbox(
//
// note this can only be done with binary linked from gemini-cli repo
if (process.env['BUILD_SANDBOX']) {
if (!gcPath.includes('gemini-cli/packages/')) {
if (!gcPath.includes('qwen-code/packages/')) {
throw new FatalSandboxError(
'Cannot build sandbox using installed gemini binary; ' +
'run `npm link ./packages/cli` under gemini-cli repo to switch to linked binary.',
'Cannot build sandbox using installed Qwen Code binary; ' +
'run `npm link ./packages/cli` under the qwen-code repo to switch to linked binary.',
);
} else {
console.error('building sandbox ...');

View File

@@ -1,6 +1,6 @@
{
"name": "@qwen-code/qwen-code-core",
"version": "0.7.0",
"version": "0.7.1",
"description": "Qwen Code Core",
"repository": {
"type": "git",
@@ -27,7 +27,6 @@
"@google/genai": "1.30.0",
"@modelcontextprotocol/sdk": "^1.25.1",
"@opentelemetry/api": "^1.9.0",
"async-mutex": "^0.5.0",
"@opentelemetry/exporter-logs-otlp-grpc": "^0.203.0",
"@opentelemetry/exporter-logs-otlp-http": "^0.203.0",
"@opentelemetry/exporter-metrics-otlp-grpc": "^0.203.0",
@@ -40,7 +39,9 @@
"@xterm/headless": "5.5.0",
"ajv": "^8.17.1",
"ajv-formats": "^3.0.0",
"async-mutex": "^0.5.0",
"chardet": "^2.1.0",
"chokidar": "^4.0.3",
"diff": "^7.0.0",
"dotenv": "^17.1.0",
"fast-levenshtein": "^2.0.6",

View File

@@ -404,7 +404,7 @@ export class Config {
private toolRegistry!: ToolRegistry;
private promptRegistry!: PromptRegistry;
private subagentManager!: SubagentManager;
private skillManager!: SkillManager;
private skillManager: SkillManager | null = null;
private fileSystemService: FileSystemService;
private contentGeneratorConfig!: ContentGeneratorConfig;
private contentGeneratorConfigSources: ContentGeneratorConfigSources = {};
@@ -672,7 +672,10 @@ export class Config {
}
this.promptRegistry = new PromptRegistry();
this.subagentManager = new SubagentManager(this);
this.skillManager = new SkillManager(this);
if (this.getExperimentalSkills()) {
this.skillManager = new SkillManager(this);
await this.skillManager.startWatching();
}
// Load session subagents if they were provided before initialization
if (this.sessionSubagents.length > 0) {
@@ -773,6 +776,13 @@ export class Config {
return this.sessionId;
}
/**
* Releases resources owned by the config instance.
*/
async shutdown(): Promise<void> {
this.skillManager?.stopWatching();
}
/**
* Starts a new session and resets session-scoped services.
*/
@@ -1431,7 +1441,7 @@ export class Config {
return this.subagentManager;
}
getSkillManager(): SkillManager {
getSkillManager(): SkillManager | null {
return this.skillManager;
}

View File

@@ -10,6 +10,7 @@ import type {
GenerateContentParameters,
} from '@google/genai';
import { FinishReason, GenerateContentResponse } from '@google/genai';
import type { ContentGeneratorConfig } from '../contentGenerator.js';
// Mock the request tokenizer module BEFORE importing the class that uses it.
const mockTokenizer = {
@@ -127,6 +128,32 @@ describe('AnthropicContentGenerator', () => {
);
});
it('merges customHeaders into defaultHeaders (does not replace defaults)', async () => {
const { AnthropicContentGenerator } = await importGenerator();
void new AnthropicContentGenerator(
{
model: 'claude-test',
apiKey: 'test-key',
baseUrl: 'https://example.invalid',
timeout: 10_000,
maxRetries: 2,
samplingParams: {},
schemaCompliance: 'auto',
reasoning: { effort: 'medium' },
customHeaders: {
'X-Custom': '1',
},
} as unknown as Record<string, unknown> as ContentGeneratorConfig,
mockConfig,
);
const headers = (anthropicState.constructorOptions?.['defaultHeaders'] ||
{}) as Record<string, string>;
expect(headers['User-Agent']).toContain('QwenCode/1.2.3');
expect(headers['anthropic-beta']).toContain('effort-2025-11-24');
expect(headers['X-Custom']).toBe('1');
});
it('adds the effort beta header when reasoning.effort is set', async () => {
const { AnthropicContentGenerator } = await importGenerator();
void new AnthropicContentGenerator(

View File

@@ -141,6 +141,7 @@ export class AnthropicContentGenerator implements ContentGenerator {
private buildHeaders(): Record<string, string> {
const version = this.cliConfig.getCliVersion() || 'unknown';
const userAgent = `QwenCode/${version} (${process.platform}; ${process.arch})`;
const { customHeaders } = this.contentGeneratorConfig;
const betas: string[] = [];
const reasoning = this.contentGeneratorConfig.reasoning;
@@ -163,7 +164,7 @@ export class AnthropicContentGenerator implements ContentGenerator {
headers['anthropic-beta'] = betas.join(',');
}
return headers;
return customHeaders ? { ...headers, ...customHeaders } : headers;
}
private async buildRequest(

View File

@@ -91,6 +91,8 @@ export type ContentGeneratorConfig = {
userAgent?: string;
// Schema compliance mode for tool definitions
schemaCompliance?: 'auto' | 'openapi_30';
// Custom HTTP headers to be sent with requests
customHeaders?: Record<string, string>;
};
// Keep the public ContentGeneratorConfigSources API, but reuse the generic
@@ -268,28 +270,28 @@ export function createContentGeneratorConfig(
}
export async function createContentGenerator(
config: ContentGeneratorConfig,
gcConfig: Config,
generatorConfig: ContentGeneratorConfig,
config: Config,
isInitialAuth?: boolean,
): Promise<ContentGenerator> {
const validation = validateModelConfig(config, false);
const validation = validateModelConfig(generatorConfig, false);
if (!validation.valid) {
throw new Error(validation.errors.map((e) => e.message).join('\n'));
}
if (config.authType === AuthType.USE_OPENAI) {
// Import OpenAIContentGenerator dynamically to avoid circular dependencies
const authType = generatorConfig.authType;
if (!authType) {
throw new Error('ContentGeneratorConfig must have an authType');
}
let baseGenerator: ContentGenerator;
if (authType === AuthType.USE_OPENAI) {
const { createOpenAIContentGenerator } = await import(
'./openaiContentGenerator/index.js'
);
// Always use OpenAIContentGenerator, logging is controlled by enableOpenAILogging flag
const generator = createOpenAIContentGenerator(config, gcConfig);
return new LoggingContentGenerator(generator, gcConfig);
}
if (config.authType === AuthType.QWEN_OAUTH) {
// Import required classes dynamically
baseGenerator = createOpenAIContentGenerator(generatorConfig, config);
} else if (authType === AuthType.QWEN_OAUTH) {
const { getQwenOAuthClient: getQwenOauthClient } = await import(
'../qwen/qwenOAuth2.js'
);
@@ -298,44 +300,38 @@ export async function createContentGenerator(
);
try {
// Get the Qwen OAuth client (now includes integrated token management)
// If this is initial auth, require cached credentials to detect missing credentials
const qwenClient = await getQwenOauthClient(
gcConfig,
config,
isInitialAuth ? { requireCachedCredentials: true } : undefined,
);
// Create the content generator with dynamic token management
const generator = new QwenContentGenerator(qwenClient, config, gcConfig);
return new LoggingContentGenerator(generator, gcConfig);
baseGenerator = new QwenContentGenerator(
qwenClient,
generatorConfig,
config,
);
} catch (error) {
throw new Error(
`${error instanceof Error ? error.message : String(error)}`,
);
}
}
if (config.authType === AuthType.USE_ANTHROPIC) {
} else if (authType === AuthType.USE_ANTHROPIC) {
const { createAnthropicContentGenerator } = await import(
'./anthropicContentGenerator/index.js'
);
const generator = createAnthropicContentGenerator(config, gcConfig);
return new LoggingContentGenerator(generator, gcConfig);
}
if (
config.authType === AuthType.USE_GEMINI ||
config.authType === AuthType.USE_VERTEX_AI
baseGenerator = createAnthropicContentGenerator(generatorConfig, config);
} else if (
authType === AuthType.USE_GEMINI ||
authType === AuthType.USE_VERTEX_AI
) {
const { createGeminiContentGenerator } = await import(
'./geminiContentGenerator/index.js'
);
const generator = createGeminiContentGenerator(config, gcConfig);
return new LoggingContentGenerator(generator, gcConfig);
baseGenerator = createGeminiContentGenerator(generatorConfig, config);
} else {
throw new Error(
`Error creating contentGenerator: Unsupported authType: ${authType}`,
);
}
throw new Error(
`Error creating contentGenerator: Unsupported authType: ${config.authType}`,
);
return new LoggingContentGenerator(baseGenerator, config, generatorConfig);
}
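Note: after this refactor every auth type funnels through a single wrap point, so request/response logging is applied uniformly. A sketch of calling the factory, with placeholder values and a cast mirroring the tests above (not real credentials):

const generatorConfig = {
  model: 'gpt-4o',
  authType: AuthType.USE_OPENAI,
  apiKey: 'example-key',
  customHeaders: { 'X-Trace': 'demo' }, // new optional field, forwarded to the provider's buildHeaders()
} as unknown as ContentGeneratorConfig;

// Always returns a LoggingContentGenerator wrapping the auth-specific base generator.
const generator = await createContentGenerator(generatorConfig, config);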

View File

@@ -39,6 +39,41 @@ describe('GeminiContentGenerator', () => {
mockGoogleGenAI = vi.mocked(GoogleGenAI).mock.results[0].value;
});
it('should merge customHeaders into existing httpOptions.headers', async () => {
vi.mocked(GoogleGenAI).mockClear();
void new GeminiContentGenerator(
{
apiKey: 'test-api-key',
httpOptions: {
headers: {
'X-Base': 'base',
'X-Override': 'base',
},
},
},
{
customHeaders: {
'X-Custom': 'custom',
'X-Override': 'custom',
},
// eslint-disable-next-line @typescript-eslint/no-explicit-any
} as any,
);
expect(vi.mocked(GoogleGenAI)).toHaveBeenCalledTimes(1);
expect(vi.mocked(GoogleGenAI)).toHaveBeenCalledWith({
apiKey: 'test-api-key',
httpOptions: {
headers: {
'X-Base': 'base',
'X-Custom': 'custom',
'X-Override': 'custom',
},
},
});
});
it('should call generateContent on the underlying model', async () => {
const request = { model: 'gemini-1.5-flash', contents: [] };
const expectedResponse = { responseId: 'test-id' };

View File

@@ -35,7 +35,26 @@ export class GeminiContentGenerator implements ContentGenerator {
},
contentGeneratorConfig?: ContentGeneratorConfig,
) {
this.googleGenAI = new GoogleGenAI(options);
const customHeaders = contentGeneratorConfig?.customHeaders;
const finalOptions = customHeaders
? (() => {
const baseHttpOptions = options.httpOptions;
const baseHeaders = baseHttpOptions?.headers ?? {};
return {
...options,
httpOptions: {
...(baseHttpOptions ?? {}),
headers: {
...baseHeaders,
...customHeaders,
},
},
};
})()
: options;
this.googleGenAI = new GoogleGenAI(finalOptions);
this.contentGeneratorConfig = contentGeneratorConfig;
}
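Note: the nested merge above layers customHeaders on top of any headers already present in httpOptions while leaving the rest of the options untouched, which is what the new test asserts. A self-contained illustration with made-up values:

const options = {
  apiKey: 'test-api-key',
  httpOptions: { headers: { 'X-Base': 'base', 'X-Override': 'base' } },
};
const customHeaders = { 'X-Custom': 'custom', 'X-Override': 'custom' };
const finalOptions = {
  ...options,
  httpOptions: {
    ...options.httpOptions,
    headers: { ...options.httpOptions.headers, ...customHeaders },
  },
};
// finalOptions.httpOptions.headers:
// { 'X-Base': 'base', 'X-Override': 'custom', 'X-Custom': 'custom' }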

View File

@@ -12,6 +12,7 @@ import type {
import { GenerateContentResponse } from '@google/genai';
import type { Config } from '../../config/config.js';
import type { ContentGenerator } from '../contentGenerator.js';
import { AuthType } from '../contentGenerator.js';
import { LoggingContentGenerator } from './index.js';
import { OpenAIContentConverter } from '../openaiContentGenerator/converter.js';
import {
@@ -50,14 +51,17 @@ const convertGeminiResponseToOpenAISpy = vi
choices: [],
} as OpenAI.Chat.ChatCompletion);
const createConfig = (overrides: Record<string, unknown> = {}): Config =>
({
getContentGeneratorConfig: () => ({
authType: 'openai',
enableOpenAILogging: false,
...overrides,
}),
}) as Config;
const createConfig = (overrides: Record<string, unknown> = {}): Config => {
const configContent = {
authType: 'openai',
enableOpenAILogging: false,
...overrides,
};
return {
getContentGeneratorConfig: () => configContent,
getAuthType: () => configContent.authType as AuthType | undefined,
} as Config;
};
const createWrappedGenerator = (
generateContent: ContentGenerator['generateContent'],
@@ -124,13 +128,17 @@ describe('LoggingContentGenerator', () => {
),
vi.fn(),
);
const generatorConfig = {
model: 'test-model',
authType: AuthType.USE_OPENAI,
enableOpenAILogging: true,
openAILoggingDir: 'logs',
schemaCompliance: 'openapi_30' as const,
};
const generator = new LoggingContentGenerator(
wrapped,
createConfig({
enableOpenAILogging: true,
openAILoggingDir: 'logs',
schemaCompliance: 'openapi_30',
}),
createConfig(),
generatorConfig,
);
const request = {
@@ -225,9 +233,15 @@ describe('LoggingContentGenerator', () => {
vi.fn().mockRejectedValue(error),
vi.fn(),
);
const generatorConfig = {
model: 'test-model',
authType: AuthType.USE_OPENAI,
enableOpenAILogging: true,
};
const generator = new LoggingContentGenerator(
wrapped,
createConfig({ enableOpenAILogging: true }),
createConfig(),
generatorConfig,
);
const request = {
@@ -293,9 +307,15 @@ describe('LoggingContentGenerator', () => {
})(),
),
);
const generatorConfig = {
model: 'test-model',
authType: AuthType.USE_OPENAI,
enableOpenAILogging: true,
};
const generator = new LoggingContentGenerator(
wrapped,
createConfig({ enableOpenAILogging: true }),
createConfig(),
generatorConfig,
);
const request = {
@@ -345,9 +365,15 @@ describe('LoggingContentGenerator', () => {
})(),
),
);
const generatorConfig = {
model: 'test-model',
authType: AuthType.USE_OPENAI,
enableOpenAILogging: true,
};
const generator = new LoggingContentGenerator(
wrapped,
createConfig({ enableOpenAILogging: true }),
createConfig(),
generatorConfig,
);
const request = {

View File

@@ -31,7 +31,10 @@ import {
logApiRequest,
logApiResponse,
} from '../../telemetry/loggers.js';
import type { ContentGenerator } from '../contentGenerator.js';
import type {
ContentGenerator,
ContentGeneratorConfig,
} from '../contentGenerator.js';
import { isStructuredError } from '../../utils/quotaErrorDetection.js';
import { OpenAIContentConverter } from '../openaiContentGenerator/converter.js';
import { OpenAILogger } from '../../utils/openaiLogger.js';
@@ -50,9 +53,11 @@ export class LoggingContentGenerator implements ContentGenerator {
constructor(
private readonly wrapped: ContentGenerator,
private readonly config: Config,
generatorConfig: ContentGeneratorConfig,
) {
const generatorConfig = this.config.getContentGeneratorConfig();
if (generatorConfig?.enableOpenAILogging) {
// Extract the fields needed for initialization from the passed config
// (config.getContentGeneratorConfig() may not be available yet during refreshAuth)
if (generatorConfig.enableOpenAILogging) {
this.openaiLogger = new OpenAILogger(generatorConfig.openAILoggingDir);
this.schemaCompliance = generatorConfig.schemaCompliance;
}
@@ -89,7 +94,7 @@ export class LoggingContentGenerator implements ContentGenerator {
model,
durationMs,
prompt_id,
this.config.getContentGeneratorConfig()?.authType,
this.config.getAuthType(),
usageMetadata,
responseText,
),
@@ -126,7 +131,7 @@ export class LoggingContentGenerator implements ContentGenerator {
errorMessage,
durationMs,
prompt_id,
this.config.getContentGeneratorConfig()?.authType,
this.config.getAuthType(),
errorType,
errorStatus,
),

View File

@@ -207,6 +207,27 @@ describe('OpenAIContentConverter', () => {
expect.objectContaining({ text: 'visible text' }),
);
});
it('should not throw when streaming chunk has no delta', () => {
const chunk = converter.convertOpenAIChunkToGemini({
object: 'chat.completion.chunk',
id: 'chunk-2',
created: 456,
choices: [
{
index: 0,
// Some OpenAI-compatible providers may omit delta entirely.
delta: undefined,
finish_reason: null,
logprobs: null,
},
],
model: 'gpt-test',
} as unknown as OpenAI.Chat.ChatCompletionChunk);
const parts = chunk.candidates?.[0]?.content?.parts;
expect(parts).toEqual([]);
});
});
describe('convertGeminiToolsToOpenAI', () => {

View File

@@ -799,7 +799,7 @@ export class OpenAIContentConverter {
const parts: Part[] = [];
const reasoningText = (choice.delta as ExtendedCompletionChunkDelta)
.reasoning_content;
?.reasoning_content;
if (reasoningText) {
parts.push({ text: reasoningText, thought: true });
}
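Note: the optional chain matters because some OpenAI-compatible providers omit delta entirely, as the new test above documents. A tiny illustration of the failure mode being fixed:

const choice = { index: 0, delta: undefined, finish_reason: null };
// (choice.delta as { reasoning_content?: string }).reasoning_content   -> throws TypeError at runtime
const reasoningText = (choice.delta as { reasoning_content?: string } | undefined)
  ?.reasoning_content; // -> undefined, so the chunk converts to an empty parts array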

View File

@@ -142,6 +142,27 @@ describe('DashScopeOpenAICompatibleProvider', () => {
});
});
it('should merge custom headers with DashScope defaults', () => {
const providerWithCustomHeaders = new DashScopeOpenAICompatibleProvider(
{
...mockContentGeneratorConfig,
customHeaders: {
'X-Custom': '1',
'X-DashScope-CacheControl': 'disable',
},
} as ContentGeneratorConfig,
mockCliConfig,
);
const headers = providerWithCustomHeaders.buildHeaders();
expect(headers['User-Agent']).toContain('QwenCode/1.0.0');
expect(headers['X-DashScope-UserAgent']).toContain('QwenCode/1.0.0');
expect(headers['X-DashScope-AuthType']).toBe(AuthType.QWEN_OAUTH);
expect(headers['X-Custom']).toBe('1');
expect(headers['X-DashScope-CacheControl']).toBe('disable');
});
it('should handle unknown CLI version', () => {
(
mockCliConfig.getCliVersion as MockedFunction<

View File

@@ -47,13 +47,17 @@ export class DashScopeOpenAICompatibleProvider
buildHeaders(): Record<string, string | undefined> {
const version = this.cliConfig.getCliVersion() || 'unknown';
const userAgent = `QwenCode/${version} (${process.platform}; ${process.arch})`;
const { authType } = this.contentGeneratorConfig;
return {
const { authType, customHeaders } = this.contentGeneratorConfig;
const defaultHeaders = {
'User-Agent': userAgent,
'X-DashScope-CacheControl': 'enable',
'X-DashScope-UserAgent': userAgent,
'X-DashScope-AuthType': authType,
};
return customHeaders
? { ...defaultHeaders, ...customHeaders }
: defaultHeaders;
}
buildClient(): OpenAI {

View File

@@ -73,6 +73,26 @@ describe('DefaultOpenAICompatibleProvider', () => {
});
});
it('should merge customHeaders with defaults (and allow overrides)', () => {
const providerWithCustomHeaders = new DefaultOpenAICompatibleProvider(
{
...mockContentGeneratorConfig,
customHeaders: {
'X-Custom': '1',
'User-Agent': 'custom-agent',
},
} as ContentGeneratorConfig,
mockCliConfig,
);
const headers = providerWithCustomHeaders.buildHeaders();
expect(headers).toEqual({
'User-Agent': 'custom-agent',
'X-Custom': '1',
});
});
it('should handle unknown CLI version', () => {
(
mockCliConfig.getCliVersion as MockedFunction<

View File

@@ -25,9 +25,14 @@ export class DefaultOpenAICompatibleProvider
buildHeaders(): Record<string, string | undefined> {
const version = this.cliConfig.getCliVersion() || 'unknown';
const userAgent = `QwenCode/${version} (${process.platform}; ${process.arch})`;
return {
const { customHeaders } = this.contentGeneratorConfig;
const defaultHeaders = {
'User-Agent': userAgent,
};
return customHeaders
? { ...defaultHeaders, ...customHeaders }
: defaultHeaders;
}
buildClient(): OpenAI {
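Note: both providers rely on plain object-spread precedence, so customHeaders can add new headers and override defaults; later spreads win. Illustrative values only:

const defaultHeaders = { 'User-Agent': 'QwenCode/1.0.0 (linux; x64)' };
const customHeaders = { 'X-Custom': '1', 'User-Agent': 'custom-agent' };
const headers = { ...defaultHeaders, ...customHeaders };
// => { 'User-Agent': 'custom-agent', 'X-Custom': '1' }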

View File

@@ -25,6 +25,7 @@ export const MODEL_GENERATION_CONFIG_FIELDS = [
'disableCacheControl',
'schemaCompliance',
'reasoning',
'customHeaders',
] as const satisfies ReadonlyArray<keyof ContentGeneratorConfig>;
/**
@@ -105,15 +106,6 @@ export const QWEN_OAUTH_MODELS: ModelConfig[] = [
description:
'The latest Qwen Coder model from Alibaba Cloud ModelStudio (version: qwen3-coder-plus-2025-09-23)',
capabilities: { vision: false },
generationConfig: {
samplingParams: {
temperature: 0.7,
top_p: 0.9,
max_tokens: 8192,
},
timeout: 60000,
maxRetries: 3,
},
},
{
id: 'vision-model',
@@ -121,14 +113,5 @@ export const QWEN_OAUTH_MODELS: ModelConfig[] = [
description:
'The latest Qwen Vision model from Alibaba Cloud ModelStudio (version: qwen3-vl-plus-2025-09-23)',
capabilities: { vision: true },
generationConfig: {
samplingParams: {
temperature: 0.7,
top_p: 0.9,
max_tokens: 8192,
},
timeout: 60000,
maxRetries: 3,
},
},
];

View File

@@ -112,11 +112,9 @@ describe('modelConfigResolver', () => {
modelProvider: {
id: 'provider-model',
name: 'Provider Model',
authType: AuthType.USE_OPENAI,
envKey: 'MY_CUSTOM_KEY',
baseUrl: 'https://provider.example.com',
generationConfig: {},
capabilities: {},
},
});
@@ -249,13 +247,11 @@ describe('modelConfigResolver', () => {
modelProvider: {
id: 'model',
name: 'Model',
authType: AuthType.USE_OPENAI,
envKey: 'MY_KEY',
baseUrl: 'https://api.example.com',
generationConfig: {
timeout: 60000,
},
capabilities: {},
},
});

View File

@@ -41,7 +41,7 @@ import {
QWEN_OAUTH_ALLOWED_MODELS,
MODEL_GENERATION_CONFIG_FIELDS,
} from './constants.js';
import type { ResolvedModelConfig } from './types.js';
import type { ModelConfig as ModelProviderConfig } from './types.js';
export {
validateModelConfig,
type ModelConfigValidationResult,
@@ -86,8 +86,8 @@ export interface ModelConfigSourcesInput {
/** Environment variables (injected for testability) */
env: Record<string, string | undefined>;
/** Resolved model from ModelProviders (explicit selection, highest priority) */
modelProvider?: ResolvedModelConfig;
/** Model from ModelProviders (explicit selection, highest priority) */
modelProvider?: ModelProviderConfig;
/** Proxy URL (computed from Config) */
proxy?: string;
@@ -277,7 +277,7 @@ function resolveQwenOAuthConfig(
input: ModelConfigSourcesInput,
warnings: string[],
): ModelConfigResolutionResult {
const { cli, settings, proxy } = input;
const { cli, settings, proxy, modelProvider } = input;
const sources: ConfigSources = {};
// Qwen OAuth only allows specific models
@@ -311,10 +311,10 @@ function resolveQwenOAuthConfig(
sources['proxy'] = computedSource('Config.getProxy()');
}
// Resolve generation config from settings
// Resolve generation config from settings and modelProvider
const generationConfig = resolveGenerationConfig(
settings?.generationConfig,
undefined,
modelProvider?.generationConfig,
AuthType.QWEN_OAUTH,
resolvedModel,
sources,
@@ -344,7 +344,7 @@ function resolveGenerationConfig(
const result: Partial<ContentGeneratorConfig> = {};
for (const field of MODEL_GENERATION_CONFIG_FIELDS) {
// ModelProvider config takes priority
// ModelProvider config takes priority over settings config
if (authType && modelProviderConfig && field in modelProviderConfig) {
// eslint-disable-next-line @typescript-eslint/no-explicit-any
(result as any)[field] = modelProviderConfig[field];

View File

@@ -480,6 +480,91 @@ describe('ModelsConfig', () => {
expect(gc.apiKeyEnvKey).toBeUndefined();
});
it('should use default model for new authType when switching from different authType with env vars', () => {
// Simulate cold start with OPENAI env vars (OPENAI_MODEL and OPENAI_API_KEY)
// This sets the model in generationConfig but no authType is selected yet
const modelsConfig = new ModelsConfig({
generationConfig: {
model: 'gpt-4o', // From OPENAI_MODEL env var
apiKey: 'openai-key-from-env',
},
});
// User switches to qwen-oauth via AuthDialog
// refreshAuth calls syncAfterAuthRefresh with the current model (gpt-4o)
// which doesn't exist in qwen-oauth registry, so it should use default
modelsConfig.syncAfterAuthRefresh(AuthType.QWEN_OAUTH, 'gpt-4o');
const gc = currentGenerationConfig(modelsConfig);
// Should use default qwen-oauth model (coder-model), not the OPENAI model
expect(gc.model).toBe('coder-model');
expect(gc.apiKey).toBe('QWEN_OAUTH_DYNAMIC_TOKEN');
expect(gc.apiKeyEnvKey).toBeUndefined();
});
it('should clear manual credentials when switching from USE_OPENAI to QWEN_OAUTH', () => {
// User manually set credentials for OpenAI
const modelsConfig = new ModelsConfig({
initialAuthType: AuthType.USE_OPENAI,
generationConfig: {
model: 'gpt-4o',
apiKey: 'manual-openai-key',
baseUrl: 'https://manual.example.com/v1',
},
});
// Manually set credentials via updateCredentials
modelsConfig.updateCredentials({
apiKey: 'manual-openai-key',
baseUrl: 'https://manual.example.com/v1',
model: 'gpt-4o',
});
// User switches to qwen-oauth
// Since authType is not USE_OPENAI, manual credentials should be cleared
// and default qwen-oauth model should be applied
modelsConfig.syncAfterAuthRefresh(AuthType.QWEN_OAUTH, 'gpt-4o');
const gc = currentGenerationConfig(modelsConfig);
// Should use default qwen-oauth model, not preserve manual OpenAI credentials
expect(gc.model).toBe('coder-model');
expect(gc.apiKey).toBe('QWEN_OAUTH_DYNAMIC_TOKEN');
// baseUrl should be set to qwen-oauth default, not preserved from manual OpenAI config
expect(gc.baseUrl).toBe('DYNAMIC_QWEN_OAUTH_BASE_URL');
expect(gc.apiKeyEnvKey).toBeUndefined();
});
it('should preserve manual credentials when switching to USE_OPENAI', () => {
// User manually set credentials
const modelsConfig = new ModelsConfig({
initialAuthType: AuthType.USE_OPENAI,
generationConfig: {
model: 'gpt-4o',
apiKey: 'manual-openai-key',
baseUrl: 'https://manual.example.com/v1',
samplingParams: { temperature: 0.9 },
},
});
// Manually set credentials via updateCredentials
modelsConfig.updateCredentials({
apiKey: 'manual-openai-key',
baseUrl: 'https://manual.example.com/v1',
model: 'gpt-4o',
});
// User switches to USE_OPENAI (same or different model)
// Since authType is USE_OPENAI, manual credentials should be preserved
modelsConfig.syncAfterAuthRefresh(AuthType.USE_OPENAI, 'gpt-4o');
const gc = currentGenerationConfig(modelsConfig);
// Should preserve manual credentials
expect(gc.model).toBe('gpt-4o');
expect(gc.apiKey).toBe('manual-openai-key');
expect(gc.baseUrl).toBe('https://manual.example.com/v1');
expect(gc.samplingParams?.temperature).toBe(0.9); // Preserved from initial config
});
it('should maintain consistency between currentModelId and _generationConfig.model after initialization', () => {
const modelProvidersConfig: ModelProvidersConfig = {
openai: [

View File

@@ -600,7 +600,7 @@ export class ModelsConfig {
// If credentials were manually set, don't apply modelProvider defaults
// Just update the authType and preserve the manually set credentials
if (preserveManualCredentials) {
if (preserveManualCredentials && authType === AuthType.USE_OPENAI) {
this.strictModelProviderSelection = false;
this.currentAuthType = authType;
if (modelId) {
@@ -621,7 +621,17 @@ export class ModelsConfig {
this.applyResolvedModelDefaults(resolved);
}
} else {
// If the provided modelId doesn't exist in the registry for the new authType,
// use the default model for that authType instead of keeping the old model.
// This handles the case of switching from one authType (e.g., OPENAI with
// env vars) to another (e.g., qwen-oauth): we should use the default model
// for the new authType, not the old model.
this.currentAuthType = authType;
const defaultModel =
this.modelRegistry.getDefaultModelForAuthType(authType);
if (defaultModel) {
this.applyResolvedModelDefaults(defaultModel);
}
}
}

View File

@@ -31,6 +31,7 @@ export type ModelGenerationConfig = Pick<
| 'disableCacheControl'
| 'schemaCompliance'
| 'reasoning'
| 'customHeaders'
>;
/**

View File

@@ -16,6 +16,8 @@ import {
isDeviceTokenPending,
isDeviceTokenSuccess,
isErrorResponse,
qwenOAuth2Events,
QwenOAuth2Event,
QwenOAuth2Client,
type DeviceAuthorizationResponse,
type DeviceTokenResponse,
@@ -845,6 +847,58 @@ describe('getQwenOAuthClient', () => {
SharedTokenManager.getInstance = originalGetInstance;
});
it('should include troubleshooting hints when device auth fetch fails', async () => {
// Make SharedTokenManager fail so we hit the fallback device-flow path
const mockTokenManager = {
getValidCredentials: vi
.fn()
.mockRejectedValue(new Error('Token refresh failed')),
};
const originalGetInstance = SharedTokenManager.getInstance;
SharedTokenManager.getInstance = vi.fn().mockReturnValue(mockTokenManager);
const tlsCause = new Error('unable to verify the first certificate');
(tlsCause as Error & { code?: string }).code =
'UNABLE_TO_VERIFY_LEAF_SIGNATURE';
const fetchError = new TypeError('fetch failed') as TypeError & {
cause?: unknown;
};
fetchError.cause = tlsCause;
vi.mocked(global.fetch).mockRejectedValue(fetchError);
const emitSpy = vi.spyOn(qwenOAuth2Events, 'emit');
let thrownError: unknown;
try {
const { getQwenOAuthClient } = await import('./qwenOAuth2.js');
await getQwenOAuthClient(mockConfig);
} catch (error: unknown) {
thrownError = error;
}
expect(thrownError).toBeInstanceOf(Error);
expect((thrownError as Error).message).toContain(
'Device authorization flow failed: fetch failed',
);
expect((thrownError as Error).message).toContain(
'UNABLE_TO_VERIFY_LEAF_SIGNATURE',
);
expect((thrownError as Error).message).toContain('NODE_EXTRA_CA_CERTS');
expect((thrownError as Error).message).toContain('--proxy');
expect(emitSpy).toHaveBeenCalledWith(
QwenOAuth2Event.AuthProgress,
'error',
expect.stringContaining('NODE_EXTRA_CA_CERTS'),
);
emitSpy.mockRestore();
SharedTokenManager.getInstance = originalGetInstance;
});
});
describe('CredentialsClearRequiredError', () => {

View File

@@ -13,6 +13,7 @@ import open from 'open';
import { EventEmitter } from 'events';
import type { Config } from '../config/config.js';
import { randomUUID } from 'node:crypto';
import { formatFetchErrorForUser } from '../utils/fetch.js';
import {
SharedTokenManager,
TokenManagerError,
@@ -558,6 +559,109 @@ export async function getQwenOAuthClient(
}
}
/**
* Displays a formatted box with the OAuth device authorization URL.
* Uses process.stderr.write() to bypass ConsolePatcher and ensure the auth URL
* is always visible to users, especially in non-interactive mode.
* Using stderr prevents corruption of structured JSON output (which goes to stdout)
* and follows the standard Unix convention of sending user-facing messages to stderr.
*/
function showFallbackMessage(verificationUriComplete: string): void {
const title = 'Qwen OAuth Device Authorization';
const url = verificationUriComplete;
const minWidth = 70;
const maxWidth = 80;
const boxWidth = Math.min(Math.max(title.length + 4, minWidth), maxWidth);
// Calculate the width needed for the box (account for padding)
const contentWidth = boxWidth - 4; // Subtract 2 spaces and 2 border chars
// Helper to wrap text to fit within box width
const wrapText = (text: string, width: number): string[] => {
// For URLs, break at any character if too long
if (text.startsWith('http://') || text.startsWith('https://')) {
const lines: string[] = [];
for (let i = 0; i < text.length; i += width) {
lines.push(text.substring(i, i + width));
}
return lines;
}
// For regular text, break at word boundaries
const words = text.split(' ');
const lines: string[] = [];
let currentLine = '';
for (const word of words) {
if (currentLine.length + word.length + 1 <= width) {
currentLine += (currentLine ? ' ' : '') + word;
} else {
if (currentLine) {
lines.push(currentLine);
}
currentLine = word.length > width ? word.substring(0, width) : word;
}
}
if (currentLine) {
lines.push(currentLine);
}
return lines;
};
// Build the box borders with title centered in top border
// Format: +--- Title ---+
const titleWithSpaces = ' ' + title + ' ';
const totalDashes = boxWidth - 2 - titleWithSpaces.length; // Subtract corners and title
const leftDashes = Math.floor(totalDashes / 2);
const rightDashes = totalDashes - leftDashes;
const topBorder =
'+' +
'-'.repeat(leftDashes) +
titleWithSpaces +
'-'.repeat(rightDashes) +
'+';
const emptyLine = '|' + ' '.repeat(boxWidth - 2) + '|';
const bottomBorder = '+' + '-'.repeat(boxWidth - 2) + '+';
// Build content lines
const instructionLines = wrapText(
'Please visit the following URL in your browser to authorize:',
contentWidth,
);
const urlLines = wrapText(url, contentWidth);
const waitingLine = 'Waiting for authorization to complete...';
// Write the box
process.stderr.write('\n' + topBorder + '\n');
process.stderr.write(emptyLine + '\n');
// Write instructions
for (const line of instructionLines) {
process.stderr.write(
'| ' + line + ' '.repeat(contentWidth - line.length) + ' |\n',
);
}
process.stderr.write(emptyLine + '\n');
// Write URL
for (const line of urlLines) {
process.stderr.write(
'| ' + line + ' '.repeat(contentWidth - line.length) + ' |\n',
);
}
process.stderr.write(emptyLine + '\n');
// Write waiting message
process.stderr.write(
'| ' + waitingLine + ' '.repeat(contentWidth - waitingLine.length) + ' |\n',
);
process.stderr.write(emptyLine + '\n');
process.stderr.write(bottomBorder + '\n\n');
}
async function authWithQwenDeviceFlow(
client: QwenOAuth2Client,
config: Config,
@@ -570,6 +674,50 @@ async function authWithQwenDeviceFlow(
};
qwenOAuth2Events.once(QwenOAuth2Event.AuthCancel, cancelHandler);
// Helper to check cancellation and return appropriate result
const checkCancellation = (): AuthResult | null => {
if (!isCancelled) {
return null;
}
const message = 'Authentication cancelled by user.';
console.debug('\n' + message);
qwenOAuth2Events.emit(QwenOAuth2Event.AuthProgress, 'error', message);
return { success: false, reason: 'cancelled', message };
};
// Helper to emit auth progress events
const emitAuthProgress = (
status: 'polling' | 'success' | 'error' | 'timeout' | 'rate_limit',
message: string,
): void => {
qwenOAuth2Events.emit(QwenOAuth2Event.AuthProgress, status, message);
};
// Helper to handle browser launch with error handling
const launchBrowser = async (url: string): Promise<void> => {
try {
const childProcess = await open(url);
// IMPORTANT: Attach an error handler to the returned child process.
// Without this, if `open` fails to spawn a process (e.g., `xdg-open` is not found
// in a minimal Docker container), it will emit an unhandled 'error' event,
// causing the entire Node.js process to crash.
if (childProcess) {
childProcess.on('error', (err) => {
console.debug(
'Browser launch failed:',
err.message || 'Unknown error',
);
});
}
} catch (err) {
console.debug(
'Failed to open browser:',
err instanceof Error ? err.message : 'Unknown error',
);
}
};
try {
// Generate PKCE code verifier and challenge
const { code_verifier, code_challenge } = generatePKCEPair();
@@ -592,56 +740,18 @@ async function authWithQwenDeviceFlow(
// Emit device authorization event for UI integration immediately
qwenOAuth2Events.emit(QwenOAuth2Event.AuthUri, deviceAuth);
const showFallbackMessage = () => {
console.log('\n=== Qwen OAuth Device Authorization ===');
console.log(
'Please visit the following URL in your browser to authorize:',
);
console.log(`\n${deviceAuth.verification_uri_complete}\n`);
console.log('Waiting for authorization to complete...\n');
};
// Always show the fallback message in non-interactive environments to ensure
// users can see the authorization URL even if browser launching is attempted.
// This is critical for headless/remote environments where browser launching
// may silently fail without throwing an error.
if (config.isBrowserLaunchSuppressed()) {
// Browser launch is suppressed, show fallback message
showFallbackMessage();
} else {
// Try to open the URL in browser, but always show the URL as fallback
// to handle cases where browser launch silently fails (e.g., headless servers)
showFallbackMessage();
try {
const childProcess = await open(deviceAuth.verification_uri_complete);
showFallbackMessage(deviceAuth.verification_uri_complete);
// IMPORTANT: Attach an error handler to the returned child process.
// Without this, if `open` fails to spawn a process (e.g., `xdg-open` is not found
// in a minimal Docker container), it will emit an unhandled 'error' event,
// causing the entire Node.js process to crash.
if (childProcess) {
childProcess.on('error', (err) => {
console.debug(
'Browser launch failed:',
err.message || 'Unknown error',
);
});
}
} catch (err) {
console.debug(
'Failed to open browser:',
err instanceof Error ? err.message : 'Unknown error',
);
}
// Try to open browser if not suppressed
if (!config.isBrowserLaunchSuppressed()) {
await launchBrowser(deviceAuth.verification_uri_complete);
}
// Emit auth progress event
qwenOAuth2Events.emit(
QwenOAuth2Event.AuthProgress,
'polling',
'Waiting for authorization...',
);
emitAuthProgress('polling', 'Waiting for authorization...');
console.debug('Waiting for authorization...\n');
// Poll for the token
@@ -652,11 +762,9 @@ async function authWithQwenDeviceFlow(
for (let attempt = 0; attempt < maxAttempts; attempt++) {
// Check if authentication was cancelled
if (isCancelled) {
const message = 'Authentication cancelled by user.';
console.debug('\n' + message);
qwenOAuth2Events.emit(QwenOAuth2Event.AuthProgress, 'error', message);
return { success: false, reason: 'cancelled', message };
const cancellationResult = checkCancellation();
if (cancellationResult) {
return cancellationResult;
}
try {
@@ -699,9 +807,7 @@ async function authWithQwenDeviceFlow(
// minimal stub; cache invalidation is best-effort and should not break auth.
}
// Emit auth progress success event
qwenOAuth2Events.emit(
QwenOAuth2Event.AuthProgress,
emitAuthProgress(
'success',
'Authentication successful! Access token obtained.',
);
@@ -724,9 +830,7 @@ async function authWithQwenDeviceFlow(
pollInterval = 2000; // Reset to default interval
}
// Emit polling progress event
qwenOAuth2Events.emit(
QwenOAuth2Event.AuthProgress,
emitAuthProgress(
'polling',
`Polling... (attempt ${attempt + 1}/${maxAttempts})`,
);
@@ -756,15 +860,9 @@ async function authWithQwenDeviceFlow(
});
// Check for cancellation after waiting
if (isCancelled) {
const message = 'Authentication cancelled by user.';
console.debug('\n' + message);
qwenOAuth2Events.emit(
QwenOAuth2Event.AuthProgress,
'error',
message,
);
return { success: false, reason: 'cancelled', message };
const cancellationResult = checkCancellation();
if (cancellationResult) {
return cancellationResult;
}
continue;
@@ -792,15 +890,17 @@ async function authWithQwenDeviceFlow(
message: string,
eventType: 'error' | 'rate_limit' = 'error',
): AuthResult => {
qwenOAuth2Events.emit(
QwenOAuth2Event.AuthProgress,
eventType,
message,
);
emitAuthProgress(eventType, message);
console.error('\n' + message);
return { success: false, reason, message };
};
// Check for cancellation first
const cancellationResult = checkCancellation();
if (cancellationResult) {
return cancellationResult;
}
// Handle credential caching failures - stop polling immediately
if (errorMessage.includes('Failed to cache credentials')) {
return handleError('error', errorMessage);
@@ -824,31 +924,23 @@ async function authWithQwenDeviceFlow(
}
const message = `Error polling for token: ${errorMessage}`;
qwenOAuth2Events.emit(QwenOAuth2Event.AuthProgress, 'error', message);
if (isCancelled) {
const message = 'Authentication cancelled by user.';
return { success: false, reason: 'cancelled', message };
}
emitAuthProgress('error', message);
await new Promise((resolve) => setTimeout(resolve, pollInterval));
}
}
const timeoutMessage = 'Authorization timeout, please restart the process.';
// Emit timeout error event
qwenOAuth2Events.emit(
QwenOAuth2Event.AuthProgress,
'timeout',
timeoutMessage,
);
emitAuthProgress('timeout', timeoutMessage);
console.error('\n' + timeoutMessage);
return { success: false, reason: 'timeout', message: timeoutMessage };
} catch (error: unknown) {
const errorMessage = error instanceof Error ? error.message : String(error);
const message = `Device authorization flow failed: ${errorMessage}`;
const fullErrorMessage = formatFetchErrorForUser(error, {
url: QWEN_OAUTH_BASE_URL,
});
const message = `Device authorization flow failed: ${fullErrorMessage}`;
emitAuthProgress('error', message);
console.error(message);
return { success: false, reason: 'error', message };
} finally {

View File

@@ -818,7 +818,7 @@ describe('ShellExecutionService child_process fallback', () => {
});
describe('Platform-Specific Behavior', () => {
it('should use cmd.exe on Windows', async () => {
it('should use cmd.exe and hide window on Windows', async () => {
mockPlatform.mockReturnValue('win32');
await simulateExecution('dir "foo bar"', (cp) =>
cp.emit('exit', 0, null),
@@ -829,7 +829,8 @@ describe('ShellExecutionService child_process fallback', () => {
[],
expect.objectContaining({
shell: true,
detached: true,
detached: false,
windowsHide: true,
}),
);
});

View File

@@ -229,7 +229,8 @@ export class ShellExecutionService {
stdio: ['ignore', 'pipe', 'pipe'],
windowsVerbatimArguments: true,
shell: isWindows ? true : 'bash',
detached: true,
detached: !isWindows,
windowsHide: isWindows,
env: {
...process.env,
QWEN_CODE: '1',
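Note: the fallback spawn now differs by platform: on POSIX the child is detached into its own process group so the whole group can be signalled, while on Windows it stays attached and the console window is hidden. A minimal sketch of the same option shape using Node's child_process (the command is illustrative):

import { spawn } from 'node:child_process';
import os from 'node:os';

const isWindows = os.platform() === 'win32';
const child = spawn('echo hello', [], {
  stdio: ['ignore', 'pipe', 'pipe'],
  shell: isWindows ? true : 'bash',
  detached: !isWindows,   // POSIX: new process group, terminable via `kill -- -PGID`
  windowsHide: isWindows, // Windows: suppress the flashing console window
});
child.stdout?.on('data', (chunk) => process.stdout.write(chunk));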

View File

@@ -5,8 +5,10 @@
*/
import * as fs from 'fs/promises';
import * as fsSync from 'fs';
import * as path from 'path';
import * as os from 'os';
import { watch as watchFs, type FSWatcher } from 'chokidar';
import { parse as parseYaml } from '../utils/yaml-parser.js';
import type {
SkillConfig,
@@ -29,6 +31,9 @@ export class SkillManager {
private skillsCache: Map<SkillLevel, SkillConfig[]> | null = null;
private readonly changeListeners: Set<() => void> = new Set();
private parseErrors: Map<string, SkillError> = new Map();
private readonly watchers: Map<string, FSWatcher> = new Map();
private watchStarted = false;
private refreshTimer: NodeJS.Timeout | null = null;
constructor(private readonly config: Config) {}
@@ -221,6 +226,37 @@ export class SkillManager {
this.notifyChangeListeners();
}
/**
* Starts watching skill directories for changes.
*/
async startWatching(): Promise<void> {
if (this.watchStarted) {
return;
}
this.watchStarted = true;
await this.ensureUserSkillsDir();
await this.refreshCache();
this.updateWatchersFromCache();
}
/**
* Stops watching skill directories for changes.
*/
stopWatching(): void {
for (const watcher of this.watchers.values()) {
void watcher.close().catch((error) => {
console.warn('Failed to close skills watcher:', error);
});
}
this.watchers.clear();
this.watchStarted = false;
if (this.refreshTimer) {
clearTimeout(this.refreshTimer);
this.refreshTimer = null;
}
}
/**
* Parses a SKILL.md file and returns the configuration.
*
@@ -449,4 +485,74 @@ export class SkillManager {
this.skillsCache.set(level, levelSkills);
}
}
private updateWatchersFromCache(): void {
const watchTargets = new Set<string>(
(['project', 'user'] as const)
.map((level) => this.getSkillsBaseDir(level))
.filter((baseDir) => fsSync.existsSync(baseDir)),
);
for (const existingPath of this.watchers.keys()) {
if (!watchTargets.has(existingPath)) {
void this.watchers
.get(existingPath)
?.close()
.catch((error) => {
console.warn(
`Failed to close skills watcher for ${existingPath}:`,
error,
);
});
this.watchers.delete(existingPath);
}
}
for (const watchPath of watchTargets) {
if (this.watchers.has(watchPath)) {
continue;
}
try {
const watcher = watchFs(watchPath, {
ignoreInitial: true,
})
.on('all', () => {
this.scheduleRefresh();
})
.on('error', (error) => {
console.warn(`Skills watcher error for ${watchPath}:`, error);
});
this.watchers.set(watchPath, watcher);
} catch (error) {
console.warn(
`Failed to watch skills directory at ${watchPath}:`,
error,
);
}
}
}
private scheduleRefresh(): void {
if (this.refreshTimer) {
clearTimeout(this.refreshTimer);
}
this.refreshTimer = setTimeout(() => {
this.refreshTimer = null;
void this.refreshCache().then(() => this.updateWatchersFromCache());
}, 150);
}
private async ensureUserSkillsDir(): Promise<void> {
const baseDir = this.getSkillsBaseDir('user');
try {
await fs.mkdir(baseDir, { recursive: true });
} catch (error) {
console.warn(
`Failed to create user skills directory at ${baseDir}:`,
error,
);
}
}
}
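Note: a minimal lifecycle sketch for the watcher API added above, assuming an initialized Config with experimental skills enabled:

const skillManager = new SkillManager(config);
await skillManager.startWatching(); // ensures the user skills dir, refreshes the cache, attaches chokidar watchers
// ...edits under the project or user skills directories trigger the debounced (150 ms) refresh...
skillManager.stopWatching();        // closes all watchers and clears any pending refresh timer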

View File

@@ -1,67 +1,98 @@
// Vitest Snapshot v1, https://vitest.dev/guide/snapshot.html
exports[`ShellTool > getDescription > should return the non-windows description when not on windows 1`] = `
"This tool executes a given shell command as \`bash -c <command>\`. Command can start background processes using \`&\`. Command is executed as a subprocess that leads its own process group. Command process group can be terminated as \`kill -- -PGID\` or signaled as \`kill -s SIGNAL -- -PGID\`.
"Executes a given shell command (as \`bash -c <command>\`) in a persistent shell session with optional timeout, ensuring proper handling and security measures.
**Background vs Foreground Execution:**
You should decide whether commands should run in background or foreground based on their nature:
**Use background execution (is_background: true) for:**
- Long-running development servers: \`npm run start\`, \`npm run dev\`, \`yarn dev\`, \`bun run start\`
- Build watchers: \`npm run watch\`, \`webpack --watch\`
- Database servers: \`mongod\`, \`mysql\`, \`redis-server\`
- Web servers: \`python -m http.server\`, \`php -S localhost:8000\`
- Any command expected to run indefinitely until manually stopped
**Use foreground execution (is_background: false) for:**
- One-time commands: \`ls\`, \`cat\`, \`grep\`
- Build commands: \`npm run build\`, \`make\`
- Installation commands: \`npm install\`, \`pip install\`
- Git operations: \`git commit\`, \`git push\`
- Test runs: \`npm test\`, \`pytest\`
The following information is returned:
IMPORTANT: This tool is for terminal operations like git, npm, docker, etc. DO NOT use it for file operations (reading, writing, editing, searching, finding files) - use the specialized tools for this instead.
Command: Executed command.
Directory: Directory where command was executed, or \`(root)\`.
Stdout: Output on stdout stream. Can be \`(empty)\` or partial on error and for any unwaited background processes.
Stderr: Output on stderr stream. Can be \`(empty)\` or partial on error and for any unwaited background processes.
Error: Error or \`(none)\` if no error was reported for the subprocess.
Exit Code: Exit code or \`(none)\` if terminated by signal.
Signal: Signal number or \`(none)\` if no signal was received.
Background PIDs: List of background processes started or \`(none)\`.
Process Group PGID: Process group started or \`(none)\`"
**Usage notes**:
- The command argument is required.
- You can specify an optional timeout in milliseconds (up to 600000ms / 10 minutes). If not specified, commands will timeout after 120000ms (2 minutes).
- It is very helpful if you write a clear, concise description of what this command does in 5-10 words.
- Avoid using run_shell_command with the \`find\`, \`grep\`, \`cat\`, \`head\`, \`tail\`, \`sed\`, \`awk\`, or \`echo\` commands, unless explicitly instructed or when these commands are truly necessary for the task. Instead, always prefer using the dedicated tools for these commands:
- File search: Use glob (NOT find or ls)
- Content search: Use grep_search (NOT grep or rg)
- Read files: Use read_file (NOT cat/head/tail)
- Edit files: Use edit (NOT sed/awk)
- Write files: Use write_file (NOT echo >/cat <<EOF)
- Communication: Output text directly (NOT echo/printf)
- When issuing multiple commands:
- If the commands are independent and can run in parallel, make multiple run_shell_command tool calls in a single message. For example, if you need to run "git status" and "git diff", send a single message with two run_shell_command tool calls in parallel.
- If the commands depend on each other and must run sequentially, use a single run_shell_command call with '&&' to chain them together (e.g., \`git add . && git commit -m "message" && git push\`). For instance, if one operation must complete before another starts (like mkdir before cp, Write before run_shell_command for git operations, or git add before git commit), run these operations sequentially instead.
- Use ';' only when you need to run commands sequentially but don't care if earlier commands fail
- DO NOT use newlines to separate commands (newlines are ok in quoted strings)
- Try to maintain your current working directory throughout the session by using absolute paths and avoiding usage of \`cd\`. You may use \`cd\` if the User explicitly requests it.
<good-example>
pytest /foo/bar/tests
</good-example>
<bad-example>
cd /foo/bar && pytest tests
</bad-example>
**Background vs Foreground Execution:**
- You should decide whether commands should run in background or foreground based on their nature:
- Use background execution (is_background: true) for:
- Long-running development servers: \`npm run start\`, \`npm run dev\`, \`yarn dev\`, \`bun run start\`
- Build watchers: \`npm run watch\`, \`webpack --watch\`
- Database servers: \`mongod\`, \`mysql\`, \`redis-server\`
- Web servers: \`python -m http.server\`, \`php -S localhost:8000\`
- Any command expected to run indefinitely until manually stopped
- Command is executed as a subprocess that leads its own process group. Command process group can be terminated as \`kill -- -PGID\` or signaled as \`kill -s SIGNAL -- -PGID\`.
- Use foreground execution (is_background: false) for:
- One-time commands: \`ls\`, \`cat\`, \`grep\`
- Build commands: \`npm run build\`, \`make\`
- Installation commands: \`npm install\`, \`pip install\`
- Git operations: \`git commit\`, \`git push\`
- Test runs: \`npm test\`, \`pytest\`
"
`;
exports[`ShellTool > getDescription > should return the windows description when on windows 1`] = `
"This tool executes a given shell command as \`cmd.exe /c <command>\`. Command can start background processes using \`start /b\`.
"Executes a given shell command (as \`cmd.exe /c <command>\`) in a persistent shell session with optional timeout, ensuring proper handling and security measures.
**Background vs Foreground Execution:**
You should decide whether commands should run in background or foreground based on their nature:
**Use background execution (is_background: true) for:**
- Long-running development servers: \`npm run start\`, \`npm run dev\`, \`yarn dev\`, \`bun run start\`
- Build watchers: \`npm run watch\`, \`webpack --watch\`
- Database servers: \`mongod\`, \`mysql\`, \`redis-server\`
- Web servers: \`python -m http.server\`, \`php -S localhost:8000\`
- Any command expected to run indefinitely until manually stopped
**Use foreground execution (is_background: false) for:**
- One-time commands: \`ls\`, \`cat\`, \`grep\`
- Build commands: \`npm run build\`, \`make\`
- Installation commands: \`npm install\`, \`pip install\`
- Git operations: \`git commit\`, \`git push\`
- Test runs: \`npm test\`, \`pytest\`
The following information is returned:
IMPORTANT: This tool is for terminal operations like git, npm, docker, etc. DO NOT use it for file operations (reading, writing, editing, searching, finding files) - use the specialized tools for this instead.
Command: Executed command.
Directory: Directory where command was executed, or \`(root)\`.
Stdout: Output on stdout stream. Can be \`(empty)\` or partial on error and for any unwaited background processes.
Stderr: Output on stderr stream. Can be \`(empty)\` or partial on error and for any unwaited background processes.
Error: Error or \`(none)\` if no error was reported for the subprocess.
Exit Code: Exit code or \`(none)\` if terminated by signal.
Signal: Signal number or \`(none)\` if no signal was received.
Background PIDs: List of background processes started or \`(none)\`.
Process Group PGID: Process group started or \`(none)\`"
**Usage notes**:
- The command argument is required.
- You can specify an optional timeout in milliseconds (up to 600000ms / 10 minutes). If not specified, commands will timeout after 120000ms (2 minutes).
- It is very helpful if you write a clear, concise description of what this command does in 5-10 words.
- Avoid using run_shell_command with the \`find\`, \`grep\`, \`cat\`, \`head\`, \`tail\`, \`sed\`, \`awk\`, or \`echo\` commands, unless explicitly instructed or when these commands are truly necessary for the task. Instead, always prefer using the dedicated tools for these commands:
- File search: Use glob (NOT find or ls)
- Content search: Use grep_search (NOT grep or rg)
- Read files: Use read_file (NOT cat/head/tail)
- Edit files: Use edit (NOT sed/awk)
- Write files: Use write_file (NOT echo >/cat <<EOF)
- Communication: Output text directly (NOT echo/printf)
- When issuing multiple commands:
- If the commands are independent and can run in parallel, make multiple run_shell_command tool calls in a single message. For example, if you need to run "git status" and "git diff", send a single message with two run_shell_command tool calls in parallel.
- If the commands depend on each other and must run sequentially, use a single run_shell_command call with '&&' to chain them together (e.g., \`git add . && git commit -m "message" && git push\`). For instance, if one operation must complete before another starts (like mkdir before cp, Write before run_shell_command for git operations, or git add before git commit), run these operations sequentially instead.
- Use ';' only when you need to run commands sequentially but don't care if earlier commands fail
- DO NOT use newlines to separate commands (newlines are ok in quoted strings)
- Try to maintain your current working directory throughout the session by using absolute paths and avoiding usage of \`cd\`. You may use \`cd\` if the User explicitly requests it.
<good-example>
pytest /foo/bar/tests
</good-example>
<bad-example>
cd /foo/bar && pytest tests
</bad-example>
**Background vs Foreground Execution:**
- You should decide whether commands should run in background or foreground based on their nature:
- Use background execution (is_background: true) for:
- Long-running development servers: \`npm run start\`, \`npm run dev\`, \`yarn dev\`, \`bun run start\`
- Build watchers: \`npm run watch\`, \`webpack --watch\`
- Database servers: \`mongod\`, \`mysql\`, \`redis-server\`
- Web servers: \`python -m http.server\`, \`php -S localhost:8000\`
- Any command expected to run indefinitely until manually stopped
- Use foreground execution (is_background: false) for:
- One-time commands: \`ls\`, \`cat\`, \`grep\`
- Build commands: \`npm run build\`, \`make\`
- Installation commands: \`npm install\`, \`pip install\`
- Git operations: \`git commit\`, \`git push\`
- Test runs: \`npm test\`, \`pytest\`
"
`;

View File

@@ -59,6 +59,9 @@ describe('ShellTool', () => {
getWorkspaceContext: vi
.fn()
.mockReturnValue(createMockWorkspaceContext('/test/dir')),
storage: {
getUserSkillsDir: vi.fn().mockReturnValue('/test/dir/.qwen/skills'),
},
getGeminiClient: vi.fn(),
getGitCoAuthor: vi.fn().mockReturnValue({
enabled: true,
@@ -142,6 +145,42 @@ describe('ShellTool', () => {
);
});
it('should throw an error for a directory within the user skills directory', () => {
expect(() =>
shellTool.build({
command: 'ls',
directory: '/test/dir/.qwen/skills/my-skill',
is_background: false,
}),
).toThrow(
'Explicitly running shell commands from within the user skills directory is not allowed. Please use absolute paths for command parameter instead.',
);
});
it('should throw an error for the user skills directory itself', () => {
expect(() =>
shellTool.build({
command: 'ls',
directory: '/test/dir/.qwen/skills',
is_background: false,
}),
).toThrow(
'Explicitly running shell commands from within the user skills directory is not allowed. Please use absolute paths for command parameter instead.',
);
});
it('should resolve directory path before checking user skills directory', () => {
expect(() =>
shellTool.build({
command: 'ls',
directory: '/test/dir/.qwen/skills/../skills/my-skill',
is_background: false,
}),
).toThrow(
'Explicitly running shell commands from within the user skills directory is not allowed. Please use absolute paths for command parameter instead.',
);
});
it('should return an invocation for a valid absolute directory path', () => {
(mockConfig.getWorkspaceContext as Mock).mockReturnValue(
createMockWorkspaceContext('/test/dir', ['/another/workspace']),
@@ -670,7 +709,7 @@ describe('ShellTool', () => {
),
expect.any(String),
expect.any(Function),
mockAbortSignal,
expect.any(AbortSignal),
false,
{},
);
@@ -861,7 +900,7 @@ describe('ShellTool', () => {
),
expect.any(String),
expect.any(Function),
mockAbortSignal,
expect.any(AbortSignal),
false,
{},
);
@@ -870,8 +909,8 @@ describe('ShellTool', () => {
it('should add co-author to git commit with multi-line message', async () => {
const command = `git commit -m "Fix bug
This is a detailed description
spanning multiple lines"`;
This is a detailed description
spanning multiple lines"`;
const invocation = shellTool.build({ command, is_background: false });
const promise = invocation.execute(mockAbortSignal);
@@ -894,7 +933,7 @@ spanning multiple lines"`;
),
expect.any(String),
expect.any(Function),
mockAbortSignal,
expect.any(AbortSignal),
false,
{},
);
@@ -999,4 +1038,248 @@ spanning multiple lines"`;
);
});
});
describe('timeout parameter', () => {
it('should validate timeout parameter correctly', () => {
// Valid timeout
expect(() => {
shellTool.build({
command: 'echo test',
is_background: false,
timeout: 5000,
});
}).not.toThrow();
// Valid small timeout
expect(() => {
shellTool.build({
command: 'echo test',
is_background: false,
timeout: 500,
});
}).not.toThrow();
// Zero timeout
expect(() => {
shellTool.build({
command: 'echo test',
is_background: false,
timeout: 0,
});
}).toThrow('Timeout must be a positive number.');
// Negative timeout
expect(() => {
shellTool.build({
command: 'echo test',
is_background: false,
timeout: -1000,
});
}).toThrow('Timeout must be a positive number.');
// Timeout too large
expect(() => {
shellTool.build({
command: 'echo test',
is_background: false,
timeout: 700000,
});
}).toThrow('Timeout cannot exceed 600000ms (10 minutes).');
// Non-integer timeout
expect(() => {
shellTool.build({
command: 'echo test',
is_background: false,
timeout: 5000.5,
});
}).toThrow('Timeout must be an integer number of milliseconds.');
// Non-number timeout (schema validation catches this first)
expect(() => {
shellTool.build({
command: 'echo test',
is_background: false,
timeout: 'invalid' as unknown as number,
});
}).toThrow('params/timeout must be number');
});
it('should include timeout in description for foreground commands', () => {
const invocation = shellTool.build({
command: 'npm test',
is_background: false,
timeout: 30000,
});
expect(invocation.getDescription()).toBe('npm test [timeout: 30000ms]');
});
it('should not include timeout in description for background commands', () => {
const invocation = shellTool.build({
command: 'npm start',
is_background: true,
timeout: 30000,
});
expect(invocation.getDescription()).toBe('npm start [background]');
});
it('should create combined signal with timeout for foreground execution', async () => {
const mockAbortSignal = new AbortController().signal;
const invocation = shellTool.build({
command: 'sleep 1',
is_background: false,
timeout: 5000,
});
const promise = invocation.execute(mockAbortSignal);
resolveExecutionPromise({
rawOutput: Buffer.from(''),
output: '',
exitCode: 0,
signal: null,
error: null,
aborted: false,
pid: 12345,
executionMethod: 'child_process',
});
await promise;
// Verify that ShellExecutionService was called with a combined signal
expect(mockShellExecutionService).toHaveBeenCalledWith(
expect.any(String),
expect.any(String),
expect.any(Function),
expect.any(AbortSignal),
false,
{},
);
// The signal passed should be different from the original signal
const calledSignal = mockShellExecutionService.mock.calls[0][3];
expect(calledSignal).not.toBe(mockAbortSignal);
});
it('should not create timeout signal for background execution', async () => {
const mockAbortSignal = new AbortController().signal;
const invocation = shellTool.build({
command: 'npm start',
is_background: true,
timeout: 5000,
});
const promise = invocation.execute(mockAbortSignal);
resolveExecutionPromise({
rawOutput: Buffer.from(''),
output: 'Background command started. PID: 12345',
exitCode: 0,
signal: null,
error: null,
aborted: false,
pid: 12345,
executionMethod: 'child_process',
});
await promise;
// For background execution, the original signal should be used
expect(mockShellExecutionService).toHaveBeenCalledWith(
expect.any(String),
expect.any(String),
expect.any(Function),
mockAbortSignal,
false,
{},
);
});
it('should handle timeout vs user cancellation correctly', async () => {
const userAbortController = new AbortController();
const invocation = shellTool.build({
command: 'sleep 10',
is_background: false,
timeout: 5000,
});
// Mock AbortSignal.timeout and AbortSignal.any
const mockTimeoutSignal = {
aborted: false,
addEventListener: vi.fn(),
removeEventListener: vi.fn(),
} as unknown as AbortSignal;
const mockCombinedSignal = {
aborted: true,
addEventListener: vi.fn(),
removeEventListener: vi.fn(),
} as unknown as AbortSignal;
const originalAbortSignal = globalThis.AbortSignal;
vi.stubGlobal('AbortSignal', {
...originalAbortSignal,
timeout: vi.fn().mockReturnValue(mockTimeoutSignal),
any: vi.fn().mockReturnValue(mockCombinedSignal),
});
const promise = invocation.execute(userAbortController.signal);
resolveExecutionPromise({
rawOutput: Buffer.from('partial output'),
output: 'partial output',
exitCode: null,
signal: null,
error: null,
aborted: true,
pid: 12345,
executionMethod: 'child_process',
});
const result = await promise;
// Restore original AbortSignal
vi.stubGlobal('AbortSignal', originalAbortSignal);
expect(result.llmContent).toContain('Command timed out after 5000ms');
expect(result.llmContent).toContain(
'Below is the output before it timed out',
);
});
it('should use default timeout behavior when timeout is not specified', async () => {
const mockAbortSignal = new AbortController().signal;
const invocation = shellTool.build({
command: 'echo test',
is_background: false,
});
const promise = invocation.execute(mockAbortSignal);
resolveExecutionPromise({
rawOutput: Buffer.from('test'),
output: 'test',
exitCode: 0,
signal: null,
error: null,
aborted: false,
pid: 12345,
executionMethod: 'child_process',
});
await promise;
// Should create a combined signal with the default timeout when no timeout is specified
expect(mockShellExecutionService).toHaveBeenCalledWith(
expect.any(String),
expect.any(String),
expect.any(Function),
expect.any(AbortSignal),
false,
{},
);
});
});
});

View File

@@ -34,6 +34,7 @@ import type {
import { ShellExecutionService } from '../services/shellExecutionService.js';
import { formatMemoryUsage } from '../utils/formatters.js';
import type { AnsiOutput } from '../utils/terminalSerializer.js';
import { isSubpath } from '../utils/paths.js';
import {
getCommandRoots,
isCommandAllowed,
@@ -42,10 +43,12 @@ import {
} from '../utils/shell-utils.js';
export const OUTPUT_UPDATE_INTERVAL_MS = 1000;
const DEFAULT_FOREGROUND_TIMEOUT_MS = 120000;
export interface ShellToolParams {
command: string;
is_background: boolean;
timeout?: number;
description?: string;
directory?: string;
}
@@ -72,6 +75,9 @@ export class ShellToolInvocation extends BaseToolInvocation<
// append background indicator
if (this.params.is_background) {
description += ` [background]`;
} else if (this.params.timeout) {
// append timeout for foreground commands
description += ` [timeout: ${this.params.timeout}ms]`;
}
// append optional (description), replacing any line breaks with spaces
if (this.params.description) {
@@ -130,6 +136,17 @@ export class ShellToolInvocation extends BaseToolInvocation<
};
}
const effectiveTimeout = this.params.is_background
? undefined
: (this.params.timeout ?? DEFAULT_FOREGROUND_TIMEOUT_MS);
// Create combined signal with timeout for foreground execution
let combinedSignal = signal;
if (effectiveTimeout) {
const timeoutSignal = AbortSignal.timeout(effectiveTimeout);
combinedSignal = AbortSignal.any([signal, timeoutSignal]);
}
const isWindows = os.platform() === 'win32';
const tempFileName = `shell_pgrep_${crypto
.randomBytes(6)
@@ -219,7 +236,7 @@ export class ShellToolInvocation extends BaseToolInvocation<
lastUpdateTime = Date.now();
}
},
signal,
combinedSignal,
this.config.getShouldUseNodePtyShell(),
shellExecutionConfig ?? {},
);
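Note: the hunks above pick an effective timeout and thread a combined signal into the ShellExecutionService call. A small sketch of the selection rule, using the DEFAULT_FOREGROUND_TIMEOUT_MS constant declared earlier in this file:

// Sketch: effective-timeout selection mirroring the hunk above.
const DEFAULT_FOREGROUND_TIMEOUT_MS = 120000;
function effectiveTimeoutFor(params: {
  is_background: boolean;
  timeout?: number;
}): number | undefined {
  return params.is_background
    ? undefined // background commands never get a timeout signal
    : (params.timeout ?? DEFAULT_FOREGROUND_TIMEOUT_MS);
}
// effectiveTimeoutFor({ is_background: true, timeout: 5000 })  -> undefined
// effectiveTimeoutFor({ is_background: false })                -> 120000
// effectiveTimeoutFor({ is_background: false, timeout: 5000 }) -> 5000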
@@ -270,11 +287,28 @@ export class ShellToolInvocation extends BaseToolInvocation<
let llmContent = '';
if (result.aborted) {
llmContent = 'Command was cancelled by user before it could complete.';
if (result.output.trim()) {
llmContent += ` Below is the output before it was cancelled:\n${result.output}`;
// Check if it was a timeout or user cancellation
const wasTimeout =
!this.params.is_background &&
effectiveTimeout &&
combinedSignal.aborted &&
!signal.aborted;
if (wasTimeout) {
llmContent = `Command timed out after ${effectiveTimeout}ms before it could complete.`;
if (result.output.trim()) {
llmContent += ` Below is the output before it timed out:\n${result.output}`;
} else {
llmContent += ' There was no output before it timed out.';
}
} else {
llmContent += ' There was no output before it was cancelled.';
llmContent =
'Command was cancelled by user before it could complete.';
if (result.output.trim()) {
llmContent += ` Below is the output before it was cancelled:\n${result.output}`;
} else {
llmContent += ' There was no output before it was cancelled.';
}
}
} else {
// Create a formatted error string for display, replacing the wrapper command
@@ -305,7 +339,16 @@ export class ShellToolInvocation extends BaseToolInvocation<
returnDisplayMessage = result.output;
} else {
if (result.aborted) {
returnDisplayMessage = 'Command cancelled by user.';
// Check if it was a timeout or user cancellation
const wasTimeout =
!this.params.is_background &&
effectiveTimeout &&
combinedSignal.aborted &&
!signal.aborted;
returnDisplayMessage = wasTimeout
? `Command timed out after ${effectiveTimeout}ms.`
: 'Command cancelled by user.';
} else if (result.signal) {
returnDisplayMessage = `Command terminated by signal: ${result.signal}`;
} else if (result.error) {
@@ -406,42 +449,59 @@ Co-authored-by: ${gitCoAuthorSettings.name} <${gitCoAuthorSettings.email}>`;
}
function getShellToolDescription(): string {
const toolDescription = `
const isWindows = os.platform() === 'win32';
const executionWrapper = isWindows
? 'cmd.exe /c <command>'
: 'bash -c <command>';
const processGroupNote = isWindows
? ''
: '\n - Command is executed as a subprocess that leads its own process group. Command process group can be terminated as `kill -- -PGID` or signaled as `kill -s SIGNAL -- -PGID`.';
**Background vs Foreground Execution:**
You should decide whether commands should run in background or foreground based on their nature:
**Use background execution (is_background: true) for:**
- Long-running development servers: \`npm run start\`, \`npm run dev\`, \`yarn dev\`, \`bun run start\`
- Build watchers: \`npm run watch\`, \`webpack --watch\`
- Database servers: \`mongod\`, \`mysql\`, \`redis-server\`
- Web servers: \`python -m http.server\`, \`php -S localhost:8000\`
- Any command expected to run indefinitely until manually stopped
**Use foreground execution (is_background: false) for:**
- One-time commands: \`ls\`, \`cat\`, \`grep\`
- Build commands: \`npm run build\`, \`make\`
- Installation commands: \`npm install\`, \`pip install\`
- Git operations: \`git commit\`, \`git push\`
- Test runs: \`npm test\`, \`pytest\`
The following information is returned:
return `Executes a given shell command (as \`${executionWrapper}\`) in a persistent shell session with optional timeout, ensuring proper handling and security measures.
Command: Executed command.
Directory: Directory where command was executed, or \`(root)\`.
Stdout: Output on stdout stream. Can be \`(empty)\` or partial on error and for any unwaited background processes.
Stderr: Output on stderr stream. Can be \`(empty)\` or partial on error and for any unwaited background processes.
Error: Error or \`(none)\` if no error was reported for the subprocess.
Exit Code: Exit code or \`(none)\` if terminated by signal.
Signal: Signal number or \`(none)\` if no signal was received.
Background PIDs: List of background processes started or \`(none)\`.
Process Group PGID: Process group started or \`(none)\``;
IMPORTANT: This tool is for terminal operations like git, npm, docker, etc. DO NOT use it for file operations (reading, writing, editing, searching, finding files) - use the specialized tools for this instead.
if (os.platform() === 'win32') {
return `This tool executes a given shell command as \`cmd.exe /c <command>\`. Command can start background processes using \`start /b\`.${toolDescription}`;
} else {
return `This tool executes a given shell command as \`bash -c <command>\`. Command can start background processes using \`&\`. Command is executed as a subprocess that leads its own process group. Command process group can be terminated as \`kill -- -PGID\` or signaled as \`kill -s SIGNAL -- -PGID\`.${toolDescription}`;
}
**Usage notes**:
- The command argument is required.
- You can specify an optional timeout in milliseconds (up to 600000ms / 10 minutes). If not specified, commands will timeout after 120000ms (2 minutes).
- It is very helpful if you write a clear, concise description of what this command does in 5-10 words.
- Avoid using run_shell_command with the \`find\`, \`grep\`, \`cat\`, \`head\`, \`tail\`, \`sed\`, \`awk\`, or \`echo\` commands, unless explicitly instructed or when these commands are truly necessary for the task. Instead, always prefer using the dedicated tools for these commands:
- File search: Use ${ToolNames.GLOB} (NOT find or ls)
- Content search: Use ${ToolNames.GREP} (NOT grep or rg)
- Read files: Use ${ToolNames.READ_FILE} (NOT cat/head/tail)
- Edit files: Use ${ToolNames.EDIT} (NOT sed/awk)
- Write files: Use ${ToolNames.WRITE_FILE} (NOT echo >/cat <<EOF)
- Communication: Output text directly (NOT echo/printf)
- When issuing multiple commands:
- If the commands are independent and can run in parallel, make multiple run_shell_command tool calls in a single message. For example, if you need to run "git status" and "git diff", send a single message with two run_shell_command tool calls in parallel.
- If the commands depend on each other and must run sequentially, use a single run_shell_command call with '&&' to chain them together (e.g., \`git add . && git commit -m "message" && git push\`). For instance, if one operation must complete before another starts (like mkdir before cp, Write before run_shell_command for git operations, or git add before git commit), run these operations sequentially instead.
- Use ';' only when you need to run commands sequentially but don't care if earlier commands fail
- DO NOT use newlines to separate commands (newlines are ok in quoted strings)
- Try to maintain your current working directory throughout the session by using absolute paths and avoiding usage of \`cd\`. You may use \`cd\` if the User explicitly requests it.
<good-example>
pytest /foo/bar/tests
</good-example>
<bad-example>
cd /foo/bar && pytest tests
</bad-example>
**Background vs Foreground Execution:**
- You should decide whether commands should run in background or foreground based on their nature:
- Use background execution (is_background: true) for:
- Long-running development servers: \`npm run start\`, \`npm run dev\`, \`yarn dev\`, \`bun run start\`
- Build watchers: \`npm run watch\`, \`webpack --watch\`
- Database servers: \`mongod\`, \`mysql\`, \`redis-server\`
- Web servers: \`python -m http.server\`, \`php -S localhost:8000\`
- Any command expected to run indefinitely until manually stopped
${processGroupNote}
- Use foreground execution (is_background: false) for:
- One-time commands: \`ls\`, \`cat\`, \`grep\`
- Build commands: \`npm run build\`, \`make\`
- Installation commands: \`npm install\`, \`pip install\`
- Git operations: \`git commit\`, \`git push\`
- Test runs: \`npm test\`, \`pytest\`
`;
}
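Note: purely for illustration, two hypothetical parameter objects that follow the guidance in the description string above (commands, values, and descriptions are made up):

// Foreground build with an explicit timeout (integer ms, max 600000).
const foregroundParams = {
  command: 'npm run build',
  is_background: false,
  timeout: 300000,
  description: 'Build the project',
};
// Long-running dev server; the timeout signal is not applied to background commands.
const backgroundParams = {
  command: 'npm run dev',
  is_background: true,
  description: 'Start the dev server',
};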
function getCommandDescription(): string {
@@ -485,6 +545,10 @@ export class ShellTool extends BaseDeclarativeTool<
description:
'Whether to run the command in background. Default is false. Set to true for long-running processes like development servers, watchers, or daemons that should continue running without blocking further commands.',
},
timeout: {
type: 'number',
description: 'Optional timeout in milliseconds (max 600000)',
},
description: {
type: 'string',
description:
@@ -522,10 +586,35 @@ export class ShellTool extends BaseDeclarativeTool<
if (getCommandRoots(params.command).length === 0) {
return 'Could not identify command root to obtain permission from user.';
}
if (params.timeout !== undefined) {
if (
typeof params.timeout !== 'number' ||
!Number.isInteger(params.timeout)
) {
return 'Timeout must be an integer number of milliseconds.';
}
if (params.timeout <= 0) {
return 'Timeout must be a positive number.';
}
if (params.timeout > 600000) {
return 'Timeout cannot exceed 600000ms (10 minutes).';
}
}
if (params.directory) {
if (!path.isAbsolute(params.directory)) {
return 'Directory must be an absolute path.';
}
const userSkillsDir = this.config.storage.getUserSkillsDir();
const resolvedDirectoryPath = path.resolve(params.directory);
const isWithinUserSkills = isSubpath(
userSkillsDir,
resolvedDirectoryPath,
);
if (isWithinUserSkills) {
return `Explicitly running shell commands from within the user skills directory is not allowed. Please use absolute paths for command parameter instead.`;
}
const workspaceDirs = this.config.getWorkspaceContext().getDirectories();
const isWithinWorkspace = workspaceDirs.some((wsDir) =>
params.directory!.startsWith(wsDir),
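Note: the validation hunk above enforces the timeout bounds quoted in the tool description (a positive integer, at most 600000ms). A condensed sketch of just those checks:

// Sketch of the timeout checks added above; returns an error string or null.
function checkTimeout(timeout?: number): string | null {
  if (timeout === undefined) return null;
  if (!Number.isInteger(timeout)) {
    return 'Timeout must be an integer number of milliseconds.';
  }
  if (timeout <= 0) return 'Timeout must be a positive number.';
  if (timeout > 600000) return 'Timeout cannot exceed 600000ms (10 minutes).';
  return null;
}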

View File

@@ -324,7 +324,9 @@ describe('SkillTool', () => {
'Review code for quality and best practices.',
);
expect(result.returnDisplay).toBe('Launching skill: code-review');
expect(result.returnDisplay).toBe(
'Specialized skill for reviewing code quality',
);
});
it('should include allowedTools in result when present', async () => {
@@ -349,7 +351,7 @@ describe('SkillTool', () => {
// Base description is omitted from llmContent; ensure body is present.
expect(llmText).toContain('Help write comprehensive tests.');
expect(result.returnDisplay).toBe('Launching skill: testing');
expect(result.returnDisplay).toBe('Skill for writing and running tests');
});
it('should handle skill not found error', async () => {
@@ -416,7 +418,7 @@ describe('SkillTool', () => {
).createInvocation(params);
const description = invocation.getDescription();
expect(description).toBe('Launching skill: "code-review"');
expect(description).toBe('Use skill: "code-review"');
});
it('should handle skill without additional files', async () => {
@@ -436,7 +438,9 @@ describe('SkillTool', () => {
const llmText = partToString(result.llmContent);
expect(llmText).not.toContain('## Additional Files');
expect(result.returnDisplay).toBe('Launching skill: code-review');
expect(result.returnDisplay).toBe(
'Specialized skill for reviewing code quality',
);
});
});
});

View File

@@ -49,11 +49,11 @@ export class SkillTool extends BaseDeclarativeTool<SkillParams, ToolResult> {
'Execute a skill within the main conversation. Loading available skills...', // Initial description
Kind.Read,
initialSchema,
true, // isOutputMarkdown
false, // isOutputMarkdown
false, // canUpdateOutput
);
this.skillManager = config.getSkillManager();
this.skillManager = config.getSkillManager()!;
this.skillManager.addChangeListener(() => {
void this.refreshSkills();
});
@@ -128,6 +128,10 @@ Important:
- Only use skills listed in <available_skills> below
- Do not invoke a skill that is already running
- Do not use this tool for built-in CLI commands (like /help, /clear, etc.)
- When executing scripts or loading referenced files, ALWAYS resolve absolute paths from skill's base directory. Examples:
- \`bash scripts/init.sh\` -> \`bash /path/to/skill/scripts/init.sh\`
- \`python scripts/helper.py\` -> \`python /path/to/skill/scripts/helper.py\`
- \`reference.md\` -> \`/path/to/skill/reference.md\`
</skills_instructions>
<available_skills>
@@ -183,7 +187,7 @@ class SkillToolInvocation extends BaseToolInvocation<SkillParams, ToolResult> {
}
getDescription(): string {
return `Launching skill: "${this.params.skill}"`;
return `Use skill: "${this.params.skill}"`;
}
override async shouldConfirmExecute(): Promise<false> {
@@ -238,16 +242,16 @@ class SkillToolInvocation extends BaseToolInvocation<SkillParams, ToolResult> {
const baseDir = path.dirname(skill.filePath);
// Build markdown content for LLM (show base dir, then body)
const llmContent = `Base directory for this skill: ${baseDir}\n\n${skill.body}\n`;
const llmContent = `Base directory for this skill: ${baseDir}\nImportant: ALWAYS resolve absolute paths from this base directory when working with skills.\n\n${skill.body}\n`;
return {
llmContent: [{ text: llmContent }],
returnDisplay: `Launching skill: ${skill.name}`,
returnDisplay: skill.description,
};
} catch (error) {
const errorMessage =
error instanceof Error ? error.message : String(error);
console.error(`[SkillsTool] Error launching skill: ${errorMessage}`);
console.error(`[SkillsTool] Error using skill: ${errorMessage}`);
// Log failed skill launch
logSkillLaunch(
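Note: the added instruction about resolving paths from the skill's base directory can be illustrated with a short sketch (the skill file path here is hypothetical):

import * as path from 'node:path';

// baseDir is derived exactly as in the invocation above: the directory
// containing the skill's definition file.
const skillFilePath = '/path/to/skill/SKILL.md'; // hypothetical
const baseDir = path.dirname(skillFilePath);     // '/path/to/skill'
// Relative references from the skill body resolve against baseDir:
path.resolve(baseDir, 'scripts/init.sh'); // '/path/to/skill/scripts/init.sh'
path.resolve(baseDir, 'reference.md');    // '/path/to/skill/reference.md'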

View File

@@ -36,7 +36,7 @@ interface DiffCommand {
args: string[];
}
function commandExists(cmd: string): boolean {
export function commandExists(cmd: string): boolean {
try {
execSync(
process.platform === 'win32' ? `where.exe ${cmd}` : `command -v ${cmd}`,
@@ -52,7 +52,7 @@ function commandExists(cmd: string): boolean {
* Editor command configurations for different platforms.
* Each editor can have multiple possible command names, listed in order of preference.
*/
const editorCommands: Record<
export const editorCommands: Record<
EditorType,
{ win32: string[]; default: string[] }
> = {
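Note: with commandExists and editorCommands now exported, other modules can resolve a working editor command. A hypothetical consumer (the import path is assumed):

import { commandExists, editorCommands } from './editor.js'; // path assumed

// Pick the first command name that exists for the current platform.
function resolveEditorCommand(
  editor: keyof typeof editorCommands,
): string | undefined {
  const candidates =
    process.platform === 'win32'
      ? editorCommands[editor].win32
      : editorCommands[editor].default;
  return candidates.find((cmd) => commandExists(cmd));
}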

View File

@@ -0,0 +1,52 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { describe, it, expect } from 'vitest';
import { FetchError, formatFetchErrorForUser } from './fetch.js';
describe('formatFetchErrorForUser', () => {
it('includes troubleshooting hints for TLS errors', () => {
const tlsCause = new Error('unable to verify the first certificate');
(tlsCause as Error & { code?: string }).code =
'UNABLE_TO_VERIFY_LEAF_SIGNATURE';
const fetchError = new TypeError('fetch failed') as TypeError & {
cause?: unknown;
};
fetchError.cause = tlsCause;
const message = formatFetchErrorForUser(fetchError, {
url: 'https://chat.qwen.ai',
});
expect(message).toContain('fetch failed');
expect(message).toContain('UNABLE_TO_VERIFY_LEAF_SIGNATURE');
expect(message).toContain('Troubleshooting:');
expect(message).toContain('Confirm you can reach https://chat.qwen.ai');
expect(message).toContain('--proxy');
expect(message).toContain('NODE_EXTRA_CA_CERTS');
});
it('includes troubleshooting hints for network codes', () => {
const fetchError = new FetchError(
'Request timed out after 100ms',
'ETIMEDOUT',
);
const message = formatFetchErrorForUser(fetchError, {
url: 'https://example.com',
});
expect(message).toContain('Request timed out after 100ms');
expect(message).toContain('Troubleshooting:');
expect(message).toContain('Confirm you can reach https://example.com');
expect(message).toContain('--proxy');
expect(message).not.toContain('NODE_EXTRA_CA_CERTS');
});
it('does not include troubleshooting for non-fetch errors', () => {
expect(formatFetchErrorForUser(new Error('boom'))).toBe('boom');
});
});

View File

@@ -17,6 +17,26 @@ const PRIVATE_IP_RANGES = [
/^fe80:/,
];
const TLS_ERROR_CODES = new Set([
'UNABLE_TO_GET_ISSUER_CERT_LOCALLY',
'UNABLE_TO_VERIFY_LEAF_SIGNATURE',
'SELF_SIGNED_CERT_IN_CHAIN',
'DEPTH_ZERO_SELF_SIGNED_CERT',
'CERT_HAS_EXPIRED',
'ERR_TLS_CERT_ALTNAME_INVALID',
]);
const FETCH_TROUBLESHOOTING_ERROR_CODES = new Set([
...TLS_ERROR_CODES,
'ECONNRESET',
'ETIMEDOUT',
'ECONNREFUSED',
'ENOTFOUND',
'EAI_AGAIN',
'EHOSTUNREACH',
'ENETUNREACH',
]);
export class FetchError extends Error {
constructor(
message: string,
@@ -55,3 +75,118 @@ export async function fetchWithTimeout(
clearTimeout(timeoutId);
}
}
function getErrorCode(error: unknown): string | undefined {
if (!error || typeof error !== 'object') {
return undefined;
}
if (
'code' in error &&
typeof (error as Record<string, unknown>)['code'] === 'string'
) {
return (error as Record<string, string>)['code'];
}
return undefined;
}
function formatUnknownErrorMessage(error: unknown): string | undefined {
if (typeof error === 'string') {
return error;
}
if (
typeof error === 'number' ||
typeof error === 'boolean' ||
typeof error === 'bigint'
) {
return String(error);
}
if (error instanceof Error) {
return error.message;
}
if (!error || typeof error !== 'object') {
return undefined;
}
const message = (error as Record<string, unknown>)['message'];
if (typeof message === 'string') {
return message;
}
return undefined;
}
function formatErrorCause(error: unknown): string | undefined {
if (!(error instanceof Error)) {
return undefined;
}
const cause = (error as Error & { cause?: unknown }).cause;
if (!cause) {
return undefined;
}
const causeCode = getErrorCode(cause);
const causeMessage = formatUnknownErrorMessage(cause);
if (!causeCode && !causeMessage) {
return undefined;
}
if (causeCode && causeMessage && !causeMessage.includes(causeCode)) {
return `${causeCode}: ${causeMessage}`;
}
return causeMessage ?? causeCode;
}
export function formatFetchErrorForUser(
error: unknown,
options: { url?: string } = {},
): string {
const errorMessage = getErrorMessage(error);
const code =
error instanceof Error
? (getErrorCode((error as Error & { cause?: unknown }).cause) ??
getErrorCode(error))
: getErrorCode(error);
const cause = formatErrorCause(error);
const fullErrorMessage = [
errorMessage,
cause ? `(cause: ${cause})` : undefined,
]
.filter(Boolean)
.join(' ');
const shouldShowFetchHints =
errorMessage.toLowerCase().includes('fetch failed') ||
(code != null && FETCH_TROUBLESHOOTING_ERROR_CODES.has(code));
const shouldShowTlsHint = code != null && TLS_ERROR_CODES.has(code);
if (!shouldShowFetchHints) {
return fullErrorMessage;
}
const hintLines = [
'',
'Troubleshooting:',
...(options.url
? [`- Confirm you can reach ${options.url} from this machine.`]
: []),
'- If you are behind a proxy, pass `--proxy <url>` (or set `proxy` in settings).',
...(shouldShowTlsHint
? [
'- If your network uses a corporate TLS inspection CA, set `NODE_EXTRA_CA_CERTS` to your CA bundle.',
]
: []),
];
return `${fullErrorMessage}${hintLines.join('\n')}`;
}
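Note: a minimal sketch of how a caller might use the formatFetchErrorForUser export defined above; the URL is illustrative:

async function fetchWithHints(url: string): Promise<Response | undefined> {
  try {
    return await fetch(url);
  } catch (error) {
    // For fetch failures with TLS / network error codes the message gains
    // the 'Troubleshooting:' block (proxy hint and, for TLS codes, the
    // NODE_EXTRA_CA_CERTS hint).
    console.error(formatFetchErrorForUser(error, { url }));
    return undefined;
  }
}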

View File

@@ -1,6 +1,6 @@
{
"name": "@qwen-code/sdk",
"version": "0.1.0",
"version": "0.1.3",
"description": "TypeScript SDK for programmatic access to qwen-code CLI",
"main": "./dist/index.cjs",
"module": "./dist/index.mjs",

View File

@@ -5,7 +5,7 @@
import type { SDKUserMessage } from '../types/protocol.js';
import { serializeJsonLine } from '../utils/jsonLines.js';
import { ProcessTransport } from '../transport/ProcessTransport.js';
import { parseExecutableSpec } from '../utils/cliPath.js';
import { prepareSpawnInfo, type SpawnInfo } from '../utils/cliPath.js';
import { Query } from './Query.js';
import type { QueryOptions } from '../types/types.js';
import { QueryOptionsSchema } from '../types/queryOptionsSchema.js';
@@ -32,17 +32,17 @@ export function query({
*/
options?: QueryOptions;
}): Query {
const parsedExecutable = validateOptions(options);
const spawnInfo = validateOptions(options);
const isSingleTurn = typeof prompt === 'string';
const pathToQwenExecutable =
options.pathToQwenExecutable ?? parsedExecutable.executablePath;
const pathToQwenExecutable = options.pathToQwenExecutable;
const abortController = options.abortController ?? new AbortController();
const transport = new ProcessTransport({
pathToQwenExecutable,
spawnInfo,
cwd: options.cwd,
model: options.model,
permissionMode: options.permissionMode,
@@ -97,9 +97,7 @@ export function query({
return queryInstance;
}
function validateOptions(
options: QueryOptions,
): ReturnType<typeof parseExecutableSpec> {
function validateOptions(options: QueryOptions): SpawnInfo | undefined {
const validationResult = QueryOptionsSchema.safeParse(options);
if (!validationResult.success) {
const errors = validationResult.error.errors
@@ -108,13 +106,10 @@ function validateOptions(
throw new Error(`Invalid QueryOptions: ${errors}`);
}
let parsedExecutable: ReturnType<typeof parseExecutableSpec>;
try {
parsedExecutable = parseExecutableSpec(options.pathToQwenExecutable);
return prepareSpawnInfo(options.pathToQwenExecutable);
} catch (error) {
const errorMessage = error instanceof Error ? error.message : String(error);
throw new Error(`Invalid pathToQwenExecutable: ${errorMessage}`);
}
return parsedExecutable;
}
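Note: a hypothetical single-turn call sketching the option names visible in this diff (pathToQwenExecutable, cwd, model, abortController); the prompt text, executable path, and model string are made up:

const q = query({
  prompt: 'Summarize the repository layout', // string prompt -> single turn
  options: {
    pathToQwenExecutable: '/usr/local/bin/qwen', // hypothetical path
    cwd: process.cwd(),
    model: 'qwen3-coder-plus',                   // illustrative model name
    abortController: new AbortController(),
  },
});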

View File

@@ -44,7 +44,9 @@ export class ProcessTransport implements Transport {
const cwd = this.options.cwd ?? process.cwd();
const env = { ...process.env, ...this.options.env };
const spawnInfo = prepareSpawnInfo(this.options.pathToQwenExecutable);
const spawnInfo =
this.options.spawnInfo ??
prepareSpawnInfo(this.options.pathToQwenExecutable);
const stderrMode =
this.options.debug || this.options.stderr ? 'pipe' : 'ignore';
@@ -140,6 +142,7 @@ export class ProcessTransport implements Transport {
'--output-format',
'stream-json',
'--channel=SDK',
'--experimental-skills',
];
if (this.options.model) {

Some files were not shown because too many files have changed in this diff.