Compare commits

...

357 Commits

Author SHA1 Message Date
github-actions[bot]
a86aae305d chore(release): v0.0.15-nightly.6 2025-10-15 00:13:05 +00:00
zhutao100
9d664623f5 Fix and update the token limits handling (#754)
* fix: make token limits regex normalize e.g. `some-model-1.1` -> `some-model` while preserving e.g. `gpt-4.1` as-is.

* feat: update token limits regex for latest models `GLM-4.6`, `deepseek-v3.2-exp`.

* feat: add exact token limit number 202752 per the model config file for `GLM-4.6`.
2025-10-14 16:11:55 +08:00
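The fix above (#754) turns on a small normalization step before a model id is matched against the token-limit table. A minimal TypeScript sketch of the idea, with an illustrative preserve-list and helper name that are assumptions rather than the repository's actual code:

```ts
// Sketch only: the real regex and limit table live in the project's token-limit
// module; the names and preserve-list here are assumed for illustration.
const EXACT_VERSIONED_MODELS = new Set(['gpt-4.1', 'glm-4.6', 'deepseek-v3.2-exp']);

function normalizeModelId(model: string): string {
  const id = model.toLowerCase();
  if (EXACT_VERSIONED_MODELS.has(id)) {
    return id; // the version suffix is part of the canonical name, keep as-is
  }
  // "some-model-1.1" -> "some-model"
  return id.replace(/-\d+(\.\d+)*$/, '');
}

// Exact limit recorded for GLM-4.6 per its model config file (from the commit).
const TOKEN_LIMITS: Record<string, number> = { 'glm-4.6': 202752 };

console.log(normalizeModelId('some-model-1.1'));        // "some-model"
console.log(normalizeModelId('GPT-4.1'));               // "gpt-4.1"
console.log(TOKEN_LIMITS[normalizeModelId('GLM-4.6')]); // 202752
```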
Mingholy
a779d44b38 fix: unable to quit when auth dialog is opened (#804) 2025-10-14 15:55:38 +08:00
Mingholy
40810945e0 fix: add missing trace info and cancellation events (#791)
* fix: add missing trace info and cancellation events

* fix: re-organize tool/request cancellation logging
2025-10-14 15:41:30 +08:00
Mingholy
e28255edb6 fix: token limits for qwen3-max (#724) 2025-10-14 15:40:20 +08:00
Mingholy
ae3223a317 fix: remove unavailable options (#685) 2025-10-14 15:39:48 +08:00
tanzhenxin
270dda4aa7 fix: invalid tool_calls request due to improper cancellation (#790) 2025-10-13 09:25:31 +08:00
Fan
d4fa15dd53 remove topp default value 0.0 (#785) 2025-10-09 15:41:57 +08:00
tanzhenxin
0922437bd5 chore: bump version to 0.0.14 2025-09-29 14:31:14 +08:00
tanzhenxin
9a0cb64a34 🚀 feat: DashScope cache control enhancement (#735) 2025-09-29 14:01:16 +08:00
Brando Magnani
9fce177bd8 Fix/qwen3 vl plus highres (#721)
* feat: Add Qwen3-VL-Plus token limits (256K input, 32K output)

- Added 256K input context window limit for Qwen3-VL-Plus model
- Updated output token limit from 8K to 32K for Qwen3-VL-Plus
- Added comprehensive tests for both input and output limits

As requested by Qwen maintainers for proper model support.

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>

* fix: enable high-res flag for qwen VL models

---------

Co-authored-by: Claude <noreply@anthropic.com>
2025-09-26 17:20:18 +08:00
Brando Magnani
f7841338c4 feat: Add Qwen3-VL-Plus token limits (256K input, 32K output) (#720)
- Added 256K input context window limit for Qwen3-VL-Plus model
- Updated output token limit from 8K to 32K for Qwen3-VL-Plus
- Added comprehensive tests for both input and output limits

As requested by Qwen maintainers for proper model support.

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-authored-by: Claude <noreply@anthropic.com>
2025-09-26 17:19:54 +08:00
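The two Qwen3-VL-Plus entries above amount to a per-model limit record: a 256K input context window and an output cap raised from 8K to 32K. A hedged sketch of how such limits could be represented (interface and constant names are illustrative, not the project's actual structures):

```ts
// Illustrative representation of the limits stated in the commits above.
interface ModelTokenLimits {
  inputTokenLimit: number;  // maximum context window
  outputTokenLimit: number; // maximum generation length
}

const QWEN3_VL_PLUS_LIMITS: ModelTokenLimits = {
  inputTokenLimit: 256 * 1024,  // 256K input context window
  outputTokenLimit: 32 * 1024,  // raised from 8K to 32K output
};
```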
tanzhenxin
c405434c41 Fix: TaskTool Dynamic Updates (#697) 2025-09-25 19:11:55 +08:00
tanzhenxin
673854b446 fix: Remove unreliable editCorrector that injects extra escape characters (#713) 2025-09-25 16:46:58 +08:00
tanzhenxin
4e7a7e2656 feat: Implement Plan Mode for Safe Code Planning (#658) 2025-09-24 14:26:17 +08:00
Mingholy
8379bc4d81 chore: bump version to 0.0.13 (#695) 2025-09-24 13:58:18 +08:00
tanzhenxin
e148e4be28 🐛 Fix: Resolve Markdown list display issues on Windows (#693) 2025-09-24 11:00:47 +08:00
Mingholy
48d8587bf9 feat: add yolo mode support to auto vision model switch (#652)
* feat: add yolo mode support to auto vision model switch

* feat: add cli args & env variables for switch behavior

* fix: use dedicated model names and settings

* docs: add vision model instructions

* fix: failed test case

* fix: setModel failure
2025-09-24 10:21:09 +08:00
tanzhenxin
5ecb4a2430 fix: make ripgrep lazy load, to fix vscode ide companion unable to start (#676) 2025-09-23 14:44:48 +08:00
Mingholy
9c1d7228cb fix: auth hang when select qwen-oauth (#684) 2025-09-23 14:30:22 +08:00
hokupod
deb99a3b21 feat: add OpenAI and Qwen OAuth auth support to Zed ACP integration (#678)
- Add USE_OPENAI and QWEN_OAUTH authentication methods to GeminiAgent's authMethods array
- Enables Zed editor integration to support all available authentication options
- Add test case for QWEN_OAUTH authentication configuration
- Maintains backward compatibility with existing Google authentication methods

This allows Zed users to authenticate using:
- OpenAI API key (requires OPENAI_API_KEY environment variable)
- Qwen OAuth (2000 daily requests with OAuth2 flow)
- Existing Google authentication methods (unchanged)
2025-09-23 14:29:29 +08:00
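The Zed ACP change above is effectively an additive edit to the agent's list of accepted auth methods. A minimal sketch, assuming illustrative enum values and class shape (only `authMethods`, `USE_OPENAI`, and `QWEN_OAUTH` are named in the commit itself):

```ts
// Sketch only: the enum string values and surrounding class are assumptions.
enum AuthType {
  LOGIN_WITH_GOOGLE = 'oauth-personal', // existing Google auth, unchanged
  USE_OPENAI = 'openai',                // requires OPENAI_API_KEY
  QWEN_OAUTH = 'qwen-oauth',            // OAuth2 flow with a daily request quota
}

class GeminiAgent {
  // Keeping the existing Google method first preserves backward compatibility.
  readonly authMethods: AuthType[] = [
    AuthType.LOGIN_WITH_GOOGLE,
    AuthType.USE_OPENAI,
    AuthType.QWEN_OAUTH,
  ];
}

console.log(new GeminiAgent().authMethods.join(', '));
```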
Mingholy
014059e8a6 fix: output token limit for qwen (#664) 2025-09-23 14:28:59 +08:00
Mingholy
3579d6555a chore: bump version to 0.0.12 (#662) 2025-09-19 20:13:31 +08:00
Mingholy
9a56560eb4 fix: arrow keys on windows (#661) 2025-09-19 19:44:57 +08:00
Mingholy
da0863b943 fix: missing tool call chunks for openai logging (#657) 2025-09-19 15:19:30 +08:00
Mingholy
5f68a8b6b3 fix: switch system prompt to avoid malformed tool_calls (#650)
* fix: switch system prompt to avoid malformed tool_calls

* fix: circular dependency issue and configurable tool-call style

* fix: regExp issue
2025-09-18 21:10:03 +08:00
Mingholy
761833c915 Vision model support for Qwen-OAuth (#525)
* refactor: openaiContentGenerator

* refactor: optimize stream handling

* refactor: re-organize refactored files

* fix: unit test cases

* feat: `/model` command for switching to vision model

* fix: lint error

* feat: add image tokenizer to fit vlm context window

* fix: lint and type errors

* feat: add `visionModelPreview` to control default visibility of vision models

* fix: remove deprecated files

* fix: align supported image formats with bailian doc
2025-09-18 13:32:00 +08:00
Mingholy
56808ac210 fix: reset is_background (#644) 2025-09-18 13:27:09 +08:00
Peter Stewart
724c24933c Enable tool call type coercion (#477)
* feat: enable tool call type coercion

* fix: tests for type coercion

---------

Co-authored-by: Mingholy <mingholy.lmh@gmail.com>
2025-09-18 13:04:27 +08:00
pomelo
17cdce6298 Merge pull request #638 from QwenLM/fix/subagent-update
fix: subagent system improvements and UI fixes
2025-09-18 11:12:12 +08:00
tanzhenxin
de468f0525 fix: merge issue 2025-09-17 19:52:12 +08:00
tanzhenxin
50199288ec Merge branch 'main' into fix/subagent-update 2025-09-17 19:12:22 +08:00
tanzhenxin
8803b2eb76 feat: add system-reminder to help model use subagent 2025-09-17 18:56:30 +08:00
Mingholy
b99de25e38 Merge pull request #605 from QwenLM/chore/sync-gemini-cli-v0.3.4
Chore/sync gemini cli v0.3.4
2025-09-17 18:15:26 +08:00
tanzhenxin
e552bc9609 fix: terminal flicker when subagent is executing 2025-09-17 17:01:06 +08:00
tanzhenxin
5f90472a7d fix: duplicate subagents config if qwen-code runs in home dir 2025-09-17 11:32:52 +08:00
Mingholy
d3476a2d47 fix: lint errors 2025-09-16 20:19:44 +08:00
Mingholy
f599cda7d2 fix: replace branding keywords to avoid ambiguity 2025-09-16 19:57:33 +08:00
Mingholy
9df193ca42 Merge branch 'main' into chore/sync-gemini-cli-v0.3.4 2025-09-16 19:51:32 +08:00
tanzhenxin
19950e5b7c chore: update subagent docs 2025-09-16 16:03:35 +08:00
pomelo
6dcec540d6 Merge pull request #624 from QwenLM/chore-pomelo
feat: Enhance /init command with confirmation prompt
2025-09-16 15:32:30 +08:00
pomelo
0581344d48 Merge pull request #610 from QwenLM/feat/skip-loop-detection
Add `skipLoopDetection` Configuration Option
2025-09-16 15:31:51 +08:00
pomelo
da78a9ff94 Merge pull request #627 from QwenLM/fix/win-paste-multi-line
Fix: Windows Multi-line Paste Handling with Debounced Data Processing
2025-09-16 15:28:44 +08:00
tanzhenxin
8e2fc76c15 fix: Esc unable to cancel subagent dialog 2025-09-16 15:24:58 +08:00
tanzhenxin
35efa9f04a fix: try to fix test case failures on Windows 2025-09-16 14:59:03 +08:00
mingholy.lmh
484f8642c4 docs: improve docs to avoid ambiguity 2025-09-16 14:48:49 +08:00
tanzhenxin
c093bed38c fix: add test code again 2025-09-16 14:04:07 +08:00
tanzhenxin
b9fd4737c9 fix: add debounce timer for paste event 2025-09-16 11:25:33 +08:00
pomelo-nwu
8d17864959 chore: remove QWEN.md from repo and add to .gitignore
- Delete QWEN.md from repository as it should be generated locally
- Add QWEN.md to .gitignore to prevent accidental commits
- Context files should be created by developers using /init command locally
2025-09-16 11:06:46 +08:00
pomelo-nwu
7109914a86 feat: add confirmation prompt for /init command when context file exists
- Add confirmation dialog when QWEN.md already exists and has content
- Use React.createElement to maintain .ts file compatibility
- Allow users to choose whether to regenerate existing context file
2025-09-16 11:04:35 +08:00
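The /init change above adds a guard so an existing, non-empty QWEN.md is not silently regenerated. A rough sketch of that check, assuming a hypothetical helper name (the actual command renders its confirmation dialog with React.createElement, omitted here):

```ts
import { existsSync, statSync } from 'node:fs';
import { join } from 'node:path';

// Hypothetical helper: should /init ask before overwriting the context file?
function initNeedsConfirmation(projectRoot: string): boolean {
  const contextFile = join(projectRoot, 'QWEN.md');
  // Only prompt when the file already exists and has content.
  return existsSync(contextFile) && statSync(contextFile).size > 0;
}

if (initNeedsConfirmation(process.cwd())) {
  console.log('QWEN.md already exists - confirm before regenerating it.');
}
```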
pomelo-nwu
4721a6f324 fix: reduce exit cleanup timeout in slash command processor
Decreased the exit cleanup timeout from 1000ms to 100ms to speed up the shutdown process when using slash commands.

Co-authored-by: Qwen-Coder <qwen-coder@alibabacloud.com>
2025-09-16 10:46:41 +08:00
mingholy.lmh
2c9e61af9e Merge branch 'main' into chore/sync-gemini-cli-v0.3.4 2025-09-16 10:23:28 +08:00
Mingholy
49d7947028 chore: bump version to 0.0.11 (#622) 2025-09-16 10:20:51 +08:00
mingholy.lmh
dbdb4db4f0 fix: lockfile and configs 2025-09-15 20:59:35 +08:00
tanzhenxin
3ad533c50b fix: test fix again 2025-09-15 20:13:10 +08:00
mingholy.lmh
3f535195b3 test: add project settings 2025-09-15 17:31:07 +08:00
mingholy.lmh
f19789b381 fix: lint error 2025-09-15 17:26:57 +08:00
tanzhenxin
6af52e74ab fix: test fix again 2025-09-15 16:23:10 +08:00
tanzhenxin
6dad6b7f31 fix: test fix 2025-09-15 16:03:51 +08:00
mingholy.lmh
e7850703b5 fix: gha issues 2025-09-15 14:48:29 +08:00
mingholy.lmh
62d858f344 fix: failed unit tests 2025-09-15 14:38:02 +08:00
mingholy.lmh
e9b423b43a fix: import type errors 2025-09-15 14:26:01 +08:00
tanzhenxin
2253c7b263 chore: update documentation 2025-09-15 14:19:45 +08:00
mingholy.lmh
f3cf732493 fix: propagate config to AgentExecutionDisplay to fix type errors 2025-09-15 14:11:36 +08:00
mingholy.lmh
acb93b1e1b fix: type errors 2025-09-15 14:11:31 +08:00
tanzhenxin
386538521b feat: Add skipLoopDetection Configuration Option 2025-09-15 14:02:46 +08:00
mingholy.lmh
1976837eda Merge branch 'main' into chore/sync-gemini-cli-v0.3.4 2025-09-15 13:42:42 +08:00
mingholy.lmh
1993156721 fix: change .geminiignore to .qwenignore 2025-09-14 19:38:40 +08:00
mingholy.lmh
8bb8660c72 Merge tag 'v0.3.4' of github.com:google-gemini/gemini-cli into chore/sync-gemini-cli-v0.3.4 2025-09-11 16:38:48 +08:00
mingholy.lmh
14ea33063f Merge tag 'v0.3.0' into chore/sync-gemini-cli-v0.3.0 2025-09-11 16:26:56 +08:00
gemini-cli-robot
20b6246dde chore(release): v0.3.4 2025-09-06 18:56:20 +00:00
Sandy Tao
2acadecaa1 Fix(core): Do not retry if last chunk is empty with finishReason and previous chunks are good (#7859) 2025-09-06 11:15:57 -07:00
gemini-cli-robot
443148ea1e chore(release): v0.3.3 2025-09-06 01:12:31 +00:00
Sandy Tao
ddd4659d10 Fix(core): Fix stream validation logic (#7832) 2025-09-05 18:01:04 -07:00
Victor May
a2a3c66e28 Handle cleaning up the response text in the UI when a response stream retry occurs (#7416) 2025-09-05 18:00:12 -07:00
Victor May
dfa9dda1dd Refine stream validation to prevent unnecessary retries (#7278) 2025-09-05 17:32:29 -07:00
gemini-cli-robot
4c9746561c chore(release): v0.3.2 2025-09-04 23:55:40 +00:00
Shreya Keshive
ad3bc17e45 fix(process-utils): fix bug that prevented start-up when running process walking command fails (#7757) 2025-09-04 16:25:09 -07:00
gemini-cli-robot
9e574773c9 chore(release): v0.3.1 2025-09-04 19:50:25 +00:00
Shreya Keshive
2fb14ead1f Hotfix for issue #7730 (#7739)
Co-authored-by: Taylor Mullen <ntaylormullen@google.com>
Co-authored-by: Arya Gummadi <aryagummadi@google.com>
2025-09-04 12:46:02 -07:00
gemini-cli-robot
76553622f6 chore(release): v0.3.0 2025-09-04 17:49:18 +00:00
matt korwel
7404949eff Patch 0.3.0 preview.4 (#7713)
Co-authored-by: gemini-cli-robot <gemini-cli-robot@google.com>
Co-authored-by: christine betts <chrstn@uw.edu>
Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>
Co-authored-by: Bryan Morgan <bryanmorgan@google.com>
Co-authored-by: anthony bushong <agmsb@users.noreply.github.com>
2025-09-04 17:00:46 +00:00
Gal Zahavi
52cc0f6feb Fix setting migration noisiness and merging (#7571) 2025-09-02 01:23:35 +00:00
gemini-cli-robot
0c0309abdc chore(release): v0.3.0-preview.1 2025-08-28 04:33:56 +00:00
Gal Zahavi
5f16541c38 Revert "chore(cleanup): Consolidate MockTool definitions (#7228)" (#7283) 2025-08-28 04:06:50 +00:00
Abhi
bfdddcbd99 feat(commands): Enable @file processing in TOML commands (#6716) 2025-08-28 03:22:21 +00:00
Gal Zahavi
529c2649b8 Revert "Move mockTool into test-utils (#7245)" (#7277) 2025-08-28 03:05:47 +00:00
Shardul Natu
539a49bd24 add(telemetry): Add missing telemetry for UserPromptEvent (#6885)
Co-authored-by: Shnatu <snatu@google.com>
Co-authored-by: Arya Gummadi <aryagummadi@google.com>
2025-08-28 02:17:32 +00:00
Gal Zahavi
f22263c9e8 refactor: refactor settings to a nested structure (#7244) 2025-08-28 01:39:45 +00:00
anthony bushong
b8a7bfd136 fix(e2e): skip flaky stdin context test (#7264) 2025-08-28 00:00:50 +00:00
anthony bushong
5e84006293 fix(e2e): add missing deps to fix sandbox module not found errors in cli/core (#7256) 2025-08-27 22:23:38 +00:00
Tommaso Sciortino
af4fe611ed Fix diff rendering in windows. (#7254) 2025-08-27 22:22:55 +00:00
Richie Foreman
cd2e237c73 fix(compression): Discard compression result if it results in more token usage (#7047) 2025-08-27 21:00:45 +00:00
Adam Weidman
da7901acaf Move mockTool into test-utils (#7245) 2025-08-27 20:18:15 +00:00
Tommaso Sciortino
19f2a07efa Fix shell argument parsing in windows (#7160) 2025-08-27 19:14:22 +00:00
Allen Hutchison
f2092b1ebc fix(bug): correct /about command in bug report template (#7235) 2025-08-27 18:28:50 +00:00
Adam Weidman
f0146c8b85 chore(cleanup): Consolidate MockTool definitions (#7228) 2025-08-27 18:18:26 +00:00
shishu314
5cf1c7bf79 feat(cli) - Define base class for token storage (#7221)
Co-authored-by: Shi Shu <shii@google.com>
2025-08-27 17:49:20 +00:00
David East
023053ed92 fix(tests): Fix Firebase Studio to IDE detection tests (#7163) 2025-08-27 17:31:59 +00:00
Gaurav
a33293ac60 refactor: improve intermediate result parsing in issue dedup workflow (#7218) 2025-08-27 17:14:38 +00:00
Gaurav
0c1f3acc7d fix: make test more reliable (#7233) 2025-08-27 17:13:38 +00:00
Bryant Chandler
99a28e6b6a fix: Enable disableFuzzySearch config option propagation (#7002)
Co-authored-by: Jacob Richman <jacob314@gmail.com>
2025-08-27 15:27:41 +00:00
Arya Gummadi
4c3ec1f0cc refactor: centralize tool status symbols in constants (#7054) 2025-08-27 14:54:32 +00:00
Gaurav
83a40ff9d4 fix: unset GEMINI_API_KEY env var if empty (#7214) 2025-08-27 13:20:14 +00:00
anthony bushong
ed68f8f03e feat(actions): create initial eval workflow (#7127) 2025-08-27 07:58:49 +00:00
uttamkanodia14
c7fc489005 Log Gemini CLI OS / Process platform in the clearcut (#7086) 2025-08-27 07:29:43 +00:00
N. Taylor Mullen
59cdf5933f chore(release): v0.2.1 (#7200) 2025-08-27 00:04:08 -07:00
christine betts
c79f145b37 Add prompt to migrate workspace extensions (#7065)
Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>
2025-08-27 00:43:02 +00:00
David East
be48414518 fix(ci): Fix self assign action permissions (#7130) 2025-08-27 00:25:03 +00:00
David East
2df3480cba fix(cli): make Ctrl+C UI test less flaky (#7166) 2025-08-27 00:06:02 +00:00
cornmander
1baa74ebbf Update build command to set GEMINI_SANDBOX var. (#7159)
Bypassing flaky test.
2025-08-26 20:11:39 -04:00
Lee James
3e74ff71b7 feat(errors): Make errors more informative (#7133) 2025-08-26 23:22:05 +00:00
Miguel Solorio
6fb01ddcc4 Update colors tokens for inputer/footer (#6523) 2025-08-26 22:08:47 +00:00
cornmander
327c5f889d Print error when failing to build sandbox (#7149) 2025-08-26 21:58:33 +00:00
shishu314
366483853e feat(cli) - Define shared interface for storage (#7049)
Co-authored-by: Shi Shu <shii@google.com>
2025-08-26 21:03:11 +00:00
Adam Weidman
bfef867ba7 Add a2a-server package to gemini-cli (#6597) 2025-08-26 20:49:25 +00:00
Victor May
08bdd08412 Clearcut Logging of Content Error Metrics (#7099)
Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>
2025-08-26 19:37:18 +00:00
shishu314
142192ae59 fix(cli) - Add logging for shell errors (#7007)
Co-authored-by: Shi Shu <shii@google.com>
Co-authored-by: Jacob Richman <jacob314@gmail.com>
2025-08-26 19:26:16 +00:00
Jacob Richman
df79433bec Downgrade version of ripgrep to the version from 7 months ago without a node 22 dependency (#7132) 2025-08-26 18:58:19 +00:00
cornmander
47417ec05e Fix cloudbuild step. (#7131)
Co-authored-by: Shreya Keshive <skeshive@gmail.com>
2025-08-26 18:55:06 +00:00
Allen Hutchison
231576426c feat(subagent): Enable incremental output streaming (#5819) 2025-08-26 18:53:00 +00:00
Pavel Simakov
bdd63ce3e8 Added usage details to /tools command. (#6849)
Co-authored-by: Bryan Morgan <bryanmorgan@google.com>
2025-08-26 18:07:12 +00:00
HugoMurillo
cf9de689c3 fix(#6392): latest prompt being reloaded when ending a persistent process (#6857)
Co-authored-by: jacob314 <jacob314@gmail.com>
2025-08-26 18:01:31 +00:00
Victor May
d340ddae62 Ensure a Strictly Alternating Message History (#6995)
Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>
2025-08-26 18:00:20 +00:00
shrutip90
4e49ee4c73 Make config non optional in ToolConfirmationMessage (#7083) 2025-08-26 17:02:22 +00:00
Andrew Garrett
52dae2c583 feat(cli): Add --allowed-tools flag to bypass tool confirmation (#2417) (#6453) 2025-08-26 16:17:43 +00:00
Amy He
c33a0da1df feat(mcp): Add OIDC fallback to OAuth metadata lookup (#6863)
Co-authored-by: cornmander <shikhman@google.com>
2025-08-26 16:06:26 +00:00
christine betts
51bb624d45 Add extensions enable command (#7042)
Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>
2025-08-26 15:49:00 +00:00
yoshi-taka
0324dc2eb2 chore: unused deps (#7109)
Co-authored-by: cornmander <shikhman@google.com>
2025-08-26 15:20:56 +00:00
Adam Weidman
1fd6a2f0b6 chore: format & imports (#7030) 2025-08-26 14:46:05 +00:00
christine betts
dff175c4f4 [extensions] Add disable command (#7001)
Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>
2025-08-26 14:36:55 +00:00
Mark McDonald
d77391b3cd Downgrade branch_protection to log (#2934)
Co-authored-by: Jacob Richman <jacob314@gmail.com>
2025-08-26 11:24:24 +00:00
Tommaso Sciortino
7e31577813 Standardize exit codes (#7055) 2025-08-26 04:44:45 +00:00
Silvio Junior
415a36a195 Do not call nextSpeakerCheck if there was an error processing the stream. (#7048)
Co-authored-by: christine betts <chrstn@uw.edu>
Co-authored-by: Antonio Scandurra <me@as-cii.com>
Co-authored-by: Arya Gummadi <aryagummadi@google.com>
Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>
Co-authored-by: Shreya Keshive <skeshive@gmail.com>
Co-authored-by: Abhi <43648792+abhipatel12@users.noreply.github.com>
Co-authored-by: shishu314 <shishu_1998@yahoo.com>
Co-authored-by: Shi Shu <shii@google.com>
Co-authored-by: Steven <steventohme59@gmail.com>
Co-authored-by: Pascal Birchler <pascalb@google.com>
Co-authored-by: N. Taylor Mullen <ntaylormullen@google.com>
2025-08-26 04:10:53 +00:00
fuyou
45fff8f9f7 Fix(command): line/block Comments Incorrectly Parsed as Slash Commands (#6957)
Co-authored-by: Abhi <43648792+abhipatel12@users.noreply.github.com>
2025-08-26 03:51:27 +00:00
shrutip90
97ce197f38 Treat undefined same as true for isTrustedFolder (#7073) 2025-08-26 02:57:57 +00:00
christine betts
b6cca01161 [extensions] Add an initial set of extension variables (#7035)
Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>
2025-08-26 02:13:16 +00:00
Shreya Keshive
75b1e01bb0 fix(ide): remove noisy error log (#7066) 2025-08-26 01:29:31 +00:00
shrutip90
ae1f67df04 feat: Disable YOLO and AUTO_EDIT modes for untrusted folders (#7041) 2025-08-26 00:30:04 +00:00
Arya Gummadi
2c6794feed fix: resolve three flaky tests (#7058) 2025-08-26 00:27:36 +00:00
Jack Wotherspoon
8075300e34 chore: remove CLI flags all_files and show_memory_usage (#7059) 2025-08-26 00:04:35 +00:00
N. Taylor Mullen
ad71cdab4c fix(ci): allow release branches to run (#7060) 2025-08-25 23:39:32 +00:00
Tommaso Sciortino
925d747b9d Revert "feat: add explicit license selection and status visibility (#6751)" (#7057) 2025-08-25 23:16:30 +00:00
Andrew Grigorev
d820c2335b fix(core): enable thinking explicitly in flash-lite models (#3033)
Co-authored-by: Lee James <40045512+leehagoodjames@users.noreply.github.com>
2025-08-25 22:30:40 +00:00
owenofbrien
cf5e1da69f Added session id logging for non-interactive sessions in debug mode (#7032) 2025-08-25 22:21:00 +00:00
yoshi-taka
28912589d0 unused deps (#4732) 2025-08-25 22:17:48 +00:00
Pascal Birchler
0f031a7f89 Explicit imports & exports with type modifier (#3774) 2025-08-25 22:04:53 +00:00
Arya Gummadi
71c090c696 feat: add golden snapshot test for ToolGroupMessage and improve success symbol (#7037) 2025-08-25 21:42:18 +00:00
Arya Gummadi
0a8e941097 fix(telemetry): Update logger tests to handle user email field (#7050) 2025-08-25 21:20:17 +00:00
Abhi
db0bf2b71f refactor(cli): Improve Kitty keycode handling and tests (#7046) 2025-08-25 21:14:07 +00:00
Richie Foreman
1b2249fb8f feat(ide): Enable Firebase Studio install now that FS has updated VsCode (#7027) 2025-08-25 21:10:36 +00:00
Jerop Kipruto
f719978476 feat: Update GitHub workflow configurations (#7004) 2025-08-25 21:10:20 +00:00
Pascal Birchler
ee4feea006 chore: consistently import node modules with prefix (#3013)
Co-authored-by: N. Taylor Mullen <ntaylormullen@google.com>
2025-08-25 20:11:27 +00:00
Steven
415d3413c4 feat(telemetry): Add email to telemetry prompt (#6339)
Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>
Co-authored-by: Arya Gummadi <aryagummadi@google.com>
2025-08-25 20:06:53 +00:00
shishu314
cd75d94262 Log yolo mode + number of turns (#6055)
Co-authored-by: Shi Shu <shii@google.com>
2025-08-25 20:06:47 +00:00
christine betts
f32a54fefc [extensions] Add extensions update command (#6878)
Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>
2025-08-25 19:41:15 +00:00
Abhi
41ece1a8b7 fix(keyboard): Implement Tab and Backspace handling for Kitty Protocol (#7006) 2025-08-25 18:41:10 +00:00
Shreya Keshive
776627c855 refactor(ide): Improve IDE detection discovery (#6765) 2025-08-25 18:39:57 +00:00
christine betts
0641b1c095 [extensions] Add extensions list command (#6879)
Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>
2025-08-25 18:27:38 +00:00
Arya Gummadi
4170dbdac3 fix: misaligned right border on tool calls ui and spacing in multiple tool calls ui (#6941) 2025-08-25 18:26:06 +00:00
Antonio Scandurra
7fa592f342 Show error instead of aborting if model fails to call tool (#7005) 2025-08-25 17:58:04 +00:00
christine betts
ade703944d [extensions] Add extensions uninstall command (#6877) 2025-08-25 17:40:15 +00:00
christine betts
0bd496bd51 [extensions] Add extension management install command (#6703) 2025-08-25 17:02:10 +00:00
Pascal Birchler
49cce8a15d chore(test): install and configure vitest eslint plugin (#3228)
Co-authored-by: N. Taylor Mullen <ntaylormullen@google.com>
2025-08-25 14:21:47 +00:00
Billy Biggs
04953d60c1 Introduce system defaults (vs system overrides) (#6724) 2025-08-25 04:21:22 +00:00
Shardul Natu
1918f4466b add(OTel): Add OTel logging for MalformedJsonEvent (#6912)
Co-authored-by: Shnatu <snatu@google.com>
Co-authored-by: cornmander <shikhman@google.com>
2025-08-25 02:11:41 +00:00
Pascal Birchler
bedd1d2c20 Add .prettierignore file (#6914) 2025-08-24 21:01:26 +00:00
Billy Biggs
a8cac96cc9 Support JSON schema formats using ajv-formats (#6949) 2025-08-24 16:37:09 +00:00
kookyleo
5bba15b038 fix(cli): Improve proxy test isolation and sandbox path resolution (#6555) 2025-08-23 04:43:03 +00:00
sangwook
494a996ff8 feat(core): share file list patterns between glob and grep tools (#6359)
Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>
Co-authored-by: Arya Gummadi <aryagummadi@google.com>
2025-08-23 04:35:00 +00:00
Shreya Keshive
f55b294570 Update instructions for patching a release (#6871)
Co-authored-by: matt korwel <matt.korwel@gmail.com>
Co-authored-by: Arya Gummadi <aryagummadi@google.com>
2025-08-23 04:15:03 +00:00
Sudheer Tripathi
d89f7ea9b5 fix(cli): gemini command stuck in git bash (#6397)
Co-authored-by: Arya Gummadi <aryagummadi@google.com>
2025-08-23 00:19:20 +00:00
Sandy Tao
da73f13d02 fix(core): Skip loop check for dividers (#6893) 2025-08-23 00:08:16 +00:00
Neha Prasad
1a89d18526 fix: slash command completion menu column width and spacing issues (#5797)
Co-authored-by: matt korwel <matt.korwel@gmail.com>
Co-authored-by: Arya Gummadi <aryagummadi@google.com>
2025-08-22 23:51:49 +00:00
Deepankar Sharma
53067fda74 Add support for debug logging of keystrokes to investigate #6227 (#6844)
Co-authored-by: Arya Gummadi <aryagummadi@google.com>
2025-08-22 23:31:55 +00:00
Bryan Morgan
fef89f5429 Filter thought parts before passing them to CountToken (#6859)
Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>
Co-authored-by: Arya Gummadi <aryagummadi@google.com>
2025-08-22 23:15:37 +00:00
Victor May
5b5290146a Metrics for Retries on Content Errors (#6870)
Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>
2025-08-22 23:06:29 +00:00
George Smith
33d49291ec fix(cli): Support special characters in sandbox profile path (#2038)
Co-authored-by: Jacob Richman <jacob314@gmail.com>
Co-authored-by: Allen Hutchison <adh@google.com>
2025-08-22 21:36:57 +00:00
Tommaso Sciortino
75822d3506 Change the type of ToolResult.responseParts (#6875) 2025-08-22 21:12:05 +00:00
gbbosak
9a0722625b Fix crash when encountering an included directory which doesn't exist (#6497)
Co-authored-by: Gal Zahavi <38544478+galz10@users.noreply.github.com>
2025-08-22 20:49:35 +00:00
Agus Zubiaga
cfcf14fd06 Support all content types in prompts from Zed (#6756) 2025-08-22 20:10:14 +00:00
Silvio Junior
4c1c6d2b0d Log all parts of a streaming response (#6855) 2025-08-22 20:01:45 +00:00
jason
5030ced9e1 feat: add explicit license selection and status visibility (#6751) 2025-08-22 20:01:01 +00:00
Victor May
bb8a23ae80 Retry Message Stream on Empty Chunks (#6777) 2025-08-22 19:43:53 +00:00
Santhosh Kumar
4b79ef877f feat(cli): Allow themes to be specified as file paths (#6828)
Co-authored-by: Bryan Morgan <bryanmorgan@google.com>
2025-08-22 19:28:41 +00:00
David East
11119c80f7 feat(ci): add self-assign workflow for issues (#6840) 2025-08-22 19:16:35 +00:00
matt korwel
c3cf1c61c1 fix(release): fallback to github.sha when ref is not provided (#6862) 2025-08-22 12:54:08 -07:00
Lee James
240830afac feat(mcp): log include MCP request with error (#6778) 2025-08-22 18:10:30 +00:00
Ahmed Mughal
d35abdab99 fix(editors): fix neovim closing when using modify with editor (#5337)
Co-authored-by: Jacob Richman <jacob314@gmail.com>
2025-08-22 17:57:11 +00:00
joshualitt
76bbbac7ff bug(core): Fix for "no changes" edits. (#6836) 2025-08-22 17:43:19 +00:00
Jacob MacDonald
5de66b4908 feat(mcp): Improve MCP prompt argument parsing (#6779)
Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>
2025-08-22 17:28:11 +00:00
Swapnaneel Patra
f61acf60f6 fix(core): correctly consolidate multi-part text content (#6235)
Co-authored-by: Allen Hutchison <adh@google.com>
2025-08-22 17:12:10 +00:00
Ben Guo
9c1490e985 fix(copyCommand): provide friendlier error messages for /copy command (#6723)
Co-authored-by: Ben Guo <hundunben@gmail.com>
Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>
Co-authored-by: Jacob Richman <jacob314@gmail.com>
2025-08-22 16:58:35 +00:00
Jerop Kipruto
c4a788b7b2 fix invalid json in workflow settings (#6831) 2025-08-22 16:19:07 +00:00
pokutuna
56ad22b39b fix(core): citation markers misplaced in search results containing multibyte characters (#5956)
Co-authored-by: Allen Hutchison <adh@google.com>
2025-08-22 16:09:16 +00:00
fuyou
3b29f11862 fix(cli): improve stdin handling and add initial state check (#6747) 2025-08-22 15:42:03 +00:00
Arya Gummadi
e1d5dc545d fix(checkpointing): improve error handling and messaging for Git issues (#6801) 2025-08-22 15:29:52 +00:00
Victor Miura
31cd35b8c4 fix(tools): Add an end of file list marker to ReadManyFilesTool (#5967)
Co-authored-by: Gal Zahavi <38544478+galz10@users.noreply.github.com>
Co-authored-by: Allen Hutchison <adh@google.com>
2025-08-22 15:10:14 +00:00
Nanda Kishore
528227a0f8 feat: Add programming language to CLI events (#6071)
Co-authored-by: christine betts <chrstn@uw.edu>
Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>
Co-authored-by: Adam Weidman <65992621+adamfweidman@users.noreply.github.com>
Co-authored-by: JaeHo Jang <diehreo@gmail.com>
Co-authored-by: Jacob Richman <jacob314@gmail.com>
Co-authored-by: Victor May <mayvic@google.com>
Co-authored-by: Gaurav <39389231+gsquared94@users.noreply.github.com>
Co-authored-by: joshualitt <joshualitt@google.com>
Co-authored-by: Billy Biggs <bbiggs@google.com>
Co-authored-by: Ricardo Fabbri <rfabbri@gmail.com>
Co-authored-by: Arya Gummadi <aryagummadi@google.com>
Co-authored-by: Tommaso Sciortino <sciortino@gmail.com>
Co-authored-by: Gal Zahavi <38544478+galz10@users.noreply.github.com>
Co-authored-by: Shreya Keshive <skeshive@gmail.com>
Co-authored-by: Ben Guo <36952867+HunDun0Ben@users.noreply.github.com>
Co-authored-by: Ben Guo <hundunben@gmail.com>
Co-authored-by: mkusaka <hinoshita1992@gmail.com>
2025-08-22 12:17:32 +00:00
Bryant Chandler
4ced997d63 feat(search): Add option to disable fuzzy search (#6510)
Co-authored-by: Jacob Richman <jacob314@gmail.com>
Co-authored-by: Arya Gummadi <aryagummadi@google.com>
2025-08-22 06:31:39 +00:00
fuyou
ef46d64ae5 Fix(grep): memory overflow in grep search and enhance test coverage (#5911)
Co-authored-by: Jacob Richman <jacob314@gmail.com>
2025-08-22 06:10:45 +00:00
mkusaka
51f642f0a9 fix: Ctrl+E should move to current line end, not buffer end (#6729)
Co-authored-by: Jacob Richman <jacob314@gmail.com>
Co-authored-by: Arya Gummadi <aryagummadi@google.com>
2025-08-22 04:39:58 +00:00
Ben Guo
348fa6c7c2 fix(console): fix debug icon rendering in "Debug Console" Box (#6737)
Co-authored-by: Ben Guo <hundunben@gmail.com>
Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>
Co-authored-by: Jacob Richman <jacob314@gmail.com>
Co-authored-by: Arya Gummadi <aryagummadi@google.com>
2025-08-22 04:37:51 +00:00
Shreya Keshive
5be9172ad5 fix(ide): preserve focus when showing diff view (#6795) 2025-08-22 02:24:45 +00:00
Gal Zahavi
14ca687c05 test(integration-tests): isolate user memory from test runs (#6790) 2025-08-22 00:34:13 +00:00
Tommaso Sciortino
15c62bade3 Reuse CoreToolScheduler for nonInteractiveToolExecutor (#6714) 2025-08-21 23:49:12 +00:00
Jacob Richman
29699274bb feat(settings) support editing string settings. (#6732) 2025-08-21 23:43:56 +00:00
christine betts
10286934e6 Introduce initial screen reader mode handling and flag (#6653) 2025-08-21 22:29:15 +00:00
Ricardo Fabbri
679acc45b2 fix(docs): path of chat checkpoints in manual (#6303)
Co-authored-by: Arya Gummadi <aryagummadi@google.com>
2025-08-21 22:27:23 +00:00
Billy Biggs
2dd15572ea Support IDE connections via stdio MCP (#6417) 2025-08-21 22:00:05 +00:00
joshualitt
ec41b8db8e feat(core): Annotate remaining error paths in tools with type. (#6699) 2025-08-21 21:40:18 +00:00
Gaurav
299bf58309 fix: handle extra text in gemini output for dedup workflow (#6771) 2025-08-21 20:40:44 +00:00
Victor May
720eb81890 At Command Race Condition Bugfix For Non-Interactive Mode (#6676) 2025-08-21 18:47:40 +00:00
JaeHo Jang
1e5ead6960 perf(core): parallelize memory discovery file operations performance gain (#5751)
Co-authored-by: Jacob Richman <jacob314@gmail.com>
2025-08-21 18:21:04 +00:00
Adam Weidman
714b3dab73 chore(lint config): add test-utils to eslint config (#6768) 2025-08-21 18:00:59 +00:00
christine betts
0a7879272d Fix stats display layout (#6758) 2025-08-21 17:56:18 +00:00
christine betts
a90ca626d3 Quick fix for enterprise docs (#6753)
Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>
2025-08-21 14:38:10 +00:00
官余棚
589f5e6823 feat(cli): prompt completion (#4691)
Co-authored-by: Jacob Richman <jacob314@gmail.com>
2025-08-21 08:04:04 +00:00
shrutip90
ba5309c405 Force restart on trust level change to reload settings (#6713) 2025-08-21 07:38:12 +00:00
Shardul Natu
0242ecd83a fix(metrics): Do not convert numerical metrics to strings (#6701)
Co-authored-by: Shnatu <snatu@google.com>
Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>
Co-authored-by: Bryan Morgan <bryanmorgan@google.com>
Co-authored-by: Arya Gummadi <aryagummadi@google.com>
2025-08-21 07:25:42 +00:00
pwrwpw
f8f79bf2f7 fix(core): avoid error handling on cancelled requests to prevent crash (#6039)
Co-authored-by: Jacob Richman <jacob314@gmail.com>
2025-08-21 07:18:30 +00:00
Yoichiro Tanaka
63f9e86bc3 feat(mcp-client): Handle 401 error for httpUrl (#6640) 2025-08-21 07:05:45 +00:00
bonggwan
a64394a4fa (fix): Change broken emojis (#6725) 2025-08-21 06:14:29 +00:00
cornmander
16360588d7 Add integration test to confirm environment variable propagation. (#6696) 2025-08-21 01:44:34 +00:00
Allen Hutchison
a590a033be test(integration): add failing test for stdin context with prompt (#6158) 2025-08-20 23:52:27 +00:00
Tommaso Sciortino
653267a64f Remove unused attribute (#6661) 2025-08-20 23:13:50 +00:00
Tommaso Sciortino
0193ce77dd Remove unnecessary FileErrorType. (#6697) 2025-08-20 23:13:29 +00:00
cornmander
6eb6560d42 Limit dependabot PRs to security updates (#6657) 2025-08-20 22:24:43 +00:00
Shreya Keshive
0e9b06d5c2 feat(ide): improve IDE installation UX and feedback (#6677) 2025-08-20 21:11:31 +00:00
Shreya Keshive
80ff3cd25e feat(ide ext): Write workspace path to port file (#6659) 2025-08-20 21:09:53 +00:00
Sandy Tao
6aff66f501 feat(core): Handle special characters in file search paths (#6680) 2025-08-20 20:51:49 +00:00
VigneshKumar
b4ecdd67ec fix: typos/grammar in roadmap (#6675) 2025-08-20 20:15:54 +00:00
Jacob MacDonald
1738d40745 return the JSON stringified parameters from getDescription for MCP tools and Discovered tools (#6655) 2025-08-20 20:10:02 +00:00
Victor May
4642de2a5c Fixing at command race condition (#6663) 2025-08-20 19:51:31 +00:00
Jacob Richman
52e340a11b Revert "Ignore workspace settings for untrusted folders" (#6672) 2025-08-20 19:49:15 +00:00
hritan
fd64d89da0 fix: copy command gets stuck (#6482)
Co-authored-by: Hriday Taneja <hridayt@google.com>
2025-08-20 18:42:42 +00:00
VigneshKumar
716297fb32 fix: typo in readme (#6669) 2025-08-20 18:19:32 +00:00
christine betts
99b1ba9d10 Add enterprise settings docs (#6076) 2025-08-20 17:04:03 +00:00
matt korwel
0e24805806 feat(release): update release process for nightly and preview builds (#6643)
Co-authored-by: Bryan Morgan <bryanmorgan@google.com>
2025-08-20 15:50:00 +00:00
Andrii Abramov
acedcfb8f7 Fix formatting in README.md (#6621)
Co-authored-by: Bryan Morgan <bryanmorgan@google.com>
2025-08-20 14:54:36 +00:00
Lee James
99f03bf364 test(logging): Add tests for default log fields (#6583) 2025-08-20 14:33:25 +00:00
agarwalravikant
6b843ca3a8 Changes to add MCP tool count, and MCP tool name as dimension (#6631)
Co-authored-by: Ravikant Agarwal <ravikantag@google.com>
Co-authored-by: Bryan Morgan <bryanmorgan@google.com>
2025-08-20 14:22:22 +00:00
dependabot[bot]
a773d0887c chore(deps)(deps): bump google-github-actions/run-gemini-cli from 0.1.10 to 0.1.11 (#6614)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-08-20 14:13:20 +00:00
fuyou
b6e7796346 docs: Update keyboard shortcuts for input clearing functionality (#6627) 2025-08-20 13:56:53 +00:00
cornmander
c668699e77 Add permissions specs to token generation. (#6595) 2025-08-20 05:00:02 +00:00
shrutip90
d250293c2e Ignore workspace settings for untrusted folders (#6606) 2025-08-20 04:20:41 +00:00
cornmander
179f1414da feat: add dependabot configuration (#6604) 2025-08-20 04:02:02 +00:00
matt korwel
e5f4d25f5e Update README.md (#6603)
Co-authored-by: N. Taylor Mullen <ntaylormullen@google.com>
2025-08-20 03:55:34 +00:00
Yuki Okita
21c6480b65 Refac: Centralize storage file management (#4078)
Co-authored-by: Taylor Mullen <ntaylormullen@google.com>
2025-08-20 01:55:47 +00:00
Jerop Kipruto
1049d38845 feat: update .gitignore in /setup-github (#6591) 2025-08-20 01:53:53 +00:00
Gal Zahavi
c93c06711a Update shell service integration tests (#6598) 2025-08-20 01:24:42 +00:00
Arya Gummadi
2a71c10b8a feat: auto-approve compatible pending tools when allow always is selected (#6519) 2025-08-20 01:22:41 +00:00
Blackoutta
d587c6f104 Fix IDE Companion Connection in Proxy Environments (#6308)
Co-authored-by: Jacob Richman <jacob314@gmail.com>
2025-08-20 00:32:08 +00:00
Shreya Keshive
6732665a08 fix(ide): Correctly identify IDE process when run from terminal (#6566) 2025-08-20 00:23:37 +00:00
Arya Gummadi
6505b0c8e1 fix: allow re-auth with another google account (#6544) 2025-08-20 00:06:25 +00:00
Sandy Tao
389102ec0e feat(core): Add --skip-next-speaker-check flag (#6589) 2025-08-19 23:45:13 +00:00
Shardul Natu
faff1c2ec7 update(opentelemetry): Upgrade OTel dependencies (#6540)
Co-authored-by: Shnatu <snatu@google.com>
2025-08-19 23:39:59 +00:00
Tommaso Sciortino
a01d411c5a Get ToolRegistry from config instead of passing it (#6592) 2025-08-19 23:27:15 +00:00
Gal Zahavi
f1575f6d8d feat(core): refactor shell execution to use node-pty (#6491)
Co-authored-by: Jacob Richman <jacob314@gmail.com>
2025-08-19 23:03:51 +00:00
Tommaso Sciortino
0cc2a1e7ef Remove unnecessary promise usage. (#6585) 2025-08-19 22:31:02 +00:00
Yoichiro Tanaka
1244ec6954 docs: Update mcpServers configuration documentation (#6556)
Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>
Co-authored-by: Jacob Richman <jacob314@gmail.com>
2025-08-19 21:47:36 +00:00
Jack Wotherspoon
fb5f2987f3 feat(ui): add hideFooter setting to hide footer from UI (#6505) 2025-08-19 21:06:01 +00:00
joshualitt
b9cece767d feat(core): Cleanup after migrating tools. (#6199)
Co-authored-by: Jacob Richman <jacob314@gmail.com>
2025-08-19 20:55:06 +00:00
Jacob Richman
2143731f6e fix(paste) incorrect handling of \\\n in pastes (#6532) 2025-08-19 20:41:08 +00:00
Shreya Keshive
ed1fc4ddb3 fix(ide): Fix bug where companion extension was not being installed on Windows correctly (#6576) 2025-08-19 20:25:11 +00:00
fuyou
24858b319a Fix locale-specific date formatting issue in environmentContext.ts (#5889)
Co-authored-by: Jacob Richman <jacob314@gmail.com>
2025-08-19 20:24:28 +00:00
anj-s
1b9107a8bb Remove checking for a condition just for logging (#6503) 2025-08-19 20:17:42 +00:00
Marat Boshernitsan
d543c8339a fix(core): harden user account caching (#6501) 2025-08-19 20:16:06 +00:00
owenofbrien
b561d3bbed Log all session metadata (#6423) 2025-08-19 20:06:00 +00:00
HugoMurillo
b9cf1ea3ce fix(#5605): .env file loaded after settings are parsed (#6494) 2025-08-19 19:07:42 +00:00
Ramón Medrano Llamas
b24c5887c4 feat: restart MCP servers on /mcp refresh (#5479)
Co-authored-by: Brian Ray <62354532+emeryray2002@users.noreply.github.com>
Co-authored-by: N. Taylor Mullen <ntaylormullen@google.com>
2025-08-19 19:03:19 +00:00
Shreya Keshive
4828e4daf1 feat: Add IDE client to /bug & /about if IDE mode is enabled (#6567) 2025-08-19 18:22:21 +00:00
Shreya Keshive
9588aa6ef9 feat: Show /ide subcommands based on connection status instead of ideMode boolean (#6496) 2025-08-19 17:24:58 +00:00
Akhil Appana
fde5511c27 feat(ui): implement message queuing during streaming responses (#6049)
Co-authored-by: Jacob Richman <jacob314@gmail.com>
2025-08-19 16:25:16 +00:00
Arya Gummadi
ec0d9f4ff7 fix: add privacy settings hook and tests (#6360) 2025-08-19 06:57:10 +00:00
Arya Gummadi
8f8082fe3d feat: add file change tracking to session metrics (#6094)
Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>
Co-authored-by: Jacob Richman <jacob314@gmail.com>
2025-08-19 05:57:53 +00:00
Arya Gummadi
da396bd566 fix: ensure consistent theme behavior between local and CI test runs (#6358)
Co-authored-by: Jacob Richman <jacob314@gmail.com>
2025-08-19 05:56:46 +00:00
agarwalravikant
58c2925624 Changes to add tool_type as dimension (#6538)
Co-authored-by: Ravikant Agarwal <ravikantag@google.com>
2025-08-19 05:25:47 +00:00
Ali Al Jufairi
e290a61a52 fix(settings) : Disable showing statics in the dialog (#5998)
Co-authored-by: Jacob Richman <jacob314@gmail.com>
Co-authored-by: N. Taylor Mullen <ntaylormullen@google.com>
2025-08-19 03:38:55 +00:00
Ali Al Jufairi
92bb4624c4 feat(settings): enhance settings management with generic setter and display hel… (#6202)
Co-authored-by: Jacob Richman <jacob314@gmail.com>
2025-08-19 02:28:45 +00:00
bl-ue
36ea986cfe feat(sessions): Introduce core ChatRecordingService for automatic conversation saving (#5221) 2025-08-19 00:39:57 +00:00
Gal Zahavi
6fc68ff8d4 fix(tools): Handle special characters in file paths for glob and read_many_files (#6507)
Co-authored-by: Jacob Richman <jacob314@gmail.com>
2025-08-18 23:39:05 +00:00
Conrad Irwin
fb3ceb0da4 Read and write files through Zed (#6169)
Co-authored-by: Agus Zubiaga <agus@zed.dev>
2025-08-18 22:29:45 +00:00
Arya Gummadi
4394b6ab4f fix(docs): update authentication documentation to reflect available options (#6361)
Co-authored-by: Jacob Richman <jacob314@gmail.com>
2025-08-18 22:04:01 +00:00
Gaurav
5fe4e02310 fix: GCA creds loading order (#6498) 2025-08-18 21:11:19 +00:00
Jacob MacDonald
3960ccf781 Add MCP Root change notifications (#6502) 2025-08-18 21:09:02 +00:00
Bryant Chandler
465ac9f547 feat(filesearch): Introduce non-recursive file search strategy (#6087)
Co-authored-by: Jacob Richman <jacob314@gmail.com>
Co-authored-by: Bryant Chandler <bryantchandler@chromium.org>
2025-08-18 20:43:24 +00:00
joshualitt
d66ddcd82e bug(core): Do not throw validation errors when building tools in nonInteractiveToolExecutor. (#6363) 2025-08-18 20:28:15 +00:00
Billy Biggs
91cd0db2b3 Log start session event through telemetry/loggers (#6431) 2025-08-18 20:03:03 +00:00
Richie Foreman
71f706cf29 feat(client/compression): Log telemetry when compressing chat context. (#6195) 2025-08-18 19:59:13 +00:00
cornmander
1a0cc68e29 Disable failing IDE tests. (#6481) 2025-08-18 17:51:44 +00:00
Shreya Keshive
0215811c4c Revert "Show /ide enable & /ide disable commands based on connection status" (#6486) 2025-08-18 16:42:45 +00:00
Abhi
065eb7897d fix(commands): Respect YOLO mode in custom slash commands (#6441) 2025-08-18 14:34:51 +00:00
uttamkanodia14
88fc6e5861 Sends Gemini CLI Client install id to Vertex and Gemini. (#6217) 2025-08-18 07:06:57 +00:00
fuyou
7b03a64b85 Fix URL truncation in CLI display components #5902 (#5925) 2025-08-18 05:26:34 +00:00
cornmander
133f0230c3 Add vi globals to fix integration tests. (#6445) 2025-08-18 05:17:22 +00:00
Richie Foreman
2998f27f70 chore(compiler): Enable strict property access TS compiler flag. (#6255)
Co-authored-by: Jacob Richman <jacob314@gmail.com>
2025-08-17 16:43:21 +00:00
cornmander
ec1fa954d1 Fix several IDE mode integration tests (#6408)
Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>
2025-08-17 07:55:39 +00:00
Abhi
33b9bdb11e feat(cli): Introduce arguments for shell execution in custom commands (#5966) 2025-08-17 04:02:54 +00:00
cornmander
e7dbc607a5 Fix missing import in ide integration tests. (#6407) 2025-08-17 01:26:12 +00:00
cornmander
5aadb02af0 Fix integration test. (#6403) 2025-08-17 00:08:14 +00:00
Marat Boshernitsan
bc60257e22 feat(oauth): Make oauth client a singleton to survive cache failures (#6348) 2025-08-16 05:05:59 +00:00
Jacob Richman
6c1373c332 Revert "Update semantic color tokens" (#6365) 2025-08-16 03:18:31 +00:00
Billy Biggs
d57cc0b930 Add support for HTTP OpenTelemetry exporters (#6357) 2025-08-16 01:10:21 +00:00
Sunny Sachanandani
4896c7739f [ide-mode] Fix path delimiter for multi-root workspaces on Windows (#6273)
Co-authored-by: Jacob Richman <jacob314@gmail.com>
2025-08-15 22:58:31 +00:00
Miguel Solorio
3c0af3654a Update semantic color tokens (#6253)
Co-authored-by: jacob314 <jacob314@gmail.com>
2025-08-15 22:39:54 +00:00
Evan Otero
5246aa11f4 Check for pending tool calls before appending IDE Context (#6317) 2025-08-15 22:04:47 +00:00
Gaurav
80763f5629 fix: handle multiline output in issue dedup workflow (#6338) 2025-08-15 21:33:13 +00:00
Wanlin Du
de6c759c28 fix: allow user to use responseSchema as well. (#6336) 2025-08-15 20:59:01 +00:00
Tommaso Sciortino
b55f19fdfc use toMatchSnapshot in test (#6344) 2025-08-15 20:31:00 +00:00
Jack Wotherspoon
31b4c76a6b fix: improve robustness of gemini mcp add command (#6332) 2025-08-15 19:36:38 +00:00
Deepankar Sharma
f5a5cdd973 fix(input): Handle numpad enter key in kitty protocol terminals (#6341)
Co-authored-by: Jacob Richman <jacob314@gmail.com>
2025-08-15 19:30:57 +00:00
Brian Ray
2c07dc0757 fixes for oauth spec - adds github oauth support. Resource parameter. (#6281) 2025-08-15 19:14:48 +00:00
Tommaso Sciortino
01b8a7565c Fix shell tool description to be os-specific (#6335) 2025-08-15 19:08:29 +00:00
blanca-delgado-parra
088f074839 Update quota-and-pricing.md to clarify billing (#6092)
Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>
Co-authored-by: Srinath Padmanabhan <srithreepo@google.com>
Co-authored-by: Srinath Padmanabhan <17151014+srithreepo@users.noreply.github.com>
2025-08-15 19:00:13 +00:00
Jacob Richman
bd5e49c5ff fix(input) Resolve cases where escape was broken (#6304) 2025-08-15 17:54:00 +00:00
Gal Zahavi
1a2906a8ad Revert #6088 (#6328) 2025-08-15 17:27:33 +00:00
Evan Otero
ab1c483cab feat(about): Add the IDE Client's display name to /about (#6311)
Co-authored-by: Bryan Morgan <bryanmorgan@google.com>
2025-08-15 16:32:15 +00:00
Hiroaki Mitsuyoshi
72195d5553 fix(ui): Fix hang in chat save confirmation dialog (#6312)
Co-authored-by: Bryan Morgan <bryanmorgan@google.com>
2025-08-15 15:46:45 +00:00
joshualitt
8f2fa5a537 feat(core): Migrate MockTools to declarative pattern. (#6197) 2025-08-15 15:44:26 +00:00
LongYinan
d2f4e2664e chore: remove duplicated lockfile (#6291)
Co-authored-by: Bryan Morgan <bryanmorgan@google.com>
2025-08-15 15:39:58 +00:00
Jacob Richman
32d1ac3ce2 fix(input) ctrl-j support was lost switching to keyBindings.ts (#6305)
Co-authored-by: Bryan Morgan <bryanmorgan@google.com>
2025-08-15 15:31:17 +00:00
owenofbrien
ddbe65e8c3 Add session id to session summary and /bug template (#6313)
Co-authored-by: Bryan Morgan <bryanmorgan@google.com>
2025-08-15 15:15:54 +00:00
Pyush Sinha
a92299069d fix: pendingSlashCommandHistoryItems corrected to properly memoize updates (#6282)
Co-authored-by: Bryan Morgan <bryanmorgan@google.com>
2025-08-15 15:11:43 +00:00
cornmander
41c5195ed3 feat(shell): Include disallowed commands in block reason (#6278)
Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>
2025-08-15 14:37:49 +00:00
Gaurav
a131555c9c fix: update issue dedup workflow (#6275) 2025-08-15 12:51:08 +00:00
Miguel Solorio
2e6c3580df Stylize diff line numbers & characters (#6269) 2025-08-15 06:18:39 +00:00
Jacob Richman
2690123af0 Fix flaky test for SettingsDialog. (#6294) 2025-08-15 06:04:48 +00:00
hritan
d46b91e09d fix: console patcher initialised in main (#6252)
Co-authored-by: Hriday Taneja <hridayt@google.com>
Co-authored-by: Jacob Richman <jacob314@gmail.com>
2025-08-15 04:25:27 +00:00
JAYADITYA
93559d65c8 update docs (#6204)
Co-authored-by: Jacob Richman <jacob314@gmail.com>
2025-08-15 03:15:27 +00:00
Shreya Keshive
a84f749310 Show /ide enable & /ide disable commands based on connection status (#6248)
Co-authored-by: matt korwel <matt.korwel@gmail.com>
2025-08-15 00:22:32 +00:00
Shreya Keshive
db347eeee8 IDE integration Gemini command multi-folder support + bump version (#6265)
Co-authored-by: matt korwel <matt.korwel@gmail.com>
2025-08-15 00:07:06 +00:00
Shreya Keshive
cf7e6ff52d Fix license notice generation script to include transitive dependencies (#6272)
Co-authored-by: matt korwel <matt.korwel@gmail.com>
2025-08-15 00:06:20 +00:00
Jacob Richman
6037cb5d60 Fix bug where RadioButtonSelect treated an omitted isFocus parameter (#6274) 2025-08-14 23:48:54 +00:00
Jacob Richman
a5c81e3fe0 Fix flake due to using wait instead of waitFor (#6277) 2025-08-14 23:39:12 +00:00
HugoMurillo
8c0c8d7770 fix(#5340): unable to load chats with weird characters (#5969) 2025-08-14 23:24:57 +00:00
Tommaso Sciortino
1a41ba7daf Prevent writing outside of the workspace roots (#6178)
Co-authored-by: Jacob Richman <jacob314@gmail.com>
2025-08-14 22:59:37 +00:00
Lee James
f47af1607a bug(mcp): catch errors reported by GitHub MCP (#6194) 2025-08-14 22:30:05 +00:00
Jerop Kipruto
a01db2cfd5 feat(triage): Improve GitHub issue triage workflows (#6120) 2025-08-14 22:20:43 +00:00
Gal Zahavi
980091cbc2 feat(core): refactor shell execution to use node-pty (#6088) 2025-08-14 20:40:12 +00:00
joshualitt
48af0456c1 feat(core): Migrate web-search, write-file, and discovered-tool. (#6188)
Co-authored-by: Jacob Richman <jacob314@gmail.com>
2025-08-14 20:28:33 +00:00
christine betts
5c5fc89eb1 [ide-mode] Support multi-folder workspaces (#6177) 2025-08-14 20:12:57 +00:00
Tommaso Sciortino
e06d774996 docs: remove reference to Gemini Code Assist for Workspace (#6247) 2025-08-14 18:40:15 +00:00
shrutip90
69c5582723 feat: Show untrusted status in the Footer (#6210)
Co-authored-by: Jacob Richman <jacob314@gmail.com>
2025-08-14 18:15:48 +00:00
Shreya Keshive
69d666cfaf Fix release notes generation (#6233) 2025-08-14 18:13:13 +00:00
christine betts
af93a10a92 [ide-mode] Write port to file in ide-server (#5811)
Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>
2025-08-14 18:09:19 +00:00
Wietse Venema
ec7b84191f feat: Allow combining -p and stdin for prompt input (#4406)
Co-authored-by: Allen Hutchison <adh@google.com>
2025-08-14 18:08:59 +00:00
Shreya Keshive
798c4d1311 Update IDE integration context toggle shortcut to ctrl+G (#6245) 2025-08-14 17:50:20 +00:00
christine betts
2416a80e9c [ide-mode] Add docs on running the vscode companion extension locally (#6145)
Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>
Co-authored-by: Shreya <shreyakeshive@google.com>
Co-authored-by: Shreya Keshive <skeshive@gmail.com>
2025-08-14 17:47:36 +00:00
joshualitt
ef54f720de bug(cli): Exclude only specific tests. (#6244) 2025-08-14 17:35:34 +00:00
Kamal Raj Sekar
4973e7e1e0 /chat save command saves empty conversations with only system context (#6121)
Co-authored-by: Jacob Richman <jacob314@gmail.com>
2025-08-14 16:30:30 +00:00
Richie Foreman
8bebaedad4 chore(vscode): Add eslint as a recommended extension (#6196) 2025-08-14 16:14:02 +00:00
christine betts
d6403c67ee [ide-mode] Suggest the extension name in the installation messages (#6182) 2025-08-14 14:57:36 +00:00
Jerop Kipruto
2fc1ef7d59 Docs: update overview of Gemini CLI GitHub Actions (#6198) 2025-08-14 14:46:32 +00:00
Shreya Keshive
e74dc4d0e0 Update versioning script to also bump version for companion extension so they stay in sync (#6075) 2025-08-14 14:30:16 +00:00
owenofbrien
dd55a82a28 Log CLI version and git commit hash (v2) (#6176) 2025-08-14 10:12:26 +00:00
matt korwel
3e004048cf chore(release): v0.1.21 (#6207)
Co-authored-by: gemini-cli-robot <gemini-cli-robot@google.com>
2025-08-14 06:14:25 +00:00
650 changed files with 39458 additions and 17969 deletions


@@ -0,0 +1 @@
action: 'log'


@@ -26,15 +26,11 @@ steps:
       - |-
         SHELL_TAG_NAME="$TAG_NAME"
         FINAL_TAG="$SHORT_SHA" # Default to SHA
-        if [[ "$$SHELL_TAG_NAME" == *"-nightly"* ]]; then
-          echo "Nightly release detected."
-          FINAL_TAG="$${SHELL_TAG_NAME#v}"
-        # Also escape the variable in the regex match
-        elif [[ "$$SHELL_TAG_NAME" =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
-          echo "Official release detected."
+        if [[ "$$SHELL_TAG_NAME" =~ ^v[0-9]+\.[0-9]+\.[0-9]+(-[a-zA-Z0-9.-]+)?$ ]]; then
+          echo "Release detected."
           FINAL_TAG="$${SHELL_TAG_NAME#v}"
         else
-          echo "Development/RC release detected. Using commit SHA as tag."
+          echo "Development release detected. Using commit SHA as tag."
         fi
         echo "Determined image tag: $$FINAL_TAG"
         echo "$$FINAL_TAG" > /workspace/image_tag.txt


@@ -32,6 +32,9 @@ body:
      description: 'Please paste the full text from the `/about` command run from Qwen Code. Also include which platform (macOS, Windows, Linux).'
      value: |
        <details>
        <summary>Client Information</summary>
        Run `qwen` to enter the interactive CLI, then run the `/about` command.
        ```console
        $ qwen /about

.github/dependabot.yml (new file, 35 lines)

@@ -0,0 +1,35 @@
# See https://docs.github.com/en/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file
version: 2
updates:
  - package-ecosystem: 'npm'
    directory: '/'
    schedule:
      interval: 'daily'
    target-branch: 'main'
    commit-message:
      prefix: 'chore(deps)'
      include: 'scope'
    reviewers:
      - 'google-gemini/gemini-cli-askmode-approvers'
    groups:
      # Group all non-major updates together.
      # This is to reduce the number of PRs that need to be reviewed.
      # Major updates will still be created as separate PRs.
      npm-minor-patch:
        applies-to: 'version-updates'
        update-types:
          - 'minor'
          - 'patch'
    open-pull-requests-limit: 0
  - package-ecosystem: 'github-actions'
    directory: '/'
    schedule:
      interval: 'daily'
    target-branch: 'main'
    commit-message:
      prefix: 'chore(deps)'
      include: 'scope'
    reviewers:
      - 'google-gemini/gemini-cli-askmode-approvers'
    open-pull-requests-limit: 0


@@ -30,6 +30,10 @@ jobs:
         with:
           app-id: '${{ secrets.APP_ID }}'
           private-key: '${{ secrets.PRIVATE_KEY }}'
+          permission-issues: 'write'
+          permission-pull-requests: 'read'
+          permission-discussions: 'read'
+          permission-contents: 'read'
       - name: 'Generate Report 📜'
         id: 'report'
@@ -164,7 +168,7 @@ jobs:
       - name: '🤖 Get Insights from Report'
         if: |-
           ${{ steps.report.outputs.report_body != '' }}
-        uses: 'google-github-actions/run-gemini-cli@06123c6a203eb7a964ce3be7c48479cc66059f23' # ratchet:google-github-actions/run-gemini-cli@v0
+        uses: 'google-github-actions/run-gemini-cli@a3bf79042542528e91937b3a3a6fbc4967ee3c31' # ratchet:google-github-actions/run-gemini-cli@v0
         env:
           GITHUB_TOKEN: '${{ steps.generate_token.outputs.token }}'
           REPOSITORY: '${{ github.repository }}'

.github/workflows/eval.yml (new file, 29 lines)

@@ -0,0 +1,29 @@
name: 'Eval'
on:
  workflow_dispatch:
jobs:
  eval:
    name: 'Eval'
    runs-on: 'ubuntu-latest'
    strategy:
      matrix:
        node-version:
          - '20.x'
          - '22.x'
          - '24.x'
    steps:
      - name: 'Set up Node.js ${{ matrix.node-version }}'
        uses: 'actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020' # ratchet:actions/setup-node@v4
        with:
          node-version: '${{ matrix.node-version }}'
          cache: 'npm'
      - name: 'Set up Python'
        uses: 'actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065' # ratchet:actions/setup-python@v5
        with:
          python-version: '3.11'
      - name: 'Install and configure Poetry'
        uses: 'snok/install-poetry@76e04a911780d5b312d89783f7b1cd627778900a' # ratchet:snok/install-poetry@v1


@@ -0,0 +1,262 @@
name: '🏷️ Gemini Automated Issue Deduplication'
on:
issues:
types:
- 'opened'
- 'reopened'
issue_comment:
types:
- 'created'
workflow_dispatch:
inputs:
issue_number:
description: 'issue number to dedup'
required: true
type: 'number'
concurrency:
group: '${{ github.workflow }}-${{ github.event.issue.number }}'
cancel-in-progress: true
defaults:
run:
shell: 'bash'
jobs:
find-duplicates:
if: |-
github.repository == 'google-gemini/gemini-cli' &&
vars.TRIAGE_DEDUPLICATE_ISSUES != '' &&
(github.event_name == 'issues' ||
github.event_name == 'workflow_dispatch' ||
(github.event_name == 'issue_comment' &&
contains(github.event.comment.body, '@gemini-cli /deduplicate') &&
(github.event.comment.author_association == 'OWNER' ||
github.event.comment.author_association == 'MEMBER' ||
github.event.comment.author_association == 'COLLABORATOR')))
permissions:
contents: 'read'
id-token: 'write' # Required for WIF, see https://docs.github.com/en/actions/how-tos/secure-your-work/security-harden-deployments/oidc-in-google-cloud-platform#adding-permissions-settings
issues: 'read'
statuses: 'read'
packages: 'read'
timeout-minutes: 20
runs-on: 'ubuntu-latest'
outputs:
duplicate_issues_csv: '${{ env.DUPLICATE_ISSUES_CSV }}'
steps:
- name: 'Checkout'
uses: 'actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8' # ratchet:actions/checkout@v5
- name: 'Log in to GitHub Container Registry'
uses: 'docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1' # ratchet:docker/login-action@v3
with:
registry: 'ghcr.io'
username: '${{ github.actor }}'
password: '${{ secrets.GITHUB_TOKEN }}'
- name: 'Find Duplicate Issues'
uses: 'google-github-actions/run-gemini-cli@a3bf79042542528e91937b3a3a6fbc4967ee3c31' # ratchet:google-github-actions/run-gemini-cli@v0
id: 'gemini_issue_deduplication'
env:
GITHUB_TOKEN: '${{ secrets.GITHUB_TOKEN }}'
ISSUE_TITLE: '${{ github.event.issue.title }}'
ISSUE_BODY: '${{ github.event.issue.body }}'
ISSUE_NUMBER: '${{ github.event.issue.number }}'
REPOSITORY: '${{ github.repository }}'
FIRESTORE_PROJECT: '${{ vars.FIRESTORE_PROJECT }}'
with:
gcp_workload_identity_provider: '${{ vars.GCP_WIF_PROVIDER }}'
gcp_project_id: '${{ vars.GOOGLE_CLOUD_PROJECT }}'
gcp_location: '${{ vars.GOOGLE_CLOUD_LOCATION }}'
gcp_service_account: '${{ vars.SERVICE_ACCOUNT_EMAIL }}'
gemini_api_key: '${{ secrets.GEMINI_API_KEY }}'
use_vertex_ai: '${{ vars.GOOGLE_GENAI_USE_VERTEXAI }}'
use_gemini_code_assist: '${{ vars.GOOGLE_GENAI_USE_GCA }}'
settings: |-
{
"mcpServers": {
"issue_deduplication": {
"command": "docker",
"args": [
"run",
"-i",
"--rm",
"--network", "host",
"-e", "GITHUB_TOKEN",
"-e", "GEMINI_API_KEY",
"-e", "DATABASE_TYPE",
"-e", "FIRESTORE_DATABASE_ID",
"-e", "GCP_PROJECT",
"-e", "GOOGLE_APPLICATION_CREDENTIALS=/app/gcp-credentials.json",
"-v", "${GOOGLE_APPLICATION_CREDENTIALS}:/app/gcp-credentials.json",
"ghcr.io/google-gemini/gemini-cli-issue-triage@sha256:e3de1523f6c83aabb3c54b76d08940a2bf42febcb789dd2da6f95169641f94d3"
],
"env": {
"GITHUB_TOKEN": "${GITHUB_TOKEN}",
"GEMINI_API_KEY": "${{ secrets.GEMINI_API_KEY }}",
"DATABASE_TYPE":"firestore",
"GCP_PROJECT": "${FIRESTORE_PROJECT}",
"FIRESTORE_DATABASE_ID": "(default)",
"GOOGLE_APPLICATION_CREDENTIALS": "${GOOGLE_APPLICATION_CREDENTIALS}"
},
"enabled": true,
"timeout": 600000
}
},
"maxSessionTurns": 25,
"coreTools": [
"run_shell_command(echo)",
"run_shell_command(gh issue view)"
],
"telemetry": {
"enabled": true,
"target": "gcp"
}
}
prompt: |-
## Role
You are an issue de-duplication assistant. Your goal is to find
duplicate issues for a given issue.
## Steps
1. **Find Potential Duplicates:**
- The repository is ${{ github.repository }} and the issue number is ${{ github.event.issue.number }}.
- Use the `duplicates` tool with the `repo` and `issue_number` to find potential duplicates for the current issue. Do not use the `threshold` parameter.
- If no duplicates are found, you are done.
- Print the JSON output from the `duplicates` tool to the logs.
2. **Refine Duplicates List (if necessary):**
- If the `duplicates` tool returns between 1 and 14 results, you must refine the list.
- For each potential duplicate issue, run `gh issue view <issue-number> --json title,body,comments` to fetch its content.
- Also fetch the content of the original issue: `gh issue view "${ISSUE_NUMBER}" --json title,body,comments`.
- Carefully analyze the content (title, body, comments) of the original issue and all potential duplicates.
- It is very important that, if the comments on either issue mention they are not duplicates of each other, you treat them as not duplicates.
- Based on your analysis, create a final list containing only the issues you are highly confident are actual duplicates.
- If your final list is empty, you are done.
- Print to the logs if you omitted any potential duplicates based on your analysis.
- If the `duplicates` tool returned 15+ results, use the top 15 matches (based on descending similarity score value) to perform this step.
3. **Output final duplicates list as CSV:**
- Convert the list of appropriate duplicate issue numbers into a comma-separated list (CSV). If there are no appropriate duplicates, use the empty string.
- Use the "echo" shell command to append the CSV of issue numbers into the filepath referenced by the environment variable "${GITHUB_ENV}":
echo "DUPLICATE_ISSUES_CSV=[DUPLICATE_ISSUES_AS_CSV]" >> "${GITHUB_ENV}"
## Guidelines
- Only use the `duplicates` and `run_shell_command` tools.
- The `run_shell_command` tool can be used with `gh issue view`.
- Do not download or read media files like images, videos, or links. The `--json` flag for `gh issue view` will prevent this.
- Do not modify the issue content or status.
- Do not add comments or labels.
- Reference all shell variables as "${VAR}" (with quotes and braces).
add-comment-and-label:
needs: 'find-duplicates'
if: |-
github.repository == 'google-gemini/gemini-cli' &&
vars.TRIAGE_DEDUPLICATE_ISSUES != '' &&
needs.find-duplicates.outputs.duplicate_issues_csv != '' &&
(
github.event_name == 'issues' ||
github.event_name == 'workflow_dispatch' ||
(
github.event_name == 'issue_comment' &&
contains(github.event.comment.body, '@gemini-cli /deduplicate') &&
(
github.event.comment.author_association == 'OWNER' ||
github.event.comment.author_association == 'MEMBER' ||
github.event.comment.author_association == 'COLLABORATOR'
)
)
)
permissions:
issues: 'write'
timeout-minutes: 5
runs-on: 'ubuntu-latest'
steps:
- name: 'Generate GitHub App Token'
id: 'generate_token'
uses: 'actions/create-github-app-token@a8d616148505b5069dccd32f177bb87d7f39123b' # ratchet:actions/create-github-app-token@v2
with:
app-id: '${{ secrets.APP_ID }}'
private-key: '${{ secrets.PRIVATE_KEY }}'
permission-issues: 'write'
- name: 'Comment and Label Duplicate Issue'
uses: 'actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea'
env:
DUPLICATES_OUTPUT: '${{ needs.find-duplicates.outputs.duplicate_issues_csv }}'
with:
github-token: '${{ steps.generate_token.outputs.token || secrets.GITHUB_TOKEN }}'
script: |-
const rawCsv = process.env.DUPLICATES_OUTPUT;
core.info(`Raw duplicates CSV: ${rawCsv}`);
const duplicateIssues = rawCsv.split(',').map(s => s.trim()).filter(s => s);
if (duplicateIssues.length === 0) {
core.info('No duplicate issues found. Nothing to do.');
return;
}
const issueNumber = ${{ github.event.issue.number }};
function formatCommentBody(issues, updated = false) {
const header = updated
? 'Found possible duplicate issues (updated):'
: 'Found possible duplicate issues:';
const issuesList = issues.map(num => `- #${num}`).join('\n');
const footer = 'If you believe this is not a duplicate, please remove the `status/possible-duplicate` label.';
const magicComment = '<!-- gemini-cli-deduplication -->';
return `${header}\n\n${issuesList}\n\n${footer}\n${magicComment}`;
}
const newCommentBody = formatCommentBody(duplicateIssues);
const newUpdatedCommentBody = formatCommentBody(duplicateIssues, true);
const { data: comments } = await github.rest.issues.listComments({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: issueNumber,
});
const magicComment = '<!-- gemini-cli-deduplication -->';
const existingComment = comments.find(comment =>
comment.user.type === 'Bot' && comment.body.includes(magicComment)
);
let commentMade = false;
if (existingComment) {
// To check if the lists are the same, compare the formatted bodies without the headers.
const existingBodyForCompare = existingComment.body.substring(existingComment.body.indexOf('- #'));
const newBodyForCompare = newCommentBody.substring(newCommentBody.indexOf('- #'));
if (existingBodyForCompare.trim() !== newBodyForCompare.trim()) {
core.info(`Updating existing comment ${existingComment.id}`);
await github.rest.issues.updateComment({
owner: context.repo.owner,
repo: context.repo.repo,
comment_id: existingComment.id,
body: newUpdatedCommentBody,
});
commentMade = true;
} else {
core.info('Existing comment is up-to-date. Nothing to do.');
}
} else {
core.info('Creating new comment.');
await github.rest.issues.createComment({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: issueNumber,
body: newCommentBody,
});
commentMade = true;
}
if (commentMade) {
core.info('Adding "status/possible-duplicate" label.');
await github.rest.issues.addLabels({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: issueNumber,
labels: ['status/possible-duplicate'],
});
}

View File

@@ -61,8 +61,10 @@ jobs:
prompt: |-
## Role
You are an issue triage assistant. Analyze the current GitHub issue and apply the most appropriate existing labels. Use the available
tools to gather information; do not ask for information to be provided. Do not remove labels titled help wanted or good first issue.
You are an issue triage assistant. Analyze the current GitHub issue
and identify the most appropriate existing labels. Use the available
tools to gather information; do not ask for information to be
provided. Do not remove labels titled help wanted or good first issue.
## Steps
@@ -77,13 +79,17 @@ jobs:
## Guidelines
- Only use labels that already exist in the repository.
- Do not add comments or modify the issue content.
- Triage only the current issue.
- Apply only one area/ label.
- Apply only one kind/ label.
- Apply all applicable sub-area/* and priority/* labels based on the issue content. It's ok to have multiple of these.
- Once you categorize the issue, if it needs information, bump down the priority by 1 (e.g., a P0 would become a P1, a P1 would become a P2). P2 and P3 can stay as is in this scenario.
- Only use labels that already exist in the repository
- Do not add comments or modify the issue content
- Triage only the current issue
- Identify only one area/ label
- Identify only one kind/ label
- Identify all applicable sub-area/* and priority/* labels based on the issue content. It's ok to have multiple of these
- Once you categorize the issue, if it needs information, bump down the priority by 1 (e.g., a P0 would become a P1, a P1 would become a P2). P2 and P3 can stay as is in this scenario
- Reference all shell variables as "${VAR}" (with quotes and braces)
- Output only valid JSON format
- Do not include any explanation or additional text, just the JSON
Categorization Guidelines:
P0: Critical / Blocker
- A P0 bug is a catastrophic failure that demands immediate attention. It represents a complete showstopper for a significant portion of users or for the development process itself.

View File

@@ -0,0 +1,116 @@
name: '📋 Gemini Scheduled Issue Deduplication'
on:
schedule:
- cron: '0 * * * *' # Runs every hour
workflow_dispatch:
concurrency:
group: '${{ github.workflow }}'
cancel-in-progress: true
defaults:
run:
shell: 'bash'
jobs:
refresh-embeddings:
if: |-
${{ vars.TRIAGE_DEDUPLICATE_ISSUES != '' && github.repository == 'google-gemini/gemini-cli' }}
permissions:
contents: 'read'
id-token: 'write' # Required for WIF, see https://docs.github.com/en/actions/how-tos/secure-your-work/security-harden-deployments/oidc-in-google-cloud-platform#adding-permissions-settings
issues: 'read'
statuses: 'read'
packages: 'read'
timeout-minutes: 20
runs-on: 'ubuntu-latest'
steps:
- name: 'Checkout'
uses: 'actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8' # ratchet:actions/checkout@v5
- name: 'Log in to GitHub Container Registry'
uses: 'docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1' # ratchet:docker/login-action@v3
with:
registry: 'ghcr.io'
username: '${{ github.actor }}'
password: '${{ secrets.GITHUB_TOKEN }}'
- name: 'Run Gemini Issue Deduplication Refresh'
uses: 'google-github-actions/run-gemini-cli@a3bf79042542528e91937b3a3a6fbc4967ee3c31' # ratchet:google-github-actions/run-gemini-cli@v0
id: 'gemini_refresh_embeddings'
env:
GITHUB_TOKEN: '${{ secrets.GITHUB_TOKEN }}'
ISSUE_TITLE: '${{ github.event.issue.title }}'
ISSUE_BODY: '${{ github.event.issue.body }}'
ISSUE_NUMBER: '${{ github.event.issue.number }}'
REPOSITORY: '${{ github.repository }}'
FIRESTORE_PROJECT: '${{ vars.FIRESTORE_PROJECT }}'
with:
gcp_workload_identity_provider: '${{ vars.GCP_WIF_PROVIDER }}'
gcp_project_id: '${{ vars.GOOGLE_CLOUD_PROJECT }}'
gcp_location: '${{ vars.GOOGLE_CLOUD_LOCATION }}'
gcp_service_account: '${{ vars.SERVICE_ACCOUNT_EMAIL }}'
gemini_api_key: '${{ secrets.GEMINI_API_KEY }}'
use_vertex_ai: '${{ vars.GOOGLE_GENAI_USE_VERTEXAI }}'
use_gemini_code_assist: '${{ vars.GOOGLE_GENAI_USE_GCA }}'
settings: |-
{
"mcpServers": {
"issue_deduplication": {
"command": "docker",
"args": [
"run",
"-i",
"--rm",
"--network", "host",
"-e", "GITHUB_TOKEN",
"-e", "GEMINI_API_KEY",
"-e", "DATABASE_TYPE",
"-e", "FIRESTORE_DATABASE_ID",
"-e", "GCP_PROJECT",
"-e", "GOOGLE_APPLICATION_CREDENTIALS=/app/gcp-credentials.json",
"-v", "${GOOGLE_APPLICATION_CREDENTIALS}:/app/gcp-credentials.json",
"ghcr.io/google-gemini/gemini-cli-issue-triage@sha256:e3de1523f6c83aabb3c54b76d08940a2bf42febcb789dd2da6f95169641f94d3"
],
"env": {
"GITHUB_TOKEN": "${GITHUB_TOKEN}",
"GEMINI_API_KEY": "${{ secrets.GEMINI_API_KEY }}",
"DATABASE_TYPE":"firestore",
"GCP_PROJECT": "${FIRESTORE_PROJECT}",
"FIRESTORE_DATABASE_ID": "(default)",
"GOOGLE_APPLICATION_CREDENTIALS": "${GOOGLE_APPLICATION_CREDENTIALS}"
},
"enabled": true,
"timeout": 600000
}
},
"maxSessionTurns": 25,
"coreTools": [
"run_shell_command(echo)"
],
"telemetry": {
"enabled": true,
"target": "gcp"
}
}
prompt: |-
## Role
You are a database maintenance assistant for a GitHub issue deduplication system.
## Goal
Your sole responsibility is to refresh the embeddings for all open issues in the repository to ensure the deduplication database is up-to-date.
## Steps
1. **Extract Repository Information:** The repository is ${{ github.repository }}.
2. **Refresh Embeddings:** Call the `refresh` tool with the correct `repo`. Do not use the `force` parameter.
3. **Log Output:** Print the JSON output from the `refresh` tool to the logs.
## Guidelines
- Only use the `refresh` tool.
- Do not attempt to find duplicates or modify any issues.
- Your only task is to call the `refresh` tool and log its output.

View File

@@ -14,11 +14,8 @@ defaults:
shell: 'bash'
permissions:
contents: 'read'
id-token: 'write'
issues: 'write'
statuses: 'write'
packages: 'read'
jobs:
triage-issues:
@@ -70,18 +67,14 @@ jobs:
{
"maxSessionTurns": 25,
"coreTools": [
"run_shell_command(echo)",
"run_shell_command(gh label list)",
"run_shell_command(gh issue edit)",
"run_shell_command(gh issue view)",
"run_shell_command(gh issue list)"
"run_shell_command(echo)"
],
"sandbox": false
}
prompt: |-
## Role
You are an issue triage assistant. Analyze issues and apply
You are an issue triage assistant. Analyze issues and identify
appropriate labels. Use the available tools to gather information;
do not ask for information to be provided.
@@ -114,13 +107,15 @@ jobs:
## Guidelines
- Output only valid JSON format
- Do not include any explanation or additional text, just the JSON
- Only use labels that already exist in the repository.
- Do not add comments or modify the issue content.
- Do not remove labels titled help wanted or good first issue.
- Triage only the current issue.
- Apply only one area/ label
- Apply only one kind/ label (Do not apply kind/duplicate or kind/parent-issue)
- Apply all applicable sub-area/* and priority/* labels based on the issue content. It's ok to have multiple of these.
- Identify only one area/ label
- Identify only one kind/ label (Do not apply kind/duplicate or kind/parent-issue)
- Identify all applicable sub-area/* and priority/* labels based on the issue content. It's ok to have multiple of these.
- Once you categorize the issue, if it needs information, bump down the priority by 1 (e.g., a P0 would become a P1, a P1 would become a P2). P2 and P3 can stay as is in this scenario.
Categorization Guidelines:
P0: Critical / Blocker

View File

@@ -0,0 +1,98 @@
name: 'Assign Issue on Comment'
on:
issue_comment:
types:
- 'created'
concurrency:
group: '${{ github.workflow }}-${{ github.event.issue.number }}'
cancel-in-progress: true
defaults:
run:
shell: 'bash'
permissions:
contents: 'read'
id-token: 'write'
issues: 'write'
statuses: 'write'
packages: 'read'
jobs:
self-assign-issue:
if: |-
github.repository == 'google-gemini/gemini-cli' &&
github.event_name == 'issue_comment' &&
contains(github.event.comment.body, '/assign')
runs-on: 'ubuntu-latest'
steps:
- name: 'Generate GitHub App Token'
id: 'generate_token'
uses: 'actions/create-github-app-token@a8d616148505b5069dccd32f177bb87d7f39123b'
with:
app-id: '${{ secrets.APP_ID }}'
private-key: '${{ secrets.PRIVATE_KEY }}'
# Add 'assignments' write permission
permission-issues: 'write'
- name: 'Assign issue to user'
uses: 'actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea'
with:
github-token: '${{ steps.generate_token.outputs.token }}'
script: |
const issueNumber = context.issue.number;
const commenter = context.actor;
const owner = context.repo.owner;
const repo = context.repo.repo;
const MAX_ISSUES_ASSIGNED = 3;
// Search for open issues already assigned to the commenter in this repo
const { data: assignedIssues } = await github.rest.search.issuesAndPullRequests({
q: `is:issue repo:${owner}/${repo} assignee:${commenter} is:open`
});
if (assignedIssues.total_count >= MAX_ISSUES_ASSIGNED) {
await github.rest.issues.createComment({
owner: owner,
repo: repo,
issue_number: issueNumber,
body: `👋 @${commenter}! You currently have ${assignedIssues.total_count} issues assigned to you. Our policy allows at most ${MAX_ISSUES_ASSIGNED} issues assigned at once. Once you close out an existing issue, space opens up to take another. You can also unassign yourself from an existing issue, but please coordinate a hand-off if someone is expecting work on that issue.`
});
return; // exit
}
// Check if the issue is already assigned
const issue = await github.rest.issues.get({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: issueNumber,
});
if (issue.data.assignees.length > 0) {
// Comment that it's already assigned
await github.rest.issues.createComment({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: issueNumber,
body: `@${commenter} Thanks for taking interest but this issue is already assigned. We'd still love to have you contribute. Check out our [Help Wanted](https://github.com/google-gemini/gemini-cli/issues?q=is%3Aissue%20state%3Aopen%20label%3A%22help%20wanted%22) list for issues where we need some extra attention.`
});
return;
}
// If not taken, assign the user who commented
await github.rest.issues.addAssignees({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: issueNumber,
assignees: [commenter]
});
// Post a comment to confirm assignment
await github.rest.issues.createComment({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: issueNumber,
body: `👋 @${commenter}, you've been assigned to this issue! Thank you for taking the time to contribute. Make sure to check out our [contributing guidelines](https://github.com/google-gemini/gemini-cli/blob/main/CONTRIBUTING.md).`
});

View File

@@ -4,6 +4,8 @@ on:
schedule:
# Runs every day at midnight UTC for the nightly release.
- cron: '0 0 * * *'
# Runs every Tuesday at 23:59 UTC for the preview release.
- cron: '59 23 * * 2'
workflow_dispatch:
inputs:
version:
@@ -25,6 +27,11 @@ on:
required: false
type: 'boolean'
default: false
create_preview_release:
description: 'Auto-apply the preview release tag; the input version is ignored.'
required: false
type: 'boolean'
default: false
force_skip_tests:
description: 'Select to skip the "Run Tests" step. Production releases should run tests.'
required: false
@@ -51,22 +58,30 @@ jobs:
- name: 'Checkout'
uses: 'actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8' # ratchet:actions/checkout@v5
with:
ref: '${{ github.sha }}'
ref: '${{ github.event.inputs.ref || github.sha }}'
fetch-depth: 0
- name: 'Set booleans for simplified logic'
env:
CREATE_NIGHTLY_RELEASE: '${{ github.event.inputs.create_nightly_release }}'
CREATE_PREVIEW_RELEASE: '${{ github.event.inputs.create_preview_release }}'
EVENT_NAME: '${{ github.event_name }}'
CRON: '${{ github.event.schedule }}'
DRY_RUN_INPUT: '${{ github.event.inputs.dry_run }}'
id: 'vars'
run: |-
is_nightly="false"
if [[ "${EVENT_NAME}" == "schedule" || "${CREATE_NIGHTLY_RELEASE}" == "true" ]]; then
if [[ "${CRON}" == "0 0 * * *" || "${CREATE_NIGHTLY_RELEASE}" == "true" ]]; then
is_nightly="true"
fi
echo "is_nightly=${is_nightly}" >> "${GITHUB_OUTPUT}"
is_preview="false"
if [[ "${CRON}" == "59 23 * * 2" || "${CREATE_PREVIEW_RELEASE}" == "true" ]]; then
is_preview="true"
fi
echo "is_preview=${is_preview}" >> "${GITHUB_OUTPUT}"
is_dry_run="false"
if [[ "${DRY_RUN_INPUT}" == "true" ]]; then
is_dry_run="true"
@@ -96,7 +111,9 @@ jobs:
PREVIOUS_TAG=$(node scripts/get-previous-tag.js "$CURRENT_TAG" || echo "")
echo "PREVIOUS_TAG=${PREVIOUS_TAG}" >> "$GITHUB_OUTPUT"
env:
GITHUB_TOKEN: '${{ secrets.GITHUB_TOKEN }}'
IS_NIGHTLY: '${{ steps.vars.outputs.is_nightly }}'
IS_PREVIEW: '${{ steps.vars.outputs.is_preview }}'
MANUAL_VERSION: '${{ inputs.version }}'
- name: 'Run Tests'

2
.gitignore vendored
View File

@@ -47,3 +47,5 @@ packages/vscode-ide-companion/*.vsix
logs/
# GHA credentials
gha-creds-*.json
QWEN.md

20
.prettierignore Normal file
View File

@@ -0,0 +1,20 @@
**/bundle
**/coverage
**/dist
**/.git
**/node_modules
.docker
.DS_Store
.env
.gemini/
.idea
.integration-tests/
*.iml
*.tsbuildinfo
*.vsix
bower_components
eslint.config.js
**/generated
gha-creds-*.json
junit.xml
Thumbs.db

24
.vscode/launch.json vendored
View File

@@ -101,6 +101,24 @@
"env": {
"GEMINI_SANDBOX": "false"
}
},
{
"name": "Attach by Process ID",
"processId": "${command:PickProcess}",
"request": "attach",
"skipFiles": ["<node_internals>/**"],
"type": "node"
},
{
"type": "node",
"request": "launch",
"name": "Debug Current TS File",
"runtimeExecutable": "npx",
"runtimeArgs": ["tsx", "${file}"],
"skipFiles": ["<node_internals>/**"],
"cwd": "${workspaceFolder}",
"console": "integratedTerminal",
"env": {}
}
],
"inputs": [
@@ -115,6 +133,12 @@
"type": "promptString",
"description": "Enter your prompt for non-interactive mode",
"default": "Explain this code"
},
{
"id": "debugPort",
"type": "promptString",
"description": "Enter the debug port number (default: 9229)",
"default": "9229"
}
]
}

View File

@@ -1,5 +1,62 @@
# Changelog
## 0.0.14
- Added plan mode support for task planning.
- Fixed the unreliable editCorrector that injected extra escape characters.
- Fixed task tool dynamic updates.
- Added Qwen3-VL-Plus token limits (256K input, 32K output) and high-res support.
- Enhanced DashScope cache control.
## 0.0.13
- Added YOLO mode support for automatic vision model switching with CLI arguments and environment variables.
- Fixed ripgrep lazy loading to resolve VS Code IDE companion startup issues.
- Fixed authentication hang when selecting Qwen OAuth.
- Added OpenAI and Qwen OAuth authentication support to Zed ACP integration.
- Fixed output token limit for Qwen models.
- Fixed Markdown list display issues on Windows.
- Enhanced vision model instructions and documentation.
- Improved authentication method compatibility across different IDE integrations.
## 0.0.12
- Added vision model support for Qwen-OAuth authentication.
- Synced upstream `gemini-cli` to v0.3.4 with numerous improvements and bug fixes.
- Enhanced subagent functionality with system reminders and improved user experience.
- Added tool call type coercion for better compatibility.
- Fixed arrow key navigation issues on Windows.
- Fixed missing tool call chunks for OpenAI logging.
- Fixed system prompt issues to avoid malformed tool calls.
- Fixed terminal flicker when subagent is executing.
- Fixed duplicate subagents configuration when running in home directory.
- Fixed Esc key unable to cancel subagent dialog.
- Added confirmation prompt for `/init` command when context file exists.
- Added `skipLoopDetection` configuration option.
- Fixed `is_background` parameter reset issues.
- Enhanced Windows compatibility with multi-line paste handling.
- Improved subagent documentation and branding consistency.
- Fixed various linting errors and improved code quality.
- Miscellaneous improvements and bug fixes.
## 0.0.11
- Added subagents feature with file-based configuration system for specialized AI assistants.
- Added Welcome Back Dialog with project summary and enhanced quit options.
- Fixed performance issues with SharedTokenManager causing 20-minute delays.
- Fixed tool calls UI issues and improved user experience.
- Fixed credential clearing when switching authentication types.
- Enhanced subagent capabilities to use tools requiring user confirmation.
- Improved ReadManyFiles tool with shared line limits across files.
- Re-implemented tokenLimits class for better compatibility with Qwen and other model types.
- Fixed chunk validation to avoid unnecessary retries.
- Resolved EditTool naming inconsistency causing agent confusion loops.
- Fixed unexpected re-authentication when auth-token is expired.
- Added Terminal Bench integration tests.
- Updated multilingual documentation links in README.
- Fixed various Windows compatibility issues.
- Miscellaneous improvements and bug fixes.
## 0.0.10
- Synced upstream `gemini-cli` to v0.2.1.
@@ -35,7 +92,7 @@
- Added deterministic cache control for the DashScope provider.
- Added option to choose a project-level or global save location.
- Limited `grep` results to 25 items by default.
- `grep` now respects `.geminiignore`.
- `grep` now respects `.qwenignore`.
- Miscellaneous improvements and bug fixes.
## 0.0.7

193
QWEN.md
View File

@@ -1,193 +0,0 @@
## Building and running
Before submitting any changes, it is crucial to validate them by running the full preflight check. This command will build the repository, run all tests, check for type errors, and lint the code.
To run the full suite of checks, execute the following command:
```bash
npm run preflight
```
This single command ensures that your changes meet all the quality gates of the project. While you can run the individual steps (`build`, `test`, `typecheck`, `lint`) separately, it is highly recommended to use `npm run preflight` to ensure a comprehensive validation.
## Writing Tests
This project uses **Vitest** as its primary testing framework. When writing tests, aim to follow existing patterns. Key conventions include:
### Test Structure and Framework
- **Framework**: All tests are written using Vitest (`describe`, `it`, `expect`, `vi`).
- **File Location**: Test files (`*.test.ts` for logic, `*.test.tsx` for React components) are co-located with the source files they test.
- **Configuration**: Test environments are defined in `vitest.config.ts` files.
- **Setup/Teardown**: Use `beforeEach` and `afterEach`. Commonly, `vi.resetAllMocks()` is called in `beforeEach` and `vi.restoreAllMocks()` in `afterEach`.
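A minimal sketch of this structure, assuming a hypothetical `parseConfig` module co-located with its test:

```ts
import { describe, it, expect, beforeEach, afterEach, vi } from 'vitest';
import { parseConfig } from './parseConfig.js'; // hypothetical co-located source file

describe('parseConfig', () => {
  beforeEach(() => {
    // Clear call history and mock implementations before each test.
    vi.resetAllMocks();
  });

  afterEach(() => {
    // Restore any spied-on originals after each test.
    vi.restoreAllMocks();
  });

  it('returns an empty config for empty input', () => {
    expect(parseConfig('')).toEqual({});
  });
});
```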
### Mocking (`vi` from Vitest)
- **ES Modules**: Mock with `vi.mock('module-name', async (importOriginal) => { ... })`. Use `importOriginal` for selective mocking.
- _Example_: `vi.mock('os', async (importOriginal) => { const actual = await importOriginal(); return { ...actual, homedir: vi.fn() }; });`
- **Mocking Order**: For critical dependencies (e.g., `os`, `fs`) that affect module-level constants, place `vi.mock` at the _very top_ of the test file, before other imports.
- **Hoisting**: Use `const myMock = vi.hoisted(() => vi.fn());` if a mock function needs to be defined before its use in a `vi.mock` factory.
- **Mock Functions**: Create with `vi.fn()`. Define behavior with `mockImplementation()`, `mockResolvedValue()`, or `mockRejectedValue()`.
- **Spying**: Use `vi.spyOn(object, 'methodName')`. Restore spies with `mockRestore()` in `afterEach`.
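A sketch combining these conventions: a hoisted mock function used inside a `vi.mock` factory that selectively mocks `os.homedir()` (the test subject is illustrative):

```ts
import { describe, it, expect, vi } from 'vitest';
import * as os from 'os';

// Hoisted so the mock function exists before the vi.mock factory runs.
const homedirMock = vi.hoisted(() => vi.fn());

// Selectively mock os.homedir while keeping the rest of the real module.
vi.mock('os', async (importOriginal) => {
  const actual = await importOriginal<typeof import('os')>();
  return { ...actual, homedir: homedirMock };
});

describe('home directory resolution', () => {
  it('uses the mocked home directory', () => {
    homedirMock.mockReturnValue('/home/test-user');
    expect(os.homedir()).toBe('/home/test-user');
  });
});
```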
### Commonly Mocked Modules
- **Node.js built-ins**: `fs`, `fs/promises`, `os` (especially `os.homedir()`), `path`, `child_process` (`execSync`, `spawn`).
- **External SDKs**: `@google/genai`, `@modelcontextprotocol/sdk`.
- **Internal Project Modules**: Dependencies from other project packages are often mocked.
### React Component Testing (CLI UI - Ink)
- Use `render()` from `ink-testing-library`.
- Assert output with `lastFrame()`.
- Wrap components in necessary `Context.Provider`s.
- Mock custom React hooks and complex child components using `vi.mock()`.
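A sketch of the Ink testing pattern, using a hypothetical `Greeting` component wrapped in a hypothetical `ThemeContext` provider:

```tsx
import React from 'react';
import { describe, it, expect } from 'vitest';
import { Text } from 'ink';
import { render } from 'ink-testing-library';

// Hypothetical context and component used for illustration.
const ThemeContext = React.createContext<'dark' | 'light'>('dark');

function Greeting({ name }: { name: string }) {
  const theme = React.useContext(ThemeContext);
  return <Text>{`[${theme}] Hello, ${name}!`}</Text>;
}

describe('<Greeting />', () => {
  it('renders the greeting for the given name', () => {
    const { lastFrame } = render(
      <ThemeContext.Provider value="light">
        <Greeting name="Ada" />
      </ThemeContext.Provider>,
    );
    expect(lastFrame()).toContain('[light] Hello, Ada!');
  });
});
```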
### Asynchronous Testing
- Use `async/await`.
- For timers, use `vi.useFakeTimers()`, `vi.advanceTimersByTimeAsync()`, `vi.runAllTimersAsync()`.
- Test promise rejections with `await expect(promise).rejects.toThrow(...)`.
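A sketch of both patterns, using a hypothetical `delayedValue` helper:

```ts
import { describe, it, expect, vi } from 'vitest';

// Hypothetical helper that resolves after a delay.
function delayedValue<T>(value: T, ms: number): Promise<T> {
  return new Promise((resolve) => setTimeout(() => resolve(value), ms));
}

describe('delayedValue', () => {
  it('resolves once fake timers advance', async () => {
    vi.useFakeTimers();
    const pending = delayedValue('done', 5_000);

    // Advance virtual time instead of waiting in real time.
    await vi.advanceTimersByTimeAsync(5_000);
    await expect(pending).resolves.toBe('done');

    vi.useRealTimers();
  });

  it('surfaces rejections via rejects.toThrow', async () => {
    await expect(Promise.reject(new Error('boom'))).rejects.toThrow('boom');
  });
});
```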
### General Guidance
- When adding tests, first examine existing tests to understand and conform to established conventions.
- Pay close attention to the mocks at the top of existing test files; they reveal critical dependencies and how they are managed in a test environment.
## Git Repo
The main branch for this project is called "main".
## JavaScript/TypeScript
When contributing to this React, Node, and TypeScript codebase, please prioritize the use of plain JavaScript objects with accompanying TypeScript interface or type declarations over JavaScript class syntax. This approach offers significant advantages, especially concerning interoperability with React and overall code maintainability.
### Preferring Plain Objects over Classes
JavaScript classes, by their nature, are designed to encapsulate internal state and behavior. While this can be useful in some object-oriented paradigms, it often introduces unnecessary complexity and friction when working with React's component-based architecture. Here's why plain objects are preferred:
- Seamless React Integration: React components thrive on explicit props and state management. Classes' tendency to store internal state directly within instances can make prop and state propagation harder to reason about and maintain. Plain objects, on the other hand, are inherently immutable (when used thoughtfully) and can be easily passed as props, simplifying data flow and reducing unexpected side effects.
- Reduced Boilerplate and Increased Conciseness: Classes often promote the use of constructors, this binding, getters, setters, and other boilerplate that can unnecessarily bloat code. TypeScript interface and type declarations provide powerful static type checking without the runtime overhead or verbosity of class definitions. This allows for more succinct and readable code, aligning with JavaScript's strengths in functional programming.
- Enhanced Readability and Predictability: Plain objects, especially when their structure is clearly defined by TypeScript interfaces, are often easier to read and understand. Their properties are directly accessible, and there's no hidden internal state or complex inheritance chains to navigate. This predictability leads to fewer bugs and a more maintainable codebase.
- Simplified Immutability: While not strictly enforced, plain objects encourage an immutable approach to data. When you need to modify an object, you typically create a new one with the desired changes, rather than mutating the original. This pattern aligns perfectly with React's reconciliation process and helps prevent subtle bugs related to shared mutable state.
- Better Serialization and Deserialization: Plain JavaScript objects are naturally easy to serialize to JSON and deserialize back, which is a common requirement in web development (e.g., for API communication or local storage). Classes, with their methods and prototypes, can complicate this process.
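A brief illustration of the preference (all names are illustrative):

```ts
// A plain object shaped by an interface, built by a small factory function.
interface SessionConfig {
  readonly model: string;
  readonly maxTurns: number;
}

function createSessionConfig(model: string, maxTurns = 25): SessionConfig {
  return { model, maxTurns };
}

// Updates create a new object rather than mutating the original.
function withMaxTurns(config: SessionConfig, maxTurns: number): SessionConfig {
  return { ...config, maxTurns };
}

const base = createSessionConfig('example-model');
const extended = withMaxTurns(base, 50);
console.log(base.maxTurns, extended.maxTurns); // 25 50
```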
### Embracing ES Module Syntax for Encapsulation
Rather than relying on Java-esque private or public class members, which can be verbose and sometimes limit flexibility, we strongly prefer leveraging ES module syntax (`import`/`export`) for encapsulating private and public APIs.
- Clearer Public API Definition: With ES modules, anything that is exported is part of the public API of that module, while anything not exported is inherently private to that module. This provides a very clear and explicit way to define what parts of your code are meant to be consumed by other modules.
- Enhanced Testability (Without Exposing Internals): By default, unexported functions or variables are not accessible from outside the module. This encourages you to test the public API of your modules, rather than their internal implementation details. If you find yourself needing to spy on or stub an unexported function for testing purposes, it's often a "code smell" indicating that the function might be a good candidate for extraction into its own separate, testable module with a well-defined public API. This promotes a more robust and maintainable testing strategy.
- Reduced Coupling: Explicitly defined module boundaries through import/export help reduce coupling between different parts of your codebase. This makes it easier to refactor, debug, and understand individual components in isolation.
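A small illustration of this encapsulation style (module and function names are illustrative):

```ts
// tokenBudget.ts (illustrative module)

// Not exported: private to this module and not part of its public API.
function clamp(value: number, min: number, max: number): number {
  return Math.min(Math.max(value, min), max);
}

// Exported: the public API that other modules (and tests) consume.
export function remainingTokens(limit: number, used: number): number {
  return clamp(limit - used, 0, limit);
}
```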
### Avoiding `any` Types and Type Assertions; Preferring `unknown`
TypeScript's power lies in its ability to provide static type checking, catching potential errors before your code runs. To fully leverage this, it's crucial to avoid the `any` type and be judicious with type assertions.
- **The Dangers of `any`**: Using `any` effectively opts out of TypeScript's type checking for that particular variable or expression. While it might seem convenient in the short term, it introduces significant risks:
- **Loss of Type Safety**: You lose all the benefits of type checking, making it easy to introduce runtime errors that TypeScript would otherwise have caught.
- **Reduced Readability and Maintainability**: Code with `any` types is harder to understand and maintain, as the expected type of data is no longer explicitly defined.
- **Masking Underlying Issues**: Often, the need for `any` indicates a deeper problem in the design of your code or the way you're interacting with external libraries. It's a sign that you might need to refine your types or refactor your code.
- **Preferring `unknown` over `any`**: When you absolutely cannot determine the type of a value at compile time, and you're tempted to reach for `any`, consider using `unknown` instead. `unknown` is a type-safe counterpart to `any`. While a variable of type `unknown` can hold any value, you must perform type narrowing (e.g., using `typeof` or `instanceof` checks, or a type assertion) before you can perform any operations on it. This forces you to handle the `unknown` type explicitly, preventing accidental runtime errors.
```ts
function processValue(value: unknown) {
if (typeof value === 'string') {
// value is now safely a string
console.log(value.toUpperCase());
} else if (typeof value === 'number') {
// value is now safely a number
console.log(value * 2);
}
// Without narrowing, you cannot access properties or methods on 'value'
// console.log(value.someProperty); // Error: Object is of type 'unknown'.
}
```
- **Type Assertions (`as Type`) - Use with Caution**: Type assertions tell the TypeScript compiler, "Trust me, I know what I'm doing; this is definitely of this type." While there are legitimate use cases (e.g., when dealing with external libraries that don't have perfect type definitions, or when you have more information than the compiler), they should be used sparingly and with extreme caution.
- **Bypassing Type Checking**: Like `any`, type assertions bypass TypeScript's safety checks. If your assertion is incorrect, you introduce a runtime error that TypeScript would not have warned you about.
- **Code Smell in Testing**: A common scenario where `any` or type assertions might be tempting is when trying to test "private" implementation details (e.g., spying on or stubbing an unexported function within a module). This is a strong indication of a "code smell" in your testing strategy and potentially your code structure. Instead of trying to force access to private internals, consider whether those internal details should be refactored into a separate module with a well-defined public API. This makes them inherently testable without compromising encapsulation.
### Type narrowing `switch` clauses
Use the `checkExhaustive` helper in the default clause of a switch statement.
This will ensure that all of the possible options within the value or
enumeration are used.
This helper method can be found in `packages/cli/src/utils/checks.ts`
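A sketch of the pattern; the helper below is a local stand-in, since the exact signature of the real `checkExhaustive` in `packages/cli/src/utils/checks.ts` is not shown here:

```ts
// Local stand-in for the project's checkExhaustive helper (assumed shape).
function checkExhaustive(value: never): never {
  throw new Error(`Unexpected value: ${String(value)}`);
}

type ApprovalMode = 'plan' | 'default' | 'auto-edit' | 'yolo';

function describeMode(mode: ApprovalMode): string {
  switch (mode) {
    case 'plan':
      return 'Analyze only; do not modify files or run commands.';
    case 'default':
      return 'Require approval for file edits or shell commands.';
    case 'auto-edit':
      return 'Automatically approve file edits.';
    case 'yolo':
      return 'Automatically approve all tools.';
    default:
      // If a new ApprovalMode member is added but not handled above,
      // `mode` is no longer `never` here and this call fails to type-check.
      return checkExhaustive(mode);
  }
}
```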
### Embracing JavaScript's Array Operators
To further enhance code cleanliness and promote safe functional programming practices, leverage JavaScript's rich set of array operators as much as possible. Methods like `.map()`, `.filter()`, `.reduce()`, `.slice()`, `.sort()`, and others are incredibly powerful for transforming and manipulating data collections in an immutable and declarative way.
Using these operators:
- Promotes Immutability: Most array operators return new arrays, leaving the original array untouched. This functional approach helps prevent unintended side effects and makes your code more predictable.
- Improves Readability: Chaining array operators often leads to more concise and expressive code than traditional for loops or imperative logic. The intent of the operation is clear at a glance.
- Facilitates Functional Programming: These operators are cornerstones of functional programming, encouraging the creation of pure functions that take inputs and produce outputs without causing side effects. This paradigm is highly beneficial for writing robust and testable code that pairs well with React.
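A small illustration with hypothetical label data:

```ts
interface IssueLabel {
  name: string;
  priority: number;
}

const labels: IssueLabel[] = [
  { name: 'kind/bug', priority: 1 },
  { name: 'area/cli', priority: 2 },
  { name: 'kind/feature', priority: 3 },
];

// Declarative, immutable transformation: `labels` itself is never mutated.
const kindLabelNames = labels
  .filter((label) => label.name.startsWith('kind/'))
  .map((label) => label.name)
  .sort();

console.log(kindLabelNames); // ['kind/bug', 'kind/feature']
console.log(labels.length); // 3, unchanged
```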
By consistently applying these principles, we can maintain a codebase that is not only efficient and performant but also a joy to work with, both now and in the future.
## React (mirrored and adjusted from [react-mcp-server](https://github.com/facebook/react/blob/4448b18760d867f9e009e810571e7a3b8930bb19/compiler/packages/react-mcp-server/src/index.ts#L376C1-L441C94))
### Role
You are a React assistant that helps users write more efficient and optimizable React code. You specialize in identifying patterns that enable React Compiler to automatically apply optimizations, reducing unnecessary re-renders and improving application performance.
### Follow these guidelines in all code you produce and suggest
Use functional components with Hooks: Do not generate class components or use old lifecycle methods. Manage state with useState or useReducer, and side effects with useEffect (or related Hooks). Always prefer functions and Hooks for any new component logic.
Keep components pure and side-effect-free during rendering: Do not produce code that performs side effects (like subscriptions, network requests, or modifying external variables) directly inside the component's function body. Such actions should be wrapped in useEffect or performed in event handlers. Ensure your render logic is a pure function of props and state.
Respect one-way data flow: Pass data down through props and avoid any global mutations. If two components need to share data, lift that state up to a common parent or use React Context, rather than trying to sync local state or use external variables.
Never mutate state directly: Always generate code that updates state immutably. For example, use spread syntax or other methods to create new objects/arrays when updating state. Do not use assignments like state.someValue = ... or array mutations like array.push() on state variables. Use the state setter (setState from useState, etc.) to update state.
Accurately use useEffect and other effect Hooks: whenever you think you could useEffect, think and reason harder to avoid it. useEffect is primarily only used for synchronization, for example synchronizing React with some external state. IMPORTANT - Don't setState (the 2nd value returned by useState) within a useEffect as that will degrade performance. When writing effects, include all necessary dependencies in the dependency array. Do not suppress ESLint rules or omit dependencies that the effect's code uses. Structure the effect callbacks to handle changing values properly (e.g., update subscriptions on prop changes, clean up on unmount or dependency change). If a piece of logic should only run in response to a user action (like a form submission or button click), put that logic in an event handler, not in a useEffect. Where possible, useEffects should return a cleanup function.
Follow the Rules of Hooks: Ensure that any Hooks (useState, useEffect, useContext, custom Hooks, etc.) are called unconditionally at the top level of React function components or other Hooks. Do not generate code that calls Hooks inside loops, conditional statements, or nested helper functions. Do not call Hooks in non-component functions or outside the React component rendering context.
Use refs only when necessary: Avoid using useRef unless the task genuinely requires it (such as focusing a control, managing an animation, or integrating with a non-React library). Do not use refs to store application state that should be reactive. If you do use refs, never write to or read from ref.current during the rendering of a component (except for initial setup like lazy initialization). Any ref usage should not affect the rendered output directly.
Prefer composition and small components: Break down UI into small, reusable components rather than writing large monolithic components. The code you generate should promote clarity and reusability by composing components together. Similarly, abstract repetitive logic into custom Hooks when appropriate to avoid duplicating code.
Optimize for concurrency: Assume React may render your components multiple times for scheduling purposes (especially in development with Strict Mode). Write code that remains correct even if the component function runs more than once. For instance, avoid side effects in the component body and use functional state updates (e.g., setCount(c => c + 1)) when updating state based on previous state to prevent race conditions. Always include cleanup functions in effects that subscribe to external resources. Don't write useEffects for "do this when this changes" side effects. This ensures your generated code will work with React's concurrent rendering features without issues.
Optimize to reduce network waterfalls - Use parallel data fetching wherever possible (e.g., start multiple requests at once rather than one after another). Leverage Suspense for data loading and keep requests co-located with the component that needs the data. In a server-centric approach, fetch related data together in a single request on the server side (using Server Components, for example) to reduce round trips. Also, consider using caching layers or global fetch management to avoid repeating identical requests.
Rely on React Compiler - useMemo, useCallback, and React.memo can be omitted if React Compiler is enabled. Avoid premature optimization with manual memoization. Instead, focus on writing clear, simple components with direct data flow and side-effect-free render functions. Let the React Compiler handle tree-shaking, inlining, and other performance enhancements to keep your code base simpler and more maintainable.
Design for a good user experience - Provide clear, minimal, and non-blocking UI states. When data is loading, show lightweight placeholders (e.g., skeleton screens) rather than intrusive spinners everywhere. Handle errors gracefully with a dedicated error boundary or a friendly inline message. Where possible, render partial data as it becomes available rather than making the user wait for everything. Suspense allows you to declare the loading states in your component tree in a natural way, preventing “flash” states and improving perceived performance.
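A sketch that applies several of these guidelines at once (a functional component, an effect used only to synchronize with an external source and returning a cleanup, and event-driven logic in a handler with a functional state update). The `onStatusChange` source is a hypothetical stub:

```tsx
import React, { useEffect, useState } from 'react';
import { Text, useInput } from 'ink';

// Hypothetical external status source, stubbed for illustration.
function onStatusChange(listener: (status: string) => void): () => void {
  const timer = setInterval(() => listener('ok'), 1_000);
  return () => clearInterval(timer);
}

export function StatusBadge() {
  const [status, setStatus] = useState('unknown');
  const [refreshCount, setRefreshCount] = useState(0);

  // Effect only synchronizes with the external source and returns a cleanup.
  useEffect(() => {
    const unsubscribe = onStatusChange((next) => setStatus(next));
    return unsubscribe;
  }, []);

  // User-triggered logic stays in a handler; the update is functional so it
  // remains correct if React renders or batches more than once.
  useInput((input) => {
    if (input === 'r') {
      setRefreshCount((count) => count + 1);
    }
  });

  return (
    <Text>
      {status} (refreshed {refreshCount} times)
    </Text>
  );
}
```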
### Process
1. Analyze the user's code for optimization opportunities:
- Check for React anti-patterns that prevent compiler optimization
- Look for component structure issues that limit compiler effectiveness
- Think about each suggestion you are making and consult React docs for best practices
2. Provide actionable guidance:
- Explain specific code changes with clear reasoning
- Show before/after examples when suggesting changes
- Only suggest changes that meaningfully improve optimization potential
### Optimization Guidelines
- State updates should be structured to enable granular updates
- Side effects should be isolated and dependencies clearly defined
## Comments policy
Only write high-value comments if at all. Avoid talking to the user through comments.
## General style requirements
Use hyphens instead of underscores in flag names (e.g. `my-flag` instead of `my_flag`).

View File

@@ -1,162 +0,0 @@
# Gemini CLI
[![Gemini CLI CI](https://github.com/google-gemini/gemini-cli/actions/workflows/ci.yml/badge.svg)](https://github.com/google-gemini/gemini-cli/actions/workflows/ci.yml)
![Gemini CLI Screenshot](./docs/assets/gemini-screenshot.png)
This repository contains the Gemini CLI, a command-line AI workflow tool that connects to your
tools, understands your code and accelerates your workflows.
With the Gemini CLI you can:
- Query and edit large codebases in and beyond Gemini's 1M token context window.
- Generate new apps from PDFs or sketches, using Gemini's multimodal capabilities.
- Automate operational tasks, like querying pull requests or handling complex rebases.
- Use tools and MCP servers to connect new capabilities, including [media generation with Imagen,
Veo or Lyria](https://github.com/GoogleCloudPlatform/vertex-ai-creative-studio/tree/main/experiments/mcp-genmedia)
- Ground your queries with the [Google Search](https://ai.google.dev/gemini-api/docs/grounding)
tool, built in to Gemini.
## Quickstart
1. **Prerequisites:** Ensure you have [Node.js version 20](https://nodejs.org/en/download) or higher installed.
2. **Run the CLI:** Execute the following command in your terminal:
```bash
npx https://github.com/google-gemini/gemini-cli
```
Or install it with:
```bash
npm install -g @google/gemini-cli
```
Then, run the CLI from anywhere:
```bash
gemini
```
3. **Pick a color theme**
4. **Authenticate:** When prompted, sign in with your personal Google account. This will grant you up to 60 model requests per minute and 1,000 model requests per day using Gemini.
You are now ready to use the Gemini CLI!
### Use a Gemini API key:
The Gemini API provides a free tier with [100 requests per day](https://ai.google.dev/gemini-api/docs/rate-limits#free-tier) using Gemini 2.5 Pro, control over which model you use, and access to higher rate limits (with a paid plan):
1. Generate a key from [Google AI Studio](https://aistudio.google.com/apikey).
2. Set it as an environment variable in your terminal. Replace `YOUR_API_KEY` with your generated key.
```bash
export GEMINI_API_KEY="YOUR_API_KEY"
```
3. (Optionally) Upgrade your Gemini API project to a paid plan on the API key page (will automatically unlock [Tier 1 rate limits](https://ai.google.dev/gemini-api/docs/rate-limits#tier-1))
### Use a Vertex AI API key:
The Vertex AI API provides a [free tier](https://cloud.google.com/vertex-ai/generative-ai/docs/start/express-mode/overview) using express mode for Gemini 2.5 Pro, control over which model you use, and access to higher rate limits with a billing account:
1. Generate a key from [Google Cloud](https://cloud.google.com/vertex-ai/generative-ai/docs/start/api-keys).
2. Set it as an environment variable in your terminal. Replace `YOUR_API_KEY` with your generated key and set GOOGLE_GENAI_USE_VERTEXAI to true
```bash
export GOOGLE_API_KEY="YOUR_API_KEY"
export GOOGLE_GENAI_USE_VERTEXAI=true
```
3. (Optionally) Add a billing account on your project to get access to [higher usage limits](https://cloud.google.com/vertex-ai/generative-ai/docs/quotas)
For other authentication methods, including Google Workspace accounts, see the [authentication](./docs/cli/authentication.md) guide.
## Examples
Once the CLI is running, you can start interacting with Gemini from your shell.
You can start a project from a new directory:
```sh
cd new-project/
gemini
> Write me a Gemini Discord bot that answers questions using a FAQ.md file I will provide
```
Or work with an existing project:
```sh
git clone https://github.com/google-gemini/gemini-cli
cd gemini-cli
gemini
> Give me a summary of all of the changes that went in yesterday
```
### Next steps
- Learn how to [contribute to or build from the source](./CONTRIBUTING.md).
- Explore the available **[CLI Commands](./docs/cli/commands.md)**.
- If you encounter any issues, review the **[troubleshooting guide](./docs/troubleshooting.md)**.
- For more comprehensive documentation, see the [full documentation](./docs/index.md).
- Take a look at some [popular tasks](#popular-tasks) for more inspiration.
- Check out our **[Official Roadmap](./ROADMAP.md)**
### Troubleshooting
Head over to the [troubleshooting guide](docs/troubleshooting.md) if you're
having issues.
## Popular tasks
### Explore a new codebase
Start by `cd`ing into an existing or newly-cloned repository and running `gemini`.
```text
> Describe the main pieces of this system's architecture.
```
```text
> What security mechanisms are in place?
```
### Work with your existing code
```text
> Implement a first draft for GitHub issue #123.
```
```text
> Help me migrate this codebase to the latest version of Java. Start with a plan.
```
### Automate your workflows
Use MCP servers to integrate your local system tools with your enterprise collaboration suite.
```text
> Make me a slide deck showing the git history from the last 7 days, grouped by feature and team member.
```
```text
> Make a full-screen web app for a wall display to show our most interacted-with GitHub issues.
```
### Interact with your system
```text
> Convert all the images in this directory to png, and rename them to use dates from the exif data.
```
```text
> Organize my PDF invoices by month of expenditure.
```
### Uninstall
Head over to the [Uninstall](docs/Uninstall.md) guide for uninstallation instructions.
## Terms of Service and Privacy Notice
For details on the terms of service and privacy notice applicable to your use of Gemini CLI, see the [Terms of Service and Privacy Notice](./docs/tos-privacy.md).

View File

@@ -54,6 +54,7 @@ For detailed setup instructions, see [Authorization](#authorization).
- **Code Understanding & Editing** - Query and edit large codebases beyond traditional context window limits
- **Workflow Automation** - Automate operational tasks like handling pull requests and complex rebases
- **Enhanced Parser** - Adapted parser specifically optimized for Qwen-Coder models
- **Vision Model Support** - Automatically detect images in your input and seamlessly switch to vision-capable models for multimodal analysis
## Installation
@@ -121,6 +122,58 @@ Create or edit `.qwen/settings.json` in your home directory:
> 📝 **Note**: Session token limit applies to a single conversation, not cumulative API calls.
### Vision Model Configuration
Qwen Code includes intelligent vision model auto-switching that detects images in your input and can automatically switch to vision-capable models for multimodal analysis. **This feature is enabled by default** - when you include images in your queries, you'll see a dialog asking how you'd like to handle the vision model switch.
#### Skip the Switch Dialog (Optional)
If you don't want to see the interactive dialog each time, configure the default behavior in your `.qwen/settings.json`:
```json
{
"experimental": {
"vlmSwitchMode": "once"
}
}
```
**Available modes:**
- **`"once"`** - Switch to vision model for this query only, then revert
- **`"session"`** - Switch to vision model for the entire session
- **`"persist"`** - Continue with current model (no switching)
- **Not set** - Show interactive dialog each time (default)
#### Command Line Override
You can also set the behavior via command line:
```bash
# Switch once per query
qwen --vlm-switch-mode once
# Switch for entire session
qwen --vlm-switch-mode session
# Never switch automatically
qwen --vlm-switch-mode persist
```
#### Disable Vision Models (Optional)
To completely disable vision model support, add to your `.qwen/settings.json`:
```json
{
"experimental": {
"visionModelPreview": false
}
}
```
> 💡 **Tip**: In YOLO mode (`--yolo`), vision switching happens automatically without prompts when images are detected.
### Authorization
Choose your preferred authentication method based on your needs:

View File

@@ -1,63 +0,0 @@
# Qwen CLI Roadmap
The [Official Gemini CLI Roadmap](https://github.com/orgs/google-gemini/projects/11/)
Gemini CLI is an open-source AI agent that brings the power of Gemini directly into your terminal. It provides lightweight access to Gemini, giving you the most direct path from your prompt to our model.
This document outlines our approach to the Gemini CLI roadmap. Here, you'll find our guiding principles and a breakdown of the key areas we are
focused on for development. Our roadmap is not a static list but a dynamic set of priorities that are tracked live in our GitHub Issues.
As an [Apache 2.0 open source project](https://github.com/google-gemini/gemini-cli?tab=Apache-2.0-1-ov-file#readme), we appreciate and welcome [public contributions](https://github.com/google-gemini/gemini-cli/blob/main/CONTRIBUTING.md), and will give first priority to those contributions aligned with our roadmap. If you want to propose a new feature or change to our roadmap, please start by [opening an issue for discussion](https://github.com/google-gemini/gemini-cli/issues/new/choose).
## Disclaimer
This roadmap represents our current thinking and is for informational purposes only. It is not a commitment or a guarantee of future delivery. The development, release, and timing of any features are subject to change, and we may update the roadmap based on community discussions as well as when our priorities evolve.
## Guiding Principles
Our development is guided by the following principles:
- **Power & Simplicity:** Deliver access to state-of-the-art Gemini models with an intuitive and easy-to-use lightweight command-line interface.
- **Extensibility:** An adaptable agent to help you with a variety of use cases and environments along with the ability to run these agents anywhere.
- **Intelligent:** Gemini CLI should be reliably ranked among the best agentic tools as measured by benchmarks like SWE Bench, Terminal Bench, and CSAT.
- **Free and Open Source:** Foster a thriving open source community where cost isn't a barrier to personal use, and PRs get merged quickly. This means resolving and closing issues, pull requests, and discussion posts quickly.
## How the Roadmap Works
Our roadmap is managed directly through GitHub Issues. See our entry point Roadmap Issue [here](https://github.com/google-gemini/gemini-cli/issues/4191). This approach allows for transparency and gives you a direct way to learn more or get involved with any specific initiative. All our roadmap items will be tagged as Type:`Feature` and Label:`maintainer` for features we are actively working on, or Type:`Task` and Label:`maintainer` for a more detailed list of tasks.
Issues are organized to provide key information at a glance:
- **Target Quarter:** `Milestone` denotes the anticipated delivery timeline.
- **Feature Area:** Labels such as `area/model` or `area/tooling` categorize the work.
- **Issue Type:** _Workstream_ => _Epics_ => _Features_ => _Tasks|Bugs_
To see what we're working on, you can filter our issues by these dimensions. See all our items [here](https://github.com/orgs/google-gemini/projects/11/views/19)
## Focus Areas
To better organize our efforts, we categorize our work into several key feature areas. These labels are used on our GitHub Issues to help you filter and
find initiatives that interest you.
- **Authentication:** Secure user access via API keys, Gemini Code Assist login etc.
- **Model:** Support new Gemini models, multi-modality, local execution, and performance tuning.
- **User Experience:** Improve the CLI's usability, performance, interactive features, and documentation.
- **Tooling:** Built-in tools and the MCP ecosystem.
- **Core:** Core functionality of the CLI
- **Extensibility:** Bringing Gemini CLI to other surfaces e.g. GitHub.
- **Contribution:** Improve the contribution process via test automation and CI/CD pipeline enhancements.
- **Platform:** Manage installation, OS support, and the underlying CLI framework.
- **Quality:** Focus on testing, reliability, performance, and overall product quality.
- **Background Agents:** Enable long-running, autonomous tasks and proactive assistance.
- **Security and Privacy:** For all things related to security and privacy
## How to Contribute
Gemini CLI is an open-source project, and we welcome contributions from the community! Whether you're a developer, a designer, or just an enthusiastic user, you can find our [Community Guidelines here](https://github.com/google-gemini/gemini-cli/blob/main/CONTRIBUTING.md) to learn how to get started. There are many ways to get involved:
- **Roadmap:** Please review and find areas in our [roadmap](https://github.com/google-gemini/gemini-cli/issues/4191) that you would like to contribute to. Contributions based on this will be easiest to integrate with.
- **Report Bugs:** If you find an issue, please create a [bug](https://github.com/google-gemini/gemini-cli/issues/new?template=bug_report.yml) with as much detail as possible. If you believe it is a critical breaking issue preventing direct CLI usage, please tag it as `priority/p0`.
- **Suggest Features:** Have a great idea? We'd love to hear it! Open a [feature request](https://github.com/google-gemini/gemini-cli/issues/new?template=feature_request.yml).
- **Contribute Code:** Check out our [CONTRIBUTING.md](https://github.com/google-gemini/gemini-cli/blob/main/CONTRIBUTING.md) file for guidelines on how to submit pull requests. We have a list of "good first issues" for new contributors.
- **Write Documentation:** Help us improve our documentation, tutorials, and examples.
We are excited about the future of Gemini CLI and look forward to building it with you!

Binary file not shown (image changed; new size: 381 KiB).

View File

@@ -18,8 +18,8 @@ Slash commands provide meta-level control over the CLI itself.
- **Description:** Saves the current conversation history. You must add a `<tag>` for identifying the conversation state.
- **Usage:** `/chat save <tag>`
- **Details on Checkpoint Location:** The default locations for saved chat checkpoints are:
- Linux/macOS: `~/.config/qwen-code/checkpoints/`
- Windows: `C:\Users\<YourUsername>\AppData\Roaming\qwen-code\checkpoints\`
- Linux/macOS: `~/.qwen/tmp/<project_hash>/`
- Windows: `C:\Users\<YourUsername>\.qwen\tmp\<project_hash>\`
- When you run `/chat list`, the CLI only scans these specific directories to find available checkpoints.
- **Note:** These checkpoints are for manually saving and resuming conversation states. For automatic checkpoints created before file modifications, see the [Checkpointing documentation](../checkpointing.md).
- **`resume`**
@@ -124,6 +124,18 @@ Slash commands provide meta-level control over the CLI itself.
- **`/auth`**
- **Description:** Open a dialog that lets you change the authentication method.
- **`/approval-mode`**
- **Description:** Change the approval mode for tool usage.
- **Usage:** `/approval-mode [mode] [--session|--project|--user]`
- **Available Modes:**
- **`plan`**: Analyze only; do not modify files or execute commands
- **`default`**: Require approval for file edits or shell commands
- **`auto-edit`**: Automatically approve file edits
- **`yolo`**: Automatically approve all tools
- **Examples:**
- `/approval-mode plan --project` (persist plan mode for this project)
- `/approval-mode yolo --user` (persist YOLO mode for this user across projects)
- **`/about`**
- **Description:** Show version info. Please share this information when filing issues.
@@ -143,6 +155,7 @@ Slash commands provide meta-level control over the CLI itself.
- [**`/tools`**](../tools/index.md)
- **Description:** Display a list of tools that are currently available within Qwen Code.
- **Usage:** `/tools [desc]`
- **Sub-commands:**
- **`desc`** or **`descriptions`**:
- **Description:** Show detailed descriptions of each tool, including each tool's name with its full description as provided to the model.
@@ -313,7 +326,7 @@ When a custom command attempts to execute a shell command, Qwen Code will now pr
1. **Inject Commands:** Use the `!{...}` syntax.
2. **Argument Substitution:** If `{{args}}` is present inside the block, it is automatically shell-escaped (see [Context-Aware Injection](#1-context-aware-injection-with-args) above).
3. **Robust Parsing:** The parser correctly handles complex shell commands that include nested braces, such as JSON payloads.
3. **Robust Parsing:** The parser correctly handles complex shell commands that include nested braces, such as JSON payloads. **Note:** The content inside `!{...}` must have balanced braces (`{` and `}`). If you need to execute a command containing unbalanced braces, consider wrapping it in an external script file and calling the script within the `!{...}` block (see the sketch after this list).
4. **Security Check and Confirmation:** The CLI performs a security check on the final, resolved command (after arguments are escaped and substituted). A dialog will appear showing the exact command(s) to be executed.
5. **Execution and Error Reporting:** The command is executed. If the command fails, the output injected into the prompt will include the error messages (stderr) followed by a status line, e.g., `[Shell command exited with code 1]`. This helps the model understand the context of the failure.
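For instance, a minimal sketch of that external-script workaround could look like the command below; the command name and the `scripts/report.sh` path are hypothetical, and any braces that would otherwise be unbalanced live inside the script rather than in the `!{...}` block.
```toml
# In: <project>/.qwen/commands/report.toml (hypothetical command)
# Invoked via: /report
description = "Summarizes the output of a script whose command line contains unbalanced braces."
prompt = """
Summarize the following report output:

!{./scripts/report.sh}
"""
```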
@@ -341,6 +354,41 @@ Please generate a Conventional Commit message based on the following git diff:
When you run `/git:commit`, the CLI first executes `git diff --staged`, then replaces `!{git diff --staged}` with the output of that command before sending the final, complete prompt to the model.
##### 4. Injecting File Content with `@{...}`
You can directly embed the content of a file or a directory listing into your prompt using the `@{...}` syntax. This is useful for creating commands that operate on specific files.
**How It Works:**
- **File Injection**: `@{path/to/file.txt}` is replaced by the content of `file.txt`.
- **Multimodal Support**: If the path points to a supported image (e.g., PNG, JPEG), PDF, audio, or video file, it will be correctly encoded and injected as multimodal input. Other binary files are handled gracefully and skipped.
- **Directory Listing**: `@{path/to/dir}` traverses the directory, and each file within it and its subdirectories is inserted into the prompt. This respects `.gitignore` and `.qwenignore` if enabled.
- **Workspace-Aware**: The command searches for the path in the current directory and any other workspace directories. Absolute paths are allowed if they are within the workspace.
- **Processing Order**: File content injection with `@{...}` is processed _before_ shell commands (`!{...}`) and argument substitution (`{{args}}`).
- **Parsing**: The parser requires the content inside `@{...}` (the path) to have balanced braces (`{` and `}`).
**Example (`review.toml`):**
This command injects the content of a _fixed_ best practices file (`docs/best-practices.md`) and uses the user's arguments to provide context for the review.
```toml
# In: <project>/.qwen/commands/review.toml
# Invoked via: /review FileCommandLoader.ts
description = "Reviews the provided context using a best practice guide."
prompt = """
You are an expert code reviewer.
Your task is to review {{args}}.
Use the following best practices when providing your review:
@{docs/best-practices.md}
"""
```
When you run `/review FileCommandLoader.ts`, the `@{docs/best-practices.md}` placeholder is replaced by the content of that file, and `{{args}}` is replaced by the text you provided, before the final prompt is sent to the model.
---
#### Example: A "Pure Function" Refactoring Command

View File

@@ -7,16 +7,20 @@ Qwen Code offers several ways to configure its behavior, including environment v
Configuration is applied in the following order of precedence (lower numbers are overridden by higher numbers):
1. **Default values:** Hardcoded defaults within the application.
2. **User settings file:** Global settings for the current user.
3. **Project settings file:** Project-specific settings.
4. **System settings file:** System-wide settings.
5. **Environment variables:** System-wide or session-specific variables, potentially loaded from `.env` files.
6. **Command-line arguments:** Values passed when launching the CLI.
2. **System defaults file:** System-wide default settings that can be overridden by other settings files.
3. **User settings file:** Global settings for the current user.
4. **Project settings file:** Project-specific settings.
5. **System settings file:** System-wide settings that override all other settings files.
6. **Environment variables:** System-wide or session-specific variables, potentially loaded from `.env` files.
7. **Command-line arguments:** Values passed when launching the CLI.
## Settings files
Qwen Code uses `settings.json` files for persistent configuration. There are three locations for these files:
Qwen Code uses JSON settings files for persistent configuration. There are four locations for these files:
- **System defaults file:**
- **Location:** `/etc/qwen-code/system-defaults.json` (Linux), `C:\ProgramData\qwen-code\system-defaults.json` (Windows) or `/Library/Application Support/QwenCode/system-defaults.json` (macOS). The path can be overridden using the `QWEN_CODE_SYSTEM_DEFAULTS_PATH` environment variable.
- **Scope:** Provides a base layer of system-wide default settings. These settings have the lowest precedence and are intended to be overridden by user, project, or system override settings.
- **User settings file:**
- **Location:** `~/.qwen/settings.json` (where `~` is your home directory).
- **Scope:** Applies to all Qwen Code sessions for the current user.
@@ -61,19 +65,36 @@ In addition to a project settings file, a project's `.qwen` directory can contai
- **Properties:**
- **`respectGitIgnore`** (boolean): Whether to respect .gitignore patterns when discovering files. When set to `true`, git-ignored files (like `node_modules/`, `dist/`, `.env`) are automatically excluded from @ commands and file listing operations.
- **`enableRecursiveFileSearch`** (boolean): Whether to enable searching recursively for filenames under the current tree when completing @ prefixes in the prompt.
- **`disableFuzzySearch`** (boolean): When `true`, disables the fuzzy search capabilities when searching for files, which can improve performance on projects with a large number of files.
- **Example:**
```json
"fileFiltering": {
"respectGitIgnore": true,
"enableRecursiveFileSearch": false
"enableRecursiveFileSearch": false,
"disableFuzzySearch": true
}
```
### Troubleshooting File Search Performance
If you are experiencing performance issues with file searching (e.g., with `@` completions), especially in projects with a very large number of files, here are a few things you can try in order of recommendation:
1. **Use `.qwenignore`:** Create a `.qwenignore` file in your project root to exclude directories that contain a large number of files that you don't need to reference (e.g., build artifacts, logs, `node_modules`). Reducing the total number of files crawled is the most effective way to improve performance.
2. **Disable Fuzzy Search:** If ignoring files is not enough, you can disable fuzzy search by setting `disableFuzzySearch` to `true` in your `settings.json` file. This will use a simpler, non-fuzzy matching algorithm, which can be faster.
3. **Disable Recursive File Search:** As a last resort, you can disable recursive file search entirely by setting `enableRecursiveFileSearch` to `false`. This will be the fastest option as it avoids a recursive crawl of your project. However, it means you will need to type the full path to files when using `@` completions.
- **`coreTools`** (array of strings):
- **Description:** Allows you to specify a list of core tool names that should be made available to the model. This can be used to restrict the set of built-in tools. See [Built-in Tools](../core/tools-api.md#built-in-tools) for a list of core tools. You can also specify command-specific restrictions for tools that support it, like the `ShellTool`. For example, `"coreTools": ["ShellTool(ls -l)"]` will only allow the `ls -l` command to be executed.
- **Default:** All tools available for use by the model.
- **Example:** `"coreTools": ["ReadFileTool", "GlobTool", "ShellTool(ls)"]`.
- **`allowedTools`** (array of strings):
- **Default:** `undefined`
- **Description:** A list of tool names that will bypass the confirmation dialog. This is useful for tools that you trust and use frequently. The match semantics are the same as `coreTools`.
- **Example:** `"allowedTools": ["ShellTool(git status)"]`.
- **`excludeTools`** (array of strings):
- **Description:** Allows you to specify a list of core tool names that should be excluded from the model. A tool listed in both `excludeTools` and `coreTools` is excluded. You can also specify command-specific restrictions for tools that support it, like the `ShellTool`. For example, `"excludeTools": ["ShellTool(rm -rf)"]` will block the `rm -rf` command.
- **Default**: No tools excluded.
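For illustration, the three tool-related settings above can be combined in a single `settings.json` fragment; the specific tool and command names below are placeholders, not recommendations:
```json
"coreTools": ["ReadFileTool", "GlobTool", "ShellTool(ls)", "ShellTool(git status)"],
"allowedTools": ["ShellTool(git status)"],
"excludeTools": ["ShellTool(rm -rf)"]
```
With this configuration, only the listed tools are exposed to the model, `git status` runs without a confirmation prompt, and `rm -rf` is blocked.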
@@ -115,12 +136,12 @@ In addition to a project settings file, a project's `.qwen` directory can contai
- **Example:** `"sandbox": "docker"`
- **`toolDiscoveryCommand`** (string):
- **Description:** Defines a custom shell command for discovering tools from your project. The shell command must return on `stdout` a JSON array of [function declarations](https://ai.google.dev/gemini-api/docs/function-calling#function-declarations). Tool wrappers are optional.
- **Description:** **Align with Gemini CLI.** Defines a custom shell command for discovering tools from your project. The shell command must return on `stdout` a JSON array of [function declarations](https://ai.google.dev/gemini-api/docs/function-calling#function-declarations). Tool wrappers are optional.
- **Default:** Empty
- **Example:** `"toolDiscoveryCommand": "bin/get_tools"`
- **`toolCallCommand`** (string):
- **Description:** Defines a custom shell command for calling a specific tool that was discovered using `toolDiscoveryCommand`. The shell command must meet the following criteria:
- **Description:** **Align with Gemini CLI.** Defines a custom shell command for calling a specific tool that was discovered using `toolDiscoveryCommand`. The shell command must meet the following criteria:
- It must take function `name` (exactly as in [function declaration](https://ai.google.dev/gemini-api/docs/function-calling#function-declarations)) as first command line argument.
- It must read function arguments as JSON on `stdin`, analogous to [`functionCall.args`](https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/inference#functioncall).
- It must return function output as JSON on `stdout`, analogous to [`functionResponse.response.content`](https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/inference#functionresponse).
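As a rough illustration of this contract, a `toolCallCommand` wrapper could be a small Node.js script along the lines of the sketch below; the dispatch logic is a placeholder and nothing here is shipped with Qwen Code:
```javascript
#!/usr/bin/env node
// Hypothetical toolCallCommand wrapper: tool name on argv, JSON args on stdin,
// JSON result on stdout.
import process from 'node:process';

const toolName = process.argv[2]; // function name, exactly as in the function declaration

let raw = '';
process.stdin.setEncoding('utf-8');
process.stdin.on('data', (chunk) => (raw += chunk));
process.stdin.on('end', () => {
  const args = JSON.parse(raw); // analogous to functionCall.args
  // Placeholder dispatch: replace with calls to your real tool implementations.
  const response = { tool: toolName, echoedArgs: args };
  // Analogous to functionResponse.response.content.
  process.stdout.write(JSON.stringify(response));
});
```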
@@ -268,7 +289,7 @@ In addition to a project settings file, a project's `.qwen` directory can contai
```
- **`includeDirectories`** (array of strings):
- **Description:** Specifies an array of additional absolute or relative paths to include in the workspace context. This allows you to work with files across multiple directories as if they were one. Paths can use `~` to refer to the user's home directory. This setting can be combined with the `--include-directories` command-line flag.
- **Description:** Specifies an array of additional absolute or relative paths to include in the workspace context. Missing directories will be skipped with a warning by default. Paths can use `~` to refer to the user's home directory. This setting can be combined with the `--include-directories` command-line flag.
- **Default:** `[]`
- **Example:**
```json
@@ -311,6 +332,48 @@ In addition to a project settings file, a project's `.qwen` directory can contai
"showLineNumbers": false
```
- **`accessibility`** (object):
- **Description:** Configures accessibility features for the CLI.
- **Properties:**
- **`screenReader`** (boolean): Enables screen reader mode, which adjusts the TUI for better compatibility with screen readers. This can also be enabled with the `--screen-reader` command-line flag, which will take precedence over the setting.
- **`disableLoadingPhrases`** (boolean): Disables the display of loading phrases during operations.
- **Default:** `{"screenReader": false, "disableLoadingPhrases": false}`
- **Example:**
```json
"accessibility": {
"screenReader": true,
"disableLoadingPhrases": true
}
```
- **`skipNextSpeakerCheck`** (boolean):
- **Description:** Skips the next speaker check after text responses. When enabled, the system bypasses analyzing whether the AI should continue speaking.
- **Default:** `false`
- **Example:**
```json
"skipNextSpeakerCheck": true
```
- **`skipLoopDetection`** (boolean):
- **Description:** Disables all loop detection checks (streaming and LLM-based). Loop detection prevents infinite loops in AI responses but can generate false positives that interrupt legitimate workflows. Enable this option if you experience frequent false positive loop detection interruptions.
- **Default:** `false`
- **Example:**
```json
"skipLoopDetection": true
```
- **`approvalMode`** (string):
- **Description:** Sets the default approval mode for tool usage. Accepted values are:
- `plan`: Analyze only, do not modify files or execute commands.
- `default`: Require approval before file edits or shell commands run.
- `auto-edit`: Automatically approve file edits.
- `yolo`: Automatically approve all tool calls.
- **Default:** `"default"`
- **Example:**
```json
"approvalMode": "plan"
```
### Example `settings.json`:
```json
@@ -338,6 +401,8 @@ In addition to a project settings file, a project's `.qwen` directory can contai
"usageStatisticsEnabled": true,
"hideTips": false,
"hideBanner": false,
"skipNextSpeakerCheck": false,
"skipLoopDetection": false,
"maxSessionTurns": 10,
"summarizeToolOutput": {
"run_shell_command": {
@@ -433,12 +498,16 @@ Arguments passed directly when running the CLI can override other configurations
- **`--yolo`**:
- Enables YOLO mode, which automatically approves all tool calls.
- **`--approval-mode <mode>`**:
- Sets the approval mode for tool calls. Available modes:
- `default`: Prompt for approval on each tool call (default behavior)
- `auto_edit`: Automatically approve edit tools (edit, write_file) while prompting for others
- `yolo`: Automatically approve all tool calls (equivalent to `--yolo`)
- Sets the approval mode for tool calls. Supported modes:
- `plan`: Analyze only—do not modify files or execute commands.
- `default`: Require approval for file edits or shell commands (default behavior).
- `auto-edit`: Automatically approve edit tools (edit, write_file) while prompting for others.
- `yolo`: Automatically approve all tool calls (equivalent to `--yolo`).
- Cannot be used together with `--yolo`. Use `--approval-mode=yolo` instead of `--yolo` for the new unified approach.
- Example: `qwen --approval-mode auto_edit`
- Example: `qwen --approval-mode auto-edit`
- **`--allowed-tools <tool1,tool2,...>`**:
- A comma-separated list of tool names that will bypass the confirmation dialog.
- Example: `qwen --allowed-tools "ShellTool(git status)"`
- **`--telemetry`**:
- Enables [telemetry](../telemetry.md).
- **`--telemetry-target`**:
@@ -465,6 +534,8 @@ Arguments passed directly when running the CLI can override other configurations
- Can be specified multiple times or as comma-separated values.
- 5 directories can be added at maximum.
- Example: `--include-directories /path/to/project1,/path/to/project2` or `--include-directories /path/to/project1 --include-directories /path/to/project2`
- **`--screen-reader`**:
- Enables screen reader mode for accessibility.
- **`--version`**:
- Displays the version of the CLI.
- **`--openai-logging`**:

View File

@@ -28,6 +28,8 @@ Qwen Code comes with a selection of pre-defined themes, which you can list using
3. Using the arrow keys, select a theme. Some interfaces might offer a live preview or highlight as you select.
4. Confirm your selection to apply the theme.
**Note:** If a theme is defined in your `settings.json` file (either by name or by a file path), you must remove the `"theme"` setting from the file before you can change the theme using the `/theme` command.
### Theme Persistence
Selected themes are saved in Qwen Code's [configuration](./configuration.md) so your preference is remembered across sessions.
@@ -105,6 +107,46 @@ You can use either hex codes (e.g., `#FF0000`) **or** standard CSS color names (
You can define multiple custom themes by adding more entries to the `customThemes` object.
### Loading Themes from a File
In addition to defining custom themes in `settings.json`, you can also load a theme directly from a JSON file by specifying the file path in your `settings.json`. This is useful for sharing themes or keeping them separate from your main configuration.
To load a theme from a file, set the `theme` property in your `settings.json` to the path of your theme file:
```json
{
"theme": "/path/to/your/theme.json"
}
```
The theme file must be a valid JSON file that follows the same structure as a custom theme defined in `settings.json`.
**Example `my-theme.json`:**
```json
{
"name": "My File Theme",
"type": "custom",
"Background": "#282A36",
"Foreground": "#F8F8F2",
"LightBlue": "#82AAFF",
"AccentBlue": "#61AFEF",
"AccentPurple": "#BD93F9",
"AccentCyan": "#8BE9FD",
"AccentGreen": "#50FA7B",
"AccentYellow": "#F1FA8C",
"AccentRed": "#FF5555",
"Comment": "#6272A4",
"Gray": "#ABB2BF",
"DiffAdded": "#A6E3A1",
"DiffRemoved": "#F38BA8",
"DiffModified": "#89B4FA",
"GradientColors": ["#4796E4", "#847ACE", "#C3677F"]
}
```
**Security Note:** For your safety, Gemini CLI will only load theme files that are located within your home directory. If you attempt to load a theme from outside your home directory, a warning will be displayed and the theme will not be loaded. This is to prevent loading potentially malicious theme files from untrusted sources.
### Example Custom Theme
<img src="../assets/theme-custom.png" alt="Custom theme example" width="600" />

View File

@@ -41,7 +41,7 @@ For security and isolation, Qwen Code can be run inside a container. This is the
You can run the published sandbox image directly. This is useful for environments where you only have Docker and want to run the CLI.
```bash
# Run the published sandbox image
docker run --rm -it ghcr.io/qwenlm/qwen-code:0.0.10
docker run --rm -it ghcr.io/qwenlm/qwen-code:0.0.11
```
- **Using the `--sandbox` flag:**
If you have Qwen Code installed locally (using the standard installation described above), you can instruct it to run inside the sandbox container.

View File

@@ -15,10 +15,10 @@ The following is an example of a proxy script that can be used with the `GEMINI_
// Set `GEMINI_SANDBOX_PROXY_COMMAND=scripts/example-proxy.js` to run proxy alongside sandbox
// Test via `curl https://example.com` inside sandbox (in shell mode or via shell tool)
import http from 'http';
import net from 'net';
import { URL } from 'url';
import console from 'console';
import http from 'node:http';
import net from 'node:net';
import { URL } from 'node:url';
import console from 'node:console';
const PROXY_PORT = 8877;
const ALLOWED_DOMAINS = ['example.com', 'googleapis.com'];

View File

@@ -74,3 +74,27 @@ For example, if both a user and the `gcp` extension define a `deploy` command:
- `/deploy` - Executes the user's deploy command
- `/gcp.deploy` - Executes the extension's deploy command (marked with `[gcp]` tag)
## Installing Extensions
You can install extensions using the `install` command. This command allows you to install extensions from a Git repository or a local path.
### Usage
`qwen extensions install <source> | [options]`
### Options
- `source <url> positional argument`: The URL of a Git repository to install the extension from. The repository must contain a `qwen-extension.json` file in its root.
- `--path <path>`: The path to a local directory to install as an extension. The directory must contain a `qwen-extension.json` file.
# Variables
Qwen Code extensions allow variable substitution in `qwen-extension.json`. This can be useful if, for example, you need the current directory to run an MCP server using `"cwd": "${extensionPath}${/}run.ts"`.
**Supported variables:**
| variable | description |
| -------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `${extensionPath}` | The fully-qualified path of the extension in the user's filesystem e.g., '/Users/username/.qwen/extensions/example-extension'. This will not unwrap symlinks. |
| `${/} or ${pathSeparator}` | The path separator (differs per OS). |
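To put the table in context, a minimal `qwen-extension.json` using these variables might look like the sketch below. The `cwd` value is taken from the example above; the `name`, `version`, and `mcpServers` fields follow the usual extension manifest layout and should be treated as assumptions rather than a definitive schema:
```json
{
  "name": "example-extension",
  "version": "0.0.1",
  "mcpServers": {
    "example-server": {
      "command": "node",
      "cwd": "${extensionPath}${/}run.ts"
    }
  }
}
```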

View File

@@ -1,59 +0,0 @@
# Ignoring Files
This document provides an overview of the Gemini Ignore (`.geminiignore`) feature of Qwen Code.
Qwen Code includes the ability to automatically ignore files, similar to `.gitignore` (used by Git) and `.aiexclude` (used by Gemini Code Assist). Adding paths to your `.geminiignore` file will exclude them from tools that support this feature, although they will still be visible to other services (such as Git).
## How it works
When you add a path to your `.geminiignore` file, tools that respect this file will exclude matching files and directories from their operations. For example, when you use the [`read_many_files`](./tools/multi-file.md) command, any paths in your `.geminiignore` file will be automatically excluded.
For the most part, `.geminiignore` follows the conventions of `.gitignore` files:
- Blank lines and lines starting with `#` are ignored.
- Standard glob patterns are supported (such as `*`, `?`, and `[]`).
- Putting a `/` at the end will only match directories.
- Putting a `/` at the beginning anchors the path relative to the `.geminiignore` file.
- `!` negates a pattern.
You can update your `.geminiignore` file at any time. To apply the changes, you must restart your Qwen Code session.
## How to use `.geminiignore`
To enable `.geminiignore`:
1. Create a file named `.geminiignore` in the root of your project directory.
To add a file or directory to `.geminiignore`:
1. Open your `.geminiignore` file.
2. Add the path or file you want to ignore, for example: `/archive/` or `apikeys.txt`.
### `.geminiignore` examples
You can use `.geminiignore` to ignore directories and files:
```
# Exclude your /packages/ directory and all subdirectories
/packages/
# Exclude your apikeys.txt file
apikeys.txt
```
You can use wildcards in your `.geminiignore` file with `*`:
```
# Exclude all .md files
*.md
```
Finally, you can exclude files and directories from exclusion with `!`:
```
# Exclude all .md files except README.md
*.md
!README.md
```
To remove paths from your `.geminiignore` file, delete the relevant lines.

View File

@@ -44,7 +44,10 @@ You can also install the extension directly from a marketplace.
- **For Visual Studio Code:** Install from the [VS Code Marketplace](https://marketplace.visualstudio.com/items?itemName=qwenlm.qwen-code-vscode-ide-companion).
- **For VS Code Forks:** To support forks of VS Code, the extension is also published on the [Open VSX Registry](https://open-vsx.org/extension/qwenlm/qwen-code-vscode-ide-companion). Follow your editor's instructions for installing extensions from this registry.
After any installation method, it's recommended to open a new terminal window to ensure the integration is activated correctly. Once installed, you can use `/ide enable` to connect.
> NOTE:
> The "Qwen Code Companion" extension may appear towards the bottom of search results. If you don't see it immediately, try scrolling down or sorting by "Newly Published".
>
> After manually installing the extension, you must run `/ide enable` in the CLI to activate the integration.
## Usage
@@ -77,7 +80,7 @@ If connected, this command will show the IDE it's connected to and a list of rec
### Working with Diffs
When you ask Gemini to modify a file, it can open a diff view directly in your editor.
When you ask Qwen model to modify a file, it can open a diff view directly in your editor.
**To accept a diff**, you can perform any of the following actions:
@@ -126,9 +129,9 @@ If you encounter issues with IDE integration, here are some common error message
- **Cause:** The CLI's current working directory is outside the folder or workspace you have open in your IDE.
- **Solution:** `cd` into the same directory that is open in your IDE and restart the CLI.
- **Message:** `🔴 Disconnected: To use this feature, please open a single workspace folder in [IDE Name] and try again.`
- **Cause:** You have multiple workspace folders open in your IDE, or no folder is open at all. The IDE integration requires a single root workspace folder to operate correctly.
- **Solution:** Open a single project folder in your IDE and restart the CLI.
- **Message:** `🔴 Disconnected: To use this feature, please open a workspace folder in [IDE Name] and try again.`
- **Cause:** You have no workspace open in your IDE.
- **Solution:** Open a workspace in your IDE and restart the CLI.
### General Errors
@@ -136,6 +139,6 @@ If you encounter issues with IDE integration, here are some common error message
- **Cause:** You are running Qwen Code in a terminal or environment that is not a supported IDE.
- **Solution:** Run Qwen Code from the integrated terminal of a supported IDE, like VS Code.
- **Message:** `No installer is available for [IDE Name]. Please install the IDE companion manually from its marketplace.`
- **Message:** `No installer is available for IDE. Please install the Qwen Code Companion extension manually from the marketplace.`
- **Cause:** You ran `/ide install`, but the CLI does not have an automated installer for your specific IDE.
- **Solution:** Open your IDE's extension marketplace, search for "Qwen Code Companion", and install it manually.

View File

@@ -33,7 +33,7 @@ This documentation is organized into the following sections:
- **[Memory Tool](./tools/memory.md):** Documentation for the `save_memory` tool.
- **[Subagents](./subagents.md):** Specialized AI assistants for focused tasks with comprehensive management, configuration, and usage guidance.
- **[Contributing & Development Guide](../CONTRIBUTING.md):** Information for contributors and developers, including setup, building, testing, and coding conventions.
- **[NPM Workspaces and Publishing](./npm.md):** Details on how the project's packages are managed and published.
- **[NPM](./npm.md):** Details on how the project's packages are structured
- **[Troubleshooting Guide](./troubleshooting.md):** Find solutions to common problems and FAQs.
- **[Terms of Service and Privacy Notice](./tos-privacy.md):** Information on the terms of service and privacy notices applicable to your use of Qwen Code.

View File

@@ -4,16 +4,16 @@ This document lists the available keyboard shortcuts in Qwen Code.
## General
| Shortcut | Description |
| -------- | --------------------------------------------------------------------------------------------------------------------- |
| `Esc` | Close dialogs and suggestions. |
| `Ctrl+C` | Cancel the ongoing request and clear the input. Press twice to exit the application. |
| `Ctrl+D` | Exit the application if the input is empty. Press twice to confirm. |
| `Ctrl+L` | Clear the screen. |
| `Ctrl+O` | Toggle the display of the debug console. |
| `Ctrl+S` | Allows long responses to print fully, disabling truncation. Use your terminal's scrollback to view the entire output. |
| `Ctrl+T` | Toggle the display of tool descriptions. |
| `Ctrl+Y` | Toggle auto-approval (YOLO mode) for all tool calls. |
| Shortcut | Description |
| ----------- | --------------------------------------------------------------------------------------------------------------------- |
| `Esc` | Close dialogs and suggestions. |
| `Ctrl+C` | Cancel the ongoing request and clear the input. Press twice to exit the application. |
| `Ctrl+D` | Exit the application if the input is empty. Press twice to confirm. |
| `Ctrl+L` | Clear the screen. |
| `Ctrl+O` | Toggle the display of the debug console. |
| `Ctrl+S` | Allows long responses to print fully, disabling truncation. Use your terminal's scrollback to view the entire output. |
| `Ctrl+T` | Toggle the display of tool descriptions. |
| `Shift+Tab` | Cycle approval modes (`plan` → `default` → `auto-edit` → `yolo`). |
## Input Prompt
@@ -29,6 +29,7 @@ This document lists the available keyboard shortcuts in Qwen Code.
| `Ctrl+A` / `Home` | Move the cursor to the beginning of the line. |
| `Ctrl+B` / `Left Arrow` | Move the cursor one character to the left. |
| `Ctrl+C` | Clear the input prompt |
| `Esc` (double press) | Clear the input prompt. |
| `Ctrl+D` / `Delete` | Delete the character to the right of the cursor. |
| `Ctrl+E` / `End` | Move the cursor to the end of the line. |
| `Ctrl+F` / `Right Arrow` | Move the cursor one character to the right. |

docs/qwen-ignore.md (new file, 59 lines)
View File

@@ -0,0 +1,59 @@
# Ignoring Files
This document provides an overview of the Qwen Ignore (`.qwenignore`) feature of Qwen Code.
Qwen Code includes the ability to automatically ignore files, similar to `.gitignore` (used by Git). Adding paths to your `.qwenignore` file will exclude them from tools that support this feature, although they will still be visible to other services (such as Git).
## How it works
When you add a path to your `.qwenignore` file, tools that respect this file will exclude matching files and directories from their operations. For example, when you use the [`read_many_files`](./tools/multi-file.md) command, any paths in your `.qwenignore` file will be automatically excluded.
For the most part, `.qwenignore` follows the conventions of `.gitignore` files:
- Blank lines and lines starting with `#` are ignored.
- Standard glob patterns are supported (such as `*`, `?`, and `[]`).
- Putting a `/` at the end will only match directories.
- Putting a `/` at the beginning anchors the path relative to the `.qwenignore` file.
- `!` negates a pattern.
You can update your `.qwenignore` file at any time. To apply the changes, you must restart your Qwen Code session.
## How to use `.qwenignore`
To enable `.qwenignore`:
1. Create a file named `.qwenignore` in the root of your project directory.
To add a file or directory to `.qwenignore`:
1. Open your `.qwenignore` file.
2. Add the path or file you want to ignore, for example: `/archive/` or `apikeys.txt`.
### `.qwenignore` examples
You can use `.qwenignore` to ignore directories and files:
```
# Exclude your /packages/ directory and all subdirectories
/packages/
# Exclude your apikeys.txt file
apikeys.txt
```
You can use wildcards in your `.qwenignore` file with `*`:
```
# Exclude all .md files
*.md
```
Finally, you can exempt files and directories from exclusion with `!`:
```
# Exclude all .md files except README.md
*.md
!README.md
```
To remove paths from your `.qwenignore` file, delete the relevant lines.

View File

@@ -133,6 +133,28 @@ Focus on creating clear, comprehensive documentation that helps both
new contributors and end users understand the project.
```
## Using Subagents Effectively
### Automatic Delegation
Qwen Code proactively delegates tasks based on:
- The task description in your request
- The description field in subagent configurations
- Current context and available tools
To encourage more proactive subagent use, include phrases like "use PROACTIVELY" or "MUST BE USED" in your description field.
### Explicit Invocation
Request a specific subagent by mentioning it in your command:
```
> Let the testing-expert subagent create unit tests for the payment module
> Have the documentation-writer subagent update the API reference
> Get the react-specialist subagent to optimize this component's performance
```
## Examples
### Development Workflow Agents

View File

@@ -177,9 +177,10 @@ Logs are timestamped records of specific events. The following events are logged
- `qwen-code.user_prompt`: This event occurs when a user submits a prompt.
- **Attributes**:
- `prompt_length`
- `prompt` (this attribute is excluded if `log_prompts_enabled` is configured to be `false`)
- `auth_type`
- `prompt_length` (int)
- `prompt_id` (string)
- `prompt` (string, this attribute is excluded if `log_prompts_enabled` is configured to be `false`)
- `auth_type` (string)
- `qwen-code.tool_call`: This event occurs for each function call.
- **Attributes**:
@@ -272,6 +273,7 @@ Metrics are numerical measurements of behavior over time. The following metrics
- `ai_removed_lines` (Int, if applicable): Number of lines removed/changed by AI.
- `user_added_lines` (Int, if applicable): Number of lines added/changed by user in AI proposed changes.
- `user_removed_lines` (Int, if applicable): Number of lines removed/changed by user in AI proposed changes.
- `programming_language` (string, if applicable): The programming language of the file.
- `qwen-code.chat_compression` (Counter, Int): Counts chat compression operations
- **Attributes**:

View File

@@ -29,6 +29,7 @@ Use `read_many_files` to read content from multiple files specified by paths or
`read_many_files` searches for files matching the provided `paths` and `include` patterns, while respecting `exclude` patterns and default excludes (if enabled).
- For text files: it reads the content of each matched file (attempting to skip binary files not explicitly requested as image/PDF) and concatenates it into a single string, with a separator `--- {filePath} ---` between the content of each file. Uses UTF-8 encoding by default.
- The tool inserts a `--- End of content ---` marker after the last file.
- For image and PDF files: if explicitly requested by name or extension (e.g., `paths: ["logo.png"]` or `include: ["*.pdf"]`), the tool reads the file and returns its content as a base64 encoded string.
- The tool attempts to detect and skip other binary files (those not matching common image/PDF types or not explicitly requested) by checking for null bytes in their initial content.

View File

@@ -73,6 +73,18 @@ This guide provides solutions to common issues and debugging tips, including top
- If running in a container, verify `host.docker.internal` resolves. Otherwise, map the host appropriately.
- Reinstall the companion with `/ide install` and use “Qwen Code: Run” in the Command Palette to verify it launches.
## Exit Codes
Qwen Code uses specific exit codes to indicate the reason for termination. This is especially useful for scripting and automation.
| Exit Code | Error Type | Description |
| --------- | -------------------------- | --------------------------------------------------------------------------------------------------- |
| 41 | `FatalAuthenticationError` | An error occurred during the authentication process. |
| 42 | `FatalInputError` | Invalid or missing input was provided to the CLI. (non-interactive mode only) |
| 44 | `FatalSandboxError` | An error occurred with the sandboxing environment (e.g., Docker, Podman, or Seatbelt). |
| 52 | `FatalConfigError` | A configuration file (`settings.json`) is invalid or contains errors. |
| 53 | `FatalTurnLimitedError` | The maximum number of conversational turns for the session was reached. (non-interactive mode only) |
## Debugging Tips
- **CLI debugging:**

View File

@@ -5,9 +5,9 @@
*/
import esbuild from 'esbuild';
import path from 'path';
import { fileURLToPath } from 'url';
import { createRequire } from 'module';
import path from 'node:path';
import { fileURLToPath } from 'node:url';
import { createRequire } from 'node:module';
const __filename = fileURLToPath(import.meta.url);
const __dirname = path.dirname(__filename);

View File

@@ -10,9 +10,10 @@ import reactPlugin from 'eslint-plugin-react';
import reactHooks from 'eslint-plugin-react-hooks';
import prettierConfig from 'eslint-config-prettier';
import importPlugin from 'eslint-plugin-import';
import vitest from '@vitest/eslint-plugin';
import globals from 'globals';
import licenseHeader from 'eslint-plugin-license-header';
import path from 'node:path'; // Use node: prefix for built-ins
import path from 'node:path';
import url from 'node:url';
// --- ESM way to get __dirname ---
@@ -29,10 +30,7 @@ export default tseslint.config(
ignores: [
'node_modules/*',
'eslint.config.js',
'packages/cli/dist/**',
'packages/core/dist/**',
'packages/server/dist/**',
'packages/vscode-ide-companion/dist/**',
'packages/**/dist/**',
'bundle/**',
'package/bundle/**',
'.integration-tests/**',
@@ -105,6 +103,10 @@ export default tseslint.config(
'error',
{ ignoreParameters: true, ignoreProperties: true },
],
'@typescript-eslint/consistent-type-imports': [
'error',
{ disallowTypeAnnotations: false },
],
'@typescript-eslint/no-namespace': ['error', { allowDeclarations: true }],
'@typescript-eslint/no-unused-vars': [
'error',
@@ -122,6 +124,7 @@ export default tseslint.config(
'memfs/lib/volume.js',
'yargs/**',
'msw/node',
'**/generated/**'
],
},
],
@@ -157,6 +160,17 @@ export default tseslint.config(
'default-case': 'error',
},
},
{
files: ['packages/*/src/**/*.test.{ts,tsx}'],
plugins: {
vitest,
},
rules: {
...vitest.configs.recommended.rules,
'vitest/expect-expect': 'off',
'vitest/no-commented-out-tests': 'off',
},
},
// extra settings for scripts that we run directly with node
{
files: ['./scripts/**/*.js', 'esbuild.config.js'],

View File

@@ -9,16 +9,45 @@ if (process.env['NO_COLOR'] !== undefined) {
delete process.env['NO_COLOR'];
}
import { mkdir, readdir, rm } from 'fs/promises';
import { join, dirname } from 'path';
import { fileURLToPath } from 'url';
import {
mkdir,
readdir,
rm,
readFile,
writeFile,
unlink,
} from 'node:fs/promises';
import { join, dirname } from 'node:path';
import { fileURLToPath } from 'node:url';
import * as os from 'node:os';
import {
GEMINI_CONFIG_DIR,
DEFAULT_CONTEXT_FILENAME,
} from '../packages/core/src/tools/memoryTool.js';
const __dirname = dirname(fileURLToPath(import.meta.url));
const rootDir = join(__dirname, '..');
const integrationTestsDir = join(rootDir, '.integration-tests');
let runDir = ''; // Make runDir accessible in teardown
const memoryFilePath = join(
os.homedir(),
GEMINI_CONFIG_DIR,
DEFAULT_CONTEXT_FILENAME,
);
let originalMemoryContent: string | null = null;
export async function setup() {
try {
originalMemoryContent = await readFile(memoryFilePath, 'utf-8');
} catch (e) {
if ((e as NodeJS.ErrnoException).code !== 'ENOENT') {
throw e;
}
// File doesn't exist, which is fine.
}
runDir = join(integrationTestsDir, `${Date.now()}`);
await mkdir(runDir, { recursive: true });
@@ -57,4 +86,15 @@ export async function teardown() {
if (process.env['KEEP_OUTPUT'] !== 'true' && runDir) {
await rm(runDir, { recursive: true, force: true });
}
if (originalMemoryContent !== null) {
await mkdir(dirname(memoryFilePath), { recursive: true });
await writeFile(memoryFilePath, originalMemoryContent, 'utf-8');
} else {
try {
await unlink(memoryFilePath);
} catch {
// File might not exist if the test failed before creating it.
}
}
}

View File

@@ -10,17 +10,10 @@ import * as os from 'node:os';
import * as path from 'node:path';
import * as net from 'node:net';
import * as child_process from 'node:child_process';
import type { ChildProcess } from 'node:child_process';
import { IdeClient } from '../packages/core/src/ide/ide-client.js';
import { TestMcpServer } from './test-mcp-server.js';
// Helper function to reset the IdeClient singleton instance for testing
const resetIdeClientInstance = () => {
// Access the private instance property using type assertion
(IdeClient as unknown as { instance?: IdeClient }).instance = undefined;
};
describe.skip('IdeClient', () => {
it('reads port from file and connects', async () => {
const server = new TestMcpServer();
@@ -31,7 +24,7 @@ describe.skip('IdeClient', () => {
process.env['QWEN_CODE_IDE_WORKSPACE_PATH'] = process.cwd();
process.env['TERM_PROGRAM'] = 'vscode';
const ideClient = IdeClient.getInstance();
const ideClient = await IdeClient.getInstance();
await ideClient.connect();
expect(ideClient.getConnectionStatus()).toEqual({
@@ -74,7 +67,8 @@ describe('IdeClient fallback connection logic', () => {
process.env['TERM_PROGRAM'] = 'vscode';
process.env['QWEN_CODE_IDE_WORKSPACE_PATH'] = process.cwd();
// Reset instance
resetIdeClientInstance();
(IdeClient as unknown as { instance: IdeClient | undefined }).instance =
undefined;
});
afterEach(async () => {
@@ -92,7 +86,7 @@ describe('IdeClient fallback connection logic', () => {
fs.unlinkSync(portFile);
}
const ideClient = IdeClient.getInstance();
const ideClient = await IdeClient.getInstance();
await ideClient.connect();
expect(ideClient.getConnectionStatus()).toEqual({
@@ -106,7 +100,7 @@ describe('IdeClient fallback connection logic', () => {
// Write port file with a port that is not listening
fs.writeFileSync(portFile, JSON.stringify({ port: filePort }));
const ideClient = IdeClient.getInstance();
const ideClient = await IdeClient.getInstance();
await ideClient.connect();
expect(ideClient.getConnectionStatus()).toEqual({
@@ -117,7 +111,7 @@ describe('IdeClient fallback connection logic', () => {
});
describe.skip('getIdeProcessId', () => {
let child: ChildProcess;
let child: child_process.ChildProcess;
afterEach(() => {
if (child) {
@@ -145,11 +139,11 @@ describe.skip('getIdeProcessId', () => {
);
let out = '';
child.stdout?.on('data', (data: Buffer) => {
child.stdout?.on('data', (data) => {
out += data.toString();
});
child.on('close', (code: number | null) => {
child.on('close', (code) => {
if (code === 0) {
resolve(out.trim());
} else {
@@ -180,11 +174,12 @@ describe('IdeClient with proxy', () => {
vi.stubEnv('QWEN_CODE_IDE_WORKSPACE_PATH', process.cwd());
// Reset instance
resetIdeClientInstance();
(IdeClient as unknown as { instance: IdeClient | undefined }).instance =
undefined;
});
afterEach(async () => {
IdeClient.getInstance().disconnect();
(await IdeClient.getInstance()).disconnect();
await mcpServer.stop();
proxyServer.close();
vi.unstubAllEnvs();
@@ -195,7 +190,7 @@ describe('IdeClient with proxy', () => {
vi.stubEnv('HTTPS_PROXY', `http://localhost:${proxyServerPort}`);
vi.stubEnv('NO_PROXY', 'example.com,127.0.0.1,::1');
const ideClient = IdeClient.getInstance();
const ideClient = await IdeClient.getInstance();
await ideClient.connect();
expect(ideClient.getConnectionStatus()).toEqual({

View File

@@ -6,8 +6,8 @@
import { describe, it, expect } from 'vitest';
import { TestRig, printDebugInfo, validateModelOutput } from './test-helper.js';
import { existsSync } from 'fs';
import { join } from 'path';
import { existsSync } from 'node:fs';
import { join } from 'node:path';
describe('list_directory', () => {
it('should be able to list a directory', async () => {

View File

@@ -11,8 +11,8 @@
import { describe, it, beforeAll, expect } from 'vitest';
import { TestRig } from './test-helper.js';
import { join } from 'path';
import { writeFileSync } from 'fs';
import { join } from 'node:path';
import { writeFileSync } from 'node:fs';
// Create a minimal MCP server that doesn't require external dependencies
// This implements the MCP protocol directly using Node.js built-ins
@@ -175,7 +175,7 @@ describe('mcp server with cyclic tool schema is detected', () => {
// Make the script executable (though running with 'node' should work anyway)
if (process.platform !== 'win32') {
const { chmodSync } = await import('fs');
const { chmodSync } = await import('node:fs');
chmodSync(testServerPath, 0o755);
}
});

View File

@@ -8,7 +8,7 @@ import { describe, it, expect } from 'vitest';
import { TestRig, printDebugInfo, validateModelOutput } from './test-helper.js';
describe('read_many_files', () => {
it('should be able to read multiple files', async () => {
it.skip('should be able to read multiple files', async () => {
const rig = new TestRig();
await rig.setup('should be able to read multiple files');
rig.createFile('file1.txt', 'file 1 content');

View File

@@ -6,8 +6,8 @@
import { describe, it, expect, beforeAll } from 'vitest';
import { ShellExecutionService } from '../packages/core/src/services/shellExecutionService.js';
import * as fs from 'fs/promises';
import * as path from 'path';
import * as fs from 'node:fs/promises';
import * as path from 'node:path';
import { vi } from 'vitest';
describe('ShellExecutionService programmatic integration tests', () => {
@@ -123,4 +123,34 @@ describe('ShellExecutionService programmatic integration tests', () => {
const exitedCleanly = result.exitCode === 0 && result.signal === null;
expect(exitedCleanly, 'Process should not have exited cleanly').toBe(false);
});
it('should propagate environment variables to the child process', async () => {
const varName = 'QWEN_CODE_TEST_VAR';
const varValue = `test-value`;
process.env[varName] = varValue;
try {
const command =
process.platform === 'win32' ? `echo %${varName}%` : `echo $${varName}`;
const onOutputEvent = vi.fn();
const abortController = new AbortController();
const handle = await ShellExecutionService.execute(
command,
testDir,
onOutputEvent,
abortController.signal,
false,
);
const result = await handle.result;
expect(result.error).toBeNull();
expect(result.exitCode).toBe(0);
expect(result.output).toContain(varValue);
} finally {
// Clean up the env var to prevent side-effects on other tests.
delete process.env[varName];
}
});
});

View File

@@ -12,8 +12,8 @@
import { describe, it, beforeAll, expect } from 'vitest';
import { TestRig, validateModelOutput } from './test-helper.js';
import { join } from 'path';
import { writeFileSync } from 'fs';
import { join } from 'node:path';
import { writeFileSync } from 'node:fs';
// Create a minimal MCP server that doesn't require external dependencies
// This implements the MCP protocol directly using Node.js built-ins
@@ -186,7 +186,7 @@ describe('simple-mcp-server', () => {
// Make the script executable (though running with 'node' should work anyway)
if (process.platform !== 'win32') {
const { chmodSync } = await import('fs');
const { chmodSync } = await import('node:fs');
chmodSync(testServerPath, 0o755);
}
});

View File

@@ -0,0 +1,97 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { describe, it, expect } from 'vitest';
import { TestRig, printDebugInfo, validateModelOutput } from './test-helper.js';
describe.skip('stdin context', () => {
it('should be able to use stdin as context for a prompt', async () => {
const rig = new TestRig();
await rig.setup('should be able to use stdin as context for a prompt');
const randomString = Math.random().toString(36).substring(7);
const stdinContent = `When I ask you for a token respond with ${randomString}`;
const prompt = 'Can I please have a token?';
const result = await rig.run({ prompt, stdin: stdinContent });
await rig.waitForTelemetryEvent('api_request');
const lastRequest = rig.readLastApiRequest();
expect(lastRequest).not.toBeNull();
const historyString = lastRequest.attributes.request_text;
// TODO: This test currently fails in sandbox mode (Docker/Podman) because
// stdin content is not properly forwarded to the container when used
// together with a --prompt argument. The test passes in non-sandbox mode.
expect(historyString).toContain(randomString);
expect(historyString).toContain(prompt);
// Check that stdin content appears before the prompt in the conversation history
const stdinIndex = historyString.indexOf(randomString);
const promptIndex = historyString.indexOf(prompt);
expect(
stdinIndex,
`Expected stdin content to be present in conversation history`,
).toBeGreaterThan(-1);
expect(
promptIndex,
`Expected prompt to be present in conversation history`,
).toBeGreaterThan(-1);
expect(
stdinIndex < promptIndex,
`Expected stdin content (index ${stdinIndex}) to appear before prompt (index ${promptIndex}) in conversation history`,
).toBeTruthy();
// Add debugging information
if (!result.toLowerCase().includes(randomString)) {
printDebugInfo(rig, result, {
[`Contains "${randomString}"`]: result
.toLowerCase()
.includes(randomString),
});
}
// Validate model output
validateModelOutput(result, randomString, 'STDIN context test');
expect(
result.toLowerCase().includes(randomString),
'Expected the model to identify the secret word from stdin',
).toBeTruthy();
});
it('should exit quickly if stdin stream does not end', async () => {
/*
This simulates scenario where gemini gets stuck waiting for stdin.
This happens in situations where process.stdin.isTTY is false
even though gemini is intended to run interactively.
*/
const rig = new TestRig();
await rig.setup('should exit quickly if stdin stream does not end');
try {
await rig.run({ stdinDoesNotEnd: true });
throw new Error('Expected rig.run to throw an error');
} catch (error: unknown) {
expect(error).toBeInstanceOf(Error);
const err = error as Error;
expect(err.message).toContain('Process exited with code 1');
expect(err.message).toContain('No input provided via stdin.');
console.log('Error message:', err.message);
}
const lastRequest = rig.readLastApiRequest();
expect(lastRequest).toBeNull();
// If this test times out, runs indefinitely, it's a regression.
}, 3000);
});

View File

@@ -4,13 +4,14 @@
* SPDX-License-Identifier: Apache-2.0
*/
import { execSync, spawn } from 'child_process';
import { execSync, spawn } from 'node:child_process';
import { parse } from 'shell-quote';
import { mkdirSync, writeFileSync, readFileSync } from 'fs';
import { join, dirname } from 'path';
import { fileURLToPath } from 'url';
import { env } from 'process';
import fs from 'fs';
import { mkdirSync, writeFileSync, readFileSync } from 'node:fs';
import { join, dirname } from 'node:path';
import { fileURLToPath } from 'node:url';
import { env } from 'node:process';
import fs from 'node:fs';
import { EOL } from 'node:os';
const __dirname = dirname(fileURLToPath(import.meta.url));
@@ -93,7 +94,9 @@ export function validateModelOutput(
if (missingContent.length > 0) {
console.warn(
`Warning: LLM did not include expected content in response: ${missingContent.join(', ')}.`,
`Warning: LLM did not include expected content in response: ${missingContent.join(
', ',
)}.`,
'This is not ideal but not a test failure.',
);
console.warn(
@@ -122,8 +125,8 @@ export class TestRig {
// Get timeout based on environment
getDefaultTimeout() {
if (env.CI) return 60000; // 1 minute in CI
if (env.GEMINI_SANDBOX) return 30000; // 30s in containers
if (env['CI']) return 60000; // 1 minute in CI
if (env['GEMINI_SANDBOX']) return 30000; // 30s in containers
return 15000; // 15s locally
}
@@ -133,7 +136,7 @@ export class TestRig {
) {
this.testName = testName;
const sanitizedName = sanitizeTestName(testName);
this.testDir = join(env.INTEGRATION_TEST_FILE_DIR!, sanitizedName);
this.testDir = join(env['INTEGRATION_TEST_FILE_DIR']!, sanitizedName);
mkdirSync(this.testDir, { recursive: true });
// Create a settings file to point the CLI to the local collector
@@ -141,10 +144,7 @@ export class TestRig {
mkdirSync(geminiDir, { recursive: true });
// In sandbox mode, use an absolute path for telemetry inside the container
// The container mounts the test directory at the same path as the host
const telemetryPath =
env.GEMINI_SANDBOX && env.GEMINI_SANDBOX !== 'false'
? join(this.testDir, 'telemetry.log') // Absolute path in test directory
: env.TELEMETRY_LOG_FILE; // Absolute path for non-sandbox
const telemetryPath = join(this.testDir, 'telemetry.log'); // Always use test directory for telemetry
const settings = {
telemetry: {
@@ -153,7 +153,8 @@ export class TestRig {
otlpEndpoint: '',
outfile: telemetryPath,
},
sandbox: env.GEMINI_SANDBOX !== 'false' ? env.GEMINI_SANDBOX : false,
sandbox:
env['GEMINI_SANDBOX'] !== 'false' ? env['GEMINI_SANDBOX'] : false,
...options.settings, // Allow tests to override/add settings
};
writeFileSync(
@@ -178,7 +179,9 @@ export class TestRig {
}
run(
promptOrOptions: string | { prompt?: string; stdin?: string },
promptOrOptions:
| string
| { prompt?: string; stdin?: string; stdinDoesNotEnd?: boolean },
...args: string[]
): Promise<string> {
let command = `node ${this.bundlePath} --yolo`;
@@ -222,18 +225,25 @@ export class TestRig {
if (execOptions.input) {
child.stdin!.write(execOptions.input);
}
if (
typeof promptOrOptions === 'object' &&
!promptOrOptions.stdinDoesNotEnd
) {
child.stdin!.end();
}
child.stdin!.end();
child.stdout!.on('data', (data: Buffer) => {
stdout += data;
if (env.KEEP_OUTPUT === 'true' || env.VERBOSE === 'true') {
if (env['KEEP_OUTPUT'] === 'true' || env['VERBOSE'] === 'true') {
process.stdout.write(data);
}
});
child.stderr!.on('data', (data: Buffer) => {
stderr += data;
if (env.KEEP_OUTPUT === 'true' || env.VERBOSE === 'true') {
if (env['KEEP_OUTPUT'] === 'true' || env['VERBOSE'] === 'true') {
process.stderr.write(data);
}
});
@@ -247,10 +257,10 @@ export class TestRig {
// Filter out telemetry output when running with Podman
// Podman seems to output telemetry to stdout even when writing to file
let result = stdout;
if (env.GEMINI_SANDBOX === 'podman') {
if (env['GEMINI_SANDBOX'] === 'podman') {
// Remove telemetry JSON objects from output
// They are multi-line JSON objects that start with { and contain telemetry fields
const lines = result.split('\n');
const lines = result.split(EOL);
const filteredLines = [];
let inTelemetryObject = false;
let braceDepth = 0;
@@ -299,7 +309,7 @@ export class TestRig {
readFile(fileName: string) {
const filePath = join(this.testDir!, fileName);
const content = readFileSync(filePath, 'utf-8');
if (env.KEEP_OUTPUT === 'true' || env.VERBOSE === 'true') {
if (env['KEEP_OUTPUT'] === 'true' || env['VERBOSE'] === 'true') {
console.log(`--- FILE: ${filePath} ---`);
console.log(content);
console.log(`--- END FILE: ${filePath} ---`);
@@ -309,12 +319,12 @@ export class TestRig {
async cleanup() {
// Clean up test directory
if (this.testDir && !env.KEEP_OUTPUT) {
if (this.testDir && !env['KEEP_OUTPUT']) {
try {
execSync(`rm -rf ${this.testDir}`);
} catch (error) {
// Ignore cleanup errors
if (env.VERBOSE === 'true') {
if (env['VERBOSE'] === 'true') {
console.warn('Cleanup warning:', (error as Error).message);
}
}
@@ -322,11 +332,8 @@ export class TestRig {
}
async waitForTelemetryReady() {
// In sandbox mode, telemetry is written to a relative path in the test directory
const logFilePath =
env.GEMINI_SANDBOX && env.GEMINI_SANDBOX !== 'false'
? join(this.testDir!, 'telemetry.log')
: env.TELEMETRY_LOG_FILE;
// Telemetry is always written to the test directory
const logFilePath = join(this.testDir!, 'telemetry.log');
if (!logFilePath) return;
@@ -347,6 +354,52 @@ export class TestRig {
);
}
async waitForTelemetryEvent(eventName: string, timeout?: number) {
if (!timeout) {
timeout = this.getDefaultTimeout();
}
await this.waitForTelemetryReady();
return this.poll(
() => {
const logFilePath = join(this.testDir!, 'telemetry.log');
if (!logFilePath || !fs.existsSync(logFilePath)) {
return false;
}
const content = readFileSync(logFilePath, 'utf-8');
const jsonObjects = content
.split(/}\n{/)
.map((obj, index, array) => {
// Add back the braces we removed during split
if (index > 0) obj = '{' + obj;
if (index < array.length - 1) obj = obj + '}';
return obj.trim();
})
.filter((obj) => obj);
for (const jsonStr of jsonObjects) {
try {
const logData = JSON.parse(jsonStr);
if (
logData.attributes &&
logData.attributes['event.name'] === `gemini_cli.${eventName}`
) {
return true;
}
} catch {
// ignore
}
}
return false;
},
timeout,
100,
);
}
async waitForToolCall(toolName: string, timeout?: number) {
// Use environment-specific timeout
if (!timeout) {
@@ -397,7 +450,7 @@ export class TestRig {
while (Date.now() - startTime < timeout) {
attempts++;
const result = predicate();
if (env.VERBOSE === 'true' && attempts % 5 === 0) {
if (env['VERBOSE'] === 'true' && attempts % 5 === 0) {
console.log(
`Poll attempt ${attempts}: ${result ? 'success' : 'waiting...'}`,
);
@@ -407,7 +460,7 @@ export class TestRig {
}
await new Promise((resolve) => setTimeout(resolve, interval));
}
if (env.VERBOSE === 'true') {
if (env['VERBOSE'] === 'true') {
console.log(`Poll timed out after ${attempts} attempts`);
}
return false;
@@ -468,7 +521,7 @@ export class TestRig {
// If no matches found with the simple pattern, try the JSON parsing approach
// in case the format changes
if (logs.length === 0) {
const lines = stdout.split('\n');
const lines = stdout.split(EOL);
let currentObject = '';
let inObject = false;
let braceDepth = 0;
@@ -540,7 +593,7 @@ export class TestRig {
readToolLogs() {
// For Podman, first check if telemetry file exists and has content
// If not, fall back to parsing from stdout
if (env.GEMINI_SANDBOX === 'podman') {
if (env['GEMINI_SANDBOX'] === 'podman') {
// Try reading from file first
const logFilePath = join(this.testDir!, 'telemetry.log');
@@ -566,11 +619,8 @@ export class TestRig {
}
}
// In sandbox mode, telemetry is written to a relative path in the test directory
const logFilePath =
env.GEMINI_SANDBOX && env.GEMINI_SANDBOX !== 'false'
? join(this.testDir!, 'telemetry.log')
: env.TELEMETRY_LOG_FILE;
// Telemetry is always written to the test directory
const logFilePath = join(this.testDir!, 'telemetry.log');
if (!logFilePath) {
console.warn(`TELEMETRY_LOG_FILE environment variable not set`);
@@ -587,7 +637,7 @@ export class TestRig {
// Split the content into individual JSON objects
// They are separated by "}\n{"
const jsonObjects = content
.split(/}\s*\n\s*{/)
.split(/}\n{/)
.map((obj, index, array) => {
// Add back the braces we removed during split
if (index > 0) obj = '{' + obj;
@@ -625,15 +675,48 @@ export class TestRig {
}
} catch (e) {
// Skip objects that aren't valid JSON
if (env.VERBOSE === 'true') {
console.error(
'Failed to parse telemetry object:',
(e as Error).message,
);
if (env['VERBOSE'] === 'true') {
console.error('Failed to parse telemetry object:', e);
}
}
}
return logs;
}
readLastApiRequest(): Record<string, unknown> | null {
// Telemetry is always written to the test directory
const logFilePath = join(this.testDir!, 'telemetry.log');
if (!logFilePath || !fs.existsSync(logFilePath)) {
return null;
}
const content = readFileSync(logFilePath, 'utf-8');
const jsonObjects = content
.split(/}\n{/)
.map((obj, index, array) => {
if (index > 0) obj = '{' + obj;
if (index < array.length - 1) obj = obj + '}';
return obj.trim();
})
.filter((obj) => obj);
let lastApiRequest = null;
for (const jsonStr of jsonObjects) {
try {
const logData = JSON.parse(jsonStr);
if (
logData.attributes &&
logData.attributes['event.name'] === 'gemini_cli.api_request'
) {
lastApiRequest = logData;
}
} catch {
// ignore
}
}
return lastApiRequest;
}
}
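The rig treats telemetry.log as a stream of concatenated pretty-printed JSON objects: it splits on the "}\n{" boundary, restores the braces, and parses each record individually. A minimal standalone sketch of that technique, assuming the same log format (the helper name is illustrative, not part of the rig):

import { readFileSync } from 'node:fs';

// Parse a telemetry.log that contains back-to-back JSON objects with no separators.
function parseTelemetryLog(logFilePath: string): Array<Record<string, unknown>> {
  const content = readFileSync(logFilePath, 'utf-8');
  return content
    .split(/}\n{/)
    .map((obj, index, array) => {
      // Restore the braces that the split consumed.
      if (index > 0) obj = '{' + obj;
      if (index < array.length - 1) obj = obj + '}';
      return obj.trim();
    })
    .filter((obj) => obj)
    .flatMap((jsonStr) => {
      try {
        return [JSON.parse(jsonStr) as Record<string, unknown>];
      } catch {
        return []; // skip partial writes and non-JSON noise
      }
    });
}

// Example: find the most recent api_request event, mirroring readLastApiRequest().
const events = parseTelemetryLog('telemetry.log');
const apiRequests = events.filter((event) => {
  const attributes = event['attributes'] as { 'event.name'?: string } | undefined;
  return attributes?.['event.name'] === 'gemini_cli.api_request';
});
const lastApiRequest = apiRequests[apiRequests.length - 1] ?? null;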

View File

@@ -4,5 +4,6 @@
"noEmit": true,
"allowJs": true
},
"include": ["**/*.ts"]
"include": ["**/*.ts"],
"references": [{ "path": "../packages/core" }]
}

package-lock.json (generated, 2442 changed lines): file diff suppressed because it is too large

View File

@@ -1,6 +1,6 @@
{
"name": "@qwen-code/qwen-code",
"version": "0.0.10",
"version": "0.0.15-nightly.6",
"engines": {
"node": ">=20.0.0"
},
@@ -13,7 +13,7 @@
"url": "git+https://github.com/QwenLM/qwen-code.git"
},
"config": {
"sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.0.10"
"sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.0.15-nightly.6"
},
"scripts": {
"start": "node scripts/start.js",
@@ -62,7 +62,6 @@
],
"devDependencies": {
"@types/marked": "^5.0.2",
"@types/micromatch": "^4.0.9",
"@types/mime-types": "^3.0.1",
"@types/minimatch": "^5.1.2",
"@types/mock-fs": "^4.13.4",
@@ -70,6 +69,7 @@
"@types/shell-quote": "^1.7.5",
"@types/uuid": "^10.0.0",
"@vitest/coverage-v8": "^3.1.1",
"@vitest/eslint-plugin": "^1.3.4",
"concurrently": "^9.2.0",
"cross-env": "^7.0.3",
"esbuild": "^0.25.0",
@@ -95,7 +95,8 @@
"yargs": "^17.7.2"
},
"dependencies": {
"node-fetch": "^3.3.2",
"@lvce-editor/ripgrep": "^1.6.0",
"simple-git": "^3.28.0",
"strip-ansi": "^7.1.0"
},
"optionalDependencies": {

View File

@@ -8,9 +8,18 @@
import './src/gemini.js';
import { main } from './src/gemini.js';
import { FatalError } from '@qwen-code/qwen-code-core';
// --- Global Entry Point ---
main().catch((error) => {
if (error instanceof FatalError) {
let errorMessage = error.message;
if (!process.env['NO_COLOR']) {
errorMessage = `\x1b[31m${errorMessage}\x1b[0m`;
}
console.error(errorMessage);
process.exit(error.exitCode);
}
console.error('An unexpected critical error occurred:');
if (error instanceof Error) {
console.error(error.stack);

View File

@@ -1,6 +1,6 @@
{
"name": "@qwen-code/qwen-code",
"version": "0.0.10",
"version": "0.0.15-nightly.6",
"description": "Qwen Code",
"repository": {
"type": "git",
@@ -25,7 +25,7 @@
"dist"
],
"config": {
"sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.0.10"
"sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.0.15-nightly.6"
},
"dependencies": {
"@google/genai": "1.9.0",
@@ -36,20 +36,21 @@
"command-exists": "^1.2.9",
"diff": "^7.0.0",
"dotenv": "^17.1.0",
"fzf": "^0.5.2",
"glob": "^10.4.1",
"highlight.js": "^11.11.1",
"ink": "^6.1.1",
"ink-big-text": "^2.0.0",
"ink": "^6.2.3",
"ink-gradient": "^3.0.0",
"ink-link": "^4.1.0",
"ink-select-input": "^6.2.0",
"ink-spinner": "^5.0.0",
"lodash-es": "^4.17.21",
"lowlight": "^3.3.0",
"mime-types": "^3.0.1",
"open": "^10.1.2",
"qrcode-terminal": "^0.12.0",
"react": "^19.1.0",
"read-package-up": "^11.0.0",
"simple-git": "^3.28.0",
"shell-quote": "^1.8.3",
"string-width": "^7.1.0",
"strip-ansi": "^7.1.0",
@@ -61,10 +62,12 @@
},
"devDependencies": {
"@babel/runtime": "^7.27.6",
"@google/gemini-cli-test-utils": "file:../test-utils",
"@testing-library/react": "^16.3.0",
"@types/command-exists": "^1.2.3",
"@types/diff": "^7.0.2",
"@types/dotenv": "^6.1.1",
"@types/lodash-es": "^4.17.12",
"@types/node": "^20.11.24",
"@types/react": "^19.1.8",
"@types/react-dom": "^19.1.6",

View File

@@ -0,0 +1,32 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import type { CommandModule } from 'yargs';
import { installCommand } from './extensions/install.js';
import { uninstallCommand } from './extensions/uninstall.js';
import { listCommand } from './extensions/list.js';
import { updateCommand } from './extensions/update.js';
import { disableCommand } from './extensions/disable.js';
import { enableCommand } from './extensions/enable.js';
export const extensionsCommand: CommandModule = {
command: 'extensions <command>',
describe: 'Manage Qwen Code extensions.',
builder: (yargs) =>
yargs
.command(installCommand)
.command(uninstallCommand)
.command(listCommand)
.command(updateCommand)
.command(disableCommand)
.command(enableCommand)
.demandCommand(1, 'You need at least one command before continuing.')
.version(false),
handler: () => {
// This handler is not called when a subcommand is provided.
// Yargs will show the help menu.
},
};
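As in the install command test later in this diff, the whole subcommand group can be exercised through a throwaway yargs parser; a rough sketch, assuming the same test setup (the relative import path is illustrative):

import yargs from 'yargs';
import { extensionsCommand } from './extensions.js';

// Validation-only parser, mirroring the pattern used in install.test.ts below.
const parser = yargs([]).locale('en').command(extensionsCommand).fail(false);

try {
  // A bare `extensions` invocation is rejected: the <command> positional is
  // required and the builder also calls demandCommand(1, ...).
  parser.parse('extensions');
} catch (e) {
  console.error((e as Error).message);
}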

View File

@@ -0,0 +1,51 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { type CommandModule } from 'yargs';
import { disableExtension } from '../../config/extension.js';
import { SettingScope } from '../../config/settings.js';
import { getErrorMessage } from '../../utils/errors.js';
interface DisableArgs {
name: string;
scope: SettingScope;
}
export async function handleDisable(args: DisableArgs) {
try {
disableExtension(args.name, args.scope);
console.log(
`Extension "${args.name}" successfully disabled for scope "${args.scope}".`,
);
} catch (error) {
console.error(getErrorMessage(error));
process.exit(1);
}
}
export const disableCommand: CommandModule = {
command: 'disable [--scope] <name>',
describe: 'Disables an extension.',
builder: (yargs) =>
yargs
.positional('name', {
describe: 'The name of the extension to disable.',
type: 'string',
})
.option('scope', {
describe: 'The scope to disable the extension in.',
type: 'string',
default: SettingScope.User,
choices: [SettingScope.User, SettingScope.Workspace],
})
.check((_argv) => true),
handler: async (argv) => {
await handleDisable({
name: argv['name'] as string,
scope: argv['scope'] as SettingScope,
});
},
};

View File

@@ -0,0 +1,59 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { type CommandModule } from 'yargs';
import { FatalConfigError, getErrorMessage } from '@qwen-code/qwen-code-core';
import { enableExtension } from '../../config/extension.js';
import { SettingScope } from '../../config/settings.js';
interface EnableArgs {
name: string;
scope?: SettingScope;
}
export async function handleEnable(args: EnableArgs) {
try {
const scopes = args.scope
? [args.scope]
: [SettingScope.User, SettingScope.Workspace];
enableExtension(args.name, scopes);
if (args.scope) {
console.log(
`Extension "${args.name}" successfully enabled for scope "${args.scope}".`,
);
} else {
console.log(
`Extension "${args.name}" successfully enabled in all scopes.`,
);
}
} catch (error) {
throw new FatalConfigError(getErrorMessage(error));
}
}
export const enableCommand: CommandModule = {
command: 'enable [--scope] <name>',
describe: 'Enables an extension.',
builder: (yargs) =>
yargs
.positional('name', {
describe: 'The name of the extension to enable.',
type: 'string',
})
.option('scope', {
describe:
'The scope to enable the extension in. If not set, will be enabled in all scopes.',
type: 'string',
choices: [SettingScope.User, SettingScope.Workspace],
})
.check((_argv) => true),
handler: async (argv) => {
await handleEnable({
name: argv['name'] as string,
scope: argv['scope'] as SettingScope,
});
},
};

View File

@@ -0,0 +1,31 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { describe, it, expect } from 'vitest';
import { installCommand } from './install.js';
import yargs from 'yargs';
describe('extensions install command', () => {
it('should fail if no source is provided', () => {
const validationParser = yargs([])
.locale('en')
.command(installCommand)
.fail(false);
expect(() => validationParser.parse('install')).toThrow(
'Either a git URL --source or a --path must be provided.',
);
});
it('should fail if both git source and local path are provided', () => {
const validationParser = yargs([])
.locale('en')
.command(installCommand)
.fail(false);
expect(() =>
validationParser.parse('install --source some-url --path /some/path'),
).toThrow('Arguments source and path are mutually exclusive');
});
});

View File

@@ -0,0 +1,64 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import type { CommandModule } from 'yargs';
import {
installExtension,
type ExtensionInstallMetadata,
} from '../../config/extension.js';
import { getErrorMessage } from '../../utils/errors.js';
interface InstallArgs {
source?: string;
path?: string;
}
export async function handleInstall(args: InstallArgs) {
try {
const installMetadata: ExtensionInstallMetadata = {
source: (args.source || args.path) as string,
type: args.source ? 'git' : 'local',
};
const extensionName = await installExtension(installMetadata);
console.log(
`Extension "${extensionName}" installed successfully and enabled.`,
);
} catch (error) {
console.error(getErrorMessage(error));
process.exit(1);
}
}
export const installCommand: CommandModule = {
command: 'install [--source | --path ]',
describe: 'Installs an extension from a git repository or a local path.',
builder: (yargs) =>
yargs
.option('source', {
describe: 'The git URL of the extension to install.',
type: 'string',
})
.option('path', {
describe: 'Path to a local extension directory.',
type: 'string',
})
.conflicts('source', 'path')
.check((argv) => {
if (!argv.source && !argv.path) {
throw new Error(
'Either a git URL --source or a --path must be provided.',
);
}
return true;
}),
handler: async (argv) => {
await handleInstall({
source: argv['source'] as string | undefined,
path: argv['path'] as string | undefined,
});
},
};

View File

@@ -0,0 +1,36 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import type { CommandModule } from 'yargs';
import { loadUserExtensions, toOutputString } from '../../config/extension.js';
import { getErrorMessage } from '../../utils/errors.js';
export async function handleList() {
try {
const extensions = loadUserExtensions();
if (extensions.length === 0) {
console.log('No extensions installed.');
return;
}
console.log(
extensions
.map((extension, _): string => toOutputString(extension))
.join('\n\n'),
);
} catch (error) {
console.error(getErrorMessage(error));
process.exit(1);
}
}
export const listCommand: CommandModule = {
command: 'list',
describe: 'Lists installed extensions.',
builder: (yargs) => yargs,
handler: async () => {
await handleList();
},
};

View File

@@ -0,0 +1,21 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { describe, it, expect } from 'vitest';
import { uninstallCommand } from './uninstall.js';
import yargs from 'yargs';
describe('extensions uninstall command', () => {
it('should fail if no source is provided', () => {
const validationParser = yargs([])
.locale('en')
.command(uninstallCommand)
.fail(false);
expect(() => validationParser.parse('uninstall')).toThrow(
'Not enough non-option arguments: got 0, need at least 1',
);
});
});

View File

@@ -0,0 +1,47 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import type { CommandModule } from 'yargs';
import { uninstallExtension } from '../../config/extension.js';
import { getErrorMessage } from '../../utils/errors.js';
interface UninstallArgs {
name: string;
}
export async function handleUninstall(args: UninstallArgs) {
try {
await uninstallExtension(args.name);
console.log(`Extension "${args.name}" successfully uninstalled.`);
} catch (error) {
console.error(getErrorMessage(error));
process.exit(1);
}
}
export const uninstallCommand: CommandModule = {
command: 'uninstall <name>',
describe: 'Uninstalls an extension.',
builder: (yargs) =>
yargs
.positional('name', {
describe: 'The name of the extension to uninstall.',
type: 'string',
})
.check((argv) => {
if (!argv.name) {
throw new Error(
'Please include the name of the extension to uninstall as a positional argument.',
);
}
return true;
}),
handler: async (argv) => {
await handleUninstall({
name: argv['name'] as string,
});
},
};

View File

@@ -0,0 +1,47 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import type { CommandModule } from 'yargs';
import { updateExtension } from '../../config/extension.js';
import { getErrorMessage } from '../../utils/errors.js';
interface UpdateArgs {
name: string;
}
export async function handleUpdate(args: UpdateArgs) {
try {
// TODO(chrstnb): we should list extensions if the requested extension is not installed.
const updatedExtensionInfo = await updateExtension(args.name);
if (!updatedExtensionInfo) {
console.log(`Extension "${args.name}" failed to update.`);
return;
}
console.log(
`Extension "${args.name}" successfully updated: ${updatedExtensionInfo.originalVersion} → ${updatedExtensionInfo.updatedVersion}.`,
);
} catch (error) {
console.error(getErrorMessage(error));
process.exit(1);
}
}
export const updateCommand: CommandModule = {
command: 'update <name>',
describe: 'Updates an extension.',
builder: (yargs) =>
yargs
.positional('name', {
describe: 'The name of the extension to update.',
type: 'string',
})
.check((_argv) => true),
handler: async (argv) => {
await handleUpdate({
name: argv['name'] as string,
});
},
};

View File

@@ -7,7 +7,7 @@
// File for 'gemini mcp add' command
import type { CommandModule } from 'yargs';
import { loadSettings, SettingScope } from '../../config/settings.js';
import { MCPServerConfig } from '@qwen-code/qwen-code-core';
import type { MCPServerConfig } from '@qwen-code/qwen-code-core';
async function addMcpServer(
name: string,

View File

@@ -11,9 +11,27 @@ import { loadExtensions } from '../../config/extension.js';
import { createTransport } from '@qwen-code/qwen-code-core';
import { Client } from '@modelcontextprotocol/sdk/client/index.js';
vi.mock('../../config/settings.js');
vi.mock('../../config/extension.js');
vi.mock('@qwen-code/qwen-code-core');
vi.mock('../../config/settings.js', () => ({
loadSettings: vi.fn(),
}));
vi.mock('../../config/extension.js', () => ({
loadExtensions: vi.fn(),
}));
vi.mock('@qwen-code/qwen-code-core', () => ({
createTransport: vi.fn(),
MCPServerStatus: {
CONNECTED: 'CONNECTED',
CONNECTING: 'CONNECTING',
DISCONNECTED: 'DISCONNECTED',
},
Storage: vi.fn().mockImplementation((_cwd: string) => ({
getGlobalSettingsPath: () => '/tmp/qwen/settings.json',
getWorkspaceSettingsPath: () => '/tmp/qwen/workspace-settings.json',
getProjectTempDir: () => '/test/home/.qwen/tmp/mocked_hash',
})),
GEMINI_CONFIG_DIR: '.qwen',
getErrorMessage: (e: unknown) => (e instanceof Error ? e.message : String(e)),
}));
vi.mock('@modelcontextprotocol/sdk/client/index.js');
const mockedLoadSettings = loadSettings as vi.Mock;

View File

@@ -7,11 +7,8 @@
// File for 'gemini mcp list' command
import type { CommandModule } from 'yargs';
import { loadSettings } from '../../config/settings.js';
import {
MCPServerConfig,
MCPServerStatus,
createTransport,
} from '@qwen-code/qwen-code-core';
import type { MCPServerConfig } from '@qwen-code/qwen-code-core';
import { MCPServerStatus, createTransport } from '@qwen-code/qwen-code-core';
import { Client } from '@modelcontextprotocol/sdk/client/index.js';
import { loadExtensions } from '../../config/extension.js';

View File

@@ -5,14 +5,14 @@
*/
import { describe, it, expect, beforeEach, vi, afterEach } from 'vitest';
import * as fs from 'fs';
import * as path from 'path';
import { tmpdir } from 'os';
import {
Config,
import * as fs from 'node:fs';
import * as path from 'node:path';
import { tmpdir } from 'node:os';
import type {
ConfigParameters,
ContentGeneratorConfig,
} from '@qwen-code/qwen-code-core';
import { Config } from '@qwen-code/qwen-code-core';
import { http, HttpResponse } from 'msw';
import { setupServer } from 'msw/node';
@@ -269,7 +269,7 @@ describe('Configuration Integration Tests', () => {
parseArguments = parseArgs;
});
it('should parse --approval-mode=auto_edit correctly through the full argument parsing flow', async () => {
it('should parse --approval-mode=auto-edit correctly through the full argument parsing flow', async () => {
const originalArgv = process.argv;
try {
@@ -277,15 +277,38 @@ describe('Configuration Integration Tests', () => {
'node',
'script.js',
'--approval-mode',
'auto_edit',
'auto-edit',
'-p',
'test',
];
const argv = await parseArguments();
const argv = await parseArguments({} as Settings);
// Verify that the argument was parsed correctly
expect(argv.approvalMode).toBe('auto_edit');
expect(argv.approvalMode).toBe('auto-edit');
expect(argv.prompt).toBe('test');
expect(argv.yolo).toBe(false);
} finally {
process.argv = originalArgv;
}
});
it('should parse --approval-mode=plan correctly through the full argument parsing flow', async () => {
const originalArgv = process.argv;
try {
process.argv = [
'node',
'script.js',
'--approval-mode',
'plan',
'-p',
'test',
];
const argv = await parseArguments({} as Settings);
expect(argv.approvalMode).toBe('plan');
expect(argv.prompt).toBe('test');
expect(argv.yolo).toBe(false);
} finally {
@@ -306,7 +329,7 @@ describe('Configuration Integration Tests', () => {
'test',
];
const argv = await parseArguments();
const argv = await parseArguments({} as Settings);
expect(argv.approvalMode).toBe('yolo');
expect(argv.prompt).toBe('test');
@@ -329,7 +352,7 @@ describe('Configuration Integration Tests', () => {
'test',
];
const argv = await parseArguments();
const argv = await parseArguments({} as Settings);
expect(argv.approvalMode).toBe('default');
expect(argv.prompt).toBe('test');
@@ -345,7 +368,7 @@ describe('Configuration Integration Tests', () => {
try {
process.argv = ['node', 'script.js', '--yolo', '-p', 'test'];
const argv = await parseArguments();
const argv = await parseArguments({} as Settings);
expect(argv.yolo).toBe(true);
expect(argv.approvalMode).toBeUndefined(); // Should NOT be set when using --yolo
@@ -362,7 +385,7 @@ describe('Configuration Integration Tests', () => {
process.argv = ['node', 'script.js', '--approval-mode', 'invalid_mode'];
// Should throw during argument parsing due to yargs validation
await expect(parseArguments()).rejects.toThrow();
await expect(parseArguments({} as Settings)).rejects.toThrow();
} finally {
process.argv = originalArgv;
}
@@ -381,7 +404,7 @@ describe('Configuration Integration Tests', () => {
];
// Should throw during argument parsing due to conflict validation
await expect(parseArguments()).rejects.toThrow();
await expect(parseArguments({} as Settings)).rejects.toThrow();
} finally {
process.argv = originalArgv;
}
@@ -394,7 +417,7 @@ describe('Configuration Integration Tests', () => {
// Test that no approval mode arguments defaults to no flags set
process.argv = ['node', 'script.js', '-p', 'test'];
const argv = await parseArguments();
const argv = await parseArguments({} as Settings);
expect(argv.approvalMode).toBeUndefined();
expect(argv.yolo).toBe(false);

File diff suppressed because it is too large

packages/cli/src/config/config.ts (277 changed lines, Normal file → Executable file)
View File

@@ -4,37 +4,41 @@
* SPDX-License-Identifier: Apache-2.0
*/
import * as fs from 'fs';
import * as path from 'path';
import { homedir } from 'node:os';
import yargs from 'yargs/yargs';
import { hideBin } from 'yargs/helpers';
import process from 'node:process';
import { mcpCommand } from '../commands/mcp.js';
import type {
ConfigParameters,
FileFilteringOptions,
MCPServerConfig,
TelemetryTarget,
} from '@qwen-code/qwen-code-core';
import {
ApprovalMode,
Config,
DEFAULT_GEMINI_EMBEDDING_MODEL,
DEFAULT_GEMINI_MODEL,
DEFAULT_MEMORY_FILE_FILTERING_OPTIONS,
EditTool,
FileDiscoveryService,
getCurrentGeminiMdFilename,
loadServerHierarchicalMemory,
setGeminiMdFilename as setServerGeminiMdFilename,
getCurrentGeminiMdFilename,
ApprovalMode,
DEFAULT_GEMINI_MODEL,
DEFAULT_GEMINI_EMBEDDING_MODEL,
DEFAULT_MEMORY_FILE_FILTERING_OPTIONS,
FileDiscoveryService,
TelemetryTarget,
FileFilteringOptions,
ShellTool,
EditTool,
WriteFileTool,
MCPServerConfig,
ConfigParameters,
} from '@qwen-code/qwen-code-core';
import { Settings } from './settings.js';
import * as fs from 'node:fs';
import { homedir } from 'node:os';
import * as path from 'node:path';
import process from 'node:process';
import { hideBin } from 'yargs/helpers';
import yargs from 'yargs/yargs';
import { extensionsCommand } from '../commands/extensions.js';
import { mcpCommand } from '../commands/mcp.js';
import type { Settings } from './settings.js';
import { Extension, annotateActiveExtensions } from './extension.js';
import { getCliVersion } from '../utils/version.js';
import { loadSandboxConfig } from './sandboxConfig.js';
import { resolvePath } from '../utils/resolvePath.js';
import { getCliVersion } from '../utils/version.js';
import type { Extension } from './extension.js';
import { annotateActiveExtensions } from './extension.js';
import { loadSandboxConfig } from './sandboxConfig.js';
import { isWorkspaceTrusted } from './trustedFolders.js';
@@ -48,6 +52,39 @@ const logger = {
error: (...args: any[]) => console.error('[ERROR]', ...args),
};
const VALID_APPROVAL_MODE_VALUES = [
'plan',
'default',
'auto-edit',
'yolo',
] as const;
function formatApprovalModeError(value: string): Error {
return new Error(
`Invalid approval mode: ${value}. Valid values are: ${VALID_APPROVAL_MODE_VALUES.join(
', ',
)}`,
);
}
function parseApprovalModeValue(value: string): ApprovalMode {
const normalized = value.trim().toLowerCase();
switch (normalized) {
case 'plan':
return ApprovalMode.PLAN;
case 'default':
return ApprovalMode.DEFAULT;
case 'yolo':
return ApprovalMode.YOLO;
case 'auto_edit':
case 'autoedit':
case 'auto-edit':
return ApprovalMode.AUTO_EDIT;
default:
throw formatApprovalModeError(value);
}
}
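For illustration, the normalization above accepts the legacy snake_case spelling found in settings files alongside the new kebab-case flag value; a small sketch of the resulting behaviour, assuming the helpers defined above:

// All of these resolve to ApprovalMode.AUTO_EDIT:
parseApprovalModeValue('auto-edit');
parseApprovalModeValue('AUTO_EDIT'); // trimmed and lower-cased before matching
parseApprovalModeValue('autoedit');

// Unknown values throw the formatted error listing the valid choices:
// "Invalid approval mode: invalid_mode. Valid values are: plan, default, auto-edit, yolo"
try {
  parseApprovalModeValue('invalid_mode');
} catch (e) {
  console.error((e as Error).message);
}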
export interface CliArgs {
model: string | undefined;
sandbox: boolean | string | undefined;
@@ -56,9 +93,7 @@ export interface CliArgs {
prompt: string | undefined;
promptInteractive: string | undefined;
allFiles: boolean | undefined;
all_files: boolean | undefined;
showMemoryUsage: boolean | undefined;
show_memory_usage: boolean | undefined;
yolo: boolean | undefined;
approvalMode: string | undefined;
telemetry: boolean | undefined;
@@ -69,6 +104,7 @@ export interface CliArgs {
telemetryLogPrompts: boolean | undefined;
telemetryOutfile: string | undefined;
allowedMcpServerNames: string[] | undefined;
allowedTools: string[] | undefined;
experimentalAcp: boolean | undefined;
extensions: string[] | undefined;
listExtensions: boolean | undefined;
@@ -78,9 +114,11 @@ export interface CliArgs {
proxy: string | undefined;
includeDirectories: string[] | undefined;
tavilyApiKey: string | undefined;
screenReader: boolean | undefined;
vlmSwitchMode: string | undefined;
}
export async function parseArguments(): Promise<CliArgs> {
export async function parseArguments(settings: Settings): Promise<CliArgs> {
const yargsInstance = yargs(hideBin(process.argv))
// Set locale to English for consistent output, especially in tests
.locale('en')
@@ -128,29 +166,11 @@ export async function parseArguments(): Promise<CliArgs> {
description: 'Include ALL files in context?',
default: false,
})
.option('all_files', {
type: 'boolean',
description: 'Include ALL files in context?',
default: false,
})
.deprecateOption(
'all_files',
'Use --all-files instead. We will be removing --all_files in the coming weeks.',
)
.option('show-memory-usage', {
type: 'boolean',
description: 'Show memory usage in status bar',
default: false,
})
.option('show_memory_usage', {
type: 'boolean',
description: 'Show memory usage in status bar',
default: false,
})
.deprecateOption(
'show_memory_usage',
'Use --show-memory-usage instead. We will be removing --show_memory_usage in the coming weeks.',
)
.option('yolo', {
alias: 'y',
type: 'boolean',
@@ -160,9 +180,9 @@ export async function parseArguments(): Promise<CliArgs> {
})
.option('approval-mode', {
type: 'string',
choices: ['default', 'auto_edit', 'yolo'],
choices: ['plan', 'default', 'auto-edit', 'yolo'],
description:
'Set the approval mode: default (prompt for approval), auto_edit (auto-approve edit tools), yolo (auto-approve all tools)',
'Set the approval mode: plan (plan only), default (prompt for approval), auto-edit (auto-approve edit tools), yolo (auto-approve all tools)',
})
.option('telemetry', {
type: 'boolean',
@@ -210,6 +230,11 @@ export async function parseArguments(): Promise<CliArgs> {
string: true,
description: 'Allowed MCP server names',
})
.option('allowed-tools', {
type: 'array',
string: true,
description: 'Tools that are allowed to run without confirmation',
})
.option('extensions', {
alias: 'e',
type: 'array',
@@ -253,7 +278,18 @@ export async function parseArguments(): Promise<CliArgs> {
type: 'string',
description: 'Tavily API key for web search functionality',
})
.option('screen-reader', {
type: 'boolean',
description: 'Enable screen reader mode for accessibility.',
default: false,
})
.option('vlm-switch-mode', {
type: 'string',
choices: ['once', 'session', 'persist'],
description:
'Default behavior when images are detected in input. Values: once (one-time switch), session (switch for entire session), persist (continue with current model). Overrides settings files.',
default: process.env['VLM_SWITCH_MODE'],
})
.check((argv) => {
if (argv.prompt && argv['promptInteractive']) {
throw new Error(
@@ -269,7 +305,13 @@ export async function parseArguments(): Promise<CliArgs> {
}),
)
// Register MCP subcommands
.command(mcpCommand)
.command(mcpCommand);
if (settings?.experimental?.extensionManagement ?? false) {
yargsInstance.command(extensionsCommand);
}
yargsInstance
.version(await getCliVersion()) // This will enable the --version flag based on package.json
.alias('v', 'version')
.help()
@@ -282,7 +324,10 @@ export async function parseArguments(): Promise<CliArgs> {
// Handle case where MCP subcommands are executed - they should exit the process
// and not return to main CLI logic
if (result._.length > 0 && result._[0] === 'mcp') {
if (
result._.length > 0 &&
(result._[0] === 'mcp' || result._[0] === 'extensions')
) {
// MCP commands handle their own execution and process exit
process.exit(0);
}
@@ -329,7 +374,7 @@ export async function loadHierarchicalGeminiMemory(
extensionContextFilePaths,
memoryImportFormat,
fileFilteringOptions,
settings.memoryDiscoveryMaxDirs,
settings.context?.discoveryMaxDirs,
);
}
@@ -346,18 +391,20 @@ export async function loadCliConfig(
(v) => v === 'true' || v === '1',
) ||
false;
const memoryImportFormat = settings.memoryImportFormat || 'tree';
const memoryImportFormat = settings.context?.importFormat || 'tree';
const ideMode = settings.ideMode ?? false;
const ideMode = settings.ide?.enabled ?? false;
const folderTrustFeature = settings.folderTrustFeature ?? false;
const folderTrustSetting = settings.folderTrust ?? true;
const folderTrustFeature =
settings.security?.folderTrust?.featureEnabled ?? false;
const folderTrustSetting = settings.security?.folderTrust?.enabled ?? true;
const folderTrust = folderTrustFeature && folderTrustSetting;
const trustedFolder = isWorkspaceTrusted(settings);
const allExtensions = annotateActiveExtensions(
extensions,
argv.extensions || [],
cwd,
);
const activeExtensions = extensions.filter(
@@ -382,8 +429,8 @@ export async function loadCliConfig(
// TODO(b/343434939): This is a bit of a hack. The contextFileName should ideally be passed
// directly to the Config constructor in core, and have core handle setGeminiMdFilename.
// However, loadHierarchicalGeminiMemory is called *before* createServerConfig.
if (settings.contextFileName) {
setServerGeminiMdFilename(settings.contextFileName);
if (settings.context?.fileName) {
setServerGeminiMdFilename(settings.context.fileName);
} else {
// Reset to default if not provided in settings.
setServerGeminiMdFilename(getCurrentGeminiMdFilename());
@@ -397,17 +444,19 @@ export async function loadCliConfig(
const fileFiltering = {
...DEFAULT_MEMORY_FILE_FILTERING_OPTIONS,
...settings.fileFiltering,
...settings.context?.fileFiltering,
};
const includeDirectories = (settings.includeDirectories || [])
const includeDirectories = (settings.context?.includeDirectories || [])
.map(resolvePath)
.concat((argv.includeDirectories || []).map(resolvePath));
// Call the (now wrapper) loadHierarchicalGeminiMemory which calls the server's version
const { memoryContent, fileCount } = await loadHierarchicalGeminiMemory(
cwd,
settings.loadMemoryFromIncludeDirectories ? includeDirectories : [],
settings.context?.loadMemoryFromIncludeDirectories
? includeDirectories
: [],
debugMode,
fileService,
settings,
@@ -422,26 +471,25 @@ export async function loadCliConfig(
// Determine approval mode with backward compatibility
let approvalMode: ApprovalMode;
if (argv.approvalMode) {
// New --approval-mode flag takes precedence
switch (argv.approvalMode) {
case 'yolo':
approvalMode = ApprovalMode.YOLO;
break;
case 'auto_edit':
approvalMode = ApprovalMode.AUTO_EDIT;
break;
case 'default':
approvalMode = ApprovalMode.DEFAULT;
break;
default:
throw new Error(
`Invalid approval mode: ${argv.approvalMode}. Valid values are: yolo, auto_edit, default`,
);
}
approvalMode = parseApprovalModeValue(argv.approvalMode);
} else if (argv.yolo) {
approvalMode = ApprovalMode.YOLO;
} else if (settings.approvalMode) {
approvalMode = parseApprovalModeValue(settings.approvalMode);
} else {
// Fallback to legacy --yolo flag behavior
approvalMode =
argv.yolo || false ? ApprovalMode.YOLO : ApprovalMode.DEFAULT;
approvalMode = ApprovalMode.DEFAULT;
}
// Force approval mode to default if the folder is not trusted.
if (
!trustedFolder &&
approvalMode !== ApprovalMode.DEFAULT &&
approvalMode !== ApprovalMode.PLAN
) {
logger.warn(
`Approval mode overridden to "default" because the current folder is not trusted.`,
);
approvalMode = ApprovalMode.DEFAULT;
}
const interactive =
@@ -450,6 +498,7 @@ export async function loadCliConfig(
const extraExcludes: string[] = [];
if (!interactive && !argv.experimentalAcp) {
switch (approvalMode) {
case ApprovalMode.PLAN:
case ApprovalMode.DEFAULT:
// In default non-interactive mode, all tools that require approval are excluded.
extraExcludes.push(ShellTool.Name, EditTool.Name, WriteFileTool.Name);
@@ -475,16 +524,16 @@ export async function loadCliConfig(
const blockedMcpServers: Array<{ name: string; extensionName: string }> = [];
if (!argv.allowedMcpServerNames) {
if (settings.allowMCPServers) {
if (settings.mcp?.allowed) {
mcpServers = allowedMcpServers(
mcpServers,
settings.allowMCPServers,
settings.mcp.allowed,
blockedMcpServers,
);
}
if (settings.excludeMCPServers) {
const excludedNames = new Set(settings.excludeMCPServers.filter(Boolean));
if (settings.mcp?.excluded) {
const excludedNames = new Set(settings.mcp.excluded.filter(Boolean));
if (excludedNames.size > 0) {
mcpServers = Object.fromEntries(
Object.entries(mcpServers).filter(([key]) => !excludedNames.has(key)),
@@ -504,6 +553,13 @@ export async function loadCliConfig(
const sandboxConfig = await loadSandboxConfig(settings, argv);
const cliVersion = await getCliVersion();
const screenReader =
argv.screenReader !== undefined
? argv.screenReader
: (settings.ui?.accessibility?.screenReader ?? false);
const vlmSwitchMode =
argv.vlmSwitchMode || settings.experimental?.vlmSwitchMode;
return new Config({
sessionId,
embeddingModel: DEFAULT_GEMINI_EMBEDDING_MODEL,
@@ -511,25 +567,26 @@ export async function loadCliConfig(
targetDir: cwd,
includeDirectories,
loadMemoryFromIncludeDirectories:
settings.loadMemoryFromIncludeDirectories || false,
settings.context?.loadMemoryFromIncludeDirectories || false,
debugMode,
question,
fullContext: argv.allFiles || argv.all_files || false,
coreTools: settings.coreTools || undefined,
fullContext: argv.allFiles || false,
coreTools: settings.tools?.core || undefined,
allowedTools: argv.allowedTools || settings.tools?.allowed || undefined,
excludeTools,
toolDiscoveryCommand: settings.toolDiscoveryCommand,
toolCallCommand: settings.toolCallCommand,
mcpServerCommand: settings.mcpServerCommand,
toolDiscoveryCommand: settings.tools?.discoveryCommand,
toolCallCommand: settings.tools?.callCommand,
mcpServerCommand: settings.mcp?.serverCommand,
mcpServers,
userMemory: memoryContent,
geminiMdFileCount: fileCount,
approvalMode,
showMemoryUsage:
argv.showMemoryUsage ||
argv.show_memory_usage ||
settings.showMemoryUsage ||
false,
accessibility: settings.accessibility,
argv.showMemoryUsage || settings.ui?.showMemoryUsage || false,
accessibility: {
...settings.ui?.accessibility,
screenReader,
},
telemetry: {
enabled: argv.telemetry ?? settings.telemetry?.enabled,
target: (argv.telemetryTarget ??
@@ -546,15 +603,17 @@ export async function loadCliConfig(
logPrompts: argv.telemetryLogPrompts ?? settings.telemetry?.logPrompts,
outfile: argv.telemetryOutfile ?? settings.telemetry?.outfile,
},
usageStatisticsEnabled: settings.usageStatisticsEnabled ?? true,
usageStatisticsEnabled: settings.privacy?.usageStatisticsEnabled ?? true,
// Git-aware file filtering settings
fileFiltering: {
respectGitIgnore: settings.fileFiltering?.respectGitIgnore,
respectGeminiIgnore: settings.fileFiltering?.respectGeminiIgnore,
respectGitIgnore: settings.context?.fileFiltering?.respectGitIgnore,
respectGeminiIgnore: settings.context?.fileFiltering?.respectGeminiIgnore,
enableRecursiveFileSearch:
settings.fileFiltering?.enableRecursiveFileSearch,
settings.context?.fileFiltering?.enableRecursiveFileSearch,
disableFuzzySearch: settings.context?.fileFiltering?.disableFuzzySearch,
},
checkpointing: argv.checkpointing || settings.checkpointing?.enabled,
checkpointing:
argv.checkpointing || settings.general?.checkpointing?.enabled,
proxy:
argv.proxy ||
process.env['HTTPS_PROXY'] ||
@@ -563,18 +622,16 @@ export async function loadCliConfig(
process.env['http_proxy'],
cwd,
fileDiscoveryService: fileService,
bugCommand: settings.bugCommand,
model: argv.model || settings.model || DEFAULT_GEMINI_MODEL,
bugCommand: settings.advanced?.bugCommand,
model: argv.model || settings.model?.name || DEFAULT_GEMINI_MODEL,
extensionContextFilePaths,
maxSessionTurns: settings.maxSessionTurns ?? -1,
sessionTokenLimit: settings.sessionTokenLimit ?? -1,
maxSessionTurns: settings.model?.maxSessionTurns ?? -1,
experimentalZedIntegration: argv.experimentalAcp || false,
listExtensions: argv.listExtensions || false,
extensions: allExtensions,
blockedMcpServers,
noBrowser: !!process.env['NO_BROWSER'],
summarizeToolOutput: settings.summarizeToolOutput,
ideMode,
enableOpenAILogging:
(typeof argv.openaiLogging === 'undefined'
? settings.enableOpenAILogging
@@ -590,20 +647,26 @@ export async function loadCliConfig(
'SYSTEM_TEMPLATE:{"name":"qwen3_coder","params":{"is_git_repository":{RUNTIME_VARS_IS_GIT_REPO},"sandbox":"{RUNTIME_VARS_SANDBOX}"}}',
},
]) as ConfigParameters['systemPromptMappings'],
authType: settings.selectedAuthType,
authType: settings.security?.auth?.selectedType,
contentGenerator: settings.contentGenerator,
cliVersion,
tavilyApiKey:
argv.tavilyApiKey ||
settings.tavilyApiKey ||
process.env['TAVILY_API_KEY'],
chatCompression: settings.chatCompression,
summarizeToolOutput: settings.model?.summarizeToolOutput,
ideMode,
chatCompression: settings.model?.chatCompression,
folderTrustFeature,
folderTrust,
interactive,
trustedFolder,
shouldUseNodePtyShell: settings.shouldUseNodePtyShell,
skipNextSpeakerCheck: settings.skipNextSpeakerCheck,
useRipgrep: settings.tools?.useRipgrep,
shouldUseNodePtyShell: settings.tools?.usePty,
skipNextSpeakerCheck: settings.model?.skipNextSpeakerCheck,
enablePromptCompletion: settings.general?.enablePromptCompletion ?? false,
skipLoopDetection: settings.skipLoopDetection ?? false,
vlmSwitchMode,
});
}
@@ -665,7 +728,7 @@ function mergeExcludeTools(
extraExcludes?: string[] | undefined,
): string[] {
const allExcludeTools = new Set([
...(settings.excludeTools || []),
...(settings.tools?.exclude || []),
...(extraExcludes || []),
]);
for (const extension of extensions) {
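Most of the churn in this file comes from flat settings keys moving to a nested schema. A placeholder sketch of the shape implied by the reads above (field names are taken from this diff; the optional types are inferred from usage, not from the settings module itself):

// Sketch only; not the actual Settings type exported by ./settings.js.
interface NestedSettingsSketch {
  general?: { checkpointing?: { enabled?: boolean }; enablePromptCompletion?: boolean };
  ui?: { showMemoryUsage?: boolean; accessibility?: { screenReader?: boolean } };
  context?: {
    fileName?: string;
    importFormat?: string;
    includeDirectories?: string[];
    loadMemoryFromIncludeDirectories?: boolean;
    discoveryMaxDirs?: number;
    fileFiltering?: {
      respectGitIgnore?: boolean;
      respectGeminiIgnore?: boolean;
      enableRecursiveFileSearch?: boolean;
      disableFuzzySearch?: boolean;
    };
  };
  tools?: {
    core?: string[];
    allowed?: string[];
    exclude?: string[];
    discoveryCommand?: string;
    callCommand?: string;
    useRipgrep?: boolean;
    usePty?: boolean;
  };
  mcp?: { allowed?: string[]; excluded?: string[]; serverCommand?: string };
  model?: {
    name?: string;
    maxSessionTurns?: number;
    chatCompression?: unknown;
    summarizeToolOutput?: unknown;
    skipNextSpeakerCheck?: boolean;
  };
  security?: {
    folderTrust?: { featureEnabled?: boolean; enabled?: boolean };
    auth?: { selectedType?: string };
  };
  privacy?: { usageStatisticsEnabled?: boolean };
  advanced?: { bugCommand?: unknown };
  ide?: { enabled?: boolean };
  experimental?: {
    extensionManagement?: boolean;
    vlmSwitchMode?: 'once' | 'session' | 'persist';
  };
}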

View File

@@ -5,24 +5,52 @@
*/
import { vi } from 'vitest';
import * as fs from 'fs';
import * as os from 'os';
import * as path from 'path';
import * as fs from 'node:fs';
import * as os from 'node:os';
import * as path from 'node:path';
import {
EXTENSIONS_CONFIG_FILENAME,
EXTENSIONS_DIRECTORY_NAME,
INSTALL_METADATA_FILENAME,
annotateActiveExtensions,
disableExtension,
enableExtension,
installExtension,
loadExtension,
loadExtensions,
performWorkspaceExtensionMigration,
uninstallExtension,
updateExtension,
} from './extension.js';
import {
type GeminiCLIExtension,
type MCPServerConfig,
} from '@qwen-code/qwen-code-core';
import { execSync } from 'node:child_process';
import { SettingScope, loadSettings } from './settings.js';
import { type SimpleGit, simpleGit } from 'simple-git';
vi.mock('simple-git', () => ({
simpleGit: vi.fn(),
}));
vi.mock('os', async (importOriginal) => {
const os = await importOriginal<typeof import('os')>();
const os = await importOriginal<typeof import('node:os')>();
return {
...os,
homedir: vi.fn(),
};
});
vi.mock('child_process', async (importOriginal) => {
const actual = await importOriginal<typeof import('child_process')>();
return {
...actual,
execSync: vi.fn(),
};
});
const EXTENSIONS_DIRECTORY_NAME = path.join('.qwen', 'extensions');
describe('loadExtensions', () => {
let tempWorkspaceDir: string;
let tempHomeDir: string;
@@ -40,56 +68,7 @@ describe('loadExtensions', () => {
afterEach(() => {
fs.rmSync(tempWorkspaceDir, { recursive: true, force: true });
fs.rmSync(tempHomeDir, { recursive: true, force: true });
});
it('should include extension path in loaded extension', () => {
const workspaceExtensionsDir = path.join(
tempWorkspaceDir,
EXTENSIONS_DIRECTORY_NAME,
);
fs.mkdirSync(workspaceExtensionsDir, { recursive: true });
const extensionDir = path.join(workspaceExtensionsDir, 'test-extension');
fs.mkdirSync(extensionDir, { recursive: true });
const config = {
name: 'test-extension',
version: '1.0.0',
};
fs.writeFileSync(
path.join(extensionDir, EXTENSIONS_CONFIG_FILENAME),
JSON.stringify(config),
);
const extensions = loadExtensions(tempWorkspaceDir);
expect(extensions).toHaveLength(1);
expect(extensions[0].path).toBe(extensionDir);
expect(extensions[0].config.name).toBe('test-extension');
});
it('should include extension path in loaded extension', () => {
const workspaceExtensionsDir = path.join(
tempWorkspaceDir,
EXTENSIONS_DIRECTORY_NAME,
);
fs.mkdirSync(workspaceExtensionsDir, { recursive: true });
const extensionDir = path.join(workspaceExtensionsDir, 'test-extension');
fs.mkdirSync(extensionDir, { recursive: true });
const config = {
name: 'test-extension',
version: '1.0.0',
};
fs.writeFileSync(
path.join(extensionDir, EXTENSIONS_CONFIG_FILENAME),
JSON.stringify(config),
);
const extensions = loadExtensions(tempWorkspaceDir);
expect(extensions).toHaveLength(1);
expect(extensions[0].path).toBe(extensionDir);
expect(extensions[0].config.name).toBe('test-extension');
vi.restoreAllMocks();
});
it('should include extension path in loaded extension', () => {
@@ -159,26 +138,101 @@ describe('loadExtensions', () => {
path.join(workspaceExtensionsDir, 'ext1', 'my-context-file.md'),
]);
});
it('should filter out disabled extensions', () => {
const workspaceExtensionsDir = path.join(
tempWorkspaceDir,
EXTENSIONS_DIRECTORY_NAME,
);
fs.mkdirSync(workspaceExtensionsDir, { recursive: true });
createExtension(workspaceExtensionsDir, 'ext1', '1.0.0');
createExtension(workspaceExtensionsDir, 'ext2', '2.0.0');
const settingsDir = path.join(tempWorkspaceDir, '.qwen');
fs.mkdirSync(settingsDir, { recursive: true });
fs.writeFileSync(
path.join(settingsDir, 'settings.json'),
JSON.stringify({ extensions: { disabled: ['ext1'] } }),
);
const extensions = loadExtensions(tempWorkspaceDir);
const activeExtensions = annotateActiveExtensions(
extensions,
[],
tempWorkspaceDir,
).filter((e) => e.isActive);
expect(activeExtensions).toHaveLength(1);
expect(activeExtensions[0].name).toBe('ext2');
});
it('should hydrate variables', () => {
const workspaceExtensionsDir = path.join(
tempWorkspaceDir,
EXTENSIONS_DIRECTORY_NAME,
);
fs.mkdirSync(workspaceExtensionsDir, { recursive: true });
createExtension(
workspaceExtensionsDir,
'test-extension',
'1.0.0',
false,
undefined,
{
'test-server': {
cwd: '${extensionPath}${/}server',
},
},
);
const extensions = loadExtensions(tempWorkspaceDir);
expect(extensions).toHaveLength(1);
const loadedConfig = extensions[0].config;
const expectedCwd = path.join(
workspaceExtensionsDir,
'test-extension',
'server',
);
expect(loadedConfig.mcpServers?.['test-server'].cwd).toBe(expectedCwd);
});
});
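The hydration test above relies on the ${extensionPath} and ${/} placeholders in the extension config being substituted when the extension is loaded. A minimal sketch of that substitution, covering only the two variables the test exercises (the helper name is hypothetical, not the module's actual export):

import * as path from 'node:path';

// Hypothetical helper: replace the placeholders exercised by the test above.
function hydrateExtensionString(value: string, extensionPath: string): string {
  return value
    .replaceAll('${extensionPath}', extensionPath)
    .replaceAll('${/}', path.sep);
}

// '${extensionPath}${/}server' becomes the platform-specific join of the
// extension directory (example path below) and 'server', matching the test.
const cwd = hydrateExtensionString(
  '${extensionPath}${/}server',
  '/home/user/.qwen/extensions/test-extension',
);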
describe('annotateActiveExtensions', () => {
const extensions = [
{ config: { name: 'ext1', version: '1.0.0' }, contextFiles: [] },
{ config: { name: 'ext2', version: '1.0.0' }, contextFiles: [] },
{ config: { name: 'ext3', version: '1.0.0' }, contextFiles: [] },
{
path: '/path/to/ext1',
config: { name: 'ext1', version: '1.0.0' },
contextFiles: [],
},
{
path: '/path/to/ext2',
config: { name: 'ext2', version: '1.0.0' },
contextFiles: [],
},
{
path: '/path/to/ext3',
config: { name: 'ext3', version: '1.0.0' },
contextFiles: [],
},
];
it('should mark all extensions as active if no enabled extensions are provided', () => {
const activeExtensions = annotateActiveExtensions(extensions, []);
const activeExtensions = annotateActiveExtensions(
extensions,
[],
'/path/to/workspace',
);
expect(activeExtensions).toHaveLength(3);
expect(activeExtensions.every((e) => e.isActive)).toBe(true);
});
it('should mark only the enabled extensions as active', () => {
const activeExtensions = annotateActiveExtensions(extensions, [
'ext1',
'ext3',
]);
const activeExtensions = annotateActiveExtensions(
extensions,
['ext1', 'ext3'],
'/path/to/workspace',
);
expect(activeExtensions).toHaveLength(3);
expect(activeExtensions.find((e) => e.name === 'ext1')?.isActive).toBe(
true,
@@ -192,13 +246,21 @@ describe('annotateActiveExtensions', () => {
});
it('should mark all extensions as inactive when "none" is provided', () => {
const activeExtensions = annotateActiveExtensions(extensions, ['none']);
const activeExtensions = annotateActiveExtensions(
extensions,
['none'],
'/path/to/workspace',
);
expect(activeExtensions).toHaveLength(3);
expect(activeExtensions.every((e) => !e.isActive)).toBe(true);
});
it('should handle case-insensitivity', () => {
const activeExtensions = annotateActiveExtensions(extensions, ['EXT1']);
const activeExtensions = annotateActiveExtensions(
extensions,
['EXT1'],
'/path/to/workspace',
);
expect(activeExtensions.find((e) => e.name === 'ext1')?.isActive).toBe(
true,
);
@@ -206,24 +268,258 @@ describe('annotateActiveExtensions', () => {
it('should log an error for unknown extensions', () => {
const consoleSpy = vi.spyOn(console, 'error').mockImplementation(() => {});
annotateActiveExtensions(extensions, ['ext4']);
annotateActiveExtensions(extensions, ['ext4'], '/path/to/workspace');
expect(consoleSpy).toHaveBeenCalledWith('Extension not found: ext4');
consoleSpy.mockRestore();
});
});
describe('installExtension', () => {
let tempHomeDir: string;
let userExtensionsDir: string;
beforeEach(() => {
tempHomeDir = fs.mkdtempSync(
path.join(os.tmpdir(), 'qwen-code-test-home-'),
);
vi.mocked(os.homedir).mockReturnValue(tempHomeDir);
userExtensionsDir = path.join(tempHomeDir, '.qwen', 'extensions');
// Clean up before each test
fs.rmSync(userExtensionsDir, { recursive: true, force: true });
fs.mkdirSync(userExtensionsDir, { recursive: true });
vi.mocked(execSync).mockClear();
});
afterEach(() => {
fs.rmSync(tempHomeDir, { recursive: true, force: true });
});
it('should install an extension from a local path', async () => {
const sourceExtDir = createExtension(
tempHomeDir,
'my-local-extension',
'1.0.0',
);
const targetExtDir = path.join(userExtensionsDir, 'my-local-extension');
const metadataPath = path.join(targetExtDir, INSTALL_METADATA_FILENAME);
await installExtension({ source: sourceExtDir, type: 'local' });
expect(fs.existsSync(targetExtDir)).toBe(true);
expect(fs.existsSync(metadataPath)).toBe(true);
const metadata = JSON.parse(fs.readFileSync(metadataPath, 'utf-8'));
expect(metadata).toEqual({
source: sourceExtDir,
type: 'local',
});
fs.rmSync(targetExtDir, { recursive: true, force: true });
});
it('should throw an error if the extension already exists', async () => {
const sourceExtDir = createExtension(
tempHomeDir,
'my-local-extension',
'1.0.0',
);
await installExtension({ source: sourceExtDir, type: 'local' });
await expect(
installExtension({ source: sourceExtDir, type: 'local' }),
).rejects.toThrow(
'Extension "my-local-extension" is already installed. Please uninstall it first.',
);
});
it('should throw an error and cleanup if qwen-extension.json is missing', async () => {
const sourceExtDir = path.join(tempHomeDir, 'bad-extension');
fs.mkdirSync(sourceExtDir, { recursive: true });
await expect(
installExtension({ source: sourceExtDir, type: 'local' }),
).rejects.toThrow(
`Invalid extension at ${sourceExtDir}. Please make sure it has a valid qwen-extension.json file.`,
);
const targetExtDir = path.join(userExtensionsDir, 'bad-extension');
expect(fs.existsSync(targetExtDir)).toBe(false);
});
it('should install an extension from a git URL', async () => {
const gitUrl = 'https://github.com/google/qwen-extensions.git';
const extensionName = 'qwen-extensions';
const targetExtDir = path.join(userExtensionsDir, extensionName);
const metadataPath = path.join(targetExtDir, INSTALL_METADATA_FILENAME);
const clone = vi.fn().mockImplementation(async (_, destination) => {
fs.mkdirSync(destination, { recursive: true });
fs.writeFileSync(
path.join(destination, EXTENSIONS_CONFIG_FILENAME),
JSON.stringify({ name: extensionName, version: '1.0.0' }),
);
});
const mockedSimpleGit = simpleGit as vi.MockedFunction<typeof simpleGit>;
mockedSimpleGit.mockReturnValue({ clone } as unknown as SimpleGit);
await installExtension({ source: gitUrl, type: 'git' });
expect(fs.existsSync(targetExtDir)).toBe(true);
expect(fs.existsSync(metadataPath)).toBe(true);
const metadata = JSON.parse(fs.readFileSync(metadataPath, 'utf-8'));
expect(metadata).toEqual({
source: gitUrl,
type: 'git',
});
fs.rmSync(targetExtDir, { recursive: true, force: true });
});
});
describe('uninstallExtension', () => {
let tempHomeDir: string;
let userExtensionsDir: string;
beforeEach(() => {
tempHomeDir = fs.mkdtempSync(
path.join(os.tmpdir(), 'qwen-code-test-home-'),
);
vi.mocked(os.homedir).mockReturnValue(tempHomeDir);
userExtensionsDir = path.join(tempHomeDir, '.qwen', 'extensions');
// Clean up before each test
fs.rmSync(userExtensionsDir, { recursive: true, force: true });
fs.mkdirSync(userExtensionsDir, { recursive: true });
vi.mocked(execSync).mockClear();
});
afterEach(() => {
fs.rmSync(tempHomeDir, { recursive: true, force: true });
});
it('should uninstall an extension by name', async () => {
const sourceExtDir = createExtension(
userExtensionsDir,
'my-local-extension',
'1.0.0',
);
await uninstallExtension('my-local-extension');
expect(fs.existsSync(sourceExtDir)).toBe(false);
});
it('should uninstall an extension by name and retain existing extensions', async () => {
const sourceExtDir = createExtension(
userExtensionsDir,
'my-local-extension',
'1.0.0',
);
const otherExtDir = createExtension(
userExtensionsDir,
'other-extension',
'1.0.0',
);
await uninstallExtension('my-local-extension');
expect(fs.existsSync(sourceExtDir)).toBe(false);
expect(loadExtensions(tempHomeDir)).toHaveLength(1);
expect(fs.existsSync(otherExtDir)).toBe(true);
});
it('should throw an error if the extension does not exist', async () => {
await expect(uninstallExtension('nonexistent-extension')).rejects.toThrow(
'Extension "nonexistent-extension" not found.',
);
});
});
describe('performWorkspaceExtensionMigration', () => {
let tempWorkspaceDir: string;
let tempHomeDir: string;
beforeEach(() => {
tempWorkspaceDir = fs.mkdtempSync(
path.join(os.tmpdir(), 'qwen-code-test-workspace-'),
);
tempHomeDir = fs.mkdtempSync(
path.join(os.tmpdir(), 'qwen-code-test-home-'),
);
vi.mocked(os.homedir).mockReturnValue(tempHomeDir);
});
afterEach(() => {
fs.rmSync(tempWorkspaceDir, { recursive: true, force: true });
fs.rmSync(tempHomeDir, { recursive: true, force: true });
vi.restoreAllMocks();
});
it('should install the extensions in the user directory', async () => {
const workspaceExtensionsDir = path.join(
tempWorkspaceDir,
EXTENSIONS_DIRECTORY_NAME,
);
fs.mkdirSync(workspaceExtensionsDir, { recursive: true });
const ext1Path = createExtension(workspaceExtensionsDir, 'ext1', '1.0.0');
const ext2Path = createExtension(workspaceExtensionsDir, 'ext2', '1.0.0');
const extensionsToMigrate = [
loadExtension(ext1Path)!,
loadExtension(ext2Path)!,
];
const failed =
await performWorkspaceExtensionMigration(extensionsToMigrate);
expect(failed).toEqual([]);
const userExtensionsDir = path.join(tempHomeDir, '.qwen', 'extensions');
const userExt1Path = path.join(userExtensionsDir, 'ext1');
const extensions = loadExtensions(tempWorkspaceDir);
expect(extensions).toHaveLength(2);
const metadataPath = path.join(userExt1Path, INSTALL_METADATA_FILENAME);
expect(fs.existsSync(metadataPath)).toBe(true);
const metadata = JSON.parse(fs.readFileSync(metadataPath, 'utf-8'));
expect(metadata).toEqual({
source: ext1Path,
type: 'local',
});
});
it('should return the names of failed installations', async () => {
const workspaceExtensionsDir = path.join(
tempWorkspaceDir,
EXTENSIONS_DIRECTORY_NAME,
);
fs.mkdirSync(workspaceExtensionsDir, { recursive: true });
const ext1Path = createExtension(workspaceExtensionsDir, 'ext1', '1.0.0');
const extensions = [
loadExtension(ext1Path)!,
{
path: '/ext/path/1',
config: { name: 'ext2', version: '1.0.0' },
contextFiles: [],
},
];
const failed = await performWorkspaceExtensionMigration(extensions);
expect(failed).toEqual(['ext2']);
});
});
function createExtension(
extensionsDir: string,
name: string,
version: string,
addContextFile = false,
contextFileName?: string,
): void {
mcpServers?: Record<string, MCPServerConfig>,
): string {
const extDir = path.join(extensionsDir, name);
fs.mkdirSync(extDir);
fs.mkdirSync(extDir, { recursive: true });
fs.writeFileSync(
path.join(extDir, EXTENSIONS_CONFIG_FILENAME),
JSON.stringify({ name, version, contextFileName }),
JSON.stringify({ name, version, contextFileName, mcpServers }),
);
if (addContextFile) {
@@ -233,4 +529,193 @@ function createExtension(
if (contextFileName) {
fs.writeFileSync(path.join(extDir, contextFileName), 'context');
}
return extDir;
}
describe('updateExtension', () => {
let tempHomeDir: string;
let userExtensionsDir: string;
beforeEach(() => {
tempHomeDir = fs.mkdtempSync(
path.join(os.tmpdir(), 'qwen-code-test-home-'),
);
vi.mocked(os.homedir).mockReturnValue(tempHomeDir);
userExtensionsDir = path.join(tempHomeDir, '.qwen', 'extensions');
// Clean up before each test
fs.rmSync(userExtensionsDir, { recursive: true, force: true });
fs.mkdirSync(userExtensionsDir, { recursive: true });
vi.mocked(execSync).mockClear();
});
afterEach(() => {
fs.rmSync(tempHomeDir, { recursive: true, force: true });
});
it('should update a git-installed extension', async () => {
// 1. "Install" an extension
const gitUrl = 'https://github.com/google/qwen-extensions.git';
const extensionName = 'qwen-extensions';
const targetExtDir = path.join(userExtensionsDir, extensionName);
const metadataPath = path.join(targetExtDir, INSTALL_METADATA_FILENAME);
// Create the "installed" extension directory and files
fs.mkdirSync(targetExtDir, { recursive: true });
fs.writeFileSync(
path.join(targetExtDir, EXTENSIONS_CONFIG_FILENAME),
JSON.stringify({ name: extensionName, version: '1.0.0' }),
);
fs.writeFileSync(
metadataPath,
JSON.stringify({ source: gitUrl, type: 'git' }),
);
// 2. Mock the git clone for the update
const clone = vi.fn().mockImplementation(async (_, destination) => {
fs.mkdirSync(destination, { recursive: true });
// This is the "updated" version
fs.writeFileSync(
path.join(destination, EXTENSIONS_CONFIG_FILENAME),
JSON.stringify({ name: extensionName, version: '1.1.0' }),
);
});
const mockedSimpleGit = simpleGit as vi.MockedFunction<typeof simpleGit>;
mockedSimpleGit.mockReturnValue({
clone,
} as unknown as SimpleGit);
// 3. Call updateExtension
const updateInfo = await updateExtension(extensionName);
// 4. Assertions
expect(updateInfo).toEqual({
originalVersion: '1.0.0',
updatedVersion: '1.1.0',
});
// Check that the config file reflects the new version
const updatedConfig = JSON.parse(
fs.readFileSync(
path.join(targetExtDir, EXTENSIONS_CONFIG_FILENAME),
'utf-8',
),
);
expect(updatedConfig.version).toBe('1.1.0');
});
});
describe('disableExtension', () => {
let tempWorkspaceDir: string;
let tempHomeDir: string;
beforeEach(() => {
tempWorkspaceDir = fs.mkdtempSync(
path.join(os.tmpdir(), 'qwen-code-test-workspace-'),
);
tempHomeDir = fs.mkdtempSync(
path.join(os.tmpdir(), 'qwen-code-test-home-'),
);
vi.mocked(os.homedir).mockReturnValue(tempHomeDir);
vi.spyOn(process, 'cwd').mockReturnValue(tempWorkspaceDir);
});
afterEach(() => {
fs.rmSync(tempWorkspaceDir, { recursive: true, force: true });
fs.rmSync(tempHomeDir, { recursive: true, force: true });
});
it('should disable an extension at the user scope', () => {
disableExtension('my-extension', SettingScope.User);
const settings = loadSettings(tempWorkspaceDir);
expect(
settings.forScope(SettingScope.User).settings.extensions?.disabled,
).toEqual(['my-extension']);
});
it('should disable an extension at the workspace scope', () => {
disableExtension('my-extension', SettingScope.Workspace);
const settings = loadSettings(tempWorkspaceDir);
expect(
settings.forScope(SettingScope.Workspace).settings.extensions?.disabled,
).toEqual(['my-extension']);
});
it('should handle disabling the same extension twice', () => {
disableExtension('my-extension', SettingScope.User);
disableExtension('my-extension', SettingScope.User);
const settings = loadSettings(tempWorkspaceDir);
expect(
settings.forScope(SettingScope.User).settings.extensions?.disabled,
).toEqual(['my-extension']);
});
it('should throw an error if you request system scope', () => {
expect(() => disableExtension('my-extension', SettingScope.System)).toThrow(
'System and SystemDefaults scopes are not supported.',
);
});
});
describe('enableExtension', () => {
let tempWorkspaceDir: string;
let tempHomeDir: string;
let userExtensionsDir: string;
beforeEach(() => {
tempWorkspaceDir = fs.mkdtempSync(
path.join(os.tmpdir(), 'qwen-code-test-workspace-'),
);
tempHomeDir = fs.mkdtempSync(
path.join(os.tmpdir(), 'qwen-code-test-home-'),
);
userExtensionsDir = path.join(tempHomeDir, '.qwen', 'extensions');
vi.mocked(os.homedir).mockReturnValue(tempHomeDir);
vi.spyOn(process, 'cwd').mockReturnValue(tempWorkspaceDir);
});
afterEach(() => {
fs.rmSync(tempWorkspaceDir, { recursive: true, force: true });
fs.rmSync(tempHomeDir, { recursive: true, force: true });
fs.rmSync(userExtensionsDir, { recursive: true, force: true });
});
afterAll(() => {
vi.restoreAllMocks();
});
const getActiveExtensions = (): GeminiCLIExtension[] => {
const extensions = loadExtensions(tempWorkspaceDir);
const activeExtensions = annotateActiveExtensions(
extensions,
[],
tempWorkspaceDir,
);
return activeExtensions.filter((e) => e.isActive);
};
it('should enable an extension at the user scope', () => {
createExtension(userExtensionsDir, 'ext1', '1.0.0');
disableExtension('ext1', SettingScope.User);
let activeExtensions = getActiveExtensions();
expect(activeExtensions).toHaveLength(0);
enableExtension('ext1', [SettingScope.User]);
activeExtensions = getActiveExtensions();
expect(activeExtensions).toHaveLength(1);
expect(activeExtensions[0].name).toBe('ext1');
});
it('should enable an extension at the workspace scope', () => {
createExtension(userExtensionsDir, 'ext1', '1.0.0');
disableExtension('ext1', SettingScope.Workspace);
let activeExtensions = getActiveExtensions();
expect(activeExtensions).toHaveLength(0);
enableExtension('ext1', [SettingScope.Workspace]);
activeExtensions = getActiveExtensions();
expect(activeExtensions).toHaveLength(1);
expect(activeExtensions[0].name).toBe('ext1');
});
});


@@ -4,19 +4,29 @@
* SPDX-License-Identifier: Apache-2.0
*/
import { MCPServerConfig, GeminiCLIExtension } from '@qwen-code/qwen-code-core';
import * as fs from 'fs';
import * as path from 'path';
import * as os from 'os';
import type {
MCPServerConfig,
GeminiCLIExtension,
} from '@qwen-code/qwen-code-core';
import { Storage } from '@qwen-code/qwen-code-core';
import * as fs from 'node:fs';
import * as path from 'node:path';
import * as os from 'node:os';
import { simpleGit } from 'simple-git';
import { SettingScope, loadSettings } from '../config/settings.js';
import { getErrorMessage } from '../utils/errors.js';
import { recursivelyHydrateStrings } from './extensions/variables.js';
export const EXTENSIONS_DIRECTORY_NAME = path.join('.qwen', 'extensions');
export const EXTENSIONS_CONFIG_FILENAME = 'qwen-extension.json';
export const EXTENSIONS_CONFIG_FILENAME_OLD = 'gemini-extension.json';
export const INSTALL_METADATA_FILENAME = '.qwen-extension-install.json';
export interface Extension {
path: string;
config: ExtensionConfig;
contextFiles: string[];
installMetadata?: ExtensionInstallMetadata | undefined;
}
export interface ExtensionConfig {
@@ -27,14 +37,101 @@ export interface ExtensionConfig {
excludeTools?: string[];
}
export interface ExtensionInstallMetadata {
source: string;
type: 'git' | 'local';
}
export interface ExtensionUpdateInfo {
originalVersion: string;
updatedVersion: string;
}
export class ExtensionStorage {
private readonly extensionName: string;
constructor(extensionName: string) {
this.extensionName = extensionName;
}
getExtensionDir(): string {
return path.join(
ExtensionStorage.getUserExtensionsDir(),
this.extensionName,
);
}
getConfigPath(): string {
return path.join(this.getExtensionDir(), EXTENSIONS_CONFIG_FILENAME);
}
static getUserExtensionsDir(): string {
const storage = new Storage(os.homedir());
return storage.getExtensionsDir();
}
static async createTmpDir(): Promise<string> {
return await fs.promises.mkdtemp(path.join(os.tmpdir(), 'qwen-extension'));
}
}
export function getWorkspaceExtensions(workspaceDir: string): Extension[] {
return loadExtensionsFromDir(workspaceDir);
}
async function copyExtension(
source: string,
destination: string,
): Promise<void> {
await fs.promises.cp(source, destination, { recursive: true });
}
export async function performWorkspaceExtensionMigration(
extensions: Extension[],
): Promise<string[]> {
const failedInstallNames: string[] = [];
for (const extension of extensions) {
try {
const installMetadata: ExtensionInstallMetadata = {
source: extension.path,
type: 'local',
};
await installExtension(installMetadata);
} catch (_) {
failedInstallNames.push(extension.config.name);
}
}
return failedInstallNames;
}
export function loadExtensions(workspaceDir: string): Extension[] {
const allExtensions = [
...loadExtensionsFromDir(workspaceDir),
...loadExtensionsFromDir(os.homedir()),
];
const settings = loadSettings(workspaceDir).merged;
const disabledExtensions = settings.extensions?.disabled ?? [];
const allExtensions = [...loadUserExtensions()];
if (!settings.experimental?.extensionManagement) {
allExtensions.push(...getWorkspaceExtensions(workspaceDir));
}
const uniqueExtensions = new Map<string, Extension>();
for (const extension of allExtensions) {
if (
!uniqueExtensions.has(extension.config.name) &&
!disabledExtensions.includes(extension.config.name)
) {
uniqueExtensions.set(extension.config.name, extension);
}
}
return Array.from(uniqueExtensions.values());
}
export function loadUserExtensions(): Extension[] {
const userExtensions = loadExtensionsFromDir(os.homedir());
const uniqueExtensions = new Map<string, Extension>();
for (const extension of userExtensions) {
if (!uniqueExtensions.has(extension.config.name)) {
uniqueExtensions.set(extension.config.name, extension);
}
@@ -43,8 +140,9 @@ export function loadExtensions(workspaceDir: string): Extension[] {
return Array.from(uniqueExtensions.values());
}
function loadExtensionsFromDir(dir: string): Extension[] {
const extensionsDir = path.join(dir, EXTENSIONS_DIRECTORY_NAME);
export function loadExtensionsFromDir(dir: string): Extension[] {
const storage = new Storage(dir);
const extensionsDir = storage.getExtensionsDir();
if (!fs.existsSync(extensionsDir)) {
return [];
}
@@ -61,7 +159,7 @@ function loadExtensionsFromDir(dir: string): Extension[] {
return extensions;
}
function loadExtension(extensionDir: string): Extension | null {
export function loadExtension(extensionDir: string): Extension | null {
if (!fs.statSync(extensionDir).isDirectory()) {
console.error(
`Warning: unexpected file ${extensionDir} in extensions directory.`,
@@ -86,7 +184,11 @@ function loadExtension(extensionDir: string): Extension | null {
try {
const configContent = fs.readFileSync(configFilePath, 'utf-8');
const config = JSON.parse(configContent) as ExtensionConfig;
const config = recursivelyHydrateStrings(JSON.parse(configContent), {
extensionPath: extensionDir,
'/': path.sep,
pathSeparator: path.sep,
}) as unknown as ExtensionConfig;
if (!config.name || !config.version) {
console.error(
`Invalid extension config in ${configFilePath}: missing name or version.`,
@@ -102,15 +204,31 @@ function loadExtension(extensionDir: string): Extension | null {
path: extensionDir,
config,
contextFiles,
installMetadata: loadInstallMetadata(extensionDir),
};
} catch (e) {
console.error(
`Warning: error parsing extension config in ${configFilePath}: ${e}`,
`Warning: error parsing extension config in ${configFilePath}: ${getErrorMessage(
e,
)}`,
);
return null;
}
}
function loadInstallMetadata(
extensionDir: string,
): ExtensionInstallMetadata | undefined {
const metadataFilePath = path.join(extensionDir, INSTALL_METADATA_FILENAME);
try {
const configContent = fs.readFileSync(metadataFilePath, 'utf-8');
const metadata = JSON.parse(configContent) as ExtensionInstallMetadata;
return metadata;
} catch (_e) {
return undefined;
}
}
function getContextFileNames(config: ExtensionConfig): string[] {
if (!config.contextFileName) {
return ['QWEN.md'];
@@ -120,17 +238,28 @@ function getContextFileNames(config: ExtensionConfig): string[] {
return config.contextFileName;
}
/**
* Returns an annotated list of extensions. If an extension is listed in enabledExtensionNames, it will be active.
* If enabledExtensionNames is empty, an extension is active unless it is in the list of disabled extensions in settings.
* @param extensions The base list of extensions.
* @param enabledExtensionNames The names of explicitly enabled extensions.
* @param workspaceDir The current workspace directory.
*/
export function annotateActiveExtensions(
extensions: Extension[],
enabledExtensionNames: string[],
workspaceDir: string,
): GeminiCLIExtension[] {
const settings = loadSettings(workspaceDir).merged;
const disabledExtensions = settings.extensions?.disabled ?? [];
const annotatedExtensions: GeminiCLIExtension[] = [];
if (enabledExtensionNames.length === 0) {
return extensions.map((extension) => ({
name: extension.config.name,
version: extension.config.version,
isActive: true,
isActive: !disabledExtensions.includes(extension.config.name),
path: extension.path,
}));
}
@@ -175,3 +304,230 @@ export function annotateActiveExtensions(
return annotatedExtensions;
}
/**
* Clones a Git repository to a specified local path.
* @param gitUrl The Git URL to clone.
* @param destination The destination path to clone the repository to.
*/
async function cloneFromGit(
gitUrl: string,
destination: string,
): Promise<void> {
try {
// TODO(chrstnb): Download the archive instead to avoid unnecessary .git info.
await simpleGit().clone(gitUrl, destination, ['--depth', '1']);
} catch (error) {
throw new Error(`Failed to clone Git repository from ${gitUrl}`, {
cause: error,
});
}
}
export async function installExtension(
installMetadata: ExtensionInstallMetadata,
cwd: string = process.cwd(),
): Promise<string> {
const extensionsDir = ExtensionStorage.getUserExtensionsDir();
await fs.promises.mkdir(extensionsDir, { recursive: true });
// Convert relative paths to absolute paths for the metadata file.
if (
installMetadata.type === 'local' &&
!path.isAbsolute(installMetadata.source)
) {
installMetadata.source = path.resolve(cwd, installMetadata.source);
}
let localSourcePath: string;
let tempDir: string | undefined;
if (installMetadata.type === 'git') {
tempDir = await ExtensionStorage.createTmpDir();
await cloneFromGit(installMetadata.source, tempDir);
localSourcePath = tempDir;
} else {
localSourcePath = installMetadata.source;
}
let newExtensionName: string | undefined;
try {
const newExtension = loadExtension(localSourcePath);
if (!newExtension) {
throw new Error(
`Invalid extension at ${installMetadata.source}. Please make sure it has a valid qwen-extension.json file.`,
);
}
// ~/.qwen/extensions/{ExtensionConfig.name}.
newExtensionName = newExtension.config.name;
const extensionStorage = new ExtensionStorage(newExtensionName);
const destinationPath = extensionStorage.getExtensionDir();
const installedExtensions = loadUserExtensions();
if (
installedExtensions.some(
(installed) => installed.config.name === newExtensionName,
)
) {
throw new Error(
`Extension "${newExtensionName}" is already installed. Please uninstall it first.`,
);
}
await copyExtension(localSourcePath, destinationPath);
const metadataString = JSON.stringify(installMetadata, null, 2);
const metadataPath = path.join(destinationPath, INSTALL_METADATA_FILENAME);
await fs.promises.writeFile(metadataPath, metadataString);
} finally {
if (tempDir) {
await fs.promises.rm(tempDir, { recursive: true, force: true });
}
}
return newExtensionName;
}
export async function uninstallExtension(
extensionName: string,
cwd: string = process.cwd(),
): Promise<void> {
const installedExtensions = loadUserExtensions();
if (
!installedExtensions.some(
(installed) => installed.config.name === extensionName,
)
) {
throw new Error(`Extension "${extensionName}" not found.`);
}
removeFromDisabledExtensions(
extensionName,
[SettingScope.User, SettingScope.Workspace],
cwd,
);
const storage = new ExtensionStorage(extensionName);
return await fs.promises.rm(storage.getExtensionDir(), {
recursive: true,
force: true,
});
}
export function toOutputString(extension: Extension): string {
let output = `${extension.config.name} (${extension.config.version})`;
output += `\n Path: ${extension.path}`;
if (extension.installMetadata) {
output += `\n Source: ${extension.installMetadata.source}`;
}
if (extension.contextFiles.length > 0) {
output += `\n Context files:`;
extension.contextFiles.forEach((contextFile) => {
output += `\n ${contextFile}`;
});
}
if (extension.config.mcpServers) {
output += `\n MCP servers:`;
Object.keys(extension.config.mcpServers).forEach((key) => {
output += `\n ${key}`;
});
}
if (extension.config.excludeTools) {
output += `\n Excluded tools:`;
extension.config.excludeTools.forEach((tool) => {
output += `\n ${tool}`;
});
}
return output;
}
export async function updateExtension(
extensionName: string,
cwd: string = process.cwd(),
): Promise<ExtensionUpdateInfo | undefined> {
const installedExtensions = loadUserExtensions();
const extension = installedExtensions.find(
(installed) => installed.config.name === extensionName,
);
if (!extension) {
throw new Error(
`Extension "${extensionName}" not found. Run gemini extensions list to see available extensions.`,
);
}
if (!extension.installMetadata) {
throw new Error(
`Extension cannot be updated because it is missing the .qwen-extension-install.json file. To update manually, uninstall and then reinstall the updated version.`,
);
}
const originalVersion = extension.config.version;
const tempDir = await ExtensionStorage.createTmpDir();
try {
await copyExtension(extension.path, tempDir);
await uninstallExtension(extensionName, cwd);
await installExtension(extension.installMetadata, cwd);
const updatedExtension = loadExtension(extension.path);
if (!updatedExtension) {
throw new Error('Updated extension not found after installation.');
}
const updatedVersion = updatedExtension.config.version;
return {
originalVersion,
updatedVersion,
};
} catch (e) {
console.error(
`Error updating extension, rolling back. ${getErrorMessage(e)}`,
);
await copyExtension(tempDir, extension.path);
throw e;
} finally {
await fs.promises.rm(tempDir, { recursive: true, force: true });
}
}
export function disableExtension(
name: string,
scope: SettingScope,
cwd: string = process.cwd(),
) {
if (scope === SettingScope.System || scope === SettingScope.SystemDefaults) {
throw new Error('System and SystemDefaults scopes are not supported.');
}
const settings = loadSettings(cwd);
const settingsFile = settings.forScope(scope);
const extensionSettings = settingsFile.settings.extensions || {
disabled: [],
};
const disabledExtensions = extensionSettings.disabled || [];
if (!disabledExtensions.includes(name)) {
disabledExtensions.push(name);
extensionSettings.disabled = disabledExtensions;
settings.setValue(scope, 'extensions', extensionSettings);
}
}
export function enableExtension(name: string, scopes: SettingScope[]) {
removeFromDisabledExtensions(name, scopes);
}
/**
* Removes an extension from the list of disabled extensions.
* @param name The name of the extension to remove.
* @param scope The scopes to remove the name from.
*/
function removeFromDisabledExtensions(
name: string,
scopes: SettingScope[],
cwd: string = process.cwd(),
) {
const settings = loadSettings(cwd);
for (const scope of scopes) {
const settingsFile = settings.forScope(scope);
const extensionSettings = settingsFile.settings.extensions || {
disabled: [],
};
const disabledExtensions = extensionSettings.disabled || [];
extensionSettings.disabled = disabledExtensions.filter(
(extension) => extension !== name,
);
settings.setValue(scope, 'extensions', extensionSettings);
}
}
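
For context, a minimal sketch of how the enable/disable helpers above update the settings files. The extension-module import path is an assumption made for illustration; SettingScope, loadSettings, and the extensions.disabled array are the ones used in this file.

import { SettingScope, loadSettings } from '../config/settings.js';
// Import path below is assumed for illustration only.
import { disableExtension, enableExtension } from './extension.js';

// Disabling records the name under extensions.disabled for the chosen scope.
disableExtension('my-extension', SettingScope.User);
const userScope = loadSettings(process.cwd()).forScope(SettingScope.User);
console.log(userScope.settings.extensions?.disabled); // expected: ['my-extension']

// Enabling filters the name back out of every scope passed in.
enableExtension('my-extension', [SettingScope.User, SettingScope.Workspace]);
console.log(
  loadSettings(process.cwd()).forScope(SettingScope.User).settings.extensions
    ?.disabled,
); // expected: []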


@@ -0,0 +1,30 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
export interface VariableDefinition {
type: 'string';
description: string;
default?: string;
required?: boolean;
}
export interface VariableSchema {
[key: string]: VariableDefinition;
}
const PATH_SEPARATOR_DEFINITION = {
type: 'string',
description: 'The path separator.',
} as const;
export const VARIABLE_SCHEMA = {
extensionPath: {
type: 'string',
description: 'The path of the extension in the filesystem.',
},
'/': PATH_SEPARATOR_DEFINITION,
pathSeparator: PATH_SEPARATOR_DEFINITION,
} as const;


@@ -0,0 +1,18 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { expect, describe, it } from 'vitest';
import { hydrateString } from './variables.js';
describe('hydrateString', () => {
it('should replace a single variable', () => {
const context = {
extensionPath: 'path/my-extension',
};
const result = hydrateString('Hello, ${extensionPath}!', context);
expect(result).toBe('Hello, path/my-extension!');
});
});


@@ -0,0 +1,65 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { type VariableSchema, VARIABLE_SCHEMA } from './variableSchema.js';
export type JsonObject = { [key: string]: JsonValue };
export type JsonArray = JsonValue[];
export type JsonValue =
| string
| number
| boolean
| null
| JsonObject
| JsonArray;
export type VariableContext = {
[key in keyof typeof VARIABLE_SCHEMA]?: string;
};
export function validateVariables(
variables: VariableContext,
schema: VariableSchema,
) {
for (const key in schema) {
const definition = schema[key];
if (definition.required && !variables[key as keyof VariableContext]) {
throw new Error(`Missing required variable: ${key}`);
}
}
}
export function hydrateString(str: string, context: VariableContext): string {
validateVariables(context, VARIABLE_SCHEMA);
const regex = /\${(.*?)}/g;
return str.replace(regex, (match, key) =>
context[key as keyof VariableContext] == null
? match
: (context[key as keyof VariableContext] as string),
);
}
export function recursivelyHydrateStrings(
obj: JsonValue,
values: VariableContext,
): JsonValue {
if (typeof obj === 'string') {
return hydrateString(obj, values);
}
if (Array.isArray(obj)) {
return obj.map((item) => recursivelyHydrateStrings(item, values));
}
if (typeof obj === 'object' && obj !== null) {
const newObj: JsonObject = {};
for (const key in obj) {
if (Object.prototype.hasOwnProperty.call(obj, key)) {
newObj[key] = recursivelyHydrateStrings(obj[key], values);
}
}
return newObj;
}
return obj;
}
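
A small sketch of how recursivelyHydrateStrings is applied to an extension config, mirroring the loadExtension call earlier in this compare. The sample config object and values are hypothetical.

import { recursivelyHydrateStrings } from './variables.js';

// Hypothetical qwen-extension.json contents before hydration.
const rawConfig = {
  name: 'my-extension',
  version: '1.0.0',
  mcpServers: {
    docs: { command: '${extensionPath}${/}server${/}start.sh' },
  },
};

// The same variable context that loadExtension builds.
const hydrated = recursivelyHydrateStrings(rawConfig, {
  extensionPath: '/home/user/.qwen/extensions/my-extension',
  '/': '/', // path.sep on POSIX
  pathSeparator: '/',
});

// hydrated.mcpServers.docs.command is now
// '/home/user/.qwen/extensions/my-extension/server/start.sh';
// placeholders without a value in the context are left untouched by hydrateString.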


@@ -5,11 +5,8 @@
*/
import { describe, it, expect } from 'vitest';
import {
Command,
KeyBindingConfig,
defaultKeyBindings,
} from './keyBindings.js';
import type { KeyBindingConfig } from './keyBindings.js';
import { Command, defaultKeyBindings } from './keyBindings.js';
describe('keyBindings config', () => {
describe('defaultKeyBindings', () => {


@@ -4,11 +4,12 @@
* SPDX-License-Identifier: Apache-2.0
*/
import { SandboxConfig } from '@qwen-code/qwen-code-core';
import type { SandboxConfig } from '@qwen-code/qwen-code-core';
import { FatalSandboxError } from '@qwen-code/qwen-code-core';
import commandExists from 'command-exists';
import * as os from 'node:os';
import { getPackageJson } from '../utils/package.js';
import { Settings } from './settings.js';
import type { Settings } from './settings.js';
// This is a stripped-down version of the CliArgs interface from config.ts
// to avoid circular dependencies.
@@ -51,21 +52,19 @@ function getSandboxCommand(
if (typeof sandbox === 'string' && sandbox) {
if (!isSandboxCommand(sandbox)) {
console.error(
`ERROR: invalid sandbox command '${sandbox}'. Must be one of ${VALID_SANDBOX_COMMANDS.join(
throw new FatalSandboxError(
`Invalid sandbox command '${sandbox}'. Must be one of ${VALID_SANDBOX_COMMANDS.join(
', ',
)}`,
);
process.exit(1);
}
// confirm that specified command exists
if (commandExists.sync(sandbox)) {
return sandbox;
}
console.error(
`ERROR: missing sandbox command '${sandbox}' (from GEMINI_SANDBOX)`,
throw new FatalSandboxError(
`Missing sandbox command '${sandbox}' (from GEMINI_SANDBOX)`,
);
process.exit(1);
}
// look for seatbelt, docker, or podman, in that order
@@ -80,11 +79,10 @@ function getSandboxCommand(
// throw an error if user requested sandbox but no command was found
if (sandbox === true) {
console.error(
'ERROR: GEMINI_SANDBOX is true but failed to determine command for sandbox; ' +
throw new FatalSandboxError(
'GEMINI_SANDBOX is true but failed to determine command for sandbox; ' +
'install docker or podman or specify command in GEMINI_SANDBOX',
);
process.exit(1);
}
return '';
@@ -94,7 +92,7 @@ export async function loadSandboxConfig(
settings: Settings,
argv: SandboxCliArgs,
): Promise<SandboxConfig | undefined> {
const sandboxOption = argv.sandbox ?? settings.sandbox;
const sandboxOption = argv.sandbox ?? settings.tools?.sandbox;
const command = getSandboxCommand(sandboxOption);
const packageJson = await getPackageJson();
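
A minimal sketch of how a caller can surface the new FatalSandboxError instead of relying on an in-function process.exit. The wrapper function and its call site are assumptions for illustration; loadSandboxConfig, Settings, and SandboxCliArgs are the names used in this file.

import { FatalSandboxError } from '@qwen-code/qwen-code-core';

// Assumes the surrounding module's loadSandboxConfig and types are in scope.
async function resolveSandbox(settings: Settings, argv: SandboxCliArgs) {
  try {
    // Throws FatalSandboxError when GEMINI_SANDBOX names an invalid or missing command.
    return await loadSandboxConfig(settings, argv);
  } catch (e) {
    if (e instanceof FatalSandboxError) {
      console.error(e.message);
      process.exit(1); // the exit decision now lives with the caller
    }
    throw e;
  }
}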

File diff suppressed because it is too large


@@ -4,26 +4,81 @@
* SPDX-License-Identifier: Apache-2.0
*/
import * as fs from 'fs';
import * as path from 'path';
import { homedir, platform } from 'os';
import * as fs from 'node:fs';
import * as path from 'node:path';
import { homedir, platform } from 'node:os';
import * as dotenv from 'dotenv';
import {
GEMINI_CONFIG_DIR as GEMINI_DIR,
getErrorMessage,
Storage,
} from '@qwen-code/qwen-code-core';
import stripJsonComments from 'strip-json-comments';
import { DefaultLight } from '../ui/themes/default-light.js';
import { DefaultDark } from '../ui/themes/default.js';
import { Settings, MemoryImportFormat } from './settingsSchema.js';
import { isWorkspaceTrusted } from './trustedFolders.js';
import type { Settings, MemoryImportFormat } from './settingsSchema.js';
import { mergeWith } from 'lodash-es';
export type { Settings, MemoryImportFormat };
export const SETTINGS_DIRECTORY_NAME = '.qwen';
export const USER_SETTINGS_DIR = path.join(homedir(), SETTINGS_DIRECTORY_NAME);
export const USER_SETTINGS_PATH = path.join(USER_SETTINGS_DIR, 'settings.json');
export const USER_SETTINGS_PATH = Storage.getGlobalSettingsPath();
export const USER_SETTINGS_DIR = path.dirname(USER_SETTINGS_PATH);
export const DEFAULT_EXCLUDED_ENV_VARS = ['DEBUG', 'DEBUG_MODE'];
const MIGRATE_V2_OVERWRITE = false;
const MIGRATION_MAP: Record<string, string> = {
preferredEditor: 'general.preferredEditor',
vimMode: 'general.vimMode',
disableAutoUpdate: 'general.disableAutoUpdate',
disableUpdateNag: 'general.disableUpdateNag',
checkpointing: 'general.checkpointing',
theme: 'ui.theme',
customThemes: 'ui.customThemes',
hideWindowTitle: 'ui.hideWindowTitle',
hideTips: 'ui.hideTips',
hideBanner: 'ui.hideBanner',
hideFooter: 'ui.hideFooter',
showMemoryUsage: 'ui.showMemoryUsage',
showLineNumbers: 'ui.showLineNumbers',
accessibility: 'ui.accessibility',
ideMode: 'ide.enabled',
hasSeenIdeIntegrationNudge: 'ide.hasSeenNudge',
usageStatisticsEnabled: 'privacy.usageStatisticsEnabled',
telemetry: 'telemetry',
model: 'model.name',
maxSessionTurns: 'model.maxSessionTurns',
summarizeToolOutput: 'model.summarizeToolOutput',
chatCompression: 'model.chatCompression',
skipNextSpeakerCheck: 'model.skipNextSpeakerCheck',
contextFileName: 'context.fileName',
memoryImportFormat: 'context.importFormat',
memoryDiscoveryMaxDirs: 'context.discoveryMaxDirs',
includeDirectories: 'context.includeDirectories',
loadMemoryFromIncludeDirectories: 'context.loadFromIncludeDirectories',
fileFiltering: 'context.fileFiltering',
sandbox: 'tools.sandbox',
shouldUseNodePtyShell: 'tools.usePty',
allowedTools: 'tools.allowed',
coreTools: 'tools.core',
excludeTools: 'tools.exclude',
toolDiscoveryCommand: 'tools.discoveryCommand',
toolCallCommand: 'tools.callCommand',
mcpServerCommand: 'mcp.serverCommand',
allowMCPServers: 'mcp.allowed',
excludeMCPServers: 'mcp.excluded',
folderTrustFeature: 'security.folderTrust.featureEnabled',
folderTrust: 'security.folderTrust.enabled',
selectedAuthType: 'security.auth.selectedType',
useExternalAuth: 'security.auth.useExternal',
autoConfigureMaxOldSpaceSize: 'advanced.autoConfigureMemory',
dnsResolutionOrder: 'advanced.dnsResolutionOrder',
excludedProjectEnvVars: 'advanced.excludedEnvVars',
bugCommand: 'advanced.bugCommand',
};
export function getSystemSettingsPath(): string {
if (process.env['QWEN_CODE_SYSTEM_SETTINGS_PATH']) {
return process.env['QWEN_CODE_SYSTEM_SETTINGS_PATH'];
@@ -37,8 +92,14 @@ export function getSystemSettingsPath(): string {
}
}
export function getWorkspaceSettingsPath(workspaceDir: string): string {
return path.join(workspaceDir, SETTINGS_DIRECTORY_NAME, 'settings.json');
export function getSystemDefaultsPath(): string {
if (process.env['QWEN_CODE_SYSTEM_DEFAULTS_PATH']) {
return process.env['QWEN_CODE_SYSTEM_DEFAULTS_PATH'];
}
return path.join(
path.dirname(getSystemSettingsPath()),
'system-defaults.json',
);
}
export type { DnsResolutionOrder } from './settingsSchema.js';
@@ -47,6 +108,7 @@ export enum SettingScope {
User = 'User',
Workspace = 'Workspace',
System = 'System',
SystemDefaults = 'SystemDefaults',
}
export interface CheckpointingSettings {
@@ -59,6 +121,7 @@ export interface SummarizeToolOutputSettings {
export interface AccessibilitySettings {
disableLoadingPhrases?: boolean;
screenReader?: boolean;
}
export interface SettingsError {
@@ -71,38 +134,308 @@ export interface SettingsFile {
path: string;
}
function setNestedProperty(
obj: Record<string, unknown>,
path: string,
value: unknown,
) {
const keys = path.split('.');
const lastKey = keys.pop();
if (!lastKey) return;
let current: Record<string, unknown> = obj;
for (const key of keys) {
if (current[key] === undefined) {
current[key] = {};
}
const next = current[key];
if (typeof next === 'object' && next !== null) {
current = next as Record<string, unknown>;
} else {
// This path is invalid, so we stop.
return;
}
}
current[lastKey] = value;
}
function needsMigration(settings: Record<string, unknown>): boolean {
return !('general' in settings);
}
function migrateSettingsToV2(
flatSettings: Record<string, unknown>,
): Record<string, unknown> | null {
if (!needsMigration(flatSettings)) {
return null;
}
const v2Settings: Record<string, unknown> = {};
const flatKeys = new Set(Object.keys(flatSettings));
for (const [oldKey, newPath] of Object.entries(MIGRATION_MAP)) {
if (flatKeys.has(oldKey)) {
setNestedProperty(v2Settings, newPath, flatSettings[oldKey]);
flatKeys.delete(oldKey);
}
}
// Preserve mcpServers at the top level
if (flatSettings['mcpServers']) {
v2Settings['mcpServers'] = flatSettings['mcpServers'];
flatKeys.delete('mcpServers');
}
// Carry over any unrecognized keys
for (const remainingKey of flatKeys) {
v2Settings[remainingKey] = flatSettings[remainingKey];
}
return v2Settings;
}
function getNestedProperty(
obj: Record<string, unknown>,
path: string,
): unknown {
const keys = path.split('.');
let current: unknown = obj;
for (const key of keys) {
if (typeof current !== 'object' || current === null || !(key in current)) {
return undefined;
}
current = (current as Record<string, unknown>)[key];
}
return current;
}
const REVERSE_MIGRATION_MAP: Record<string, string> = Object.fromEntries(
Object.entries(MIGRATION_MAP).map(([key, value]) => [value, key]),
);
// Dynamically determine the top-level keys from the V2 settings structure.
const KNOWN_V2_CONTAINERS = new Set(
Object.values(MIGRATION_MAP).map((path) => path.split('.')[0]),
);
export function migrateSettingsToV1(
v2Settings: Record<string, unknown>,
): Record<string, unknown> {
const v1Settings: Record<string, unknown> = {};
const v2Keys = new Set(Object.keys(v2Settings));
for (const [newPath, oldKey] of Object.entries(REVERSE_MIGRATION_MAP)) {
const value = getNestedProperty(v2Settings, newPath);
if (value !== undefined) {
v1Settings[oldKey] = value;
v2Keys.delete(newPath.split('.')[0]);
}
}
// Preserve mcpServers at the top level
if (v2Settings['mcpServers']) {
v1Settings['mcpServers'] = v2Settings['mcpServers'];
v2Keys.delete('mcpServers');
}
// Carry over any unrecognized keys
for (const remainingKey of v2Keys) {
const value = v2Settings[remainingKey];
if (value === undefined) {
continue;
}
// Don't carry over empty objects that were just containers for migrated settings.
if (
KNOWN_V2_CONTAINERS.has(remainingKey) &&
typeof value === 'object' &&
value !== null &&
!Array.isArray(value) &&
Object.keys(value).length === 0
) {
continue;
}
v1Settings[remainingKey] = value;
}
return v1Settings;
}
function mergeSettings(
system: Settings,
systemDefaults: Settings,
user: Settings,
workspace: Settings,
isTrusted: boolean,
): Settings {
// folderTrust is not supported at workspace level.
// eslint-disable-next-line @typescript-eslint/no-unused-vars
const { folderTrust, ...workspaceWithoutFolderTrust } = workspace;
const safeWorkspace = isTrusted ? workspace : ({} as Settings);
// folderTrust is not supported at workspace level.
const { security, ...restOfWorkspace } = safeWorkspace;
const safeWorkspaceWithoutFolderTrust = security
? {
...restOfWorkspace,
// eslint-disable-next-line @typescript-eslint/no-unused-vars
security: (({ folderTrust, ...rest }) => rest)(security),
}
: {
...restOfWorkspace,
security: {},
};
// Settings are merged with the following precedence (last one wins for
// single values):
// 1. System Defaults
// 2. User Settings
// 3. Workspace Settings
// 4. System Settings (as overrides)
//
// For properties that are arrays (e.g., includeDirectories), the arrays
// are concatenated. For objects (e.g., customThemes), they are merged.
return {
...systemDefaults,
...user,
...workspaceWithoutFolderTrust,
...safeWorkspaceWithoutFolderTrust,
...system,
customThemes: {
...(user.customThemes || {}),
...(workspace.customThemes || {}),
...(system.customThemes || {}),
general: {
...(systemDefaults.general || {}),
...(user.general || {}),
...(safeWorkspaceWithoutFolderTrust.general || {}),
...(system.general || {}),
},
ui: {
...(systemDefaults.ui || {}),
...(user.ui || {}),
...(safeWorkspaceWithoutFolderTrust.ui || {}),
...(system.ui || {}),
customThemes: {
...(systemDefaults.ui?.customThemes || {}),
...(user.ui?.customThemes || {}),
...(safeWorkspaceWithoutFolderTrust.ui?.customThemes || {}),
...(system.ui?.customThemes || {}),
},
},
ide: {
...(systemDefaults.ide || {}),
...(user.ide || {}),
...(safeWorkspaceWithoutFolderTrust.ide || {}),
...(system.ide || {}),
},
privacy: {
...(systemDefaults.privacy || {}),
...(user.privacy || {}),
...(safeWorkspaceWithoutFolderTrust.privacy || {}),
...(system.privacy || {}),
},
telemetry: {
...(systemDefaults.telemetry || {}),
...(user.telemetry || {}),
...(safeWorkspaceWithoutFolderTrust.telemetry || {}),
...(system.telemetry || {}),
},
security: {
...(systemDefaults.security || {}),
...(user.security || {}),
...(safeWorkspaceWithoutFolderTrust.security || {}),
...(system.security || {}),
},
mcp: {
...(systemDefaults.mcp || {}),
...(user.mcp || {}),
...(safeWorkspaceWithoutFolderTrust.mcp || {}),
...(system.mcp || {}),
},
mcpServers: {
...(systemDefaults.mcpServers || {}),
...(user.mcpServers || {}),
...(workspace.mcpServers || {}),
...(safeWorkspaceWithoutFolderTrust.mcpServers || {}),
...(system.mcpServers || {}),
},
includeDirectories: [
...(system.includeDirectories || []),
...(user.includeDirectories || []),
...(workspace.includeDirectories || []),
],
chatCompression: {
...(system.chatCompression || {}),
...(user.chatCompression || {}),
...(workspace.chatCompression || {}),
tools: {
...(systemDefaults.tools || {}),
...(user.tools || {}),
...(safeWorkspaceWithoutFolderTrust.tools || {}),
...(system.tools || {}),
},
context: {
...(systemDefaults.context || {}),
...(user.context || {}),
...(safeWorkspaceWithoutFolderTrust.context || {}),
...(system.context || {}),
includeDirectories: [
...(systemDefaults.context?.includeDirectories || []),
...(user.context?.includeDirectories || []),
...(safeWorkspaceWithoutFolderTrust.context?.includeDirectories || []),
...(system.context?.includeDirectories || []),
],
},
model: {
...(systemDefaults.model || {}),
...(user.model || {}),
...(safeWorkspaceWithoutFolderTrust.model || {}),
...(system.model || {}),
chatCompression: {
...(systemDefaults.model?.chatCompression || {}),
...(user.model?.chatCompression || {}),
...(safeWorkspaceWithoutFolderTrust.model?.chatCompression || {}),
...(system.model?.chatCompression || {}),
},
},
advanced: {
...(systemDefaults.advanced || {}),
...(user.advanced || {}),
...(safeWorkspaceWithoutFolderTrust.advanced || {}),
...(system.advanced || {}),
excludedEnvVars: [
...new Set([
...(systemDefaults.advanced?.excludedEnvVars || []),
...(user.advanced?.excludedEnvVars || []),
...(safeWorkspaceWithoutFolderTrust.advanced?.excludedEnvVars || []),
...(system.advanced?.excludedEnvVars || []),
]),
],
},
experimental: {
...(systemDefaults.experimental || {}),
...(user.experimental || {}),
...(safeWorkspaceWithoutFolderTrust.experimental || {}),
...(system.experimental || {}),
},
contentGenerator: {
...(systemDefaults.contentGenerator || {}),
...(user.contentGenerator || {}),
...(safeWorkspaceWithoutFolderTrust.contentGenerator || {}),
...(system.contentGenerator || {}),
},
systemPromptMappings: {
...(systemDefaults.systemPromptMappings || {}),
...(user.systemPromptMappings || {}),
...(safeWorkspaceWithoutFolderTrust.systemPromptMappings || {}),
...(system.systemPromptMappings || {}),
},
extensions: {
...(systemDefaults.extensions || {}),
...(user.extensions || {}),
...(safeWorkspaceWithoutFolderTrust.extensions || {}),
...(system.extensions || {}),
disabled: [
...new Set([
...(systemDefaults.extensions?.disabled || []),
...(user.extensions?.disabled || []),
...(safeWorkspaceWithoutFolderTrust.extensions?.disabled || []),
...(system.extensions?.disabled || []),
]),
],
workspacesWithMigrationNudge: [
...new Set([
...(systemDefaults.extensions?.workspacesWithMigrationNudge || []),
...(user.extensions?.workspacesWithMigrationNudge || []),
...(safeWorkspaceWithoutFolderTrust.extensions
?.workspacesWithMigrationNudge || []),
...(system.extensions?.workspacesWithMigrationNudge || []),
]),
],
},
};
}
@@ -110,21 +443,30 @@ function mergeSettings(
export class LoadedSettings {
constructor(
system: SettingsFile,
systemDefaults: SettingsFile,
user: SettingsFile,
workspace: SettingsFile,
errors: SettingsError[],
isTrusted: boolean,
migratedInMemorScopes: Set<SettingScope>,
) {
this.system = system;
this.systemDefaults = systemDefaults;
this.user = user;
this.workspace = workspace;
this.errors = errors;
this.isTrusted = isTrusted;
this.migratedInMemorScopes = migratedInMemorScopes;
this._merged = this.computeMergedSettings();
}
readonly system: SettingsFile;
readonly systemDefaults: SettingsFile;
readonly user: SettingsFile;
readonly workspace: SettingsFile;
readonly errors: SettingsError[];
readonly isTrusted: boolean;
readonly migratedInMemorScopes: Set<SettingScope>;
private _merged: Settings;
@@ -135,8 +477,10 @@ export class LoadedSettings {
private computeMergedSettings(): Settings {
return mergeSettings(
this.system.settings,
this.systemDefaults.settings,
this.user.settings,
this.workspace.settings,
this.isTrusted,
);
}
@@ -148,18 +492,16 @@ export class LoadedSettings {
return this.workspace;
case SettingScope.System:
return this.system;
case SettingScope.SystemDefaults:
return this.systemDefaults;
default:
throw new Error(`Invalid scope: ${scope}`);
}
}
setValue<K extends keyof Settings>(
scope: SettingScope,
key: K,
value: Settings[K],
): void {
setValue(scope: SettingScope, key: string, value: unknown): void {
const settingsFile = this.forScope(scope);
settingsFile.settings[key] = value;
setNestedProperty(settingsFile.settings, key, value);
this._merged = this.computeMergedSettings();
saveSettings(settingsFile);
}
@@ -269,7 +611,9 @@ export function loadEnvironment(settings?: Settings): void {
// If no settings provided, try to load workspace settings for exclusions
let resolvedSettings = settings;
if (!resolvedSettings) {
const workspaceSettingsPath = getWorkspaceSettingsPath(process.cwd());
const workspaceSettingsPath = new Storage(
process.cwd(),
).getWorkspaceSettingsPath();
try {
if (fs.existsSync(workspaceSettingsPath)) {
const workspaceContent = fs.readFileSync(
@@ -294,7 +638,8 @@ export function loadEnvironment(settings?: Settings): void {
const parsedEnv = dotenv.parse(envFileContent);
const excludedVars =
resolvedSettings?.excludedProjectEnvVars || DEFAULT_EXCLUDED_ENV_VARS;
resolvedSettings?.advanced?.excludedEnvVars ||
DEFAULT_EXCLUDED_ENV_VARS;
const isProjectEnvFile = !envFilePath.includes(GEMINI_DIR);
for (const key in parsedEnv) {
@@ -322,10 +667,13 @@ export function loadEnvironment(settings?: Settings): void {
*/
export function loadSettings(workspaceDir: string): LoadedSettings {
let systemSettings: Settings = {};
let systemDefaultSettings: Settings = {};
let userSettings: Settings = {};
let workspaceSettings: Settings = {};
const settingsErrors: SettingsError[] = [];
const systemSettingsPath = getSystemSettingsPath();
const systemDefaultsPath = getSystemDefaultsPath();
const migratedInMemorScopes = new Set<SettingScope>();
// Resolve paths to their canonical representation to handle symlinks
const resolvedWorkspaceDir = path.resolve(workspaceDir);
@@ -342,70 +690,102 @@ export function loadSettings(workspaceDir: string): LoadedSettings {
// We expect homedir to always exist and be resolvable.
const realHomeDir = fs.realpathSync(resolvedHomeDir);
const workspaceSettingsPath = getWorkspaceSettingsPath(workspaceDir);
const workspaceSettingsPath = new Storage(
workspaceDir,
).getWorkspaceSettingsPath();
// Load system settings
try {
if (fs.existsSync(systemSettingsPath)) {
const systemContent = fs.readFileSync(systemSettingsPath, 'utf-8');
systemSettings = JSON.parse(stripJsonComments(systemContent)) as Settings;
}
} catch (error: unknown) {
settingsErrors.push({
message: getErrorMessage(error),
path: systemSettingsPath,
});
}
// Load user settings
try {
if (fs.existsSync(USER_SETTINGS_PATH)) {
const userContent = fs.readFileSync(USER_SETTINGS_PATH, 'utf-8');
userSettings = JSON.parse(stripJsonComments(userContent)) as Settings;
// Support legacy theme names
if (userSettings.theme && userSettings.theme === 'VS') {
userSettings.theme = DefaultLight.name;
} else if (userSettings.theme && userSettings.theme === 'VS2015') {
userSettings.theme = DefaultDark.name;
}
}
} catch (error: unknown) {
settingsErrors.push({
message: getErrorMessage(error),
path: USER_SETTINGS_PATH,
});
}
if (realWorkspaceDir !== realHomeDir) {
// Load workspace settings
const loadAndMigrate = (filePath: string, scope: SettingScope): Settings => {
try {
if (fs.existsSync(workspaceSettingsPath)) {
const projectContent = fs.readFileSync(workspaceSettingsPath, 'utf-8');
workspaceSettings = JSON.parse(
stripJsonComments(projectContent),
) as Settings;
if (workspaceSettings.theme && workspaceSettings.theme === 'VS') {
workspaceSettings.theme = DefaultLight.name;
} else if (
workspaceSettings.theme &&
workspaceSettings.theme === 'VS2015'
if (fs.existsSync(filePath)) {
const content = fs.readFileSync(filePath, 'utf-8');
const rawSettings: unknown = JSON.parse(stripJsonComments(content));
if (
typeof rawSettings !== 'object' ||
rawSettings === null ||
Array.isArray(rawSettings)
) {
workspaceSettings.theme = DefaultDark.name;
settingsErrors.push({
message: 'Settings file is not a valid JSON object.',
path: filePath,
});
return {};
}
let settingsObject = rawSettings as Record<string, unknown>;
if (needsMigration(settingsObject)) {
const migratedSettings = migrateSettingsToV2(settingsObject);
if (migratedSettings) {
if (MIGRATE_V2_OVERWRITE) {
try {
fs.renameSync(filePath, `${filePath}.orig`);
fs.writeFileSync(
filePath,
JSON.stringify(migratedSettings, null, 2),
'utf-8',
);
} catch (e) {
console.error(
`Error migrating settings file on disk: ${getErrorMessage(
e,
)}`,
);
}
} else {
migratedInMemorScopes.add(scope);
}
settingsObject = migratedSettings;
}
}
return settingsObject as Settings;
}
} catch (error: unknown) {
settingsErrors.push({
message: getErrorMessage(error),
path: workspaceSettingsPath,
path: filePath,
});
}
return {};
};
systemSettings = loadAndMigrate(systemSettingsPath, SettingScope.System);
systemDefaultSettings = loadAndMigrate(
systemDefaultsPath,
SettingScope.SystemDefaults,
);
userSettings = loadAndMigrate(USER_SETTINGS_PATH, SettingScope.User);
if (realWorkspaceDir !== realHomeDir) {
workspaceSettings = loadAndMigrate(
workspaceSettingsPath,
SettingScope.Workspace,
);
}
// Support legacy theme names
if (userSettings.ui?.theme === 'VS') {
userSettings.ui.theme = DefaultLight.name;
} else if (userSettings.ui?.theme === 'VS2015') {
userSettings.ui.theme = DefaultDark.name;
}
if (workspaceSettings.ui?.theme === 'VS') {
workspaceSettings.ui.theme = DefaultLight.name;
} else if (workspaceSettings.ui?.theme === 'VS2015') {
workspaceSettings.ui.theme = DefaultDark.name;
}
// For the initial trust check, we can only use user and system settings.
const initialTrustCheckSettings = mergeWith({}, systemSettings, userSettings);
const isTrusted =
isWorkspaceTrusted(initialTrustCheckSettings as Settings) ?? true;
// Create a temporary merged settings object to pass to loadEnvironment.
const tempMergedSettings = mergeSettings(
systemSettings,
systemDefaultSettings,
userSettings,
workspaceSettings,
isTrusted,
);
// loadEnvironment depends on settings so we have to create a temp version of
@@ -423,6 +803,10 @@ export function loadSettings(workspaceDir: string): LoadedSettings {
path: systemSettingsPath,
settings: systemSettings,
},
{
path: systemDefaultsPath,
settings: systemDefaultSettings,
},
{
path: USER_SETTINGS_PATH,
settings: userSettings,
@@ -432,21 +816,10 @@ export function loadSettings(workspaceDir: string): LoadedSettings {
settings: workspaceSettings,
},
settingsErrors,
isTrusted,
migratedInMemorScopes,
);
// Validate chatCompression settings
const chatCompression = loadedSettings.merged.chatCompression;
const threshold = chatCompression?.contextPercentageThreshold;
if (
threshold != null &&
(typeof threshold !== 'number' || threshold < 0 || threshold > 1)
) {
console.warn(
`Invalid value for chatCompression.contextPercentageThreshold: "${threshold}". Please use a value between 0 and 1. Using default compression settings.`,
);
delete loadedSettings.merged.chatCompression;
}
return loadedSettings;
}
@@ -458,9 +831,16 @@ export function saveSettings(settingsFile: SettingsFile): void {
fs.mkdirSync(dirPath, { recursive: true });
}
let settingsToSave = settingsFile.settings;
if (!MIGRATE_V2_OVERWRITE) {
settingsToSave = migrateSettingsToV1(
settingsToSave as Record<string, unknown>,
) as Settings;
}
fs.writeFileSync(
settingsFile.path,
JSON.stringify(settingsFile.settings, null, 2),
JSON.stringify(settingsToSave, null, 2),
'utf-8',
);
} catch (error) {
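
To illustrate the in-memory migration these changes introduce, a rough sketch of the shape change migrateSettingsToV2 performs on load (and migrateSettingsToV1 reverses in saveSettings while MIGRATE_V2_OVERWRITE stays false). The keys follow MIGRATION_MAP above; the values are made up.

// Hypothetical legacy (v1) settings.json as read from disk.
const v1 = {
  theme: 'Default Dark',
  vimMode: true,
  sandbox: 'docker',
  excludedProjectEnvVars: ['DEBUG'],
  mcpServers: { docs: { command: 'start-docs' } },
};
// needsMigration(v1) is true because there is no 'general' key yet.

// What migrateSettingsToV2 produces in memory (per MIGRATION_MAP):
const v2 = {
  ui: { theme: 'Default Dark' },
  general: { vimMode: true },
  tools: { sandbox: 'docker' },
  advanced: { excludedEnvVars: ['DEBUG'] },
  // mcpServers is preserved at the top level.
  mcpServers: { docs: { command: 'start-docs' } },
};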


@@ -5,53 +5,25 @@
*/
import { describe, it, expect } from 'vitest';
import { SETTINGS_SCHEMA, Settings } from './settingsSchema.js';
import type { Settings } from './settingsSchema.js';
import { SETTINGS_SCHEMA } from './settingsSchema.js';
describe('SettingsSchema', () => {
describe('SETTINGS_SCHEMA', () => {
it('should contain all expected top-level settings', () => {
const expectedSettings = [
'theme',
'customThemes',
'showMemoryUsage',
'usageStatisticsEnabled',
'autoConfigureMaxOldSpaceSize',
'preferredEditor',
'maxSessionTurns',
'memoryImportFormat',
'memoryDiscoveryMaxDirs',
'contextFileName',
'vimMode',
'ideMode',
'accessibility',
'checkpointing',
'fileFiltering',
'disableAutoUpdate',
'hideWindowTitle',
'hideTips',
'hideBanner',
'selectedAuthType',
'useExternalAuth',
'sandbox',
'coreTools',
'excludeTools',
'toolDiscoveryCommand',
'toolCallCommand',
'mcpServerCommand',
'mcpServers',
'allowMCPServers',
'excludeMCPServers',
'general',
'ui',
'ide',
'privacy',
'telemetry',
'bugCommand',
'summarizeToolOutput',
'dnsResolutionOrder',
'excludedProjectEnvVars',
'disableUpdateNag',
'includeDirectories',
'loadMemoryFromIncludeDirectories',
'model',
'hasSeenIdeIntegrationNudge',
'folderTrustFeature',
'context',
'tools',
'mcp',
'security',
'advanced',
'enableWelcomeBack',
];
@@ -78,9 +50,16 @@ describe('SettingsSchema', () => {
it('should have correct nested setting structure', () => {
const nestedSettings = [
'accessibility',
'checkpointing',
'fileFiltering',
'general',
'ui',
'ide',
'privacy',
'model',
'context',
'tools',
'mcp',
'security',
'advanced',
];
nestedSettings.forEach((setting) => {
@@ -97,29 +76,36 @@ describe('SettingsSchema', () => {
it('should have accessibility nested properties', () => {
expect(
SETTINGS_SCHEMA.accessibility.properties?.disableLoadingPhrases,
SETTINGS_SCHEMA.ui?.properties?.accessibility?.properties,
).toBeDefined();
expect(
SETTINGS_SCHEMA.accessibility.properties?.disableLoadingPhrases.type,
SETTINGS_SCHEMA.ui?.properties?.accessibility.properties
?.disableLoadingPhrases.type,
).toBe('boolean');
});
it('should have checkpointing nested properties', () => {
expect(SETTINGS_SCHEMA.checkpointing.properties?.enabled).toBeDefined();
expect(SETTINGS_SCHEMA.checkpointing.properties?.enabled.type).toBe(
'boolean',
);
expect(
SETTINGS_SCHEMA.general?.properties?.checkpointing.properties?.enabled,
).toBeDefined();
expect(
SETTINGS_SCHEMA.general?.properties?.checkpointing.properties?.enabled
.type,
).toBe('boolean');
});
it('should have fileFiltering nested properties', () => {
expect(
SETTINGS_SCHEMA.fileFiltering.properties?.respectGitIgnore,
SETTINGS_SCHEMA.context.properties.fileFiltering.properties
?.respectGitIgnore,
).toBeDefined();
expect(
SETTINGS_SCHEMA.fileFiltering.properties?.respectGeminiIgnore,
SETTINGS_SCHEMA.context.properties.fileFiltering.properties
?.respectGeminiIgnore,
).toBeDefined();
expect(
SETTINGS_SCHEMA.fileFiltering.properties?.enableRecursiveFileSearch,
SETTINGS_SCHEMA.context.properties.fileFiltering.properties
?.enableRecursiveFileSearch,
).toBeDefined();
});
@@ -148,11 +134,6 @@ describe('SettingsSchema', () => {
expect(categories.size).toBeGreaterThan(0);
expect(categories).toContain('General');
expect(categories).toContain('UI');
expect(categories).toContain('Mode');
expect(categories).toContain('Updates');
expect(categories).toContain('Accessibility');
expect(categories).toContain('Checkpointing');
expect(categories).toContain('File Filtering');
expect(categories).toContain('Advanced');
});
@@ -181,73 +162,148 @@ describe('SettingsSchema', () => {
it('should have showInDialog property configured', () => {
// Check that user-facing settings are marked for dialog display
expect(SETTINGS_SCHEMA.showMemoryUsage.showInDialog).toBe(true);
expect(SETTINGS_SCHEMA.vimMode.showInDialog).toBe(true);
expect(SETTINGS_SCHEMA.ideMode.showInDialog).toBe(true);
expect(SETTINGS_SCHEMA.disableAutoUpdate.showInDialog).toBe(true);
expect(SETTINGS_SCHEMA.hideWindowTitle.showInDialog).toBe(true);
expect(SETTINGS_SCHEMA.hideTips.showInDialog).toBe(true);
expect(SETTINGS_SCHEMA.hideBanner.showInDialog).toBe(true);
expect(SETTINGS_SCHEMA.usageStatisticsEnabled.showInDialog).toBe(false);
expect(SETTINGS_SCHEMA.ui.properties.showMemoryUsage.showInDialog).toBe(
true,
);
expect(SETTINGS_SCHEMA.general.properties.vimMode.showInDialog).toBe(
true,
);
expect(SETTINGS_SCHEMA.ide.properties.enabled.showInDialog).toBe(true);
expect(
SETTINGS_SCHEMA.general.properties.disableAutoUpdate.showInDialog,
).toBe(true);
expect(SETTINGS_SCHEMA.ui.properties.hideWindowTitle.showInDialog).toBe(
true,
);
expect(SETTINGS_SCHEMA.ui.properties.hideTips.showInDialog).toBe(true);
expect(SETTINGS_SCHEMA.ui.properties.hideBanner.showInDialog).toBe(true);
expect(
SETTINGS_SCHEMA.privacy.properties.usageStatisticsEnabled.showInDialog,
).toBe(false);
// Check that advanced settings are hidden from dialog
expect(SETTINGS_SCHEMA.selectedAuthType.showInDialog).toBe(false);
expect(SETTINGS_SCHEMA.coreTools.showInDialog).toBe(false);
expect(SETTINGS_SCHEMA.security.properties.auth.showInDialog).toBe(false);
expect(SETTINGS_SCHEMA.tools.properties.core.showInDialog).toBe(false);
expect(SETTINGS_SCHEMA.mcpServers.showInDialog).toBe(false);
expect(SETTINGS_SCHEMA.telemetry.showInDialog).toBe(false);
// Check that some settings are appropriately hidden
expect(SETTINGS_SCHEMA.theme.showInDialog).toBe(false); // Changed to false
expect(SETTINGS_SCHEMA.customThemes.showInDialog).toBe(false); // Managed via theme editor
expect(SETTINGS_SCHEMA.checkpointing.showInDialog).toBe(false); // Experimental feature
expect(SETTINGS_SCHEMA.accessibility.showInDialog).toBe(false); // Changed to false
expect(SETTINGS_SCHEMA.fileFiltering.showInDialog).toBe(false); // Changed to false
expect(SETTINGS_SCHEMA.preferredEditor.showInDialog).toBe(false); // Changed to false
expect(SETTINGS_SCHEMA.autoConfigureMaxOldSpaceSize.showInDialog).toBe(
true,
);
expect(SETTINGS_SCHEMA.ui.properties.theme.showInDialog).toBe(false); // Changed to false
expect(SETTINGS_SCHEMA.ui.properties.customThemes.showInDialog).toBe(
false,
); // Managed via theme editor
expect(
SETTINGS_SCHEMA.general.properties.checkpointing.showInDialog,
).toBe(false); // Experimental feature
expect(SETTINGS_SCHEMA.ui.properties.accessibility.showInDialog).toBe(
false,
); // Changed to false
expect(
SETTINGS_SCHEMA.context.properties.fileFiltering.showInDialog,
).toBe(false); // Changed to false
expect(
SETTINGS_SCHEMA.general.properties.preferredEditor.showInDialog,
).toBe(false); // Changed to false
expect(
SETTINGS_SCHEMA.advanced.properties.autoConfigureMemory.showInDialog,
).toBe(false);
});
it('should infer Settings type correctly', () => {
// This test ensures that the Settings type is properly inferred from the schema
const settings: Settings = {
theme: 'dark',
includeDirectories: ['/path/to/dir'],
loadMemoryFromIncludeDirectories: true,
ui: {
theme: 'dark',
},
context: {
includeDirectories: ['/path/to/dir'],
loadMemoryFromIncludeDirectories: true,
},
};
// TypeScript should not complain about these properties
expect(settings.theme).toBe('dark');
expect(settings.includeDirectories).toEqual(['/path/to/dir']);
expect(settings.loadMemoryFromIncludeDirectories).toBe(true);
expect(settings.ui?.theme).toBe('dark');
expect(settings.context?.includeDirectories).toEqual(['/path/to/dir']);
expect(settings.context?.loadMemoryFromIncludeDirectories).toBe(true);
});
it('should have includeDirectories setting in schema', () => {
expect(SETTINGS_SCHEMA.includeDirectories).toBeDefined();
expect(SETTINGS_SCHEMA.includeDirectories.type).toBe('array');
expect(SETTINGS_SCHEMA.includeDirectories.category).toBe('General');
expect(SETTINGS_SCHEMA.includeDirectories.default).toEqual([]);
expect(
SETTINGS_SCHEMA.context?.properties.includeDirectories,
).toBeDefined();
expect(SETTINGS_SCHEMA.context?.properties.includeDirectories.type).toBe(
'array',
);
expect(
SETTINGS_SCHEMA.context?.properties.includeDirectories.category,
).toBe('Context');
expect(
SETTINGS_SCHEMA.context?.properties.includeDirectories.default,
).toEqual([]);
});
it('should have loadMemoryFromIncludeDirectories setting in schema', () => {
expect(SETTINGS_SCHEMA.loadMemoryFromIncludeDirectories).toBeDefined();
expect(SETTINGS_SCHEMA.loadMemoryFromIncludeDirectories.type).toBe(
'boolean',
);
expect(SETTINGS_SCHEMA.loadMemoryFromIncludeDirectories.category).toBe(
'General',
);
expect(SETTINGS_SCHEMA.loadMemoryFromIncludeDirectories.default).toBe(
false,
);
expect(
SETTINGS_SCHEMA.context?.properties.loadMemoryFromIncludeDirectories,
).toBeDefined();
expect(
SETTINGS_SCHEMA.context?.properties.loadMemoryFromIncludeDirectories
.type,
).toBe('boolean');
expect(
SETTINGS_SCHEMA.context?.properties.loadMemoryFromIncludeDirectories
.category,
).toBe('Context');
expect(
SETTINGS_SCHEMA.context?.properties.loadMemoryFromIncludeDirectories
.default,
).toBe(false);
});
it('should have folderTrustFeature setting in schema', () => {
expect(SETTINGS_SCHEMA.folderTrustFeature).toBeDefined();
expect(SETTINGS_SCHEMA.folderTrustFeature.type).toBe('boolean');
expect(SETTINGS_SCHEMA.folderTrustFeature.category).toBe('General');
expect(SETTINGS_SCHEMA.folderTrustFeature.default).toBe(false);
expect(SETTINGS_SCHEMA.folderTrustFeature.showInDialog).toBe(true);
expect(
SETTINGS_SCHEMA.security.properties.folderTrust.properties.enabled,
).toBeDefined();
expect(
SETTINGS_SCHEMA.security.properties.folderTrust.properties.enabled.type,
).toBe('boolean');
expect(
SETTINGS_SCHEMA.security.properties.folderTrust.properties.enabled
.category,
).toBe('Security');
expect(
SETTINGS_SCHEMA.security.properties.folderTrust.properties.enabled
.default,
).toBe(false);
expect(
SETTINGS_SCHEMA.security.properties.folderTrust.properties.enabled
.showInDialog,
).toBe(true);
});
it('should have debugKeystrokeLogging setting in schema', () => {
expect(
SETTINGS_SCHEMA.general.properties.debugKeystrokeLogging,
).toBeDefined();
expect(
SETTINGS_SCHEMA.general.properties.debugKeystrokeLogging.type,
).toBe('boolean');
expect(
SETTINGS_SCHEMA.general.properties.debugKeystrokeLogging.category,
).toBe('General');
expect(
SETTINGS_SCHEMA.general.properties.debugKeystrokeLogging.default,
).toBe(false);
expect(
SETTINGS_SCHEMA.general.properties.debugKeystrokeLogging
.requiresRestart,
).toBe(false);
expect(
SETTINGS_SCHEMA.general.properties.debugKeystrokeLogging.showInDialog,
).toBe(true);
expect(
SETTINGS_SCHEMA.general.properties.debugKeystrokeLogging.description,
).toBe('Enable debug logging of keystrokes to the console.');
});
});
});

File diff suppressed because it is too large


@@ -5,7 +5,7 @@
*/
// Mock 'os' first.
import * as osActual from 'os';
import * as osActual from 'node:os';
vi.mock('os', async (importOriginal) => {
const actualOs = await importOriginal<typeof osActual>();
return {
@@ -25,9 +25,9 @@ import {
type Mocked,
type Mock,
} from 'vitest';
import * as fs from 'fs';
import * as fs from 'node:fs';
import stripJsonComments from 'strip-json-comments';
import * as path from 'path';
import * as path from 'node:path';
import {
loadTrustedFolders,
@@ -35,7 +35,7 @@ import {
TrustLevel,
isWorkspaceTrusted,
} from './trustedFolders.js';
import { Settings } from './settings.js';
import type { Settings } from './settings.js';
vi.mock('fs', async (importOriginal) => {
const actualFs = await importOriginal<typeof fs>();
@@ -132,8 +132,12 @@ describe('isWorkspaceTrusted', () => {
let mockCwd: string;
const mockRules: Record<string, TrustLevel> = {};
const mockSettings: Settings = {
folderTrustFeature: true,
folderTrust: true,
security: {
folderTrust: {
featureEnabled: true,
enabled: true,
},
},
};
beforeEach(() => {


@@ -4,11 +4,11 @@
* SPDX-License-Identifier: Apache-2.0
*/
import * as fs from 'fs';
import * as path from 'path';
import { homedir } from 'os';
import * as fs from 'node:fs';
import * as path from 'node:path';
import { homedir } from 'node:os';
import { getErrorMessage, isWithinRoot } from '@qwen-code/qwen-code-core';
import { Settings } from './settings.js';
import type { Settings } from './settings.js';
import stripJsonComments from 'strip-json-comments';
export const TRUSTED_FOLDERS_FILENAME = 'trustedFolders.json';
@@ -111,8 +111,9 @@ export function saveTrustedFolders(
}
export function isWorkspaceTrusted(settings: Settings): boolean | undefined {
const folderTrustFeature = settings.folderTrustFeature ?? false;
const folderTrustSetting = settings.folderTrust ?? true;
const folderTrustFeature =
settings.security?.folderTrust?.featureEnabled ?? false;
const folderTrustSetting = settings.security?.folderTrust?.enabled ?? true;
const folderTrustEnabled = folderTrustFeature && folderTrustSetting;
if (!folderTrustEnabled) {


@@ -4,19 +4,18 @@
* SPDX-License-Identifier: Apache-2.0
*/
import stripAnsi from 'strip-ansi';
import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
import {
main,
setupUnhandledRejectionHandler,
validateDnsResolutionOrder,
startInteractiveUI,
} from './gemini.js';
import {
LoadedSettings,
SettingsFile,
loadSettings,
} from './config/settings.js';
import type { SettingsFile } from './config/settings.js';
import { LoadedSettings, loadSettings } from './config/settings.js';
import { appEvents, AppEvent } from './utils/events.js';
import type { Config } from '@qwen-code/qwen-code-core';
import { FatalConfigError } from '@qwen-code/qwen-code-core';
// Custom error to identify mock process.exit calls
class MockProcessExitError extends Error {
@@ -76,7 +75,6 @@ vi.mock('./utils/sandbox.js', () => ({
}));
describe('gemini.tsx main function', () => {
let consoleErrorSpy: ReturnType<typeof vi.spyOn>;
let loadSettingsMock: ReturnType<typeof vi.mocked<typeof loadSettings>>;
let originalEnvGeminiSandbox: string | undefined;
let originalEnvSandbox: string | undefined;
@@ -98,7 +96,6 @@ describe('gemini.tsx main function', () => {
delete process.env['GEMINI_SANDBOX'];
delete process.env['SANDBOX'];
consoleErrorSpy = vi.spyOn(console, 'error').mockImplementation(() => {});
initialUnhandledRejectionListeners =
process.listeners('unhandledRejection');
});
@@ -127,7 +124,7 @@ describe('gemini.tsx main function', () => {
vi.restoreAllMocks();
});
it('should call process.exit(1) if settings have errors', async () => {
it('should throw InvalidConfigurationError if settings have errors', async () => {
const settingsError = {
message: 'Test settings error',
path: '/test/settings.json',
@@ -144,37 +141,23 @@ describe('gemini.tsx main function', () => {
path: '/system/settings.json',
settings: {},
};
const systemDefaultsFile: SettingsFile = {
path: '/system/system-defaults.json',
settings: {},
};
const mockLoadedSettings = new LoadedSettings(
systemSettingsFile,
systemDefaultsFile,
userSettingsFile,
workspaceSettingsFile,
[settingsError],
true,
new Set(),
);
loadSettingsMock.mockReturnValue(mockLoadedSettings);
try {
await main();
// If main completes without throwing, the test should fail because process.exit was expected
expect.fail('main function did not exit as expected');
} catch (error) {
expect(error).toBeInstanceOf(MockProcessExitError);
if (error instanceof MockProcessExitError) {
expect(error.code).toBe(1);
}
}
// Verify console.error was called with the error message
expect(consoleErrorSpy).toHaveBeenCalledTimes(2);
expect(stripAnsi(String(consoleErrorSpy.mock.calls[0][0]))).toBe(
'Error in /test/settings.json: Test settings error',
);
expect(stripAnsi(String(consoleErrorSpy.mock.calls[1][0]))).toBe(
'Please fix /test/settings.json and try again.',
);
// Verify process.exit was called.
expect(processExitSpy).toHaveBeenCalledWith(1);
await expect(main()).rejects.toThrow(FatalConfigError);
});
it('should log unhandled promise rejections and open debug console on first error', async () => {
@@ -250,3 +233,100 @@ describe('validateDnsResolutionOrder', () => {
);
});
});
describe('startInteractiveUI', () => {
// Mock dependencies
const mockConfig = {
getProjectRoot: () => '/root',
getScreenReader: () => false,
} as Config;
const mockSettings = {
merged: {
ui: {
hideWindowTitle: false,
},
},
} as LoadedSettings;
const mockStartupWarnings = ['warning1'];
const mockWorkspaceRoot = '/root';
vi.mock('./utils/version.js', () => ({
getCliVersion: vi.fn(() => Promise.resolve('1.0.0')),
}));
vi.mock('./ui/utils/kittyProtocolDetector.js', () => ({
detectAndEnableKittyProtocol: vi.fn(() => Promise.resolve()),
}));
vi.mock('./ui/utils/updateCheck.js', () => ({
checkForUpdates: vi.fn(() => Promise.resolve(null)),
}));
vi.mock('./utils/cleanup.js', () => ({
cleanupCheckpoints: vi.fn(() => Promise.resolve()),
registerCleanup: vi.fn(),
}));
vi.mock('ink', () => ({
render: vi.fn().mockReturnValue({ unmount: vi.fn() }),
}));
beforeEach(() => {
vi.clearAllMocks();
});
it('should render the UI with proper React context and exitOnCtrlC disabled', async () => {
const { render } = await import('ink');
const renderSpy = vi.mocked(render);
await startInteractiveUI(
mockConfig,
mockSettings,
mockStartupWarnings,
mockWorkspaceRoot,
);
// Verify render was called with correct options
expect(renderSpy).toHaveBeenCalledTimes(1);
const [reactElement, options] = renderSpy.mock.calls[0];
// Verify render options
expect(options).toEqual({
exitOnCtrlC: false,
isScreenReaderEnabled: false,
});
// Verify React element structure is valid (but don't deep dive into JSX internals)
expect(reactElement).toBeDefined();
});
it('should perform all startup tasks in correct order', async () => {
const { getCliVersion } = await import('./utils/version.js');
const { detectAndEnableKittyProtocol } = await import(
'./ui/utils/kittyProtocolDetector.js'
);
const { checkForUpdates } = await import('./ui/utils/updateCheck.js');
const { registerCleanup } = await import('./utils/cleanup.js');
await startInteractiveUI(
mockConfig,
mockSettings,
mockStartupWarnings,
mockWorkspaceRoot,
);
// Verify all startup tasks were called
expect(getCliVersion).toHaveBeenCalledTimes(1);
expect(detectAndEnableKittyProtocol).toHaveBeenCalledTimes(1);
expect(registerCleanup).toHaveBeenCalledTimes(1);
// Verify cleanup handler is registered with unmount function
const cleanupFn = vi.mocked(registerCleanup).mock.calls[0][0];
expect(typeof cleanupFn).toBe('function');
// checkForUpdates should be called asynchronously (not waited for)
// We need a small delay to let it execute
await new Promise((resolve) => setTimeout(resolve, 0));
expect(checkForUpdates).toHaveBeenCalledTimes(1);
});
});

View File

@@ -4,49 +4,47 @@
* SPDX-License-Identifier: Apache-2.0
*/
import React from 'react';
import { render } from 'ink';
import { AppWrapper } from './ui/App.js';
import { loadCliConfig, parseArguments } from './config/config.js';
import { readStdin } from './utils/readStdin.js';
import { basename } from 'node:path';
import v8 from 'node:v8';
import os from 'node:os';
import dns from 'node:dns';
import { spawn } from 'node:child_process';
import { start_sandbox } from './utils/sandbox.js';
import type { Config } from '@qwen-code/qwen-code-core';
import {
DnsResolutionOrder,
LoadedSettings,
loadSettings,
SettingScope,
} from './config/settings.js';
import { themeManager } from './ui/themes/theme-manager.js';
import { getStartupWarnings } from './utils/startupWarnings.js';
import { getUserStartupWarnings } from './utils/userStartupWarnings.js';
import { ConsolePatcher } from './ui/utils/ConsolePatcher.js';
import { runNonInteractive } from './nonInteractiveCli.js';
import { loadExtensions } from './config/extension.js';
import { cleanupCheckpoints, registerCleanup } from './utils/cleanup.js';
import { getCliVersion } from './utils/version.js';
import {
Config,
sessionId,
logUserPrompt,
AuthType,
FatalConfigError,
getOauthClient,
logIdeConnection,
IdeConnectionEvent,
IdeConnectionType,
logIdeConnection,
logUserPrompt,
sessionId,
} from '@qwen-code/qwen-code-core';
import { render } from 'ink';
import { spawn } from 'node:child_process';
import dns from 'node:dns';
import os from 'node:os';
import { basename } from 'node:path';
import v8 from 'node:v8';
import React from 'react';
import { validateAuthMethod } from './config/auth.js';
import { loadCliConfig, parseArguments } from './config/config.js';
import { loadExtensions } from './config/extension.js';
import type { DnsResolutionOrder, LoadedSettings } from './config/settings.js';
import { loadSettings, SettingScope } from './config/settings.js';
import { runNonInteractive } from './nonInteractiveCli.js';
import { AppWrapper } from './ui/App.js';
import { setMaxSizedBoxDebugging } from './ui/components/shared/MaxSizedBox.js';
import { validateNonInteractiveAuth } from './validateNonInterActiveAuth.js';
import { SettingsContext } from './ui/contexts/SettingsContext.js';
import { themeManager } from './ui/themes/theme-manager.js';
import { ConsolePatcher } from './ui/utils/ConsolePatcher.js';
import { detectAndEnableKittyProtocol } from './ui/utils/kittyProtocolDetector.js';
import { checkForUpdates } from './ui/utils/updateCheck.js';
import { cleanupCheckpoints, registerCleanup } from './utils/cleanup.js';
import { AppEvent, appEvents } from './utils/events.js';
import { handleAutoUpdate } from './utils/handleAutoUpdate.js';
import { appEvents, AppEvent } from './utils/events.js';
import { SettingsContext } from './ui/contexts/SettingsContext.js';
import { readStdin } from './utils/readStdin.js';
import { start_sandbox } from './utils/sandbox.js';
import { getStartupWarnings } from './utils/startupWarnings.js';
import { getUserStartupWarnings } from './utils/userStartupWarnings.js';
import { getCliVersion } from './utils/version.js';
import { validateNonInteractiveAuth } from './validateNonInterActiveAuth.js';
import { runZedIntegration } from './zed-integration/zedIntegration.js';
export function validateDnsResolutionOrder(
order: string | undefined,
@@ -108,7 +106,6 @@ async function relaunchWithAdditionalArgs(additionalArgs: string[]) {
await new Promise((resolve) => child.on('close', resolve));
process.exit(0);
}
import { runZedIntegration } from './zed-integration/zedIntegration.js';
export function setupUnhandledRejectionHandler() {
let unhandledRejectionOccurred = false;
@@ -132,6 +129,44 @@ ${reason.stack}`
});
}
export async function startInteractiveUI(
config: Config,
settings: LoadedSettings,
startupWarnings: string[],
workspaceRoot: string,
) {
const version = await getCliVersion();
// Detect and enable Kitty keyboard protocol once at startup
await detectAndEnableKittyProtocol();
setWindowTitle(basename(workspaceRoot), settings);
const instance = render(
<React.StrictMode>
<SettingsContext.Provider value={settings}>
<AppWrapper
config={config}
settings={settings}
startupWarnings={startupWarnings}
version={version}
/>
</SettingsContext.Provider>
</React.StrictMode>,
{ exitOnCtrlC: false, isScreenReaderEnabled: config.getScreenReader() },
);
checkForUpdates()
.then((info) => {
handleAutoUpdate(info, settings, config.getProjectRoot());
})
.catch((err) => {
// Silently ignore update check errors.
if (config.getDebugMode()) {
console.error('Update check failed:', err);
}
});
registerCleanup(() => instance.unmount());
}
export async function main() {
setupUnhandledRejectionHandler();
const workspaceRoot = process.cwd();
@@ -139,18 +174,15 @@ export async function main() {
await cleanupCheckpoints();
if (settings.errors.length > 0) {
for (const error of settings.errors) {
let errorMessage = `Error in ${error.path}: ${error.message}`;
if (!process.env['NO_COLOR']) {
errorMessage = `\x1b[31m${errorMessage}\x1b[0m`;
}
console.error(errorMessage);
console.error(`Please fix ${error.path} and try again.`);
}
process.exit(1);
const errorMessages = settings.errors.map(
(error) => `Error in ${error.path}: ${error.message}`,
);
throw new FatalConfigError(
`${errorMessages.join('\n')}\nPlease fix the configuration file(s) and try again.`,
);
}
const argv = await parseArguments();
const argv = await parseArguments(settings.merged);
const extensions = loadExtensions(workspaceRoot);
const config = await loadCliConfig(
settings.merged,
@@ -167,7 +199,7 @@ export async function main() {
registerCleanup(consolePatcher.cleanup);
dns.setDefaultResultOrder(
validateDnsResolutionOrder(settings.merged.dnsResolutionOrder),
validateDnsResolutionOrder(settings.merged.advanced?.dnsResolutionOrder),
);
if (argv.promptInteractive && !process.stdin.isTTY) {
@@ -186,7 +218,7 @@ export async function main() {
}
// Set a default auth type if one isn't set.
if (!settings.merged.selectedAuthType) {
if (!settings.merged.security?.auth?.selectedType) {
if (process.env['CLOUD_SHELL'] === 'true') {
settings.setValue(
SettingScope.User,
@@ -195,6 +227,14 @@ export async function main() {
);
}
}
// Empty key causes issues with the GoogleGenAI package.
if (process.env['GEMINI_API_KEY']?.trim() === '') {
delete process.env['GEMINI_API_KEY'];
}
if (process.env['GOOGLE_API_KEY']?.trim() === '') {
delete process.env['GOOGLE_API_KEY'];
}
setMaxSizedBoxDebugging(config.getDebugMode());
@@ -206,40 +246,72 @@ export async function main() {
}
// Load custom themes from settings
themeManager.loadCustomThemes(settings.merged.customThemes);
themeManager.loadCustomThemes(settings.merged.ui?.customThemes);
if (settings.merged.theme) {
if (!themeManager.setActiveTheme(settings.merged.theme)) {
if (settings.merged.ui?.theme) {
if (!themeManager.setActiveTheme(settings.merged.ui?.theme)) {
// If the theme is not found during initial load, log a warning and continue.
// The useThemeCommand hook in App.tsx will handle opening the dialog.
console.warn(`Warning: Theme "${settings.merged.theme}" not found.`);
console.warn(`Warning: Theme "${settings.merged.ui?.theme}" not found.`);
}
}
// hop into sandbox if we are outside and sandboxing is enabled
if (!process.env['SANDBOX']) {
const memoryArgs = settings.merged.autoConfigureMaxOldSpaceSize
const memoryArgs = settings.merged.advanced?.autoConfigureMemory
? getNodeMemoryArgs(config)
: [];
const sandboxConfig = config.getSandbox();
if (sandboxConfig) {
if (
settings.merged.selectedAuthType &&
!settings.merged.useExternalAuth
settings.merged.security?.auth?.selectedType &&
!settings.merged.security?.auth?.useExternal
) {
// Validate authentication here because the sandbox will interfere with the Oauth2 web redirect.
try {
const err = validateAuthMethod(settings.merged.selectedAuthType);
const err = validateAuthMethod(
settings.merged.security.auth.selectedType,
);
if (err) {
throw new Error(err);
}
await config.refreshAuth(settings.merged.selectedAuthType);
await config.refreshAuth(settings.merged.security.auth.selectedType);
} catch (err) {
console.error('Error authenticating:', err);
process.exit(1);
}
}
await start_sandbox(sandboxConfig, memoryArgs, config);
let stdinData = '';
if (!process.stdin.isTTY) {
stdinData = await readStdin();
}
// This function is a copy of the one from sandbox.ts
// It is moved here to decouple sandbox.ts from the CLI's argument structure.
const injectStdinIntoArgs = (
args: string[],
stdinData?: string,
): string[] => {
const finalArgs = [...args];
if (stdinData) {
const promptIndex = finalArgs.findIndex(
(arg) => arg === '--prompt' || arg === '-p',
);
if (promptIndex > -1 && finalArgs.length > promptIndex + 1) {
// If there's a prompt argument, prepend stdin to it
finalArgs[promptIndex + 1] =
`${stdinData}\n\n${finalArgs[promptIndex + 1]}`;
} else {
// If there's no prompt argument, add stdin as the prompt
finalArgs.push('--prompt', stdinData);
}
}
return finalArgs;
};
const sandboxArgs = injectStdinIntoArgs(process.argv, stdinData);
await start_sandbox(sandboxConfig, memoryArgs, config, sandboxArgs);
process.exit(0);
} else {
// Not in a sandbox and not entering one, so relaunch with additional
@@ -252,11 +324,12 @@ export async function main() {
}
if (
settings.merged.selectedAuthType === AuthType.LOGIN_WITH_GOOGLE &&
settings.merged.security?.auth?.selectedType ===
AuthType.LOGIN_WITH_GOOGLE &&
config.isBrowserLaunchSuppressed()
) {
// Do oauth before app renders to make copying the link possible.
await getOauthClient(settings.merged.selectedAuthType, config);
await getOauthClient(settings.merged.security.auth.selectedType, config);
}
if (config.getExperimentalZedIntegration()) {
@@ -271,36 +344,7 @@ export async function main() {
// Render UI, passing necessary config values. Check that there is no command line question.
if (config.isInteractive()) {
const version = await getCliVersion();
// Detect and enable Kitty keyboard protocol once at startup
await detectAndEnableKittyProtocol();
setWindowTitle(basename(workspaceRoot), settings);
const instance = render(
<React.StrictMode>
<SettingsContext.Provider value={settings}>
<AppWrapper
config={config}
settings={settings}
startupWarnings={startupWarnings}
version={version}
/>
</SettingsContext.Provider>
</React.StrictMode>,
{ exitOnCtrlC: false },
);
checkForUpdates()
.then((info) => {
handleAutoUpdate(info, settings, config.getProjectRoot());
})
.catch((err) => {
// Silently ignore update check errors.
if (config.getDebugMode()) {
console.error('Update check failed:', err);
}
});
registerCleanup(() => instance.unmount());
await startInteractiveUI(config, settings, startupWarnings, workspaceRoot);
return;
}
// If not a TTY, read from stdin
@@ -312,7 +356,9 @@ export async function main() {
}
}
if (!input) {
console.error('No input provided via stdin.');
console.error(
`No input provided via stdin. Input can be provided by piping data into gemini or using the --prompt option.`,
);
process.exit(1);
}
@@ -327,17 +373,21 @@ export async function main() {
});
const nonInteractiveConfig = await validateNonInteractiveAuth(
settings.merged.selectedAuthType,
settings.merged.useExternalAuth,
settings.merged.security?.auth?.selectedType,
settings.merged.security?.auth?.useExternal,
config,
);
if (config.getDebugMode()) {
console.log('Session ID: %s', sessionId);
}
await runNonInteractive(nonInteractiveConfig, input, prompt_id);
process.exit(0);
}
function setWindowTitle(title: string, settings: LoadedSettings) {
if (!settings.merged.hideWindowTitle) {
if (!settings.merged.ui?.hideWindowTitle) {
const windowTitle = (process.env['CLI_TITLE'] || `Qwen - ${title}`).replace(
// eslint-disable-next-line no-control-regex
/[\x00-\x1F\x7F]/g,
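One behavioural detail worth calling out from the diff above: the new injectStdinIntoArgs helper is what lets piped stdin survive the relaunch into the sandbox. A short usage sketch of its two branches; the helper body is copied from the diff, and the argv values are made up for illustration:

// Copied from the diff above, reproduced here so the example is self-contained.
const injectStdinIntoArgs = (args: string[], stdinData?: string): string[] => {
  const finalArgs = [...args];
  if (stdinData) {
    const promptIndex = finalArgs.findIndex(
      (arg) => arg === '--prompt' || arg === '-p',
    );
    if (promptIndex > -1 && finalArgs.length > promptIndex + 1) {
      // Existing prompt flag: prepend the piped input to its value.
      finalArgs[promptIndex + 1] = `${stdinData}\n\n${finalArgs[promptIndex + 1]}`;
    } else {
      // No prompt flag: the piped input becomes the prompt.
      finalArgs.push('--prompt', stdinData);
    }
  }
  return finalArgs;
};

console.log(injectStdinIntoArgs(['node', 'cli.js'], 'piped text'));
// -> ['node', 'cli.js', '--prompt', 'piped text']
console.log(injectStdinIntoArgs(['node', 'cli.js', '-p', 'summarize this'], 'piped text'));
// -> ['node', 'cli.js', '-p', 'piped text\n\nsummarize this']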

View File

@@ -5,19 +5,20 @@
*/
import {
Config,
type Config,
type ToolRegistry,
executeToolCall,
ToolRegistry,
ToolErrorType,
shutdownTelemetry,
GeminiEventType,
ServerGeminiStreamEvent,
type ServerGeminiStreamEvent,
} from '@qwen-code/qwen-code-core';
import { Part } from '@google/genai';
import { type Part } from '@google/genai';
import { runNonInteractive } from './nonInteractiveCli.js';
import { vi } from 'vitest';
// Mock core modules
vi.mock('./ui/hooks/atCommandProcessor.js');
vi.mock('@qwen-code/qwen-code-core', async (importOriginal) => {
const original =
await importOriginal<typeof import('@qwen-code/qwen-code-core')>();
@@ -35,20 +36,16 @@ describe('runNonInteractive', () => {
let mockCoreExecuteToolCall: vi.Mock;
let mockShutdownTelemetry: vi.Mock;
let consoleErrorSpy: vi.SpyInstance;
let processExitSpy: vi.SpyInstance;
let processStdoutSpy: vi.SpyInstance;
let mockGeminiClient: {
sendMessageStream: vi.Mock;
};
beforeEach(() => {
beforeEach(async () => {
mockCoreExecuteToolCall = vi.mocked(executeToolCall);
mockShutdownTelemetry = vi.mocked(shutdownTelemetry);
consoleErrorSpy = vi.spyOn(console, 'error').mockImplementation(() => {});
processExitSpy = vi
.spyOn(process, 'exit')
.mockImplementation((() => {}) as (code?: number) => never);
processStdoutSpy = vi
.spyOn(process.stdout, 'write')
.mockImplementation(() => true);
@@ -72,6 +69,14 @@ describe('runNonInteractive', () => {
getContentGeneratorConfig: vi.fn().mockReturnValue({}),
getDebugMode: vi.fn().mockReturnValue(false),
} as unknown as Config;
const { handleAtCommand } = await import(
'./ui/hooks/atCommandProcessor.js'
);
vi.mocked(handleAtCommand).mockImplementation(async ({ query }) => ({
processedQuery: [{ text: query }],
shouldProceed: true,
}));
});
afterEach(() => {
@@ -163,14 +168,16 @@ describe('runNonInteractive', () => {
mockCoreExecuteToolCall.mockResolvedValue({
error: new Error('Execution failed'),
errorType: ToolErrorType.EXECUTION_FAILED,
responseParts: {
functionResponse: {
name: 'errorTool',
response: {
output: 'Error: Execution failed',
responseParts: [
{
functionResponse: {
name: 'errorTool',
response: {
output: 'Error: Execution failed',
},
},
},
},
],
resultDisplay: 'Execution failed',
});
const finalResponse: ServerGeminiStreamEvent[] = [
@@ -189,7 +196,6 @@ describe('runNonInteractive', () => {
expect(consoleErrorSpy).toHaveBeenCalledWith(
'Error executing tool errorTool: Execution failed',
);
expect(processExitSpy).not.toHaveBeenCalled();
expect(mockGeminiClient.sendMessageStream).toHaveBeenCalledTimes(2);
expect(mockGeminiClient.sendMessageStream).toHaveBeenNthCalledWith(
2,
@@ -215,12 +221,9 @@ describe('runNonInteractive', () => {
throw apiError;
});
await runNonInteractive(mockConfig, 'Initial fail', 'prompt-id-4');
expect(consoleErrorSpy).toHaveBeenCalledWith(
'[API Error: API connection failed]',
);
expect(processExitSpy).toHaveBeenCalledWith(1);
await expect(
runNonInteractive(mockConfig, 'Initial fail', 'prompt-id-4'),
).rejects.toThrow(apiError);
});
it('should not exit if a tool is not found, and should send error back to model', async () => {
@@ -259,7 +262,6 @@ describe('runNonInteractive', () => {
expect(consoleErrorSpy).toHaveBeenCalledWith(
'Error executing tool nonexistentTool: Tool "nonexistentTool" not found in registry.',
);
expect(processExitSpy).not.toHaveBeenCalled();
expect(mockGeminiClient.sendMessageStream).toHaveBeenCalledTimes(2);
expect(processStdoutSpy).toHaveBeenCalledWith(
"Sorry, I can't find that tool.",
@@ -268,9 +270,54 @@ describe('runNonInteractive', () => {
it('should exit when max session turns are exceeded', async () => {
vi.mocked(mockConfig.getMaxSessionTurns).mockReturnValue(0);
await runNonInteractive(mockConfig, 'Trigger loop', 'prompt-id-6');
expect(consoleErrorSpy).toHaveBeenCalledWith(
'\n Reached max session turns for this session. Increase the number of turns by specifying maxSessionTurns in settings.json.',
await expect(
runNonInteractive(mockConfig, 'Trigger loop', 'prompt-id-6'),
).rejects.toThrow(
'Reached max session turns for this session. Increase the number of turns by specifying maxSessionTurns in settings.json.',
);
});
it('should preprocess @include commands before sending to the model', async () => {
// 1. Mock the imported atCommandProcessor
const { handleAtCommand } = await import(
'./ui/hooks/atCommandProcessor.js'
);
const mockHandleAtCommand = vi.mocked(handleAtCommand);
// 2. Define the raw input and the expected processed output
const rawInput = 'Summarize @file.txt';
const processedParts: Part[] = [
{ text: 'Summarize @file.txt' },
{ text: '\n--- Content from referenced files ---\n' },
{ text: 'This is the content of the file.' },
{ text: '\n--- End of content ---' },
];
// 3. Setup the mock to return the processed parts
mockHandleAtCommand.mockResolvedValue({
processedQuery: processedParts,
shouldProceed: true,
});
// Mock a simple stream response from the Gemini client
const events: ServerGeminiStreamEvent[] = [
{ type: GeminiEventType.Content, value: 'Summary complete.' },
];
mockGeminiClient.sendMessageStream.mockReturnValue(
createStreamFromEvents(events),
);
// 4. Run the non-interactive mode with the raw input
await runNonInteractive(mockConfig, rawInput, 'prompt-id-7');
// 5. Assert that sendMessageStream was called with the PROCESSED parts, not the raw input
expect(mockGeminiClient.sendMessageStream).toHaveBeenCalledWith(
processedParts,
expect.any(AbortSignal),
'prompt-id-7',
);
// 6. Assert the final output is correct
expect(processStdoutSpy).toHaveBeenCalledWith('Summary complete.');
});
});
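The tests above hand mockGeminiClient.sendMessageStream a stream built by createStreamFromEvents, a helper that sits outside this hunk. One plausible shape for it, written here as an assumption rather than the project's actual implementation, is an async generator that replays the prepared events:

import type { ServerGeminiStreamEvent } from '@qwen-code/qwen-code-core';

// Hypothetical helper: replays the prepared events as if they were streamed
// from the Gemini client. The real test file may define it differently.
export async function* createStreamFromEvents(
  events: ServerGeminiStreamEvent[],
): AsyncGenerator<ServerGeminiStreamEvent> {
  for (const event of events) {
    yield event;
  }
}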

View File

@@ -4,18 +4,20 @@
* SPDX-License-Identifier: Apache-2.0
*/
import type { Config, ToolCallRequestInfo } from '@qwen-code/qwen-code-core';
import {
Config,
ToolCallRequestInfo,
executeToolCall,
shutdownTelemetry,
isTelemetrySdkInitialized,
GeminiEventType,
parseAndFormatApiError,
FatalInputError,
FatalTurnLimitedError,
} from '@qwen-code/qwen-code-core';
import { Content, Part, FunctionCall } from '@google/genai';
import type { Content, Part } from '@google/genai';
import { ConsolePatcher } from './ui/utils/ConsolePatcher.js';
import { handleAtCommand } from './ui/hooks/atCommandProcessor.js';
export async function runNonInteractive(
config: Config,
@@ -40,9 +42,28 @@ export async function runNonInteractive(
const geminiClient = config.getGeminiClient();
const abortController = new AbortController();
const { processedQuery, shouldProceed } = await handleAtCommand({
query: input,
config,
addItem: (_item, _timestamp) => 0,
onDebugMessage: () => {},
messageId: Date.now(),
signal: abortController.signal,
});
if (!shouldProceed || !processedQuery) {
// An error occurred during @include processing (e.g., file not found).
// The error message is already logged by handleAtCommand.
throw new FatalInputError(
'Exiting due to an error processing the @ command.',
);
}
let currentMessages: Content[] = [
{ role: 'user', parts: [{ text: input }] },
{ role: 'user', parts: processedQuery as Part[] },
];
let turnCount = 0;
while (true) {
turnCount++;
@@ -50,12 +71,11 @@ export async function runNonInteractive(
config.getMaxSessionTurns() >= 0 &&
turnCount > config.getMaxSessionTurns()
) {
console.error(
'\n Reached max session turns for this session. Increase the number of turns by specifying maxSessionTurns in settings.json.',
throw new FatalTurnLimitedError(
'Reached max session turns for this session. Increase the number of turns by specifying maxSessionTurns in settings.json.',
);
return;
}
const functionCalls: FunctionCall[] = [];
const toolCallRequests: ToolCallRequestInfo[] = [];
const responseStream = geminiClient.sendMessageStream(
currentMessages[0]?.parts || [],
@@ -72,29 +92,13 @@ export async function runNonInteractive(
if (event.type === GeminiEventType.Content) {
process.stdout.write(event.value);
} else if (event.type === GeminiEventType.ToolCallRequest) {
const toolCallRequest = event.value;
const fc: FunctionCall = {
name: toolCallRequest.name,
args: toolCallRequest.args,
id: toolCallRequest.callId,
};
functionCalls.push(fc);
toolCallRequests.push(event.value);
}
}
if (functionCalls.length > 0) {
if (toolCallRequests.length > 0) {
const toolResponseParts: Part[] = [];
for (const fc of functionCalls) {
const callId = fc.id ?? `${fc.name}-${Date.now()}`;
const requestInfo: ToolCallRequestInfo = {
callId,
name: fc.name as string,
args: (fc.args ?? {}) as Record<string, unknown>,
isClientInitiated: false,
prompt_id,
};
for (const requestInfo of toolCallRequests) {
const toolResponse = await executeToolCall(
config,
requestInfo,
@@ -103,21 +107,12 @@ export async function runNonInteractive(
if (toolResponse.error) {
console.error(
`Error executing tool ${fc.name}: ${toolResponse.resultDisplay || toolResponse.error.message}`,
`Error executing tool ${requestInfo.name}: ${toolResponse.resultDisplay || toolResponse.error.message}`,
);
}
if (toolResponse.responseParts) {
const parts = Array.isArray(toolResponse.responseParts)
? toolResponse.responseParts
: [toolResponse.responseParts];
for (const part of parts) {
if (typeof part === 'string') {
toolResponseParts.push({ text: part });
} else if (part) {
toolResponseParts.push(part);
}
}
toolResponseParts.push(...toolResponse.responseParts);
}
}
currentMessages = [{ role: 'user', parts: toolResponseParts }];
@@ -133,7 +128,7 @@ export async function runNonInteractive(
config.getContentGeneratorConfig()?.authType,
),
);
process.exit(1);
throw error;
} finally {
consolePatcher.cleanup();
if (isTelemetrySdkInitialized()) {
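With process.exit(1) gone from this file, runNonInteractive now reports failure by throwing (FatalInputError, FatalTurnLimitedError, or the rethrown API error) and leaves the exit decision to its caller. A hedged sketch of what such a call site could look like; this wrapper is an assumption for illustration, not the CLI's actual bootstrap:

import type { Config } from '@qwen-code/qwen-code-core';
import { runNonInteractive } from './nonInteractiveCli.js';

// Hypothetical call site: maps any fatal error thrown above to a non-zero exit.
// The real entry point may centralize this handling differently.
export async function runNonInteractiveAndExit(
  config: Config,
  input: string,
  promptId: string,
): Promise<void> {
  try {
    await runNonInteractive(config, input, promptId);
    process.exit(0);
  } catch (error) {
    console.error(error instanceof Error ? error.message : String(error));
    process.exit(1);
  }
}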

View File

@@ -15,6 +15,14 @@ vi.mock('../ui/commands/aboutCommand.js', async () => {
};
});
vi.mock('../ui/commands/approvalModeCommand.js', () => ({
approvalModeCommand: {
name: 'approval-mode',
description: 'Approval mode command',
kind: 'built-in',
},
}));
vi.mock('../ui/commands/ideCommand.js', () => ({ ideCommand: vi.fn() }));
vi.mock('../ui/commands/restoreCommand.js', () => ({
restoreCommand: vi.fn(),
@@ -22,7 +30,7 @@ vi.mock('../ui/commands/restoreCommand.js', () => ({
import { describe, it, expect, vi, beforeEach, type Mock } from 'vitest';
import { BuiltinCommandLoader } from './BuiltinCommandLoader.js';
import { Config } from '@qwen-code/qwen-code-core';
import type { Config } from '@qwen-code/qwen-code-core';
import { CommandKind } from '../ui/commands/types.js';
import { ideCommand } from '../ui/commands/ideCommand.js';
@@ -56,6 +64,13 @@ vi.mock('../ui/commands/mcpCommand.js', () => ({
kind: 'BUILT_IN',
},
}));
vi.mock('../ui/commands/modelCommand.js', () => ({
modelCommand: {
name: 'model',
description: 'Model command',
kind: 'BUILT_IN',
},
}));
describe('BuiltinCommandLoader', () => {
let mockConfig: Config;
@@ -121,10 +136,17 @@ describe('BuiltinCommandLoader', () => {
expect(aboutCmd).toBeDefined();
expect(aboutCmd?.kind).toBe(CommandKind.BUILT_IN);
const approvalModeCmd = commands.find((c) => c.name === 'approval-mode');
expect(approvalModeCmd).toBeDefined();
expect(approvalModeCmd?.kind).toBe(CommandKind.BUILT_IN);
const ideCmd = commands.find((c) => c.name === 'ide');
expect(ideCmd).toBeDefined();
const mcpCmd = commands.find((c) => c.name === 'mcp');
expect(mcpCmd).toBeDefined();
const modelCmd = commands.find((c) => c.name === 'model');
expect(modelCmd).toBeDefined();
});
});

View File

@@ -4,10 +4,12 @@
* SPDX-License-Identifier: Apache-2.0
*/
import { ICommandLoader } from './types.js';
import { SlashCommand } from '../ui/commands/types.js';
import { Config } from '@qwen-code/qwen-code-core';
import type { ICommandLoader } from './types.js';
import type { SlashCommand } from '../ui/commands/types.js';
import type { Config } from '@qwen-code/qwen-code-core';
import { aboutCommand } from '../ui/commands/aboutCommand.js';
import { agentsCommand } from '../ui/commands/agentsCommand.js';
import { approvalModeCommand } from '../ui/commands/approvalModeCommand.js';
import { authCommand } from '../ui/commands/authCommand.js';
import { bugCommand } from '../ui/commands/bugCommand.js';
import { chatCommand } from '../ui/commands/chatCommand.js';
@@ -24,18 +26,18 @@ import { ideCommand } from '../ui/commands/ideCommand.js';
import { initCommand } from '../ui/commands/initCommand.js';
import { mcpCommand } from '../ui/commands/mcpCommand.js';
import { memoryCommand } from '../ui/commands/memoryCommand.js';
import { modelCommand } from '../ui/commands/modelCommand.js';
import { privacyCommand } from '../ui/commands/privacyCommand.js';
import { quitCommand, quitConfirmCommand } from '../ui/commands/quitCommand.js';
import { restoreCommand } from '../ui/commands/restoreCommand.js';
import { settingsCommand } from '../ui/commands/settingsCommand.js';
import { statsCommand } from '../ui/commands/statsCommand.js';
import { summaryCommand } from '../ui/commands/summaryCommand.js';
import { terminalSetupCommand } from '../ui/commands/terminalSetupCommand.js';
import { themeCommand } from '../ui/commands/themeCommand.js';
import { toolsCommand } from '../ui/commands/toolsCommand.js';
import { settingsCommand } from '../ui/commands/settingsCommand.js';
import { vimCommand } from '../ui/commands/vimCommand.js';
import { setupGithubCommand } from '../ui/commands/setupGithubCommand.js';
import { terminalSetupCommand } from '../ui/commands/terminalSetupCommand.js';
import { agentsCommand } from '../ui/commands/agentsCommand.js';
/**
* Loads the core, hard-coded slash commands that are an integral part
@@ -55,6 +57,7 @@ export class BuiltinCommandLoader implements ICommandLoader {
const allDefinitions: Array<SlashCommand | null> = [
aboutCommand,
agentsCommand,
approvalModeCommand,
authCommand,
bugCommand,
chatCommand,
@@ -71,6 +74,7 @@ export class BuiltinCommandLoader implements ICommandLoader {
initCommand,
mcpCommand,
memoryCommand,
modelCommand,
privacyCommand,
quitCommand,
quitConfirmCommand,
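For context on what gets appended to allDefinitions above: the command stubs mocked earlier in this compare (name, description, kind) plus the loader tests' use of action suggest the minimal shape of a built-in. A sketch of a hypothetical extra built-in, with the caveat that the real SlashCommand type may require more than is shown here:

import { CommandKind } from '../ui/commands/types.js';

// Hypothetical built-in shaped after the mocks and test assertions above;
// it would be appended to allDefinitions alongside the other commands.
export const exampleCommand = {
  name: 'example',
  description: 'Submits a fixed example prompt',
  kind: CommandKind.BUILT_IN,
  action: async () => ({
    type: 'submit_prompt' as const,
    content: [{ text: 'This is an example prompt' }],
  }),
};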

View File

@@ -4,8 +4,8 @@
* SPDX-License-Identifier: Apache-2.0
*/
import { SlashCommand } from '../ui/commands/types.js';
import { ICommandLoader } from './types.js';
import type { SlashCommand } from '../ui/commands/types.js';
import type { ICommandLoader } from './types.js';
/**
* Orchestrates the discovery and loading of all slash commands for the CLI.

View File

@@ -5,11 +5,8 @@
*/
import * as path from 'node:path';
import {
Config,
getProjectCommandsDir,
getUserCommandsDir,
} from '@qwen-code/qwen-code-core';
import type { Config } from '@qwen-code/qwen-code-core';
import { Storage } from '@qwen-code/qwen-code-core';
import mock from 'mock-fs';
import { FileCommandLoader } from './FileCommandLoader.js';
import { assert, vi } from 'vitest';
@@ -17,15 +14,23 @@ import { createMockCommandContext } from '../test-utils/mockCommandContext.js';
import {
SHELL_INJECTION_TRIGGER,
SHORTHAND_ARGS_PLACEHOLDER,
type PromptPipelineContent,
} from './prompt-processors/types.js';
import {
ConfirmationRequiredError,
ShellProcessor,
} from './prompt-processors/shellProcessor.js';
import { DefaultArgumentProcessor } from './prompt-processors/argumentProcessor.js';
import { CommandContext } from '../ui/commands/types.js';
import type { CommandContext } from '../ui/commands/types.js';
import { AtFileProcessor } from './prompt-processors/atFileProcessor.js';
const mockShellProcess = vi.hoisted(() => vi.fn());
const mockAtFileProcess = vi.hoisted(() => vi.fn());
vi.mock('./prompt-processors/atFileProcessor.js', () => ({
AtFileProcessor: vi.fn().mockImplementation(() => ({
process: mockAtFileProcess,
})),
}));
vi.mock('./prompt-processors/shellProcessor.js', () => ({
ShellProcessor: vi.fn().mockImplementation(() => ({
process: mockShellProcess,
@@ -57,6 +62,7 @@ vi.mock('@qwen-code/qwen-code-core', async (importOriginal) => {
await importOriginal<typeof import('@qwen-code/qwen-code-core')>();
return {
...original,
Storage: original.Storage,
isCommandAllowed: vi.fn(),
ShellExecutionService: {
execute: vi.fn(),
@@ -70,15 +76,28 @@ describe('FileCommandLoader', () => {
beforeEach(() => {
vi.clearAllMocks();
mockShellProcess.mockImplementation(
(prompt: string, context: CommandContext) => {
(prompt: PromptPipelineContent, context: CommandContext) => {
const userArgsRaw = context?.invocation?.args || '';
const processedPrompt = prompt.replaceAll(
// This is a simplified mock. A real implementation would need to iterate
// through all parts and process only the text parts.
const firstTextPart = prompt.find(
(p) => typeof p === 'string' || 'text' in p,
);
let textContent = '';
if (typeof firstTextPart === 'string') {
textContent = firstTextPart;
} else if (firstTextPart && 'text' in firstTextPart) {
textContent = firstTextPart.text ?? '';
}
const processedText = textContent.replaceAll(
SHORTHAND_ARGS_PLACEHOLDER,
userArgsRaw,
);
return Promise.resolve(processedPrompt);
return Promise.resolve([{ text: processedText }]);
},
);
mockAtFileProcess.mockImplementation(async (prompt: string) => prompt);
});
afterEach(() => {
@@ -86,7 +105,7 @@ describe('FileCommandLoader', () => {
});
it('loads a single command from a file', async () => {
const userCommandsDir = getUserCommandsDir();
const userCommandsDir = Storage.getUserCommandsDir();
mock({
[userCommandsDir]: {
'test.toml': 'prompt = "This is a test prompt"',
@@ -112,7 +131,7 @@ describe('FileCommandLoader', () => {
'',
);
if (result?.type === 'submit_prompt') {
expect(result.content).toBe('This is a test prompt');
expect(result.content).toEqual([{ text: 'This is a test prompt' }]);
} else {
assert.fail('Incorrect action type');
}
@@ -127,7 +146,7 @@ describe('FileCommandLoader', () => {
itif(process.platform !== 'win32')(
'loads commands from a symlinked directory',
async () => {
const userCommandsDir = getUserCommandsDir();
const userCommandsDir = Storage.getUserCommandsDir();
const realCommandsDir = '/real/commands';
mock({
[realCommandsDir]: {
@@ -152,7 +171,7 @@ describe('FileCommandLoader', () => {
itif(process.platform !== 'win32')(
'loads commands from a symlinked subdirectory',
async () => {
const userCommandsDir = getUserCommandsDir();
const userCommandsDir = Storage.getUserCommandsDir();
const realNamespacedDir = '/real/namespaced-commands';
mock({
[userCommandsDir]: {
@@ -176,7 +195,7 @@ describe('FileCommandLoader', () => {
);
it('loads multiple commands', async () => {
const userCommandsDir = getUserCommandsDir();
const userCommandsDir = Storage.getUserCommandsDir();
mock({
[userCommandsDir]: {
'test1.toml': 'prompt = "Prompt 1"',
@@ -191,7 +210,7 @@ describe('FileCommandLoader', () => {
});
it('creates deeply nested namespaces correctly', async () => {
const userCommandsDir = getUserCommandsDir();
const userCommandsDir = Storage.getUserCommandsDir();
mock({
[userCommandsDir]: {
@@ -213,7 +232,7 @@ describe('FileCommandLoader', () => {
});
it('creates namespaces from nested directories', async () => {
const userCommandsDir = getUserCommandsDir();
const userCommandsDir = Storage.getUserCommandsDir();
mock({
[userCommandsDir]: {
git: {
@@ -232,8 +251,10 @@ describe('FileCommandLoader', () => {
});
it('returns both user and project commands in order', async () => {
const userCommandsDir = getUserCommandsDir();
const projectCommandsDir = getProjectCommandsDir(process.cwd());
const userCommandsDir = Storage.getUserCommandsDir();
const projectCommandsDir = new Storage(
process.cwd(),
).getProjectCommandsDir();
mock({
[userCommandsDir]: {
'test.toml': 'prompt = "User prompt"',
@@ -262,7 +283,7 @@ describe('FileCommandLoader', () => {
'',
);
if (userResult?.type === 'submit_prompt') {
expect(userResult.content).toBe('User prompt');
expect(userResult.content).toEqual([{ text: 'User prompt' }]);
} else {
assert.fail('Incorrect action type for user command');
}
@@ -277,14 +298,14 @@ describe('FileCommandLoader', () => {
'',
);
if (projectResult?.type === 'submit_prompt') {
expect(projectResult.content).toBe('Project prompt');
expect(projectResult.content).toEqual([{ text: 'Project prompt' }]);
} else {
assert.fail('Incorrect action type for project command');
}
});
it('ignores files with TOML syntax errors', async () => {
const userCommandsDir = getUserCommandsDir();
const userCommandsDir = Storage.getUserCommandsDir();
mock({
[userCommandsDir]: {
'invalid.toml': 'this is not valid toml',
@@ -300,7 +321,7 @@ describe('FileCommandLoader', () => {
});
it('ignores files that are semantically invalid (missing prompt)', async () => {
const userCommandsDir = getUserCommandsDir();
const userCommandsDir = Storage.getUserCommandsDir();
mock({
[userCommandsDir]: {
'no_prompt.toml': 'description = "This file is missing a prompt"',
@@ -316,7 +337,7 @@ describe('FileCommandLoader', () => {
});
it('handles filename edge cases correctly', async () => {
const userCommandsDir = getUserCommandsDir();
const userCommandsDir = Storage.getUserCommandsDir();
mock({
[userCommandsDir]: {
'test.v1.toml': 'prompt = "Test prompt"',
@@ -338,7 +359,7 @@ describe('FileCommandLoader', () => {
});
it('uses a default description if not provided', async () => {
const userCommandsDir = getUserCommandsDir();
const userCommandsDir = Storage.getUserCommandsDir();
mock({
[userCommandsDir]: {
'test.toml': 'prompt = "Test prompt"',
@@ -353,7 +374,7 @@ describe('FileCommandLoader', () => {
});
it('uses the provided description', async () => {
const userCommandsDir = getUserCommandsDir();
const userCommandsDir = Storage.getUserCommandsDir();
mock({
[userCommandsDir]: {
'test.toml': 'prompt = "Test prompt"\ndescription = "My test command"',
@@ -368,7 +389,7 @@ describe('FileCommandLoader', () => {
});
it('should sanitize colons in filenames to prevent namespace conflicts', async () => {
const userCommandsDir = getUserCommandsDir();
const userCommandsDir = Storage.getUserCommandsDir();
mock({
[userCommandsDir]: {
'legacy:command.toml': 'prompt = "This is a legacy command"',
@@ -388,7 +409,7 @@ describe('FileCommandLoader', () => {
describe('Processor Instantiation Logic', () => {
it('instantiates only DefaultArgumentProcessor if no {{args}} or !{} are present', async () => {
const userCommandsDir = getUserCommandsDir();
const userCommandsDir = Storage.getUserCommandsDir();
mock({
[userCommandsDir]: {
'simple.toml': `prompt = "Just a regular prompt"`,
@@ -403,7 +424,7 @@ describe('FileCommandLoader', () => {
});
it('instantiates only ShellProcessor if {{args}} is present (but not !{})', async () => {
const userCommandsDir = getUserCommandsDir();
const userCommandsDir = Storage.getUserCommandsDir();
mock({
[userCommandsDir]: {
'args.toml': `prompt = "Prompt with {{args}}"`,
@@ -418,7 +439,7 @@ describe('FileCommandLoader', () => {
});
it('instantiates ShellProcessor and DefaultArgumentProcessor if !{} is present (but not {{args}})', async () => {
const userCommandsDir = getUserCommandsDir();
const userCommandsDir = Storage.getUserCommandsDir();
mock({
[userCommandsDir]: {
'shell.toml': `prompt = "Prompt with !{cmd}"`,
@@ -433,7 +454,7 @@ describe('FileCommandLoader', () => {
});
it('instantiates only ShellProcessor if both {{args}} and !{} are present', async () => {
const userCommandsDir = getUserCommandsDir();
const userCommandsDir = Storage.getUserCommandsDir();
mock({
[userCommandsDir]: {
'both.toml': `prompt = "Prompt with {{args}} and !{cmd}"`,
@@ -446,15 +467,65 @@ describe('FileCommandLoader', () => {
expect(ShellProcessor).toHaveBeenCalledTimes(1);
expect(DefaultArgumentProcessor).not.toHaveBeenCalled();
});
it('instantiates AtFileProcessor and DefaultArgumentProcessor if @{} is present', async () => {
const userCommandsDir = Storage.getUserCommandsDir();
mock({
[userCommandsDir]: {
'at-file.toml': `prompt = "Context: @{./my-file.txt}"`,
},
});
const loader = new FileCommandLoader(null as unknown as Config);
await loader.loadCommands(signal);
expect(AtFileProcessor).toHaveBeenCalledTimes(1);
expect(ShellProcessor).not.toHaveBeenCalled();
expect(DefaultArgumentProcessor).toHaveBeenCalledTimes(1);
});
it('instantiates ShellProcessor and AtFileProcessor if !{} and @{} are present', async () => {
const userCommandsDir = Storage.getUserCommandsDir();
mock({
[userCommandsDir]: {
'shell-and-at.toml': `prompt = "Run !{cmd} with @{file.txt}"`,
},
});
const loader = new FileCommandLoader(null as unknown as Config);
await loader.loadCommands(signal);
expect(ShellProcessor).toHaveBeenCalledTimes(1);
expect(AtFileProcessor).toHaveBeenCalledTimes(1);
expect(DefaultArgumentProcessor).toHaveBeenCalledTimes(1); // because no {{args}}
});
it('instantiates only ShellProcessor and AtFileProcessor if {{args}} and @{} are present', async () => {
const userCommandsDir = Storage.getUserCommandsDir();
mock({
[userCommandsDir]: {
'args-and-at.toml': `prompt = "Run {{args}} with @{file.txt}"`,
},
});
const loader = new FileCommandLoader(null as unknown as Config);
await loader.loadCommands(signal);
expect(ShellProcessor).toHaveBeenCalledTimes(1);
expect(AtFileProcessor).toHaveBeenCalledTimes(1);
expect(DefaultArgumentProcessor).not.toHaveBeenCalled();
});
});
describe('Extension Command Loading', () => {
it('loads commands from active extensions', async () => {
const userCommandsDir = getUserCommandsDir();
const projectCommandsDir = getProjectCommandsDir(process.cwd());
const userCommandsDir = Storage.getUserCommandsDir();
const projectCommandsDir = new Storage(
process.cwd(),
).getProjectCommandsDir();
const extensionDir = path.join(
process.cwd(),
'.gemini/extensions/test-ext',
'.qwen/extensions/test-ext',
);
mock({
@@ -465,7 +536,7 @@ describe('FileCommandLoader', () => {
'project.toml': 'prompt = "Project command"',
},
[extensionDir]: {
'gemini-extension.json': JSON.stringify({
'qwen-extension.json': JSON.stringify({
name: 'test-ext',
version: '1.0.0',
}),
@@ -499,16 +570,18 @@ describe('FileCommandLoader', () => {
});
it('extension commands have extensionName metadata for conflict resolution', async () => {
const userCommandsDir = getUserCommandsDir();
const projectCommandsDir = getProjectCommandsDir(process.cwd());
const userCommandsDir = Storage.getUserCommandsDir();
const projectCommandsDir = new Storage(
process.cwd(),
).getProjectCommandsDir();
const extensionDir = path.join(
process.cwd(),
'.gemini/extensions/test-ext',
'.qwen/extensions/test-ext',
);
mock({
[extensionDir]: {
'gemini-extension.json': JSON.stringify({
'qwen-extension.json': JSON.stringify({
name: 'test-ext',
version: '1.0.0',
}),
@@ -555,7 +628,7 @@ describe('FileCommandLoader', () => {
);
expect(result0?.type).toBe('submit_prompt');
if (result0?.type === 'submit_prompt') {
expect(result0.content).toBe('User deploy command');
expect(result0.content).toEqual([{ text: 'User deploy command' }]);
}
expect(commands[1].name).toBe('deploy');
@@ -572,7 +645,7 @@ describe('FileCommandLoader', () => {
);
expect(result1?.type).toBe('submit_prompt');
if (result1?.type === 'submit_prompt') {
expect(result1.content).toBe('Project deploy command');
expect(result1.content).toEqual([{ text: 'Project deploy command' }]);
}
expect(commands[2].name).toBe('deploy');
@@ -590,23 +663,23 @@ describe('FileCommandLoader', () => {
);
expect(result2?.type).toBe('submit_prompt');
if (result2?.type === 'submit_prompt') {
expect(result2.content).toBe('Extension deploy command');
expect(result2.content).toEqual([{ text: 'Extension deploy command' }]);
}
});
it('only loads commands from active extensions', async () => {
const extensionDir1 = path.join(
process.cwd(),
'.gemini/extensions/active-ext',
'.qwen/extensions/active-ext',
);
const extensionDir2 = path.join(
process.cwd(),
'.gemini/extensions/inactive-ext',
'.qwen/extensions/inactive-ext',
);
mock({
[extensionDir1]: {
'gemini-extension.json': JSON.stringify({
'qwen-extension.json': JSON.stringify({
name: 'active-ext',
version: '1.0.0',
}),
@@ -615,7 +688,7 @@ describe('FileCommandLoader', () => {
},
},
[extensionDir2]: {
'gemini-extension.json': JSON.stringify({
'qwen-extension.json': JSON.stringify({
name: 'inactive-ext',
version: '1.0.0',
}),
@@ -654,12 +727,12 @@ describe('FileCommandLoader', () => {
it('handles missing extension commands directory gracefully', async () => {
const extensionDir = path.join(
process.cwd(),
'.gemini/extensions/no-commands',
'.qwen/extensions/no-commands',
);
mock({
[extensionDir]: {
'gemini-extension.json': JSON.stringify({
'qwen-extension.json': JSON.stringify({
name: 'no-commands',
version: '1.0.0',
}),
@@ -684,11 +757,11 @@ describe('FileCommandLoader', () => {
});
it('handles nested command structure in extensions', async () => {
const extensionDir = path.join(process.cwd(), '.gemini/extensions/a');
const extensionDir = path.join(process.cwd(), '.qwen/extensions/a');
mock({
[extensionDir]: {
'gemini-extension.json': JSON.stringify({
'qwen-extension.json': JSON.stringify({
name: 'a',
version: '1.0.0',
}),
@@ -733,7 +806,9 @@ describe('FileCommandLoader', () => {
'',
);
if (result?.type === 'submit_prompt') {
expect(result.content).toBe('Nested command from extension a');
expect(result.content).toEqual([
{ text: 'Nested command from extension a' },
]);
} else {
assert.fail('Incorrect action type');
}
@@ -742,7 +817,7 @@ describe('FileCommandLoader', () => {
describe('Argument Handling Integration (via ShellProcessor)', () => {
it('correctly processes a command with {{args}}', async () => {
const userCommandsDir = getUserCommandsDir();
const userCommandsDir = Storage.getUserCommandsDir();
mock({
[userCommandsDir]: {
'shorthand.toml':
@@ -767,14 +842,16 @@ describe('FileCommandLoader', () => {
);
expect(result?.type).toBe('submit_prompt');
if (result?.type === 'submit_prompt') {
expect(result.content).toBe('The user wants to: do something cool');
expect(result.content).toEqual([
{ text: 'The user wants to: do something cool' },
]);
}
});
});
describe('Default Argument Processor Integration', () => {
it('correctly processes a command without {{args}}', async () => {
const userCommandsDir = getUserCommandsDir();
const userCommandsDir = Storage.getUserCommandsDir();
mock({
[userCommandsDir]: {
'model_led.toml':
@@ -801,14 +878,14 @@ describe('FileCommandLoader', () => {
if (result?.type === 'submit_prompt') {
const expectedContent =
'This is the instruction.\n\n/model_led 1.2.0 added "a feature"';
expect(result.content).toBe(expectedContent);
expect(result.content).toEqual([{ text: expectedContent }]);
}
});
});
describe('Shell Processor Integration', () => {
it('instantiates ShellProcessor if {{args}} is present (even without shell trigger)', async () => {
const userCommandsDir = getUserCommandsDir();
const userCommandsDir = Storage.getUserCommandsDir();
mock({
[userCommandsDir]: {
'args_only.toml': `prompt = "Hello {{args}}"`,
@@ -821,7 +898,7 @@ describe('FileCommandLoader', () => {
expect(ShellProcessor).toHaveBeenCalledWith('args_only');
});
it('instantiates ShellProcessor if the trigger is present', async () => {
const userCommandsDir = getUserCommandsDir();
const userCommandsDir = Storage.getUserCommandsDir();
mock({
[userCommandsDir]: {
'shell.toml': `prompt = "Run this: ${SHELL_INJECTION_TRIGGER}echo hello}"`,
@@ -835,7 +912,7 @@ describe('FileCommandLoader', () => {
});
it('does not instantiate ShellProcessor if no triggers ({{args}} or !{}) are present', async () => {
const userCommandsDir = getUserCommandsDir();
const userCommandsDir = Storage.getUserCommandsDir();
mock({
[userCommandsDir]: {
'regular.toml': `prompt = "Just a regular prompt"`,
@@ -849,13 +926,13 @@ describe('FileCommandLoader', () => {
});
it('returns a "submit_prompt" action if shell processing succeeds', async () => {
const userCommandsDir = getUserCommandsDir();
const userCommandsDir = Storage.getUserCommandsDir();
mock({
[userCommandsDir]: {
'shell.toml': `prompt = "Run !{echo 'hello'}"`,
},
});
mockShellProcess.mockResolvedValue('Run hello');
mockShellProcess.mockResolvedValue([{ text: 'Run hello' }]);
const loader = new FileCommandLoader(null as unknown as Config);
const commands = await loader.loadCommands(signal);
@@ -871,12 +948,12 @@ describe('FileCommandLoader', () => {
expect(result?.type).toBe('submit_prompt');
if (result?.type === 'submit_prompt') {
expect(result.content).toBe('Run hello');
expect(result.content).toEqual([{ text: 'Run hello' }]);
}
});
it('returns a "confirm_shell_commands" action if shell processing requires it', async () => {
const userCommandsDir = getUserCommandsDir();
const userCommandsDir = Storage.getUserCommandsDir();
const rawInvocation = '/shell rm -rf /';
mock({
[userCommandsDir]: {
@@ -910,7 +987,7 @@ describe('FileCommandLoader', () => {
});
it('re-throws other errors from the processor', async () => {
const userCommandsDir = getUserCommandsDir();
const userCommandsDir = Storage.getUserCommandsDir();
mock({
[userCommandsDir]: {
'shell.toml': `prompt = "Run !{something}"`,
@@ -934,23 +1011,36 @@ describe('FileCommandLoader', () => {
),
).rejects.toThrow('Something else went wrong');
});
it('assembles the processor pipeline in the correct order (Shell -> Default)', async () => {
const userCommandsDir = getUserCommandsDir();
it('assembles the processor pipeline in the correct order (AtFile -> Shell -> Default)', async () => {
const userCommandsDir = Storage.getUserCommandsDir();
mock({
[userCommandsDir]: {
// This prompt uses !{} but NOT {{args}}, so both processors should be active.
// This prompt uses !{}, @{}, but NOT {{args}}, so all processors should be active.
'pipeline.toml': `
prompt = "Shell says: ${SHELL_INJECTION_TRIGGER}echo foo}."
prompt = "Shell says: !{echo foo}. File says: @{./bar.txt}"
`,
},
'./bar.txt': 'bar content',
});
const defaultProcessMock = vi
.fn()
.mockImplementation((p) => Promise.resolve(`${p}-default-processed`));
.mockImplementation((p: PromptPipelineContent) =>
Promise.resolve([
{ text: `${(p[0] as { text: string }).text}-default-processed` },
]),
);
mockShellProcess.mockImplementation((p) =>
Promise.resolve(`${p}-shell-processed`),
mockShellProcess.mockImplementation((p: PromptPipelineContent) =>
Promise.resolve([
{ text: `${(p[0] as { text: string }).text}-shell-processed` },
]),
);
mockAtFileProcess.mockImplementation((p: PromptPipelineContent) =>
Promise.resolve([
{ text: `${(p[0] as { text: string }).text}-at-file-processed` },
]),
);
vi.mocked(DefaultArgumentProcessor).mockImplementation(
@@ -968,35 +1058,115 @@ describe('FileCommandLoader', () => {
const result = await command!.action!(
createMockCommandContext({
invocation: {
raw: '/pipeline bar',
raw: '/pipeline baz',
name: 'pipeline',
args: 'bar',
args: 'baz',
},
}),
'bar',
'baz',
);
expect(mockAtFileProcess.mock.invocationCallOrder[0]).toBeLessThan(
mockShellProcess.mock.invocationCallOrder[0],
);
expect(mockShellProcess.mock.invocationCallOrder[0]).toBeLessThan(
defaultProcessMock.mock.invocationCallOrder[0],
);
// Verify the flow of the prompt through the processors
// 1. Shell processor runs first
expect(mockShellProcess).toHaveBeenCalledWith(
expect.stringContaining(SHELL_INJECTION_TRIGGER),
// 1. AtFile processor runs first
expect(mockAtFileProcess).toHaveBeenCalledWith(
[{ text: expect.stringContaining('@{./bar.txt}') }],
expect.any(Object),
);
// 2. Default processor runs second
// 2. Shell processor runs second
expect(mockShellProcess).toHaveBeenCalledWith(
[{ text: expect.stringContaining('-at-file-processed') }],
expect.any(Object),
);
// 3. Default processor runs third
expect(defaultProcessMock).toHaveBeenCalledWith(
expect.stringContaining('-shell-processed'),
[{ text: expect.stringContaining('-shell-processed') }],
expect.any(Object),
);
if (result?.type === 'submit_prompt') {
expect(result.content).toContain('-shell-processed-default-processed');
const contentAsArray = Array.isArray(result.content)
? result.content
: [result.content];
expect(contentAsArray.length).toBeGreaterThan(0);
const firstPart = contentAsArray[0];
if (typeof firstPart === 'object' && firstPart && 'text' in firstPart) {
expect(firstPart.text).toContain(
'-at-file-processed-shell-processed-default-processed',
);
} else {
assert.fail(
'First part of content is not a text part or is a string',
);
}
} else {
assert.fail('Incorrect action type');
}
});
});
describe('@-file Processor Integration', () => {
it('correctly processes a command with @{file}', async () => {
const userCommandsDir = Storage.getUserCommandsDir();
mock({
[userCommandsDir]: {
'at-file.toml':
'prompt = "Context from file: @{./test.txt}"\ndescription = "@-file test"',
},
'./test.txt': 'file content',
});
mockAtFileProcess.mockImplementation(
async (prompt: PromptPipelineContent) => {
// A simplified mock of AtFileProcessor's behavior
const textContent = (prompt[0] as { text: string }).text;
if (textContent.includes('@{./test.txt}')) {
return [
{
text: textContent.replace('@{./test.txt}', 'file content'),
},
];
}
return prompt;
},
);
// Prevent default processor from interfering
vi.mocked(DefaultArgumentProcessor).mockImplementation(
() =>
({
process: (p: PromptPipelineContent) => Promise.resolve(p),
}) as unknown as DefaultArgumentProcessor,
);
const loader = new FileCommandLoader(null as unknown as Config);
const commands = await loader.loadCommands(signal);
const command = commands.find((c) => c.name === 'at-file');
expect(command).toBeDefined();
const result = await command!.action?.(
createMockCommandContext({
invocation: {
raw: '/at-file',
name: 'at-file',
args: '',
},
}),
'',
);
expect(result?.type).toBe('submit_prompt');
if (result?.type === 'submit_prompt') {
expect(result.content).toEqual([
{ text: 'Context from file: file content' },
]);
}
});
});
});

View File

@@ -4,33 +4,35 @@
* SPDX-License-Identifier: Apache-2.0
*/
import { promises as fs } from 'fs';
import path from 'path';
import { promises as fs } from 'node:fs';
import path from 'node:path';
import toml from '@iarna/toml';
import { glob } from 'glob';
import { z } from 'zod';
import {
Config,
getProjectCommandsDir,
getUserCommandsDir,
} from '@qwen-code/qwen-code-core';
import { ICommandLoader } from './types.js';
import {
import type { Config } from '@qwen-code/qwen-code-core';
import { Storage } from '@qwen-code/qwen-code-core';
import type { ICommandLoader } from './types.js';
import type {
CommandContext,
CommandKind,
SlashCommand,
SlashCommandActionReturn,
} from '../ui/commands/types.js';
import { CommandKind } from '../ui/commands/types.js';
import { DefaultArgumentProcessor } from './prompt-processors/argumentProcessor.js';
import {
import type {
IPromptProcessor,
PromptPipelineContent,
} from './prompt-processors/types.js';
import {
SHORTHAND_ARGS_PLACEHOLDER,
SHELL_INJECTION_TRIGGER,
AT_FILE_INJECTION_TRIGGER,
} from './prompt-processors/types.js';
import {
ConfirmationRequiredError,
ShellProcessor,
} from './prompt-processors/shellProcessor.js';
import { AtFileProcessor } from './prompt-processors/atFileProcessor.js';
interface CommandDirectory {
path: string;
@@ -130,11 +132,13 @@ export class FileCommandLoader implements ICommandLoader {
private getCommandDirectories(): CommandDirectory[] {
const dirs: CommandDirectory[] = [];
const storage = this.config?.storage ?? new Storage(this.projectRoot);
// 1. User commands
dirs.push({ path: getUserCommandsDir() });
dirs.push({ path: Storage.getUserCommandsDir() });
// 2. Project commands (override user commands)
dirs.push({ path: getProjectCommandsDir(this.projectRoot) });
dirs.push({ path: storage.getProjectCommandsDir() });
// 3. Extension commands (processed last to detect all conflicts)
if (this.config) {
@@ -225,16 +229,25 @@ export class FileCommandLoader implements ICommandLoader {
const usesShellInjection = validDef.prompt.includes(
SHELL_INJECTION_TRIGGER,
);
const usesAtFileInjection = validDef.prompt.includes(
AT_FILE_INJECTION_TRIGGER,
);
// Interpolation (Shell Execution and Argument Injection)
// If the prompt uses either shell injection OR argument placeholders,
// we must use the ShellProcessor.
// 1. @-File Injection (Security First).
// This runs first to ensure we're not executing shell commands that
// could dynamically generate malicious @-paths.
if (usesAtFileInjection) {
processors.push(new AtFileProcessor(baseCommandName));
}
// 2. Argument and Shell Injection.
// This runs after file content has been safely injected.
if (usesShellInjection || usesArgs) {
processors.push(new ShellProcessor(baseCommandName));
}
// Default Argument Handling
// If NO explicit argument injection ({{args}}) was used, we append the raw invocation.
// 3. Default Argument Handling.
// Appends the raw invocation if no explicit {{args}} are used.
if (!usesArgs) {
processors.push(new DefaultArgumentProcessor());
}
@@ -254,19 +267,24 @@ export class FileCommandLoader implements ICommandLoader {
);
return {
type: 'submit_prompt',
content: validDef.prompt, // Fallback to unprocessed prompt
content: [{ text: validDef.prompt }], // Fallback to unprocessed prompt
};
}
try {
let processedPrompt = validDef.prompt;
let processedContent: PromptPipelineContent = [
{ text: validDef.prompt },
];
for (const processor of processors) {
processedPrompt = await processor.process(processedPrompt, context);
processedContent = await processor.process(
processedContent,
context,
);
}
return {
type: 'submit_prompt',
content: processedPrompt,
content: processedContent,
};
} catch (e) {
// Check if it's our specific error type
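The ordering above (at-file injection first, then shell and argument interpolation, then the default argument append) is applied by running each processor over the same PromptPipelineContent value in sequence, exactly as the loop in the action does. A minimal self-contained sketch of that composition pattern, using simplified local types and hypothetical stage names rather than the real processor classes:

// Simplified stand-ins for PromptPipelineContent and IPromptProcessor.
type TextPart = { text: string };
type Content = TextPart[];

interface MiniProcessor {
  process(content: Content): Promise<Content>;
}

// Each stage receives the previous stage's output, mirroring the loader's loop.
async function runPipeline(
  stages: MiniProcessor[],
  initial: Content,
): Promise<Content> {
  let content = initial;
  for (const stage of stages) {
    content = await stage.process(content);
  }
  return content;
}

const expandFiles: MiniProcessor = {
  async process(content) {
    // Fake file expander (the real AtFileProcessor splits the text into separate parts).
    return content.map((part) => ({
      text: part.text.replace('@{notes.txt}', '<file contents>'),
    }));
  },
};

const appendInvocation: MiniProcessor = {
  async process(content) {
    // Fake final stage that adds the raw invocation as an extra part
    // (the real DefaultArgumentProcessor appends to the last text part).
    return [...content, { text: '\n\n/mycommand arg1' }];
  },
};

// runPipeline([expandFiles, appendInvocation], [{ text: 'Summarize @{notes.txt}' }])
//   resolves to:
// [{ text: 'Summarize <file contents>' }, { text: '\n\n/mycommand arg1' }]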

View File

@@ -0,0 +1,128 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { McpPromptLoader } from './McpPromptLoader.js';
import type { Config } from '@qwen-code/qwen-code-core';
import type { PromptArgument } from '@modelcontextprotocol/sdk/types.js';
import { describe, it, expect } from 'vitest';
describe('McpPromptLoader', () => {
const mockConfig = {} as Config;
describe('parseArgs', () => {
it('should handle multi-word positional arguments', () => {
const loader = new McpPromptLoader(mockConfig);
const promptArgs: PromptArgument[] = [
{ name: 'arg1', required: true },
{ name: 'arg2', required: true },
];
const userArgs = 'hello world';
const result = loader.parseArgs(userArgs, promptArgs);
expect(result).toEqual({ arg1: 'hello', arg2: 'world' });
});
it('should handle quoted multi-word positional arguments', () => {
const loader = new McpPromptLoader(mockConfig);
const promptArgs: PromptArgument[] = [
{ name: 'arg1', required: true },
{ name: 'arg2', required: true },
];
const userArgs = '"hello world" foo';
const result = loader.parseArgs(userArgs, promptArgs);
expect(result).toEqual({ arg1: 'hello world', arg2: 'foo' });
});
it('should handle a single positional argument with multiple words', () => {
const loader = new McpPromptLoader(mockConfig);
const promptArgs: PromptArgument[] = [{ name: 'arg1', required: true }];
const userArgs = 'hello world';
const result = loader.parseArgs(userArgs, promptArgs);
expect(result).toEqual({ arg1: 'hello world' });
});
it('should handle escaped quotes in positional arguments', () => {
const loader = new McpPromptLoader(mockConfig);
const promptArgs: PromptArgument[] = [{ name: 'arg1', required: true }];
const userArgs = '"hello \\"world\\""';
const result = loader.parseArgs(userArgs, promptArgs);
expect(result).toEqual({ arg1: 'hello "world"' });
});
it('should handle escaped backslashes in positional arguments', () => {
const loader = new McpPromptLoader(mockConfig);
const promptArgs: PromptArgument[] = [{ name: 'arg1', required: true }];
const userArgs = '"hello\\\\world"';
const result = loader.parseArgs(userArgs, promptArgs);
expect(result).toEqual({ arg1: 'hello\\world' });
});
it('should handle named args followed by positional args', () => {
const loader = new McpPromptLoader(mockConfig);
const promptArgs: PromptArgument[] = [
{ name: 'named', required: true },
{ name: 'pos', required: true },
];
const userArgs = '--named="value" positional';
const result = loader.parseArgs(userArgs, promptArgs);
expect(result).toEqual({ named: 'value', pos: 'positional' });
});
it('should handle positional args followed by named args', () => {
const loader = new McpPromptLoader(mockConfig);
const promptArgs: PromptArgument[] = [
{ name: 'pos', required: true },
{ name: 'named', required: true },
];
const userArgs = 'positional --named="value"';
const result = loader.parseArgs(userArgs, promptArgs);
expect(result).toEqual({ pos: 'positional', named: 'value' });
});
it('should handle positional args interspersed with named args', () => {
const loader = new McpPromptLoader(mockConfig);
const promptArgs: PromptArgument[] = [
{ name: 'pos1', required: true },
{ name: 'named', required: true },
{ name: 'pos2', required: true },
];
const userArgs = 'p1 --named="value" p2';
const result = loader.parseArgs(userArgs, promptArgs);
expect(result).toEqual({ pos1: 'p1', named: 'value', pos2: 'p2' });
});
it('should treat an escaped quote at the start as a literal', () => {
const loader = new McpPromptLoader(mockConfig);
const promptArgs: PromptArgument[] = [
{ name: 'arg1', required: true },
{ name: 'arg2', required: true },
];
const userArgs = '\\"hello world';
const result = loader.parseArgs(userArgs, promptArgs);
expect(result).toEqual({ arg1: '"hello', arg2: 'world' });
});
it('should handle a complex mix of args', () => {
const loader = new McpPromptLoader(mockConfig);
const promptArgs: PromptArgument[] = [
{ name: 'pos1', required: true },
{ name: 'named1', required: true },
{ name: 'pos2', required: true },
{ name: 'named2', required: true },
{ name: 'pos3', required: true },
];
const userArgs =
'p1 --named1="value 1" "p2 has spaces" --named2=value2 "p3 \\"with quotes\\""';
const result = loader.parseArgs(userArgs, promptArgs);
expect(result).toEqual({
pos1: 'p1',
named1: 'value 1',
pos2: 'p2 has spaces',
named2: 'value2',
pos3: 'p3 "with quotes"',
});
});
});
});

View File

@@ -4,19 +4,19 @@
* SPDX-License-Identifier: Apache-2.0
*/
import type { Config } from '@qwen-code/qwen-code-core';
import {
Config,
getErrorMessage,
getMCPServerPrompts,
} from '@qwen-code/qwen-code-core';
import {
import type {
CommandContext,
CommandKind,
SlashCommand,
SlashCommandActionReturn,
} from '../ui/commands/types.js';
import { ICommandLoader } from './types.js';
import { PromptArgument } from '@modelcontextprotocol/sdk/types.js';
import { CommandKind } from '../ui/commands/types.js';
import type { ICommandLoader } from './types.js';
import type { PromptArgument } from '@modelcontextprotocol/sdk/types.js';
/**
* Discovers and loads executable slash commands from prompts exposed by
@@ -169,7 +169,16 @@ export class McpPromptLoader implements ICommandLoader {
return Promise.resolve(promptCommands);
}
private parseArgs(
/**
* Parses the `userArgs` string representing the prompt arguments (all the text
* after the command) into a record matching the shape of the `promptArgs`.
*
* @param userArgs
* @param promptArgs
* @returns A record of the parsed arguments
* @visibleForTesting
*/
parseArgs(
userArgs: string,
promptArgs: PromptArgument[] | undefined,
): Record<string, unknown> | Error {
@@ -177,28 +186,36 @@ export class McpPromptLoader implements ICommandLoader {
const promptInputs: Record<string, unknown> = {};
// arg parsing: --key="value" or --key=value
const namedArgRegex = /--([^=]+)=(?:"((?:\\.|[^"\\])*)"|([^ ]*))/g;
const namedArgRegex = /--([^=]+)=(?:"((?:\\.|[^"\\])*)"|([^ ]+))/g;
let match;
const remainingArgs: string[] = [];
let lastIndex = 0;
const positionalParts: string[] = [];
while ((match = namedArgRegex.exec(userArgs)) !== null) {
const key = match[1];
const value = match[2] ?? match[3]; // Quoted or unquoted value
// Extract the quoted or unquoted argument and remove escape chars.
const value = (match[2] ?? match[3]).replace(/\\(.)/g, '$1');
argValues[key] = value;
// Capture text between matches as potential positional args
if (match.index > lastIndex) {
remainingArgs.push(userArgs.substring(lastIndex, match.index).trim());
positionalParts.push(userArgs.substring(lastIndex, match.index));
}
lastIndex = namedArgRegex.lastIndex;
}
// Capture any remaining text after the last named arg
if (lastIndex < userArgs.length) {
remainingArgs.push(userArgs.substring(lastIndex).trim());
positionalParts.push(userArgs.substring(lastIndex));
}
const positionalArgs = remainingArgs.join(' ').split(/ +/);
const positionalArgsString = positionalParts.join('').trim();
// extracts either quoted strings or non-quoted sequences of non-space characters.
const positionalArgRegex = /(?:"((?:\\.|[^"\\])*)"|([^ ]+))/g;
const positionalArgs: string[] = [];
while ((match = positionalArgRegex.exec(positionalArgsString)) !== null) {
// Extract the quoted or unquoted argument and remove escape chars.
positionalArgs.push((match[1] ?? match[2]).replace(/\\(.)/g, '$1'));
}
if (!promptArgs) {
return promptInputs;
@@ -213,19 +230,27 @@ export class McpPromptLoader implements ICommandLoader {
(arg) => arg.required && !promptInputs[arg.name],
);
const missingArgs: string[] = [];
for (let i = 0; i < unfilledArgs.length; i++) {
if (positionalArgs.length > i && positionalArgs[i]) {
promptInputs[unfilledArgs[i].name] = positionalArgs[i];
} else {
missingArgs.push(unfilledArgs[i].name);
if (unfilledArgs.length === 1) {
// If we have only one unfilled arg, we don't require quotes; we just
// join all the given arguments together as if they were quoted.
promptInputs[unfilledArgs[0].name] = positionalArgs.join(' ');
} else {
const missingArgs: string[] = [];
for (let i = 0; i < unfilledArgs.length; i++) {
if (positionalArgs.length > i) {
promptInputs[unfilledArgs[i].name] = positionalArgs[i];
} else {
missingArgs.push(unfilledArgs[i].name);
}
}
if (missingArgs.length > 0) {
const missingArgNames = missingArgs
.map((name) => `--${name}`)
.join(', ');
return new Error(`Missing required argument(s): ${missingArgNames}`);
}
}
if (missingArgs.length > 0) {
const missingArgNames = missingArgs.map((name) => `--${name}`).join(', ');
return new Error(`Missing required argument(s): ${missingArgNames}`);
}
return promptInputs;
}
}
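The new branch above changes how positional tokens are assigned: with exactly one unfilled required argument, all remaining tokens are joined with spaces, so quoting is unnecessary; with several, they are mapped one-to-one and any shortfall is reported. A small standalone sketch of just that assignment step (assignPositionals is a hypothetical name, not part of the loader):

// Mirrors the unfilled-argument handling shown in the diff above, in isolation.
function assignPositionals(
  unfilled: string[], // names of required args not set via --name=value
  positional: string[], // positional tokens, already unquoted/unescaped
): Record<string, string> | Error {
  const out: Record<string, string> = {};
  if (unfilled.length === 1) {
    // Single unfilled arg: no quoting required, everything is joined.
    out[unfilled[0]] = positional.join(' ');
    return out;
  }
  const missing: string[] = [];
  unfilled.forEach((name, i) => {
    if (i < positional.length) {
      out[name] = positional[i];
    } else {
      missing.push(name);
    }
  });
  if (missing.length > 0) {
    return new Error(
      `Missing required argument(s): ${missing.map((n) => `--${n}`).join(', ')}`,
    );
  }
  return out;
}

// assignPositionals(['arg1'], ['hello', 'world'])  -> { arg1: 'hello world' }
// assignPositionals(['arg1', 'arg2'], ['hello'])   -> Error: Missing required argument(s): --arg2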

View File

@@ -13,7 +13,7 @@ describe('Argument Processors', () => {
const processor = new DefaultArgumentProcessor();
it('should append the full command if args are provided', async () => {
const prompt = 'Parse the command.';
const prompt = [{ text: 'Parse the command.' }];
const context = createMockCommandContext({
invocation: {
raw: '/mycommand arg1 "arg two"',
@@ -22,11 +22,13 @@ describe('Argument Processors', () => {
},
});
const result = await processor.process(prompt, context);
expect(result).toBe('Parse the command.\n\n/mycommand arg1 "arg two"');
expect(result).toEqual([
{ text: 'Parse the command.\n\n/mycommand arg1 "arg two"' },
]);
});
it('should NOT append the full command if no args are provided', async () => {
const prompt = 'Parse the command.';
const prompt = [{ text: 'Parse the command.' }];
const context = createMockCommandContext({
invocation: {
raw: '/mycommand',
@@ -35,7 +37,7 @@ describe('Argument Processors', () => {
},
});
const result = await processor.process(prompt, context);
expect(result).toBe('Parse the command.');
expect(result).toEqual([{ text: 'Parse the command.' }]);
});
});
});

View File

@@ -4,8 +4,9 @@
* SPDX-License-Identifier: Apache-2.0
*/
import { IPromptProcessor } from './types.js';
import { CommandContext } from '../../ui/commands/types.js';
import { appendToLastTextPart } from '@qwen-code/qwen-code-core';
import type { IPromptProcessor, PromptPipelineContent } from './types.js';
import type { CommandContext } from '../../ui/commands/types.js';
/**
* Appends the user's full command invocation to the prompt if arguments are
@@ -14,9 +15,12 @@ import { CommandContext } from '../../ui/commands/types.js';
* This processor is only used if the prompt does NOT contain {{args}}.
*/
export class DefaultArgumentProcessor implements IPromptProcessor {
async process(prompt: string, context: CommandContext): Promise<string> {
if (context.invocation!.args) {
return `${prompt}\n\n${context.invocation!.raw}`;
async process(
prompt: PromptPipelineContent,
context: CommandContext,
): Promise<PromptPipelineContent> {
if (context.invocation?.args) {
return appendToLastTextPart(prompt, context.invocation.raw);
}
return prompt;
}
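appendToLastTextPart comes from @qwen-code/qwen-code-core and is not shown in this diff. A minimal sketch of behavior consistent with the test expectations above, where the raw invocation ends up joined to the existing text with a blank line; the separator handling and the non-text fallback are assumptions:

import type { PartUnion } from '@google/genai';

// Assumed behavior only: append to the final text part, inserting a blank
// line as the test above implies, or add a new text part if there is none.
function appendToLastTextPartSketch(
  content: PartUnion[],
  textToAppend: string,
): PartUnion[] {
  const result = [...content];
  const last = result[result.length - 1];
  if (
    last !== undefined &&
    typeof last === 'object' &&
    'text' in last &&
    typeof last.text === 'string'
  ) {
    result[result.length - 1] = {
      ...last,
      text: `${last.text}\n\n${textToAppend}`,
    };
  } else {
    result.push({ text: textToAppend });
  }
  return result;
}

// appendToLastTextPartSketch([{ text: 'Parse the command.' }], '/mycommand arg1 "arg two"')
//   -> [{ text: 'Parse the command.\n\n/mycommand arg1 "arg two"' }]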

View File

@@ -0,0 +1,221 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { describe, it, expect, beforeEach, vi } from 'vitest';
import { createMockCommandContext } from '../../test-utils/mockCommandContext.js';
import { type CommandContext } from '../../ui/commands/types.js';
import { AtFileProcessor } from './atFileProcessor.js';
import { MessageType } from '../../ui/types.js';
import type { Config } from '@qwen-code/qwen-code-core';
import type { PartUnion } from '@google/genai';
// Mock the core dependency
const mockReadPathFromWorkspace = vi.hoisted(() => vi.fn());
vi.mock('@qwen-code/qwen-code-core', async (importOriginal) => {
const original = await importOriginal<object>();
return {
...original,
readPathFromWorkspace: mockReadPathFromWorkspace,
};
});
describe('AtFileProcessor', () => {
let context: CommandContext;
let mockConfig: Config;
beforeEach(() => {
vi.clearAllMocks();
mockConfig = {
// The processor only passes the config through, so we don't need a full mock.
} as unknown as Config;
context = createMockCommandContext({
services: {
config: mockConfig,
},
});
// Default mock success behavior: return content wrapped in a text part.
mockReadPathFromWorkspace.mockImplementation(
async (path: string): Promise<PartUnion[]> => [
{ text: `content of ${path}` },
],
);
});
it('should not change the prompt if no @{ trigger is present', async () => {
const processor = new AtFileProcessor();
const prompt: PartUnion[] = [{ text: 'This is a simple prompt.' }];
const result = await processor.process(prompt, context);
expect(result).toEqual(prompt);
expect(mockReadPathFromWorkspace).not.toHaveBeenCalled();
});
it('should not change the prompt if config service is missing', async () => {
const processor = new AtFileProcessor();
const prompt: PartUnion[] = [{ text: 'Analyze @{file.txt}' }];
const contextWithoutConfig = createMockCommandContext({
services: {
config: null,
},
});
const result = await processor.process(prompt, contextWithoutConfig);
expect(result).toEqual(prompt);
expect(mockReadPathFromWorkspace).not.toHaveBeenCalled();
});
describe('Parsing Logic', () => {
it('should replace a single valid @{path/to/file.txt} placeholder', async () => {
const processor = new AtFileProcessor();
const prompt: PartUnion[] = [
{ text: 'Analyze this file: @{path/to/file.txt}' },
];
const result = await processor.process(prompt, context);
expect(mockReadPathFromWorkspace).toHaveBeenCalledWith(
'path/to/file.txt',
mockConfig,
);
expect(result).toEqual([
{ text: 'Analyze this file: ' },
{ text: 'content of path/to/file.txt' },
]);
});
it('should replace multiple different @{...} placeholders', async () => {
const processor = new AtFileProcessor();
const prompt: PartUnion[] = [
{ text: 'Compare @{file1.js} with @{file2.js}' },
];
const result = await processor.process(prompt, context);
expect(mockReadPathFromWorkspace).toHaveBeenCalledTimes(2);
expect(mockReadPathFromWorkspace).toHaveBeenCalledWith(
'file1.js',
mockConfig,
);
expect(mockReadPathFromWorkspace).toHaveBeenCalledWith(
'file2.js',
mockConfig,
);
expect(result).toEqual([
{ text: 'Compare ' },
{ text: 'content of file1.js' },
{ text: ' with ' },
{ text: 'content of file2.js' },
]);
});
it('should handle placeholders at the beginning, middle, and end', async () => {
const processor = new AtFileProcessor();
const prompt: PartUnion[] = [
{ text: '@{start.txt} in the @{middle.txt} and @{end.txt}' },
];
const result = await processor.process(prompt, context);
expect(result).toEqual([
{ text: 'content of start.txt' },
{ text: ' in the ' },
{ text: 'content of middle.txt' },
{ text: ' and ' },
{ text: 'content of end.txt' },
]);
});
it('should correctly parse paths that contain balanced braces', async () => {
const processor = new AtFileProcessor();
const prompt: PartUnion[] = [
{ text: 'Analyze @{path/with/{braces}/file.txt}' },
];
const result = await processor.process(prompt, context);
expect(mockReadPathFromWorkspace).toHaveBeenCalledWith(
'path/with/{braces}/file.txt',
mockConfig,
);
expect(result).toEqual([
{ text: 'Analyze ' },
{ text: 'content of path/with/{braces}/file.txt' },
]);
});
it('should throw an error if the prompt contains an unclosed trigger', async () => {
const processor = new AtFileProcessor();
const prompt: PartUnion[] = [{ text: 'Hello @{world' }];
// The new parser throws an error for unclosed injections.
await expect(processor.process(prompt, context)).rejects.toThrow(
/Unclosed injection/,
);
});
});
describe('Integration and Error Handling', () => {
it('should leave the placeholder unmodified if readPathFromWorkspace throws', async () => {
const processor = new AtFileProcessor();
const prompt: PartUnion[] = [
{ text: 'Analyze @{not-found.txt} and @{good-file.txt}' },
];
mockReadPathFromWorkspace.mockImplementation(async (path: string) => {
if (path === 'not-found.txt') {
throw new Error('File not found');
}
return [{ text: `content of ${path}` }];
});
const result = await processor.process(prompt, context);
expect(result).toEqual([
{ text: 'Analyze ' },
{ text: '@{not-found.txt}' }, // Placeholder is preserved as a text part
{ text: ' and ' },
{ text: 'content of good-file.txt' },
]);
});
});
describe('UI Feedback', () => {
it('should call ui.addItem with an ERROR on failure', async () => {
const processor = new AtFileProcessor();
const prompt: PartUnion[] = [{ text: 'Analyze @{bad-file.txt}' }];
mockReadPathFromWorkspace.mockRejectedValue(new Error('Access denied'));
await processor.process(prompt, context);
expect(context.ui.addItem).toHaveBeenCalledTimes(1);
expect(context.ui.addItem).toHaveBeenCalledWith(
{
type: MessageType.ERROR,
text: "Failed to inject content for '@{bad-file.txt}': Access denied",
},
expect.any(Number),
);
});
it('should call ui.addItem with an INFO message if the file was ignored', async () => {
const processor = new AtFileProcessor();
const prompt: PartUnion[] = [{ text: 'Analyze @{ignored.txt}' }];
// Simulate an ignored file by returning an empty array.
mockReadPathFromWorkspace.mockResolvedValue([]);
const result = await processor.process(prompt, context);
// The placeholder should be removed, resulting in only the prefix.
expect(result).toEqual([{ text: 'Analyze ' }]);
expect(context.ui.addItem).toHaveBeenCalledTimes(1);
expect(context.ui.addItem).toHaveBeenCalledWith(
{
type: MessageType.INFO,
text: "File '@{ignored.txt}' was ignored by .gitignore or .qwenignore and was not included in the prompt.",
},
expect.any(Number),
);
});
it('should NOT call ui.addItem on success', async () => {
const processor = new AtFileProcessor();
const prompt: PartUnion[] = [{ text: 'Analyze @{good-file.txt}' }];
await processor.process(prompt, context);
expect(context.ui.addItem).not.toHaveBeenCalled();
});
});
});

View File

@@ -0,0 +1,96 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import {
flatMapTextParts,
readPathFromWorkspace,
} from '@qwen-code/qwen-code-core';
import type { CommandContext } from '../../ui/commands/types.js';
import { MessageType } from '../../ui/types.js';
import {
AT_FILE_INJECTION_TRIGGER,
type IPromptProcessor,
type PromptPipelineContent,
} from './types.js';
import { extractInjections } from './injectionParser.js';
export class AtFileProcessor implements IPromptProcessor {
constructor(private readonly commandName?: string) {}
async process(
input: PromptPipelineContent,
context: CommandContext,
): Promise<PromptPipelineContent> {
const config = context.services.config;
if (!config) {
return input;
}
return flatMapTextParts(input, async (text) => {
if (!text.includes(AT_FILE_INJECTION_TRIGGER)) {
return [{ text }];
}
const injections = extractInjections(
text,
AT_FILE_INJECTION_TRIGGER,
this.commandName,
);
if (injections.length === 0) {
return [{ text }];
}
const output: PromptPipelineContent = [];
let lastIndex = 0;
for (const injection of injections) {
const prefix = text.substring(lastIndex, injection.startIndex);
if (prefix) {
output.push({ text: prefix });
}
const pathStr = injection.content;
try {
const fileContentParts = await readPathFromWorkspace(pathStr, config);
if (fileContentParts.length === 0) {
const uiMessage = `File '@{${pathStr}}' was ignored by .gitignore or .qwenignore and was not included in the prompt.`;
context.ui.addItem(
{ type: MessageType.INFO, text: uiMessage },
Date.now(),
);
}
output.push(...fileContentParts);
} catch (error) {
const message =
error instanceof Error ? error.message : String(error);
const uiMessage = `Failed to inject content for '@{${pathStr}}': ${message}`;
console.error(
`[AtFileProcessor] ${uiMessage}. Leaving placeholder in prompt.`,
);
context.ui.addItem(
{ type: MessageType.ERROR, text: uiMessage },
Date.now(),
);
const placeholder = text.substring(
injection.startIndex,
injection.endIndex,
);
output.push({ text: placeholder });
}
lastIndex = injection.endIndex;
}
const suffix = text.substring(lastIndex);
if (suffix) {
output.push({ text: suffix });
}
return output;
});
}
}
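flatMapTextParts is likewise a core helper that is not part of this diff. The processor above relies on it to rewrite only the text parts of the pipeline content while passing non-text parts (for example inline images) through untouched. A sketch of semantics consistent with that usage, stated as an assumption rather than the actual implementation:

import type { PartUnion } from '@google/genai';

// Extract the text of a part, if it is a text part (assumption: plain string
// parts also count as text).
function getText(part: PartUnion): string | undefined {
  if (typeof part === 'string') return part;
  if (
    typeof part === 'object' &&
    part !== null &&
    'text' in part &&
    typeof part.text === 'string'
  ) {
    return part.text;
  }
  return undefined;
}

// Assumed semantics of the core helper: apply an async mapper to each text
// part and splice the resulting parts in place; pass non-text parts through.
async function flatMapTextPartsSketch(
  content: PartUnion[],
  mapText: (text: string) => Promise<PartUnion[]>,
): Promise<PartUnion[]> {
  const output: PartUnion[] = [];
  for (const part of content) {
    const text = getText(part);
    if (text !== undefined) {
      output.push(...(await mapText(text)));
    } else {
      output.push(part); // image data, function calls, etc. are left untouched
    }
  }
  return output;
}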

View File

@@ -0,0 +1,223 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { describe, it, expect } from 'vitest';
import { extractInjections } from './injectionParser.js';
describe('extractInjections', () => {
const SHELL_TRIGGER = '!{';
const AT_FILE_TRIGGER = '@{';
describe('Basic Functionality', () => {
it('should return an empty array if no trigger is present', () => {
const prompt = 'This is a simple prompt without injections.';
const result = extractInjections(prompt, SHELL_TRIGGER);
expect(result).toEqual([]);
});
it('should extract a single, simple injection', () => {
const prompt = 'Run this command: !{ls -la}';
const result = extractInjections(prompt, SHELL_TRIGGER);
expect(result).toEqual([
{
content: 'ls -la',
startIndex: 18,
endIndex: 27,
},
]);
});
it('should extract multiple injections', () => {
const prompt = 'First: !{cmd1}, Second: !{cmd2}';
const result = extractInjections(prompt, SHELL_TRIGGER);
expect(result).toHaveLength(2);
expect(result[0]).toEqual({
content: 'cmd1',
startIndex: 7,
endIndex: 14,
});
expect(result[1]).toEqual({
content: 'cmd2',
startIndex: 24,
endIndex: 31,
});
});
it('should handle different triggers (e.g., @{)', () => {
const prompt = 'Read this file: @{path/to/file.txt}';
const result = extractInjections(prompt, AT_FILE_TRIGGER);
expect(result).toEqual([
{
content: 'path/to/file.txt',
startIndex: 16,
endIndex: 35,
},
]);
});
});
describe('Positioning and Edge Cases', () => {
it('should handle injections at the start and end of the prompt', () => {
const prompt = '!{start} middle text !{end}';
const result = extractInjections(prompt, SHELL_TRIGGER);
expect(result).toHaveLength(2);
expect(result[0]).toEqual({
content: 'start',
startIndex: 0,
endIndex: 8,
});
expect(result[1]).toEqual({
content: 'end',
startIndex: 21,
endIndex: 27,
});
});
it('should handle adjacent injections', () => {
const prompt = '!{A}!{B}';
const result = extractInjections(prompt, SHELL_TRIGGER);
expect(result).toHaveLength(2);
expect(result[0]).toEqual({ content: 'A', startIndex: 0, endIndex: 4 });
expect(result[1]).toEqual({ content: 'B', startIndex: 4, endIndex: 8 });
});
it('should handle empty injections', () => {
const prompt = 'Empty: !{}';
const result = extractInjections(prompt, SHELL_TRIGGER);
expect(result).toEqual([
{
content: '',
startIndex: 7,
endIndex: 10,
},
]);
});
it('should trim whitespace within the content', () => {
const prompt = '!{ \n command with space \t }';
const result = extractInjections(prompt, SHELL_TRIGGER);
expect(result).toEqual([
{
content: 'command with space',
startIndex: 0,
endIndex: 29,
},
]);
});
it('should ignore similar patterns that are not the exact trigger', () => {
const prompt = 'Not a trigger: !(cmd) or {cmd} or ! {cmd}';
const result = extractInjections(prompt, SHELL_TRIGGER);
expect(result).toEqual([]);
});
it('should ignore extra closing braces before the trigger', () => {
const prompt = 'Ignore this } then !{run}';
const result = extractInjections(prompt, SHELL_TRIGGER);
expect(result).toEqual([
{
content: 'run',
startIndex: 19,
endIndex: 25,
},
]);
});
it('should stop parsing at the first balanced closing brace (non-greedy)', () => {
// This tests that the parser doesn't greedily consume extra closing braces
const prompt = 'Run !{ls -l}} extra braces';
const result = extractInjections(prompt, SHELL_TRIGGER);
expect(result).toEqual([
{
content: 'ls -l',
startIndex: 4,
endIndex: 12,
},
]);
});
});
describe('Nested Braces (Balanced)', () => {
it('should correctly parse content with simple nested braces (e.g., JSON)', () => {
const prompt = `Send JSON: !{curl -d '{"key": "value"}'}`;
const result = extractInjections(prompt, SHELL_TRIGGER);
expect(result).toHaveLength(1);
expect(result[0].content).toBe(`curl -d '{"key": "value"}'`);
});
it('should correctly parse content with shell constructs (e.g., awk)', () => {
const prompt = `Process text: !{awk '{print $1}' file.txt}`;
const result = extractInjections(prompt, SHELL_TRIGGER);
expect(result).toHaveLength(1);
expect(result[0].content).toBe(`awk '{print $1}' file.txt`);
});
it('should correctly parse multiple levels of nesting', () => {
const prompt = `!{level1 {level2 {level3}} suffix}`;
const result = extractInjections(prompt, SHELL_TRIGGER);
expect(result).toHaveLength(1);
expect(result[0].content).toBe(`level1 {level2 {level3}} suffix`);
expect(result[0].endIndex).toBe(prompt.length);
});
it('should correctly parse paths containing balanced braces', () => {
const prompt = 'Analyze @{path/with/{braces}/file.txt}';
const result = extractInjections(prompt, AT_FILE_TRIGGER);
expect(result).toHaveLength(1);
expect(result[0].content).toBe('path/with/{braces}/file.txt');
});
it('should correctly handle an injection containing the trigger itself', () => {
// This works because the parser counts braces; it doesn't look for the trigger again until the current one is closed.
const prompt = '!{echo "The trigger is !{ confusing }"}';
const expectedContent = 'echo "The trigger is !{ confusing }"';
const result = extractInjections(prompt, SHELL_TRIGGER);
expect(result).toHaveLength(1);
expect(result[0].content).toBe(expectedContent);
});
});
describe('Error Handling (Unbalanced/Unclosed)', () => {
it('should throw an error for a simple unclosed injection', () => {
const prompt = 'This prompt has !{an unclosed trigger';
expect(() => extractInjections(prompt, SHELL_TRIGGER)).toThrow(
/Invalid syntax: Unclosed injection starting at index 16 \('!{'\)/,
);
});
it('should throw an error if the prompt ends inside a nested block', () => {
const prompt = 'This fails: !{outer {inner';
expect(() => extractInjections(prompt, SHELL_TRIGGER)).toThrow(
/Invalid syntax: Unclosed injection starting at index 12 \('!{'\)/,
);
});
it('should include the context name in the error message if provided', () => {
const prompt = 'Failing !{command';
const contextName = 'test-command';
expect(() =>
extractInjections(prompt, SHELL_TRIGGER, contextName),
).toThrow(
/Invalid syntax in command 'test-command': Unclosed injection starting at index 8/,
);
});
it('should throw if content contains unbalanced braces (e.g., missing closing)', () => {
// This is functionally the same as an unclosed injection from the parser's perspective.
const prompt = 'Analyze @{path/with/braces{example.txt}';
expect(() => extractInjections(prompt, AT_FILE_TRIGGER)).toThrow(
/Invalid syntax: Unclosed injection starting at index 8 \('@{'\)/,
);
});
it('should clearly state that unbalanced braces in content are not supported in the error', () => {
const prompt = 'Analyze @{path/with/braces{example.txt}';
expect(() => extractInjections(prompt, AT_FILE_TRIGGER)).toThrow(
/Paths or commands with unbalanced braces are not supported directly/,
);
});
});
});

View File

@@ -0,0 +1,89 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
/**
* Represents a single detected injection site in a prompt string.
*/
export interface Injection {
/** The content extracted from within the braces (e.g., the command or path), trimmed. */
content: string;
/** The starting index of the injection (inclusive, points to the start of the trigger). */
startIndex: number;
/** The ending index of the injection (exclusive, points after the closing '}'). */
endIndex: number;
}
/**
* Iteratively parses a prompt string to extract injections (e.g., !{...} or @{...}),
* correctly handling nested braces within the content.
*
* This parser relies on simple brace counting and does not support escaping.
*
* @param prompt The prompt string to parse.
* @param trigger The opening trigger sequence (e.g., '!{', '@{').
* @param contextName Optional context name (e.g., command name) for error messages.
* @returns An array of extracted Injection objects.
* @throws Error if an unclosed injection is found.
*/
export function extractInjections(
prompt: string,
trigger: string,
contextName?: string,
): Injection[] {
const injections: Injection[] = [];
let index = 0;
while (index < prompt.length) {
const startIndex = prompt.indexOf(trigger, index);
if (startIndex === -1) {
break;
}
let currentIndex = startIndex + trigger.length;
let braceCount = 1;
let foundEnd = false;
while (currentIndex < prompt.length) {
const char = prompt[currentIndex];
if (char === '{') {
braceCount++;
} else if (char === '}') {
braceCount--;
if (braceCount === 0) {
const injectionContent = prompt.substring(
startIndex + trigger.length,
currentIndex,
);
const endIndex = currentIndex + 1;
injections.push({
content: injectionContent.trim(),
startIndex,
endIndex,
});
index = endIndex;
foundEnd = true;
break;
}
}
currentIndex++;
}
// Check if the inner loop finished without finding the closing brace.
if (!foundEnd) {
const contextInfo = contextName ? ` in command '${contextName}'` : '';
// Enforce strict parsing (Comment 1) and clarify limitations (Comment 2).
throw new Error(
`Invalid syntax${contextInfo}: Unclosed injection starting at index ${startIndex} ('${trigger}'). Ensure braces are balanced. Paths or commands with unbalanced braces are not supported directly.`,
);
}
}
return injections;
}
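Both the at-file and shell processors consume this parser the same way: they walk the returned injections in order and rebuild the text by splicing the prefix before each startIndex, a replacement for the injection, and the suffix after the last endIndex. A short usage sketch, assuming the module path shown above and a purely illustrative replacement:

import { extractInjections } from './injectionParser.js';

// Replace every @{...} placeholder with an illustrative marker while keeping
// the surrounding text intact, the same splicing the processors perform.
function spliceInjections(text: string): string {
  const injections = extractInjections(text, '@{');
  let result = '';
  let lastIndex = 0;
  for (const injection of injections) {
    result += text.substring(lastIndex, injection.startIndex);
    result += `<contents of ${injection.content}>`; // illustrative replacement only
    lastIndex = injection.endIndex;
  }
  result += text.substring(lastIndex);
  return result;
}

// spliceInjections('Compare @{a.ts} with @{b.ts}')
//   -> 'Compare <contents of a.ts> with <contents of b.ts>'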

View File

@@ -7,14 +7,16 @@
import { describe, it, expect, beforeEach, vi, type Mock } from 'vitest';
import { ConfirmationRequiredError, ShellProcessor } from './shellProcessor.js';
import { createMockCommandContext } from '../../test-utils/mockCommandContext.js';
import { CommandContext } from '../../ui/commands/types.js';
import { ApprovalMode, Config } from '@qwen-code/qwen-code-core';
import os from 'os';
import type { CommandContext } from '../../ui/commands/types.js';
import type { Config } from '@qwen-code/qwen-code-core';
import { ApprovalMode } from '@qwen-code/qwen-code-core';
import os from 'node:os';
import { quote } from 'shell-quote';
import { createPartFromText } from '@google/genai';
import type { PromptPipelineContent } from './types.js';
// Helper function to determine the expected escaped string based on the current OS,
// mirroring the logic in the actual `escapeShellArg` implementation. This makes
// our tests robust and platform-agnostic.
// mirroring the logic in the actual `escapeShellArg` implementation.
function getExpectedEscapedArgForPlatform(arg: string): string {
if (os.platform() === 'win32') {
const comSpec = (process.env['ComSpec'] || 'cmd.exe').toLowerCase();
@@ -31,6 +33,11 @@ function getExpectedEscapedArgForPlatform(arg: string): string {
}
}
// Helper to create PromptPipelineContent
function createPromptPipelineContent(text: string): PromptPipelineContent {
return [createPartFromText(text)];
}
const mockCheckCommandPermissions = vi.hoisted(() => vi.fn());
const mockShellExecute = vi.hoisted(() => vi.fn());
@@ -92,7 +99,7 @@ describe('ShellProcessor', () => {
it('should throw an error if config is missing', async () => {
const processor = new ShellProcessor('test-command');
const prompt = '!{ls}';
const prompt: PromptPipelineContent = createPromptPipelineContent('!{ls}');
const contextWithoutConfig = createMockCommandContext({
services: {
config: null,
@@ -106,15 +113,19 @@ describe('ShellProcessor', () => {
it('should not change the prompt if no shell injections are present', async () => {
const processor = new ShellProcessor('test-command');
const prompt = 'This is a simple prompt with no injections.';
const prompt: PromptPipelineContent = createPromptPipelineContent(
'This is a simple prompt with no injections.',
);
const result = await processor.process(prompt, context);
expect(result).toBe(prompt);
expect(result).toEqual(prompt);
expect(mockShellExecute).not.toHaveBeenCalled();
});
it('should process a single valid shell injection if allowed', async () => {
const processor = new ShellProcessor('test-command');
const prompt = 'The current status is: !{git status}';
const prompt: PromptPipelineContent = createPromptPipelineContent(
'The current status is: !{git status}',
);
mockCheckCommandPermissions.mockReturnValue({
allAllowed: true,
disallowedCommands: [],
@@ -137,12 +148,14 @@ describe('ShellProcessor', () => {
expect.any(Object),
false,
);
expect(result).toBe('The current status is: On branch main');
expect(result).toEqual([{ text: 'The current status is: On branch main' }]);
});
it('should process multiple valid shell injections if all are allowed', async () => {
const processor = new ShellProcessor('test-command');
const prompt = '!{git status} in !{pwd}';
const prompt: PromptPipelineContent = createPromptPipelineContent(
'!{git status} in !{pwd}',
);
mockCheckCommandPermissions.mockReturnValue({
allAllowed: true,
disallowedCommands: [],
@@ -163,12 +176,14 @@ describe('ShellProcessor', () => {
expect(mockCheckCommandPermissions).toHaveBeenCalledTimes(2);
expect(mockShellExecute).toHaveBeenCalledTimes(2);
expect(result).toBe('On branch main in /usr/home');
expect(result).toEqual([{ text: 'On branch main in /usr/home' }]);
});
it('should throw ConfirmationRequiredError if a command is not allowed in default mode', async () => {
const processor = new ShellProcessor('test-command');
const prompt = 'Do something dangerous: !{rm -rf /}';
const prompt: PromptPipelineContent = createPromptPipelineContent(
'Do something dangerous: !{rm -rf /}',
);
mockCheckCommandPermissions.mockReturnValue({
allAllowed: false,
disallowedCommands: ['rm -rf /'],
@@ -181,7 +196,9 @@ describe('ShellProcessor', () => {
it('should NOT throw ConfirmationRequiredError if a command is not allowed but approval mode is YOLO', async () => {
const processor = new ShellProcessor('test-command');
const prompt = 'Do something dangerous: !{rm -rf /}';
const prompt: PromptPipelineContent = createPromptPipelineContent(
'Do something dangerous: !{rm -rf /}',
);
mockCheckCommandPermissions.mockReturnValue({
allAllowed: false,
disallowedCommands: ['rm -rf /'],
@@ -202,12 +219,14 @@ describe('ShellProcessor', () => {
expect.any(Object),
false,
);
expect(result).toBe('Do something dangerous: deleted');
expect(result).toEqual([{ text: 'Do something dangerous: deleted' }]);
});
it('should still throw an error for a hard-denied command even in YOLO mode', async () => {
const processor = new ShellProcessor('test-command');
const prompt = 'Do something forbidden: !{reboot}';
const prompt: PromptPipelineContent = createPromptPipelineContent(
'Do something forbidden: !{reboot}',
);
mockCheckCommandPermissions.mockReturnValue({
allAllowed: false,
disallowedCommands: ['reboot'],
@@ -227,7 +246,9 @@ describe('ShellProcessor', () => {
it('should throw ConfirmationRequiredError with the correct command', async () => {
const processor = new ShellProcessor('test-command');
const prompt = 'Do something dangerous: !{rm -rf /}';
const prompt: PromptPipelineContent = createPromptPipelineContent(
'Do something dangerous: !{rm -rf /}',
);
mockCheckCommandPermissions.mockReturnValue({
allAllowed: false,
disallowedCommands: ['rm -rf /'],
@@ -249,7 +270,9 @@ describe('ShellProcessor', () => {
it('should throw ConfirmationRequiredError with multiple commands if multiple are disallowed', async () => {
const processor = new ShellProcessor('test-command');
const prompt = '!{cmd1} and !{cmd2}';
const prompt: PromptPipelineContent = createPromptPipelineContent(
'!{cmd1} and !{cmd2}',
);
mockCheckCommandPermissions.mockImplementation((cmd) => {
if (cmd === 'cmd1') {
return { allAllowed: false, disallowedCommands: ['cmd1'] };
@@ -274,7 +297,9 @@ describe('ShellProcessor', () => {
it('should not execute any commands if at least one requires confirmation', async () => {
const processor = new ShellProcessor('test-command');
const prompt = 'First: !{echo "hello"}, Second: !{rm -rf /}';
const prompt: PromptPipelineContent = createPromptPipelineContent(
'First: !{echo "hello"}, Second: !{rm -rf /}',
);
mockCheckCommandPermissions.mockImplementation((cmd) => {
if (cmd.includes('rm')) {
@@ -293,7 +318,9 @@ describe('ShellProcessor', () => {
it('should only request confirmation for disallowed commands in a mixed prompt', async () => {
const processor = new ShellProcessor('test-command');
const prompt = 'Allowed: !{ls -l}, Disallowed: !{rm -rf /}';
const prompt: PromptPipelineContent = createPromptPipelineContent(
'Allowed: !{ls -l}, Disallowed: !{rm -rf /}',
);
mockCheckCommandPermissions.mockImplementation((cmd) => ({
allAllowed: !cmd.includes('rm'),
@@ -313,7 +340,9 @@ describe('ShellProcessor', () => {
it('should execute all commands if they are on the session allowlist', async () => {
const processor = new ShellProcessor('test-command');
const prompt = 'Run !{cmd1} and !{cmd2}';
const prompt: PromptPipelineContent = createPromptPipelineContent(
'Run !{cmd1} and !{cmd2}',
);
// Add commands to the session allowlist
context.session.sessionShellAllowlist = new Set(['cmd1', 'cmd2']);
@@ -345,12 +374,14 @@ describe('ShellProcessor', () => {
context.session.sessionShellAllowlist,
);
expect(mockShellExecute).toHaveBeenCalledTimes(2);
expect(result).toBe('Run output1 and output2');
expect(result).toEqual([{ text: 'Run output1 and output2' }]);
});
it('should trim whitespace from the command inside the injection before interpolation', async () => {
const processor = new ShellProcessor('test-command');
const prompt = 'Files: !{ ls {{args}} -l }';
const prompt: PromptPipelineContent = createPromptPipelineContent(
'Files: !{ ls {{args}} -l }',
);
const rawArgs = context.invocation!.args;
@@ -384,7 +415,8 @@ describe('ShellProcessor', () => {
it('should handle an empty command inside the injection gracefully (skips execution)', async () => {
const processor = new ShellProcessor('test-command');
const prompt = 'This is weird: !{}';
const prompt: PromptPipelineContent =
createPromptPipelineContent('This is weird: !{}');
const result = await processor.process(prompt, context);
@@ -392,77 +424,14 @@ describe('ShellProcessor', () => {
expect(mockShellExecute).not.toHaveBeenCalled();
// It replaces !{} with an empty string.
expect(result).toBe('This is weird: ');
});
describe('Robust Parsing (Balanced Braces)', () => {
it('should correctly parse commands containing nested braces (e.g., awk)', async () => {
const processor = new ShellProcessor('test-command');
const command = "awk '{print $1}' file.txt";
const prompt = `Output: !{${command}}`;
mockShellExecute.mockReturnValue({
result: Promise.resolve({ ...SUCCESS_RESULT, output: 'result' }),
});
const result = await processor.process(prompt, context);
expect(mockCheckCommandPermissions).toHaveBeenCalledWith(
command,
expect.any(Object),
context.session.sessionShellAllowlist,
);
expect(mockShellExecute).toHaveBeenCalledWith(
command,
expect.any(String),
expect.any(Function),
expect.any(Object),
false,
);
expect(result).toBe('Output: result');
});
it('should handle deeply nested braces correctly', async () => {
const processor = new ShellProcessor('test-command');
const command = "echo '{{a},{b}}'";
const prompt = `!{${command}}`;
mockShellExecute.mockReturnValue({
result: Promise.resolve({ ...SUCCESS_RESULT, output: '{{a},{b}}' }),
});
const result = await processor.process(prompt, context);
expect(mockShellExecute).toHaveBeenCalledWith(
command,
expect.any(String),
expect.any(Function),
expect.any(Object),
false,
);
expect(result).toBe('{{a},{b}}');
});
it('should throw an error for unclosed shell injections', async () => {
const processor = new ShellProcessor('test-command');
const prompt = 'This prompt is broken: !{ls -l';
await expect(processor.process(prompt, context)).rejects.toThrow(
/Unclosed shell injection/,
);
});
it('should throw an error for unclosed nested braces', async () => {
const processor = new ShellProcessor('test-command');
const prompt = 'Broken: !{echo {a}';
await expect(processor.process(prompt, context)).rejects.toThrow(
/Unclosed shell injection/,
);
});
expect(result).toEqual([{ text: 'This is weird: ' }]);
});
describe('Error Reporting', () => {
it('should append exit code and command name on failure', async () => {
const processor = new ShellProcessor('test-command');
const prompt = '!{cmd}';
const prompt: PromptPipelineContent =
createPromptPipelineContent('!{cmd}');
mockShellExecute.mockReturnValue({
result: Promise.resolve({
...SUCCESS_RESULT,
@@ -474,14 +443,17 @@ describe('ShellProcessor', () => {
const result = await processor.process(prompt, context);
expect(result).toBe(
"some error output\n[Shell command 'cmd' exited with code 1]",
);
expect(result).toEqual([
{
text: "some error output\n[Shell command 'cmd' exited with code 1]",
},
]);
});
it('should append signal info and command name if terminated by signal', async () => {
const processor = new ShellProcessor('test-command');
const prompt = '!{cmd}';
const prompt: PromptPipelineContent =
createPromptPipelineContent('!{cmd}');
mockShellExecute.mockReturnValue({
result: Promise.resolve({
...SUCCESS_RESULT,
@@ -494,14 +466,17 @@ describe('ShellProcessor', () => {
const result = await processor.process(prompt, context);
expect(result).toBe(
"output\n[Shell command 'cmd' terminated by signal SIGTERM]",
);
expect(result).toEqual([
{
text: "output\n[Shell command 'cmd' terminated by signal SIGTERM]",
},
]);
});
it('should throw a detailed error if the shell fails to spawn', async () => {
const processor = new ShellProcessor('test-command');
const prompt = '!{bad-command}';
const prompt: PromptPipelineContent =
createPromptPipelineContent('!{bad-command}');
const spawnError = new Error('spawn EACCES');
mockShellExecute.mockReturnValue({
result: Promise.resolve({
@@ -521,7 +496,9 @@ describe('ShellProcessor', () => {
it('should report abort status with command name if aborted', async () => {
const processor = new ShellProcessor('test-command');
const prompt = '!{long-running-command}';
const prompt: PromptPipelineContent = createPromptPipelineContent(
'!{long-running-command}',
);
const spawnError = new Error('Aborted');
mockShellExecute.mockReturnValue({
result: Promise.resolve({
@@ -535,9 +512,11 @@ describe('ShellProcessor', () => {
});
const result = await processor.process(prompt, context);
expect(result).toBe(
"partial output\n[Shell command 'long-running-command' aborted]",
);
expect(result).toEqual([
{
text: "partial output\n[Shell command 'long-running-command' aborted]",
},
]);
});
});
@@ -551,29 +530,35 @@ describe('ShellProcessor', () => {
it('should perform raw replacement if no shell injections are present (optimization path)', async () => {
const processor = new ShellProcessor('test-command');
const prompt = 'The user said: {{args}}';
const prompt: PromptPipelineContent = createPromptPipelineContent(
'The user said: {{args}}',
);
const result = await processor.process(prompt, context);
expect(result).toBe(`The user said: ${rawArgs}`);
expect(result).toEqual([{ text: `The user said: ${rawArgs}` }]);
expect(mockShellExecute).not.toHaveBeenCalled();
});
it('should perform raw replacement outside !{} blocks', async () => {
const processor = new ShellProcessor('test-command');
const prompt = 'Outside: {{args}}. Inside: !{echo "hello"}';
const prompt: PromptPipelineContent = createPromptPipelineContent(
'Outside: {{args}}. Inside: !{echo "hello"}',
);
mockShellExecute.mockReturnValue({
result: Promise.resolve({ ...SUCCESS_RESULT, output: 'hello' }),
});
const result = await processor.process(prompt, context);
expect(result).toBe(`Outside: ${rawArgs}. Inside: hello`);
expect(result).toEqual([{ text: `Outside: ${rawArgs}. Inside: hello` }]);
});
it('should perform escaped replacement inside !{} blocks', async () => {
const processor = new ShellProcessor('test-command');
const prompt = 'Command: !{grep {{args}} file.txt}';
const prompt: PromptPipelineContent = createPromptPipelineContent(
'Command: !{grep {{args}} file.txt}',
);
mockShellExecute.mockReturnValue({
result: Promise.resolve({ ...SUCCESS_RESULT, output: 'match found' }),
});
@@ -591,12 +576,14 @@ describe('ShellProcessor', () => {
false,
);
expect(result).toBe('Command: match found');
expect(result).toEqual([{ text: 'Command: match found' }]);
});
it('should handle both raw (outside) and escaped (inside) injection simultaneously', async () => {
const processor = new ShellProcessor('test-command');
const prompt = 'User "({{args}})" requested search: !{search {{args}}}';
const prompt: PromptPipelineContent = createPromptPipelineContent(
'User "({{args}})" requested search: !{search {{args}}}',
);
mockShellExecute.mockReturnValue({
result: Promise.resolve({ ...SUCCESS_RESULT, output: 'results' }),
});
@@ -613,12 +600,15 @@ describe('ShellProcessor', () => {
false,
);
expect(result).toBe(`User "(${rawArgs})" requested search: results`);
expect(result).toEqual([
{ text: `User "(${rawArgs})" requested search: results` },
]);
});
it('should perform security checks on the final, resolved (escaped) command', async () => {
const processor = new ShellProcessor('test-command');
const prompt = '!{rm {{args}}}';
const prompt: PromptPipelineContent =
createPromptPipelineContent('!{rm {{args}}}');
const expectedEscapedArgs = getExpectedEscapedArgForPlatform(rawArgs);
const expectedResolvedCommand = `rm ${expectedEscapedArgs}`;
@@ -641,7 +631,8 @@ describe('ShellProcessor', () => {
it('should report the resolved command if a hard denial occurs', async () => {
const processor = new ShellProcessor('test-command');
const prompt = '!{rm {{args}}}';
const prompt: PromptPipelineContent =
createPromptPipelineContent('!{rm {{args}}}');
const expectedEscapedArgs = getExpectedEscapedArgForPlatform(rawArgs);
const expectedResolvedCommand = `rm ${expectedEscapedArgs}`;
mockCheckCommandPermissions.mockReturnValue({
@@ -661,7 +652,9 @@ describe('ShellProcessor', () => {
const processor = new ShellProcessor('test-command');
const multilineArgs = 'first line\nsecond line';
context.invocation!.args = multilineArgs;
const prompt = 'Commit message: !{git commit -m {{args}}}';
const prompt: PromptPipelineContent = createPromptPipelineContent(
'Commit message: !{git commit -m {{args}}}',
);
const expectedEscapedArgs =
getExpectedEscapedArgForPlatform(multilineArgs);
@@ -690,7 +683,8 @@ describe('ShellProcessor', () => {
])('should safely escape args containing $name', async ({ input }) => {
const processor = new ShellProcessor('test-command');
context.invocation!.args = input;
const prompt = '!{echo {{args}}}';
const prompt: PromptPipelineContent =
createPromptPipelineContent('!{echo {{args}}}');
const expectedEscapedArgs = getExpectedEscapedArgForPlatform(input);
const expectedCommand = `echo ${expectedEscapedArgs}`;

View File

@@ -10,14 +10,16 @@ import {
escapeShellArg,
getShellConfiguration,
ShellExecutionService,
flatMapTextParts,
} from '@qwen-code/qwen-code-core';
import { CommandContext } from '../../ui/commands/types.js';
import type { CommandContext } from '../../ui/commands/types.js';
import type { IPromptProcessor, PromptPipelineContent } from './types.js';
import {
IPromptProcessor,
SHELL_INJECTION_TRIGGER,
SHORTHAND_ARGS_PLACEHOLDER,
} from './types.js';
import { extractInjections, type Injection } from './injectionParser.js';
export class ConfirmationRequiredError extends Error {
constructor(
@@ -30,15 +32,10 @@ export class ConfirmationRequiredError extends Error {
}
/**
* Represents a single detected shell injection site in the prompt.
* Represents a single detected shell injection site in the prompt,
* after resolution of arguments. Extends the base Injection interface.
*/
interface ShellInjection {
/** The shell command extracted from within !{...}, trimmed. */
command: string;
/** The starting index of the injection (inclusive, points to '!'). */
startIndex: number;
/** The ending index of the injection (exclusive, points after '}'). */
endIndex: number;
interface ResolvedShellInjection extends Injection {
/** The command after {{args}} has been escaped and substituted. */
resolvedCommand?: string;
}
@@ -56,11 +53,25 @@ interface ShellInjection {
export class ShellProcessor implements IPromptProcessor {
constructor(private readonly commandName: string) {}
async process(prompt: string, context: CommandContext): Promise<string> {
async process(
prompt: PromptPipelineContent,
context: CommandContext,
): Promise<PromptPipelineContent> {
return flatMapTextParts(prompt, (text) =>
this.processString(text, context),
);
}
private async processString(
prompt: string,
context: CommandContext,
): Promise<PromptPipelineContent> {
const userArgsRaw = context.invocation?.args || '';
if (!prompt.includes(SHELL_INJECTION_TRIGGER)) {
return prompt.replaceAll(SHORTHAND_ARGS_PLACEHOLDER, userArgsRaw);
return [
{ text: prompt.replaceAll(SHORTHAND_ARGS_PLACEHOLDER, userArgsRaw) },
];
}
const config = context.services.config;
@@ -71,26 +82,37 @@ export class ShellProcessor implements IPromptProcessor {
}
const { sessionShellAllowlist } = context.session;
const injections = this.extractInjections(prompt);
const injections = extractInjections(
prompt,
SHELL_INJECTION_TRIGGER,
this.commandName,
);
// If extractInjections found no closed blocks (and didn't throw), treat as raw.
if (injections.length === 0) {
return prompt.replaceAll(SHORTHAND_ARGS_PLACEHOLDER, userArgsRaw);
return [
{ text: prompt.replaceAll(SHORTHAND_ARGS_PLACEHOLDER, userArgsRaw) },
];
}
const { shell } = getShellConfiguration();
const userArgsEscaped = escapeShellArg(userArgsRaw, shell);
const resolvedInjections = injections.map((injection) => {
if (injection.command === '') {
return injection;
}
// Replace {{args}} inside the command string with the escaped version.
const resolvedCommand = injection.command.replaceAll(
SHORTHAND_ARGS_PLACEHOLDER,
userArgsEscaped,
);
return { ...injection, resolvedCommand };
});
const resolvedInjections: ResolvedShellInjection[] = injections.map(
(injection) => {
const command = injection.content;
if (command === '') {
return { ...injection, resolvedCommand: undefined };
}
const resolvedCommand = command.replaceAll(
SHORTHAND_ARGS_PLACEHOLDER,
userArgsEscaped,
);
return { ...injection, resolvedCommand };
},
);
const commandsToConfirm = new Set<string>();
for (const injection of resolvedInjections) {
@@ -180,69 +202,6 @@ export class ShellProcessor implements IPromptProcessor {
userArgsRaw,
);
return processedPrompt;
}
/**
* Iteratively parses the prompt string to extract shell injections (!{...}),
* correctly handling nested braces within the command.
*
* @param prompt The prompt string to parse.
* @returns An array of extracted ShellInjection objects.
* @throws Error if an unclosed injection (`!{`) is found.
*/
private extractInjections(prompt: string): ShellInjection[] {
const injections: ShellInjection[] = [];
let index = 0;
while (index < prompt.length) {
const startIndex = prompt.indexOf(SHELL_INJECTION_TRIGGER, index);
if (startIndex === -1) {
break;
}
let currentIndex = startIndex + SHELL_INJECTION_TRIGGER.length;
let braceCount = 1;
let foundEnd = false;
while (currentIndex < prompt.length) {
const char = prompt[currentIndex];
// We count literal braces. This parser does not interpret shell quoting/escaping.
if (char === '{') {
braceCount++;
} else if (char === '}') {
braceCount--;
if (braceCount === 0) {
const commandContent = prompt.substring(
startIndex + SHELL_INJECTION_TRIGGER.length,
currentIndex,
);
const endIndex = currentIndex + 1;
injections.push({
command: commandContent.trim(),
startIndex,
endIndex,
});
index = endIndex;
foundEnd = true;
break;
}
}
currentIndex++;
}
// Check if the inner loop finished without finding the closing brace.
if (!foundEnd) {
throw new Error(
`Invalid syntax in command '${this.commandName}': Unclosed shell injection starting at index ${startIndex} ('!{'). Ensure braces are balanced.`,
);
}
}
return injections;
return [{ text: processedPrompt }];
}
}
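The central argument rule in this processor is that {{args}} outside a !{...} block is substituted raw, while {{args}} inside a block is shell-escaped before the resolved command is checked and executed. A reduced sketch of that split, showing substitution only (no execution or permission checks) and using a naive POSIX-style single-quote escaper as a hypothetical stand-in for escapeShellArg:

import { extractInjections } from './injectionParser.js';

// Hypothetical stand-in for escapeShellArg; real escaping is shell-specific.
const naiveEscape = (s: string): string => `'${s.replaceAll("'", `'\\''`)}'`;

// Substitute {{args}} raw outside !{...} blocks and escaped inside them.
// The real processor then permission-checks and executes the inner commands.
function resolveArgs(prompt: string, rawArgs: string): string {
  const escaped = naiveEscape(rawArgs);
  const injections = extractInjections(prompt, '!{');
  let result = '';
  let lastIndex = 0;
  for (const { content, startIndex, endIndex } of injections) {
    result += prompt
      .substring(lastIndex, startIndex)
      .replaceAll('{{args}}', rawArgs);
    result += `!{${content.replaceAll('{{args}}', escaped)}}`;
    lastIndex = endIndex;
  }
  result += prompt.substring(lastIndex).replaceAll('{{args}}', rawArgs);
  return result;
}

// resolveArgs('User said {{args}}: !{grep {{args}} file.txt}', 'hello world')
//   -> "User said hello world: !{grep 'hello world' file.txt}"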

View File

@@ -4,7 +4,13 @@
* SPDX-License-Identifier: Apache-2.0
*/
import { CommandContext } from '../../ui/commands/types.js';
import type { CommandContext } from '../../ui/commands/types.js';
import type { PartUnion } from '@google/genai';
/**
* Defines the input/output type for prompt processors.
*/
export type PromptPipelineContent = PartUnion[];
/**
* Defines the interface for a prompt processor, a module that can transform
@@ -13,12 +19,8 @@ import { CommandContext } from '../../ui/commands/types.js';
*/
export interface IPromptProcessor {
/**
* Processes a prompt string, applying a specific transformation as part of a pipeline.
*
* Each processor in a command's pipeline receives the output of the previous
* processor. This method provides the full command context, allowing for
* complex transformations that may require access to invocation details,
* application services, or UI state.
* Processes a prompt input (which may contain text and multi-modal parts),
* applying a specific transformation as part of a pipeline.
*
* @param prompt The current state of the prompt string. This may have been
* modified by previous processors in the pipeline.
@@ -28,7 +30,10 @@ export interface IPromptProcessor {
* @returns A promise that resolves to the transformed prompt string, which
* will be passed to the next processor or, if it's the last one, sent to the model.
*/
process(prompt: string, context: CommandContext): Promise<string>;
process(
prompt: PromptPipelineContent,
context: CommandContext,
): Promise<PromptPipelineContent>;
}
/**
@@ -42,3 +47,8 @@ export const SHORTHAND_ARGS_PLACEHOLDER = '{{args}}';
* The trigger string for shell command injection in custom commands.
*/
export const SHELL_INJECTION_TRIGGER = '!{';
/**
* The trigger string for at file injection in custom commands.
*/
export const AT_FILE_INJECTION_TRIGGER = '@{';
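For orientation, a minimal processor that satisfies the widened interface: it receives the whole PromptPipelineContent array and returns a new one, touching only the parts it understands. The class is purely illustrative and not part of this change:

import type { CommandContext } from '../../ui/commands/types.js';
import type { IPromptProcessor, PromptPipelineContent } from './types.js';

// Illustrative only: prefix the first text part with a fixed banner and
// leave everything else (including non-text parts) untouched.
export class BannerProcessor implements IPromptProcessor {
  constructor(private readonly banner: string) {}

  async process(
    prompt: PromptPipelineContent,
    _context: CommandContext,
  ): Promise<PromptPipelineContent> {
    const [first, ...rest] = prompt;
    if (
      first !== undefined &&
      typeof first === 'object' &&
      'text' in first &&
      typeof first.text === 'string'
    ) {
      return [{ ...first, text: `${this.banner}\n\n${first.text}` }, ...rest];
    }
    return prompt;
  }
}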

Some files were not shown because too many files have changed in this diff.