mirror of https://github.com/QwenLM/qwen-code.git (synced 2026-01-15 05:19:15 +00:00)

Compare commits: qwencode-s ... feat/exten

89 Commits
Commit SHA1s:

b37ede07e8 0a88dd7861 70991e474f 551e546974 74013bd8b2 18713ef2b0
50dac93c80 22504b0a5b 105ad743fa ac3f7cb8c8 e27e9a5f18 2578d8c151
a877fedc52 2bc8079519 25dbe98e6e e5dbd69899 17eb20c134 5d59ceb6f3
7f645b9726 8c109be48c e9a1d9a927 8aceddffa2 cebe0448d0 919560e3a4
26bd4f882d 3787e95572 7233d37bd1 93dcca5147 f7d04323f3 9a27857f10
452f4f3c0e 5cc01e5e09 ac0be9fb84 257c6705e1 27e7438b75 8a3ff8db12
26f8b67d4f b64d636280 781c57b438 c81c24d45d c53bdde747 99db18069d
a0a5b831d4 8f74dd224c b931d28f35 4407597794 101bd5f9b3 61c626b618
9f65bd3b39 a28278e950 2b3830cf83 90bf101040 2b9140940d a8f7bab544
4efdea0981 05791d4200 add35d2904 4ca62ba836 660901e1fd 8e64c5acaf
bc2a7efcb3 1dfd880e17 4f970c9987 398a1044ce 251031cfc5 10a0c843c1
77c257d9d0 955547d523 3bc862df89 4311af96eb b49c11e9a2 9cdd85c62a
87d8d82be7 fefc138485 f07259a7c9 4d9f25e9fe 18e9b2340b ad427da340
484e0fd943 b8a16d362a 17129024f4 15efeb0107 d2bc46cbb4 84eb5c562f
7b01b26ff5 0f3e97ea1c 177fc42f04 2560c2d1a2 bd6e16d41b
131 .github/workflows/release-sdk.yml (vendored)

```
@@ -33,6 +33,10 @@ on:
type: 'boolean'
default: false

concurrency:
group: '${{ github.workflow }}'
cancel-in-progress: false

jobs:
release-sdk:
runs-on: 'ubuntu-latest'
@@ -46,6 +50,7 @@ jobs:
packages: 'write'
id-token: 'write'
issues: 'write'
pull-requests: 'write'
outputs:
RELEASE_TAG: '${{ steps.version.outputs.RELEASE_TAG }}'

@@ -86,6 +91,8 @@ jobs:
with:
node-version-file: '.nvmrc'
cache: 'npm'
registry-url: 'https://registry.npmjs.org'
scope: '@qwen-code'

- name: 'Install Dependencies'
run: |-

@@ -121,6 +128,14 @@ jobs:
IS_PREVIEW: '${{ steps.vars.outputs.is_preview }}'
MANUAL_VERSION: '${{ inputs.version }}'

- name: 'Set SDK package version (local only)'
env:
RELEASE_VERSION: '${{ steps.version.outputs.RELEASE_VERSION }}'
run: |-
# Ensure the package version matches the computed release version.
# This is required for nightly/preview because npm does not allow re-publishing the same version.
npm version -w @qwen-code/sdk "${RELEASE_VERSION}" --no-git-tag-version --allow-same-version

- name: 'Build CLI Bundle'
run: |
npm run build
@@ -153,7 +168,21 @@ jobs:
git config user.name "github-actions[bot]"
git config user.email "github-actions[bot]@users.noreply.github.com"

- name: 'Build SDK'
working-directory: 'packages/sdk-typescript'
run: |-
npm run build

- name: 'Publish @qwen-code/sdk'
working-directory: 'packages/sdk-typescript'
run: |-
npm publish --access public --tag=${{ steps.version.outputs.NPM_TAG }} ${{ steps.vars.outputs.is_dry_run == 'true' && '--dry-run' || '' }}
env:
NODE_AUTH_TOKEN: '${{ secrets.NPM_TOKEN }}'

- name: 'Create and switch to a release branch'
if: |-
${{ steps.vars.outputs.is_dry_run == 'false' && steps.vars.outputs.is_nightly == 'false' && steps.vars.outputs.is_preview == 'false' }}
id: 'release_branch'
env:
RELEASE_TAG: '${{ steps.version.outputs.RELEASE_TAG }}'
@@ -162,50 +191,22 @@ jobs:
git switch -c "${BRANCH_NAME}"
echo "BRANCH_NAME=${BRANCH_NAME}" >> "${GITHUB_OUTPUT}"

- name: 'Update package version'
working-directory: 'packages/sdk-typescript'
env:
RELEASE_VERSION: '${{ steps.version.outputs.RELEASE_VERSION }}'
run: |-
npm version "${RELEASE_VERSION}" --no-git-tag-version --allow-same-version

- name: 'Commit and Conditionally Push package version'
- name: 'Commit and Push package version (stable only)'
if: |-
${{ steps.vars.outputs.is_dry_run == 'false' && steps.vars.outputs.is_nightly == 'false' && steps.vars.outputs.is_preview == 'false' }}
env:
BRANCH_NAME: '${{ steps.release_branch.outputs.BRANCH_NAME }}'
IS_DRY_RUN: '${{ steps.vars.outputs.is_dry_run }}'
RELEASE_TAG: '${{ steps.version.outputs.RELEASE_TAG }}'
run: |-
git add packages/sdk-typescript/package.json
# Only persist version bumps after a successful publish.
git add packages/sdk-typescript/package.json package-lock.json
if git diff --staged --quiet; then
echo "No version changes to commit"
else
git commit -m "chore(release): sdk-typescript ${RELEASE_TAG}"
fi
if [[ "${IS_DRY_RUN}" == "false" ]]; then
echo "Pushing release branch to remote..."
git push --set-upstream origin "${BRANCH_NAME}" --follow-tags
else
echo "Dry run enabled. Skipping push."
fi

- name: 'Build SDK'
working-directory: 'packages/sdk-typescript'
run: |-
npm run build

- name: 'Configure npm for publishing'
uses: 'actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020' # ratchet:actions/setup-node@v4
with:
node-version-file: '.nvmrc'
registry-url: 'https://registry.npmjs.org'
scope: '@qwen-code'

- name: 'Publish @qwen-code/sdk'
working-directory: 'packages/sdk-typescript'
run: |-
npm publish --access public --tag=${{ steps.version.outputs.NPM_TAG }} ${{ steps.vars.outputs.is_dry_run == 'true' && '--dry-run' || '' }}
env:
NODE_AUTH_TOKEN: '${{ secrets.NPM_TOKEN }}'
echo "Pushing release branch to remote..."
git push --set-upstream origin "${BRANCH_NAME}" --follow-tags

- name: 'Create GitHub Release and Tag'
if: |-
@@ -215,12 +216,68 @@ jobs:
RELEASE_BRANCH: '${{ steps.release_branch.outputs.BRANCH_NAME }}'
RELEASE_TAG: '${{ steps.version.outputs.RELEASE_TAG }}'
PREVIOUS_RELEASE_TAG: '${{ steps.version.outputs.PREVIOUS_RELEASE_TAG }}'
IS_NIGHTLY: '${{ steps.vars.outputs.is_nightly }}'
IS_PREVIEW: '${{ steps.vars.outputs.is_preview }}'
REF: '${{ github.event.inputs.ref || github.sha }}'
run: |-
# For stable releases, use the release branch; for nightly/preview, use the current ref
if [[ "${IS_NIGHTLY}" == "true" || "${IS_PREVIEW}" == "true" ]]; then
TARGET="${REF}"
PRERELEASE_FLAG="--prerelease"
else
TARGET="${RELEASE_BRANCH}"
PRERELEASE_FLAG=""
fi

gh release create "sdk-typescript-${RELEASE_TAG}" \
--target "$RELEASE_BRANCH" \
--target "${TARGET}" \
--title "SDK TypeScript Release ${RELEASE_TAG}" \
--notes-start-tag "sdk-typescript-${PREVIOUS_RELEASE_TAG}" \
--generate-notes
--generate-notes \
${PRERELEASE_FLAG}

- name: 'Create PR to merge release branch into main'
if: |-
${{ steps.vars.outputs.is_dry_run == 'false' && steps.vars.outputs.is_nightly == 'false' && steps.vars.outputs.is_preview == 'false' }}
id: 'pr'
env:
GITHUB_TOKEN: '${{ secrets.GITHUB_TOKEN }}'
RELEASE_BRANCH: '${{ steps.release_branch.outputs.BRANCH_NAME }}'
RELEASE_TAG: '${{ steps.version.outputs.RELEASE_TAG }}'
run: |-
set -euo pipefail

pr_url="$(gh pr list --head "${RELEASE_BRANCH}" --base main --json url --jq '.[0].url')"
if [[ -z "${pr_url}" ]]; then
pr_url="$(gh pr create \
--base main \
--head "${RELEASE_BRANCH}" \
--title "chore(release): sdk-typescript ${RELEASE_TAG}" \
--body "Automated release PR for sdk-typescript ${RELEASE_TAG}.")"
fi

echo "PR_URL=${pr_url}" >> "${GITHUB_OUTPUT}"

- name: 'Wait for CI checks to complete'
if: |-
${{ steps.vars.outputs.is_dry_run == 'false' && steps.vars.outputs.is_nightly == 'false' && steps.vars.outputs.is_preview == 'false' }}
env:
GITHUB_TOKEN: '${{ secrets.GITHUB_TOKEN }}'
PR_URL: '${{ steps.pr.outputs.PR_URL }}'
run: |-
set -euo pipefail
echo "Waiting for CI checks to complete..."
gh pr checks "${PR_URL}" --watch --interval 30

- name: 'Enable auto-merge for release PR'
if: |-
${{ steps.vars.outputs.is_dry_run == 'false' && steps.vars.outputs.is_nightly == 'false' && steps.vars.outputs.is_preview == 'false' }}
env:
GITHUB_TOKEN: '${{ secrets.GITHUB_TOKEN }}'
PR_URL: '${{ steps.pr.outputs.PR_URL }}'
run: |-
set -euo pipefail
gh pr merge "${PR_URL}" --merge --auto

- name: 'Create Issue on Failure'
if: |-
```
1 .gitignore (vendored)

```
@@ -23,6 +23,7 @@ package-lock.json
.idea
*.iml
.cursor
.qoder

# OS metadata
.DS_Store
```

@@ -191,6 +191,7 @@ See [settings](https://qwenlm.github.io/qwen-code-docs/en/users/configuration/se

Looking for a graphical interface?

- [**AionUi**](https://github.com/iOfficeAI/AionUi) A modern GUI for command-line AI tools including Qwen Code
- [**Gemini CLI Desktop**](https://github.com/Piebald-AI/gemini-cli-desktop) A cross-platform desktop/web/mobile UI for Qwen Code

## Troubleshooting

@@ -43,6 +43,7 @@ Qwen Code uses JSON settings files for persistent configuration. There are four

In addition to a project settings file, a project's `.qwen` directory can contain other project-specific files related to Qwen Code's operation, such as:

- [Custom sandbox profiles](../features/sandbox) (e.g. `.qwen/sandbox-macos-custom.sb`, `.qwen/sandbox.Dockerfile`).
- [Agent Skills](../features/skills) (experimental) under `.qwen/skills/` (each Skill is a directory containing a `SKILL.md`).

### Available settings in `settings.json`

@@ -380,6 +381,8 @@ Arguments passed directly when running the CLI can override other configurations

| `--telemetry-otlp-protocol` | | Sets the OTLP protocol for telemetry (`grpc` or `http`). | | Defaults to `grpc`. See [telemetry](../../developers/development/telemetry) for more information. |
| `--telemetry-log-prompts` | | Enables logging of prompts for telemetry. | | See [telemetry](../../developers/development/telemetry) for more information. |
| `--checkpointing` | | Enables [checkpointing](../features/checkpointing). | | |
| `--experimental-acp` | | Enables ACP mode (Agent Control Protocol). Useful for IDE/editor integrations like [Zed](../integration-zed). | | Experimental. |
| `--experimental-skills` | | Enables experimental [Agent Skills](../features/skills) (registers the `skill` tool and loads Skills from `.qwen/skills/` and `~/.qwen/skills/`). | | Experimental. |
| `--extensions` | `-e` | Specifies a list of extensions to use for the session. | Extension names | If not provided, all available extensions are used. Use the special term `qwen -e none` to disable all extensions. Example: `qwen -e my-extension -e my-other-extension` |
| `--list-extensions` | `-l` | Lists all available extensions and exits. | | |
| `--proxy` | | Sets the proxy for the CLI. | Proxy URL | Example: `--proxy http://localhost:7890`. |

```
@@ -1,6 +1,7 @@
export default {
  commands: 'Commands',
  'sub-agents': 'SubAgents',
  skills: 'Skills (Experimental)',
  headless: 'Headless Mode',
  checkpointing: {
    display: 'hidden',
@@ -9,4 +10,5 @@ export default {
  mcp: 'MCP',
  'token-caching': 'Token Caching',
  sandbox: 'Sandboxing',
  language: 'i18n',
};
```

@@ -48,7 +48,7 @@ Commands specifically for controlling interface and output language.

| → `ui [language]` | Set UI interface language | `/language ui zh-CN` |
| → `output [language]` | Set LLM output language | `/language output Chinese` |

- Available UI languages: `zh-CN` (Simplified Chinese), `en-US` (English)
- Available built-in UI languages: `zh-CN` (Simplified Chinese), `en-US` (English), `ru-RU` (Russian), `de-DE` (German)
- Output language examples: `Chinese`, `English`, `Japanese`, etc.

### 1.4 Tool and Model Management

@@ -72,17 +72,16 @@ Commands for managing AI tools and models.

Commands for obtaining information and performing system settings.

| Command | Description | Usage Examples |
| --------------- | ----------------------------------------------- | ------------------------------------------------ |
| `/help` | Display help information for available commands | `/help` or `/?` |
| `/about` | Display version information | `/about` |
| `/stats` | Display detailed statistics for current session | `/stats` |
| `/settings` | Open settings editor | `/settings` |
| `/auth` | Change authentication method | `/auth` |
| `/bug` | Submit issue about Qwen Code | `/bug Button click unresponsive` |
| `/copy` | Copy last output content to clipboard | `/copy` |
| `/quit-confirm` | Show confirmation dialog before quitting | `/quit-confirm` (shortcut: press `Ctrl+C` twice) |
| `/quit` | Exit Qwen Code immediately | `/quit` or `/exit` |

| Command | Description | Usage Examples |
| ----------- | ----------------------------------------------- | -------------------------------- |
| `/help` | Display help information for available commands | `/help` or `/?` |
| `/about` | Display version information | `/about` |
| `/stats` | Display detailed statistics for current session | `/stats` |
| `/settings` | Open settings editor | `/settings` |
| `/auth` | Change authentication method | `/auth` |
| `/bug` | Submit issue about Qwen Code | `/bug Button click unresponsive` |
| `/copy` | Copy last output content to clipboard | `/copy` |
| `/quit` | Exit Qwen Code immediately | `/quit` or `/exit` |

### 1.6 Common Shortcuts

@@ -189,19 +189,20 @@ qwen -p "Write code" --output-format stream-json --include-partial-messages | jq

Key command-line options for headless usage:

| Option | Description | Example |
| ---------------------------- | --------------------------------------------------- | ------------------------------------------------------------------------ |
| `--prompt`, `-p` | Run in headless mode | `qwen -p "query"` |
| `--output-format`, `-o` | Specify output format (text, json, stream-json) | `qwen -p "query" --output-format json` |
| `--input-format` | Specify input format (text, stream-json) | `qwen --input-format text --output-format stream-json` |
| `--include-partial-messages` | Include partial messages in stream-json output | `qwen -p "query" --output-format stream-json --include-partial-messages` |
| `--debug`, `-d` | Enable debug mode | `qwen -p "query" --debug` |
| `--all-files`, `-a` | Include all files in context | `qwen -p "query" --all-files` |
| `--include-directories` | Include additional directories | `qwen -p "query" --include-directories src,docs` |
| `--yolo`, `-y` | Auto-approve all actions | `qwen -p "query" --yolo` |
| `--approval-mode` | Set approval mode | `qwen -p "query" --approval-mode auto_edit` |
| `--continue` | Resume the most recent session for this project | `qwen --continue -p "Pick up where we left off"` |
| `--resume [sessionId]` | Resume a specific session (or choose interactively) | `qwen --resume 123e... -p "Finish the refactor"` |

| Option | Description | Example |
| ---------------------------- | ------------------------------------------------------- | ------------------------------------------------------------------------ |
| `--prompt`, `-p` | Run in headless mode | `qwen -p "query"` |
| `--output-format`, `-o` | Specify output format (text, json, stream-json) | `qwen -p "query" --output-format json` |
| `--input-format` | Specify input format (text, stream-json) | `qwen --input-format text --output-format stream-json` |
| `--include-partial-messages` | Include partial messages in stream-json output | `qwen -p "query" --output-format stream-json --include-partial-messages` |
| `--debug`, `-d` | Enable debug mode | `qwen -p "query" --debug` |
| `--all-files`, `-a` | Include all files in context | `qwen -p "query" --all-files` |
| `--include-directories` | Include additional directories | `qwen -p "query" --include-directories src,docs` |
| `--yolo`, `-y` | Auto-approve all actions | `qwen -p "query" --yolo` |
| `--approval-mode` | Set approval mode | `qwen -p "query" --approval-mode auto_edit` |
| `--continue` | Resume the most recent session for this project | `qwen --continue -p "Pick up where we left off"` |
| `--resume [sessionId]` | Resume a specific session (or choose interactively) | `qwen --resume 123e... -p "Finish the refactor"` |
| `--experimental-skills` | Enable experimental Skills (registers the `skill` tool) | `qwen --experimental-skills -p "What Skills are available?"` |

For complete details on all available configuration options, settings files, and environment variables, see the [Configuration Guide](../configuration/settings).
136 docs/users/features/language.md (new file)

@@ -0,0 +1,136 @@

# Internationalization (i18n) & Language

Qwen Code is built for multilingual workflows: it supports UI localization (i18n/l10n) in the CLI, lets you choose the assistant output language, and allows custom UI language packs.

## Overview

From a user point of view, Qwen Code’s “internationalization” spans multiple layers:

| Capability / Setting | What it controls | Where stored |
| ------------------------ | ---------------------------------------------------------------------- | ---------------------------- |
| `/language ui` | Terminal UI text (menus, system messages, prompts) | `~/.qwen/settings.json` |
| `/language output` | Language the AI responds in (an output preference, not UI translation) | `~/.qwen/output-language.md` |
| Custom UI language packs | Overrides/extends built-in UI translations | `~/.qwen/locales/*.js` |

## UI Language

This is the CLI’s UI localization layer (i18n/l10n): it controls the language of menus, prompts, and system messages.

### Setting the UI Language

Use the `/language ui` command:

```bash
/language ui zh-CN # Chinese
/language ui en-US # English
/language ui ru-RU # Russian
/language ui de-DE # German
```

Aliases are also supported:

```bash
/language ui zh # Chinese
/language ui en # English
/language ui ru # Russian
/language ui de # German
```

### Auto-detection

On first startup, Qwen Code detects your system locale and sets the UI language automatically.

Detection priority:

1. `QWEN_CODE_LANG` environment variable
2. `LANG` environment variable
3. System locale via JavaScript Intl API
4. Default: English
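
For illustration, here is a minimal TypeScript sketch of that priority order. The function name and the locale-to-language matching are assumptions made for the example; they are not Qwen Code's actual implementation.

```typescript
// Hypothetical sketch of the documented detection order:
// QWEN_CODE_LANG -> LANG -> Intl API -> English.
const SUPPORTED_UI_LANGUAGES = ['zh-CN', 'en-US', 'ru-RU', 'de-DE'] as const;
type UiLanguage = (typeof SUPPORTED_UI_LANGUAGES)[number];

function detectUiLanguage(env: NodeJS.ProcessEnv = process.env): UiLanguage {
  // 1. Explicit override via QWEN_CODE_LANG (e.g. "zh" or "zh-CN").
  // 2. POSIX locale via LANG (e.g. "de_DE.UTF-8").
  // 3. System locale reported by the Intl API.
  const raw =
    env['QWEN_CODE_LANG'] ??
    env['LANG'] ??
    Intl.DateTimeFormat().resolvedOptions().locale;

  const prefix = raw.toLowerCase().slice(0, 2);
  const match = SUPPORTED_UI_LANGUAGES.find((lang) =>
    lang.toLowerCase().startsWith(prefix),
  );

  // 4. Fall back to English when nothing matches.
  return match ?? 'en-US';
}
```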

## LLM Output Language

The LLM output language controls what language the AI assistant responds in, regardless of what language you type your questions in.

### How It Works

The LLM output language is controlled by a rule file at `~/.qwen/output-language.md`. This file is automatically included in the LLM's context during startup, instructing it to respond in the specified language.

### Auto-detection

On first startup, if no `output-language.md` file exists, Qwen Code automatically creates one based on your system locale. For example:

- System locale `zh` creates a rule for Chinese responses
- System locale `en` creates a rule for English responses
- System locale `ru` creates a rule for Russian responses
- System locale `de` creates a rule for German responses
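
As a rough illustration of that first-run behavior, the sketch below maps a locale prefix to a response language and writes the rule file only if it does not exist yet. The rule wording and the mapping table are assumptions for the example; only the `~/.qwen/output-language.md` path comes from the documentation above.

```typescript
import { existsSync, mkdirSync, writeFileSync } from 'node:fs';
import { homedir } from 'node:os';
import { join } from 'node:path';

// Hypothetical locale-prefix -> response-language mapping for the example.
const OUTPUT_LANGUAGE_BY_LOCALE: Record<string, string> = {
  zh: 'Chinese',
  en: 'English',
  ru: 'Russian',
  de: 'German',
};

function ensureOutputLanguageRule(locale: string): string {
  const dir = join(homedir(), '.qwen');
  const rulePath = join(dir, 'output-language.md');

  if (!existsSync(rulePath)) {
    const language = OUTPUT_LANGUAGE_BY_LOCALE[locale.slice(0, 2)] ?? 'English';
    mkdirSync(dir, { recursive: true });
    // Example rule text; the real file content may differ.
    writeFileSync(rulePath, `Always respond in ${language}.\n`, 'utf8');
  }
  return rulePath;
}
```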

### Manual Setting

Use `/language output <language>` to change:

```bash
/language output Chinese
/language output English
/language output Japanese
/language output German
```

Any language name works. The LLM will be instructed to respond in that language.

> [!note]
>
> After changing the output language, restart Qwen Code for the change to take effect.

### File Location

```
~/.qwen/output-language.md
```

## Configuration

### Via Settings Dialog

1. Run `/settings`
2. Find "Language" under General
3. Select your preferred UI language

### Via Environment Variable

```bash
export QWEN_CODE_LANG=zh
```

This influences auto-detection on first startup (if you haven’t set a UI language and no `output-language.md` file exists yet).

## Custom Language Packs

For UI translations, you can create custom language packs in `~/.qwen/locales/`:

- Example: `~/.qwen/locales/es.js` for Spanish
- Example: `~/.qwen/locales/fr.js` for French

Files in the user directory take precedence over built-in translations.

> [!tip]
>
> Contributions are welcome if you’d like to improve built-in translations or add new languages.
> For a concrete example, see [PR #1238: feat(i18n): add Russian language support](https://github.com/QwenLM/qwen-code/pull/1238).

### Language Pack Format

```javascript
// ~/.qwen/locales/es.js
export default {
  Hello: 'Hola',
  Settings: 'Configuración',
  // ... more translations
};
```

## Related Commands

- `/language` - Show current language settings
- `/language ui [lang]` - Set UI language
- `/language output <language>` - Set LLM output language
- `/settings` - Open settings dialog
282 docs/users/features/skills.md (new file)

@@ -0,0 +1,282 @@

# Agent Skills (Experimental)

> Create, manage, and share Skills to extend Qwen Code’s capabilities.

This guide shows you how to create, use, and manage Agent Skills in **Qwen Code**. Skills are modular capabilities that extend the model’s effectiveness through organized folders containing instructions (and optionally scripts/resources).

> [!note]
>
> Skills are currently **experimental** and must be enabled with `--experimental-skills`.

## Prerequisites

- Qwen Code (recent version)
- Run with the experimental flag enabled:

  ```bash
  qwen --experimental-skills
  ```

- Basic familiarity with Qwen Code ([Quickstart](../quickstart.md))

## What are Agent Skills?

Agent Skills package expertise into discoverable capabilities. Each Skill consists of a `SKILL.md` file with instructions that the model can load when relevant, plus optional supporting files like scripts and templates.

### How Skills are invoked

Skills are **model-invoked** — the model autonomously decides when to use them based on your request and the Skill’s description. This is different from slash commands, which are **user-invoked** (you explicitly type `/command`).

### Benefits

- Extend Qwen Code for your workflows
- Share expertise across your team via git
- Reduce repetitive prompting
- Compose multiple Skills for complex tasks

## Create a Skill

Skills are stored as directories containing a `SKILL.md` file.

### Personal Skills

Personal Skills are available across all your projects. Store them in `~/.qwen/skills/`:

```bash
mkdir -p ~/.qwen/skills/my-skill-name
```

Use personal Skills for:

- Your individual workflows and preferences
- Experimental Skills you’re developing
- Personal productivity helpers

### Project Skills

Project Skills are shared with your team. Store them in `.qwen/skills/` within your project:

```bash
mkdir -p .qwen/skills/my-skill-name
```

Use project Skills for:

- Team workflows and conventions
- Project-specific expertise
- Shared utilities and scripts

Project Skills can be checked into git and automatically become available to teammates.

## Write `SKILL.md`

Create a `SKILL.md` file with YAML frontmatter and Markdown content:

```yaml
---
name: your-skill-name
description: Brief description of what this Skill does and when to use it
---

# Your Skill Name

## Instructions
Provide clear, step-by-step guidance for Qwen Code.

## Examples
Show concrete examples of using this Skill.
```

### Field requirements

Qwen Code currently validates that:

- `name` is a non-empty string
- `description` is a non-empty string

Recommended conventions (not strictly enforced yet):

- Use lowercase letters, numbers, and hyphens in `name`
- Make `description` specific: include both **what** the Skill does and **when** to use it (key words users will naturally mention)
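
For illustration, here is a minimal TypeScript sketch of that validation, assuming a generic frontmatter parser such as gray-matter. The function name and error messages are invented for the example and are not Qwen Code's actual Skill loader.

```typescript
import { readFileSync } from 'node:fs';
import matter from 'gray-matter';

interface SkillMetadata {
  name: string;
  description: string;
}

// Parse SKILL.md and enforce the two documented requirements:
// `name` and `description` must be non-empty strings.
function loadSkillMetadata(skillMdPath: string): SkillMetadata {
  const { data } = matter(readFileSync(skillMdPath, 'utf8'));
  const { name, description } = data as Partial<SkillMetadata>;

  if (typeof name !== 'string' || name.trim() === '') {
    throw new Error(`${skillMdPath}: frontmatter "name" must be a non-empty string`);
  }
  if (typeof description !== 'string' || description.trim() === '') {
    throw new Error(`${skillMdPath}: frontmatter "description" must be a non-empty string`);
  }
  return { name, description };
}
```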

## Add supporting files

Create additional files alongside `SKILL.md`:

```text
my-skill/
├── SKILL.md (required)
├── reference.md (optional documentation)
├── examples.md (optional examples)
├── scripts/
│   └── helper.py (optional utility)
└── templates/
    └── template.txt (optional template)
```

Reference these files from `SKILL.md`:

````markdown
For advanced usage, see [reference.md](reference.md).

Run the helper script:

```bash
python scripts/helper.py input.txt
```
````

## View available Skills

When `--experimental-skills` is enabled, Qwen Code discovers Skills from:

- Personal Skills: `~/.qwen/skills/`
- Project Skills: `.qwen/skills/`

To view available Skills, ask Qwen Code directly:

```text
What Skills are available?
```

Or inspect the filesystem:

```bash
# List personal Skills
ls ~/.qwen/skills/

# List project Skills (if in a project directory)
ls .qwen/skills/

# View a specific Skill’s content
cat ~/.qwen/skills/my-skill/SKILL.md
```

## Test a Skill

After creating a Skill, test it by asking questions that match your description.

Example: if your description mentions “PDF files”:

```text
Can you help me extract text from this PDF?
```

The model autonomously decides to use your Skill if it matches the request — you don’t need to explicitly invoke it.

## Debug a Skill

If Qwen Code doesn’t use your Skill, check these common issues:

### Make the description specific

Too vague:

```yaml
description: Helps with documents
```

Specific:

```yaml
description: Extract text and tables from PDF files, fill forms, merge documents. Use when working with PDFs, forms, or document extraction.
```

### Verify file path

- Personal Skills: `~/.qwen/skills/<skill-name>/SKILL.md`
- Project Skills: `.qwen/skills/<skill-name>/SKILL.md`

```bash
# Personal
ls ~/.qwen/skills/my-skill/SKILL.md

# Project
ls .qwen/skills/my-skill/SKILL.md
```

### Check YAML syntax

Invalid YAML prevents the Skill metadata from loading correctly.

```bash
cat SKILL.md | head -n 15
```

Ensure:

- Opening `---` on line 1
- Closing `---` before Markdown content
- Valid YAML syntax (no tabs, correct indentation)

### View errors

Run Qwen Code with debug mode to see Skill loading errors:

```bash
qwen --experimental-skills --debug
```

## Share Skills with your team

You can share Skills through project repositories:

1. Add the Skill under `.qwen/skills/`
2. Commit and push
3. Teammates pull the changes and run with `--experimental-skills`

```bash
git add .qwen/skills/
git commit -m "Add team Skill for PDF processing"
git push
```

## Update a Skill

Edit `SKILL.md` directly:

```bash
# Personal Skill
code ~/.qwen/skills/my-skill/SKILL.md

# Project Skill
code .qwen/skills/my-skill/SKILL.md
```

Changes take effect the next time you start Qwen Code. If Qwen Code is already running, restart it to load the updates.

## Remove a Skill

Delete the Skill directory:

```bash
# Personal
rm -rf ~/.qwen/skills/my-skill

# Project
git rm -r .qwen/skills/my-skill
git commit -m "Remove unused Skill"
```

## Best practices

### Keep Skills focused

One Skill should address one capability:

- Focused: “PDF form filling”, “Excel analysis”, “Git commit messages”
- Too broad: “Document processing” (split into smaller Skills)

### Write clear descriptions

Help the model discover when to use Skills by including specific triggers:

```yaml
description: Analyze Excel spreadsheets, create pivot tables, and generate charts. Use when working with Excel files, spreadsheets, or .xlsx data.
```

### Test with your team

- Does the Skill activate when expected?
- Are the instructions clear?
- Are there missing examples or edge cases?

@@ -1,4 +1,6 @@

# Qwen Code overview

[](https://npm-compare.com/@qwen-code/qwen-code)
[](https://www.npmjs.com/package/@qwen-code/qwen-code)

> Learn about Qwen Code, Qwen's agentic coding tool that lives in your terminal and helps you turn ideas into code faster than ever before.

@@ -46,7 +48,7 @@ You'll be prompted to log in on first use. That's it! [Continue with Quickstart

> [!note]
>
> **New VS Code Extension (Beta)**: Prefer a graphical interface? Our new **VS Code extension** provides an easy-to-use native IDE experience without requiring terminal familiarity. Simply install from the marketplace and start coding with Qwen Code directly in your sidebar. You can search for **Qwen Code** in the VS Code Marketplace and download it.
> **New VS Code Extension (Beta)**: Prefer a graphical interface? Our new **VS Code extension** provides an easy-to-use native IDE experience without requiring terminal familiarity. Simply install from the marketplace and start coding with Qwen Code directly in your sidebar. Download and install the [Qwen Code Companion](https://marketplace.visualstudio.com/items?itemName=qwenlm.qwen-code-vscode-ide-companion) now.

## What Qwen Code does for you

```
@@ -24,6 +24,8 @@ export default tseslint.config(
'.integration-tests/**',
'packages/**/.integration-test/**',
'dist/**',
'docs-site/.next/**',
'docs-site/out/**',
],
},
eslint.configs.recommended,
```

```
@@ -5,8 +5,6 @@
*/

import { describe, it, expect } from 'vitest';
import { existsSync } from 'node:fs';
import * as path from 'node:path';
import { TestRig, printDebugInfo, validateModelOutput } from './test-helper.js';

describe('file-system', () => {
@@ -202,8 +200,8 @@ describe('file-system', () => {
const readAttempt = toolLogs.find(
(log) => log.toolRequest.name === 'read_file',
);
const writeAttempt = toolLogs.find(
(log) => log.toolRequest.name === 'write_file',
const editAttempt = toolLogs.find(
(log) => log.toolRequest.name === 'edit_file',
);
const successfulReplace = toolLogs.find(
(log) => log.toolRequest.name === 'replace' && log.toolRequest.success,
@@ -226,15 +224,15 @@ describe('file-system', () => {

// CRITICAL: Verify that no matter what the model did, it never successfully
// wrote or replaced anything.
if (writeAttempt) {
if (editAttempt) {
console.error(
'A write_file attempt was made when no file should be written.',
'A edit_file attempt was made when no file should be written.',
);
printDebugInfo(rig, result);
}
expect(
writeAttempt,
'write_file should not have been called',
editAttempt,
'edit_file should not have been called',
).toBeUndefined();

if (successfulReplace) {
@@ -245,12 +243,5 @@ describe('file-system', () => {
successfulReplace,
'A successful replace should not have occurred',
).toBeUndefined();

// Final verification: ensure the file was not created.
const filePath = path.join(rig.testDir!, fileName);
const fileExists = existsSync(filePath);
expect(fileExists, 'The non-existent file should not be created').toBe(
false,
);
});
});
```

```
@@ -952,7 +952,8 @@ describe('Permission Control (E2E)', () => {
TEST_TIMEOUT,
);

it(
// FIXME: This test is flaky and sometimes fails with no tool calls.
it.skip(
'should allow read-only tools without restrictions',
async () => {
// Create test files for the model to read
```

```
@@ -314,4 +314,88 @@ describe('System Control (E2E)', () => {
);
});
});

describe('supportedCommands API', () => {
it('should return list of supported slash commands', async () => {
const sessionId = crypto.randomUUID();
const generator = (async function* () {
yield {
type: 'user',
session_id: sessionId,
message: { role: 'user', content: 'Hello' },
parent_tool_use_id: null,
} as SDKUserMessage;
})();

const q = query({
prompt: generator,
options: {
...SHARED_TEST_OPTIONS,
cwd: testDir,
model: 'qwen3-max',
debug: false,
},
});

try {
const result = await q.supportedCommands();
// Start consuming messages to trigger initialization
const messageConsumer = (async () => {
try {
for await (const _message of q) {
// Just consume messages
}
} catch (error) {
// Ignore errors from query being closed
if (error instanceof Error && error.message !== 'Query is closed') {
throw error;
}
}
})();

// Verify result structure
expect(result).toBeDefined();
expect(result).toHaveProperty('commands');
expect(Array.isArray(result?.['commands'])).toBe(true);

const commands = result?.['commands'] as string[];

// Verify default allowed built-in commands are present
expect(commands).toContain('init');
expect(commands).toContain('summary');
expect(commands).toContain('compress');

// Verify commands are sorted
const sortedCommands = [...commands].sort();
expect(commands).toEqual(sortedCommands);

// Verify all commands are strings
commands.forEach((cmd) => {
expect(typeof cmd).toBe('string');
expect(cmd.length).toBeGreaterThan(0);
});

await q.close();
await messageConsumer;
} catch (error) {
await q.close();
throw error;
}
});

it('should throw error when supportedCommands is called on closed query', async () => {
const q = query({
prompt: 'Hello',
options: {
...SHARED_TEST_OPTIONS,
cwd: testDir,
model: 'qwen3-max',
},
});

await q.close();

await expect(q.supportedCommands()).rejects.toThrow('Query is closed');
});
});
});
```
2115 package-lock.json (generated): file diff suppressed because it is too large.

```
@@ -18,9 +18,6 @@
"scripts": {
"start": "cross-env node scripts/start.js",
"debug": "cross-env DEBUG=1 node --inspect-brk scripts/start.js",
"auth:npm": "npx google-artifactregistry-auth",
"auth:docker": "gcloud auth configure-docker us-west1-docker.pkg.dev",
"auth": "npm run auth:npm && npm run auth:docker",
"generate": "node scripts/generate-git-commit-info.js",
"build": "node scripts/build.js",
"build-and-start": "npm run build && npm run start",
@@ -95,7 +92,6 @@
"eslint-plugin-react-hooks": "^5.2.0",
"glob": "^10.5.0",
"globals": "^16.0.0",
"google-artifactregistry-auth": "^3.4.0",
"husky": "^9.1.7",
"json": "^11.0.0",
"lint-staged": "^16.1.6",
```

```
@@ -36,16 +36,17 @@
"sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.6.0"
},
"dependencies": {
"@google/genai": "1.16.0",
"@google/genai": "1.30.0",
"@iarna/toml": "^2.2.5",
"@qwen-code/qwen-code-core": "file:../core",
"@modelcontextprotocol/sdk": "^1.15.1",
"@modelcontextprotocol/sdk": "^1.25.1",
"@types/update-notifier": "^6.0.8",
"ansi-regex": "^6.2.2",
"command-exists": "^1.2.9",
"comment-json": "^4.2.5",
"diff": "^7.0.0",
"dotenv": "^17.1.0",
"prompts": "^2.4.2",
"fzf": "^0.5.2",
"glob": "^10.5.0",
"highlight.js": "^11.11.1",
@@ -79,6 +80,7 @@
"@types/command-exists": "^1.2.3",
"@types/diff": "^7.0.2",
"@types/dotenv": "^6.1.1",
"@types/prompts": "^2.4.9",
"@types/node": "^20.11.24",
"@types/react": "^19.1.8",
"@types/react-dom": "^19.1.6",
```

```
@@ -98,6 +98,14 @@ export class AgentSideConnection implements Client {
);
}

/**
 * Sends a custom notification to the client.
 * Used for extension-specific notifications that are not part of the core ACP protocol.
 */
async sendCustomNotification<T>(method: string, params: T): Promise<void> {
  return await this.#connection.sendNotification(method, params);
}

/**
 * Request permission before running a tool
 *
@@ -374,6 +382,7 @@ export interface Client {
): Promise<schema.RequestPermissionResponse>;
sessionUpdate(params: schema.SessionNotification): Promise<void>;
authenticateUpdate(params: schema.AuthenticateUpdate): Promise<void>;
sendCustomNotification<T>(method: string, params: T): Promise<void>;
writeTextFile(
params: schema.WriteTextFileRequest,
): Promise<schema.WriteTextFileResponse>;
```
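
As a rough usage sketch of the `sendCustomNotification` helper added above: the wrapper function and payload shape below are assumptions for the example, while the `_qwencode/slash_command` method name is the one used by Session.ts later in this diff.

```typescript
// Hypothetical payload for an extension-specific notification.
interface SlashCommandNotification {
  sessionId: string;
  command: string;
  messageType: 'info' | 'error';
  message: string;
}

// `client` is assumed to implement the Client interface shown above.
async function notifySlashCommandResult(
  client: { sendCustomNotification<T>(method: string, params: T): Promise<void> },
  payload: SlashCommandNotification,
): Promise<void> {
  // Custom methods sit outside the core ACP protocol, so a vendor-style
  // prefix (here `_qwencode/`) keeps them from clashing with standard ones.
  await client.sendCustomNotification('_qwencode/slash_command', payload);
}
```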

```
@@ -15,10 +15,10 @@ import {
qwenOAuth2Events,
MCPServerConfig,
SessionService,
buildApiHistoryFromConversation,
type Config,
type ConversationRecord,
type DeviceAuthorizationData,
tokenLimit,
} from '@qwen-code/qwen-code-core';
import type { ApprovalModeValue } from './schema.js';
import * as acp from './acp.js';
@@ -27,10 +27,8 @@ import { Readable, Writable } from 'node:stream';
import type { LoadedSettings } from '../config/settings.js';
import { SettingScope } from '../config/settings.js';
import { z } from 'zod';
import { ExtensionStorage, type Extension } from '../config/extension.js';
import type { CliArgs } from '../config/config.js';
import { loadCliConfig } from '../config/config.js';
import { ExtensionEnablementManager } from '../config/extensions/extensionEnablement.js';

// Import the modular Session class
import { Session } from './session/Session.js';
@@ -38,7 +36,6 @@ import { Session } from './session/Session.js';
export async function runAcpAgent(
config: Config,
settings: LoadedSettings,
extensions: Extension[],
argv: CliArgs,
) {
const stdout = Writable.toWeb(process.stdout) as WritableStream;
@@ -51,8 +48,7 @@ export async function runAcpAgent(
console.debug = console.error;

new acp.AgentSideConnection(
(client: acp.Client) =>
new GeminiAgent(config, settings, extensions, argv, client),
(client: acp.Client) => new GeminiAgent(config, settings, argv, client),
stdout,
stdin,
);
@@ -65,7 +61,6 @@ class GeminiAgent {
constructor(
private config: Config,
private settings: LoadedSettings,
private extensions: Extension[],
private argv: CliArgs,
private client: acp.Client,
) {}
@@ -165,9 +160,30 @@ class GeminiAgent {
this.setupFileSystem(config);

const session = await this.createAndStoreSession(config);
const configuredModel = (
config.getModel() ||
this.config.getModel() ||
''
).trim();
const modelId = configuredModel || 'default';
const modelName = configuredModel || modelId;

return {
sessionId: session.getId(),
models: {
currentModelId: modelId,
availableModels: [
{
modelId,
name: modelName,
description: null,
_meta: {
contextLimit: tokenLimit(modelId),
},
},
],
_meta: null,
},
};
}

@@ -194,16 +210,7 @@ class GeminiAgent {
continue: false,
};

const config = await loadCliConfig(
settings,
this.extensions,
new ExtensionEnablementManager(
ExtensionStorage.getUserExtensionsDir(),
this.argv.extensions,
),
argvForSession,
cwd,
);
const config = await loadCliConfig(settings, argvForSession, cwd);

await config.initialize();
return config;
@@ -327,12 +334,20 @@ class GeminiAgent {
const sessionId = config.getSessionId();
const geminiClient = config.getGeminiClient();

const history = conversation
? buildApiHistoryFromConversation(conversation)
: undefined;
const chat = history
? await geminiClient.startChat(history)
: await geminiClient.startChat();
// Use GeminiClient to manage chat lifecycle properly
// This ensures geminiClient.chat is in sync with the session's chat
//
// Note: When loading a session, config.initialize() has already been called
// in newSessionConfig(), which in turn calls geminiClient.initialize().
// The GeminiClient.initialize() method checks config.getResumedSessionData()
// and automatically loads the conversation history into the chat instance.
// So we only need to initialize if it hasn't been done yet.
if (!geminiClient.isInitialized()) {
  await geminiClient.initialize();
}

// Now get the chat instance that's managed by GeminiClient
const chat = geminiClient.getChat();

const session = new Session(
sessionId,
```

```
@@ -93,6 +93,7 @@ export type ModeInfo = z.infer<typeof modeInfoSchema>;
export type ModesData = z.infer<typeof modesDataSchema>;

export type AgentInfo = z.infer<typeof agentInfoSchema>;
export type ModelInfo = z.infer<typeof modelInfoSchema>;

export type PromptCapabilities = z.infer<typeof promptCapabilitiesSchema>;

@@ -254,8 +255,26 @@ export const authenticateUpdateSchema = z.object({

export type AuthenticateUpdate = z.infer<typeof authenticateUpdateSchema>;

export const acpMetaSchema = z.record(z.unknown()).nullable().optional();

export const modelIdSchema = z.string();

export const modelInfoSchema = z.object({
  _meta: acpMetaSchema,
  description: z.string().nullable().optional(),
  modelId: modelIdSchema,
  name: z.string(),
});

export const sessionModelStateSchema = z.object({
  _meta: acpMetaSchema,
  availableModels: z.array(modelInfoSchema),
  currentModelId: modelIdSchema,
});

export const newSessionResponseSchema = z.object({
  sessionId: z.string(),
  models: sessionModelStateSchema,
});

export const loadSessionResponseSchema = z.null();
@@ -514,6 +533,13 @@ export const currentModeUpdateSchema = z.object({

export type CurrentModeUpdate = z.infer<typeof currentModeUpdateSchema>;

export const currentModelUpdateSchema = z.object({
  sessionUpdate: z.literal('current_model_update'),
  model: modelInfoSchema,
});

export type CurrentModelUpdate = z.infer<typeof currentModelUpdateSchema>;

export const sessionUpdateSchema = z.union([
z.object({
content: contentBlockSchema,
@@ -555,6 +581,7 @@ export const sessionUpdateSchema = z.union([
sessionUpdate: z.literal('plan'),
}),
currentModeUpdateSchema,
currentModelUpdateSchema,
availableCommandsUpdateSchema,
]);
```
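
To illustrate how a client might consume the new `current_model_update` session update, here is a self-contained zod sketch. It re-declares a minimal version of the schemas above rather than importing the real module, and the sample payload is invented for the example.

```typescript
import { z } from 'zod';

// Minimal re-declaration of the shapes added in schema.ts (for illustration only).
const modelInfoSchema = z.object({
  _meta: z.record(z.unknown()).nullable().optional(),
  description: z.string().nullable().optional(),
  modelId: z.string(),
  name: z.string(),
});

const currentModelUpdateSchema = z.object({
  sessionUpdate: z.literal('current_model_update'),
  model: modelInfoSchema,
});

// Example: validate an incoming session update before acting on it.
const incoming: unknown = {
  sessionUpdate: 'current_model_update',
  model: { modelId: 'qwen3-max', name: 'qwen3-max', description: null, _meta: null },
};

const parsed = currentModelUpdateSchema.safeParse(incoming);
if (parsed.success) {
  console.log(`Model switched to ${parsed.data.model.name}`);
} else {
  console.error(parsed.error.issues);
}
```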

```
@@ -41,9 +41,11 @@ import * as fs from 'node:fs/promises';
import * as path from 'node:path';
import { z } from 'zod';
import { getErrorMessage } from '../../utils/errors.js';
import { normalizePartList } from '../../utils/nonInteractiveHelpers.js';
import {
  handleSlashCommand,
  getAvailableCommands,
  type NonInteractiveSlashCommandResult,
} from '../../nonInteractiveCliCommands.js';
import type {
  AvailableCommand,
@@ -63,12 +65,6 @@ import { PlanEmitter } from './emitters/PlanEmitter.js';
import { MessageEmitter } from './emitters/MessageEmitter.js';
import { SubAgentTracker } from './SubAgentTracker.js';

/**
 * Built-in commands that are allowed in ACP integration mode.
 * Only safe, read-only commands that don't require interactive UI.
 */
export const ALLOWED_BUILTIN_COMMANDS_FOR_ACP = ['init'];

/**
 * Session represents an active conversation session with the AI model.
 * It uses modular components for consistent event emission:
@@ -167,24 +163,26 @@ export class Session implements SessionContext {
const firstTextBlock = params.prompt.find((block) => block.type === 'text');
const inputText = firstTextBlock?.text || '';

let parts: Part[];
let parts: Part[] | null;

if (isSlashCommand(inputText)) {
  // Handle slash command - allow specific built-in commands for ACP integration
  // Handle slash command - uses default allowed commands (init, summary, compress)
  const slashCommandResult = await handleSlashCommand(
    inputText,
    pendingSend,
    this.config,
    this.settings,
    ALLOWED_BUILTIN_COMMANDS_FOR_ACP,
  );

  if (slashCommandResult) {
    // Use the result from the slash command
    parts = slashCommandResult as Part[];
  } else {
    // Slash command didn't return a prompt, continue with normal processing
    parts = await this.#resolvePrompt(params.prompt, pendingSend.signal);
    parts = await this.#processSlashCommandResult(
      slashCommandResult,
      params.prompt,
    );

    // If parts is null, the command was fully handled (e.g., /summary completed)
    // Return early without sending to the model
    if (parts === null) {
      return { stopReason: 'end_turn' };
    }
  } else {
    // Normal processing for non-slash commands
@@ -295,11 +293,10 @@ export class Session implements SessionContext {
async sendAvailableCommandsUpdate(): Promise<void> {
  const abortController = new AbortController();
  try {
    // Use default allowed commands from getAvailableCommands
    const slashCommands = await getAvailableCommands(
      this.config,
      this.settings,
      abortController.signal,
      ALLOWED_BUILTIN_COMMANDS_FOR_ACP,
    );

    // Convert SlashCommand[] to AvailableCommand[] format for ACP protocol
@@ -647,6 +644,103 @@ export class Session implements SessionContext {
  }
}

/**
 * Processes the result of a slash command execution.
 *
 * Supported result types in ACP mode:
 * - submit_prompt: Submits content to the model
 * - stream_messages: Streams multiple messages to the client (ACP-specific)
 * - unsupported: Command cannot be executed in ACP mode
 * - no_command: No command was found, use original prompt
 *
 * Note: 'message' type is not supported in ACP mode - commands should use
 * 'stream_messages' instead for consistent async handling.
 *
 * @param result The result from handleSlashCommand
 * @param originalPrompt The original prompt blocks
 * @returns Parts to use for the prompt, or null if command was handled without needing model interaction
 */
async #processSlashCommandResult(
  result: NonInteractiveSlashCommandResult,
  originalPrompt: acp.ContentBlock[],
): Promise<Part[] | null> {
  switch (result.type) {
    case 'submit_prompt':
      // Command wants to submit a prompt to the model
      // Convert PartListUnion to Part[]
      return normalizePartList(result.content);

    case 'message': {
      // 'message' type is not ideal for ACP mode, but we handle it for compatibility
      // by converting it to a stream_messages-like notification
      await this.client.sendCustomNotification('_qwencode/slash_command', {
        sessionId: this.sessionId,
        command: originalPrompt
          .filter((block) => block.type === 'text')
          .map((block) => (block.type === 'text' ? block.text : ''))
          .join(' '),
        messageType: result.messageType,
        message: result.content || '',
      });

      if (result.messageType === 'error') {
        // Throw error to stop execution
        throw new Error(result.content || 'Slash command failed.');
      }
      // For info messages, return null to indicate command was handled
      return null;
    }

    case 'stream_messages': {
      // Command returns multiple messages via async generator (ACP-preferred)
      const command = originalPrompt
        .filter((block) => block.type === 'text')
        .map((block) => (block.type === 'text' ? block.text : ''))
        .join(' ');

      // Stream all messages to the client
      for await (const msg of result.messages) {
        await this.client.sendCustomNotification('_qwencode/slash_command', {
          sessionId: this.sessionId,
          command,
          messageType: msg.messageType,
          message: msg.content,
        });

        // If we encounter an error message, throw after sending
        if (msg.messageType === 'error') {
          throw new Error(msg.content || 'Slash command failed.');
        }
      }

      // All messages sent successfully, return null to indicate command was handled
      return null;
    }

    case 'unsupported': {
      // Command returned an unsupported result type
      const unsupportedError = `Slash command not supported in ACP integration: ${result.reason}`;
      throw new Error(unsupportedError);
    }

    case 'no_command':
      // No command was found or executed, use original prompt
      return originalPrompt.map((block) => {
        if (block.type === 'text') {
          return { text: block.text };
        }
        throw new Error(`Unsupported block type: ${block.type}`);
      });

    default: {
      // Exhaustiveness check
      const _exhaustive: never = result;
      const unknownError = `Unknown slash command result type: ${(_exhaustive as NonInteractiveSlashCommandResult).type}`;
      throw new Error(unknownError);
    }
  }
}

async #resolvePrompt(
  message: acp.ContentBlock[],
  abortSignal: AbortSignal,
```
87 packages/cli/src/commands/extensions/consent.ts (new file)
@@ -0,0 +1,87 @@
import type { ConfirmationRequest } from '../../ui/types.js';

/**
 * Requests consent from the user to perform an action, by reading a Y/n
 * character from stdin.
 *
 * This should not be called from interactive mode as it will break the CLI.
 *
 * @param consentDescription The description of the thing they will be consenting to.
 * @returns boolean, whether they consented or not.
 */
export async function requestConsentNonInteractive(
  consentDescription: string,
): Promise<boolean> {
  console.info(consentDescription);
  const result = await promptForConsentNonInteractive(
    'Do you want to continue? [Y/n]: ',
  );
  return result;
}

/**
 * Requests consent from the user to perform an action, in interactive mode.
 *
 * This should not be called from non-interactive mode as it will not work.
 *
 * @param consentDescription The description of the thing they will be consenting to.
 * @param addExtensionUpdateConfirmationRequest A function to actually add a prompt to the UI.
 * @returns boolean, whether they consented or not.
 */
export async function requestConsentInteractive(
  consentDescription: string,
  addExtensionUpdateConfirmationRequest: (value: ConfirmationRequest) => void,
): Promise<boolean> {
  return promptForConsentInteractive(
    consentDescription + '\n\nDo you want to continue?',
    addExtensionUpdateConfirmationRequest,
  );
}

/**
 * Asks users a prompt and awaits for a y/n response on stdin.
 *
 * This should not be called from interactive mode as it will break the CLI.
 *
 * @param prompt A yes/no prompt to ask the user
 * @returns Whether or not the user answers 'y' (yes). Defaults to 'yes' on enter.
 */
async function promptForConsentNonInteractive(
  prompt: string,
): Promise<boolean> {
  const readline = await import('node:readline');
  const rl = readline.createInterface({
    input: process.stdin,
    output: process.stdout,
  });

  return new Promise((resolve) => {
    rl.question(prompt, (answer) => {
      rl.close();
      resolve(['y', ''].includes(answer.trim().toLowerCase()));
    });
  });
}

/**
 * Asks users an interactive yes/no prompt.
 *
 * This should not be called from non-interactive mode as it will break the CLI.
 *
 * @param prompt A markdown prompt to ask the user
 * @param addExtensionUpdateConfirmationRequest Function to update the UI state with the confirmation request.
 * @returns Whether or not the user answers yes.
 */
async function promptForConsentInteractive(
  prompt: string,
  addExtensionUpdateConfirmationRequest: (value: ConfirmationRequest) => void,
): Promise<boolean> {
  return new Promise<boolean>((resolve) => {
    addExtensionUpdateConfirmationRequest({
      prompt,
      onConfirm: (resolvedConfirmed) => {
        resolve(resolvedConfirmed);
      },
    });
  });
}
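For orientation, a minimal sketch of how a non-interactive extensions command might call this helper. The caller, its name, and the consent text are illustrative assumptions, not taken from this diff:

// Hypothetical caller: gate a destructive action behind the stdin Y/n prompt.
import { requestConsentNonInteractive } from './consent.js';

async function confirmAndRemove(name: string): Promise<void> {
  const ok = await requestConsentNonInteractive(
    `This will remove the extension "${name}" and its cached files.`,
  );
  if (!ok) {
    console.log('Aborted.');
    return;
  }
  // ...perform the removal here...
}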
@@ -5,21 +5,22 @@
 */

import { type CommandModule } from 'yargs';
import { disableExtension } from '../../config/extension.js';
import { SettingScope } from '../../config/settings.js';
import { getErrorMessage } from '../../utils/errors.js';
import { getExtensionManager } from './utils.js';

interface DisableArgs {
  name: string;
  scope?: string;
}

export function handleDisable(args: DisableArgs) {
export async function handleDisable(args: DisableArgs) {
  const extensionManager = await getExtensionManager();
  try {
    if (args.scope?.toLowerCase() === 'workspace') {
      disableExtension(args.name, SettingScope.Workspace);
      extensionManager.disableExtension(args.name, SettingScope.Workspace);
    } else {
      disableExtension(args.name, SettingScope.User);
      extensionManager.disableExtension(args.name, SettingScope.User);
    }
    console.log(
      `Extension "${args.name}" successfully disabled for scope "${args.scope}".`,
@@ -61,8 +62,8 @@ export const disableCommand: CommandModule = {
      }
      return true;
    }),
  handler: (argv) => {
    handleDisable({
  handler: async (argv) => {
    await handleDisable({
      name: argv['name'] as string,
      scope: argv['scope'] as string,
    });

@@ -6,20 +6,22 @@

import { type CommandModule } from 'yargs';
import { FatalConfigError, getErrorMessage } from '@qwen-code/qwen-code-core';
import { enableExtension } from '../../config/extension.js';
import { SettingScope } from '../../config/settings.js';
import { getExtensionManager } from './utils.js';

interface EnableArgs {
  name: string;
  scope?: string;
}

export function handleEnable(args: EnableArgs) {
export async function handleEnable(args: EnableArgs) {
  const extensionManager = await getExtensionManager();

  try {
    if (args.scope?.toLowerCase() === 'workspace') {
      enableExtension(args.name, SettingScope.Workspace);
      extensionManager.enableExtension(args.name, SettingScope.Workspace);
    } else {
      enableExtension(args.name, SettingScope.User);
      extensionManager.enableExtension(args.name, SettingScope.User);
    }
    if (args.scope) {
      console.log(
@@ -66,8 +68,8 @@ export const enableCommand: CommandModule = {
      }
      return true;
    }),
  handler: (argv) => {
    handleEnable({
  handler: async (argv) => {
    await handleEnable({
      name: argv['name'] as string,
      scope: argv['scope'] as string,
    });
@@ -5,58 +5,64 @@
 */

import type { CommandModule } from 'yargs';

import {
  installExtension,
  requestConsentNonInteractive,
} from '../../config/extension.js';
import type { ExtensionInstallMetadata } from '@qwen-code/qwen-code-core';
  ExtensionManager,
  parseInstallSource,
} from '@qwen-code/qwen-code-core';
import { getErrorMessage } from '../../utils/errors.js';
import { stat } from 'node:fs/promises';
import { isWorkspaceTrusted } from '../../config/trustedFolders.js';
import { loadSettings } from '../../config/settings.js';
import { requestConsentNonInteractive } from './consent.js';

interface InstallArgs {
  source: string;
  ref?: string;
  autoUpdate?: boolean;
  allowPreRelease?: boolean;
  consent?: boolean;
}

export async function handleInstall(args: InstallArgs) {
  try {
    let installMetadata: ExtensionInstallMetadata;
    const { source } = args;
    const installMetadata = await parseInstallSource(args.source);

    if (
      source.startsWith('http://') ||
      source.startsWith('https://') ||
      source.startsWith('git@') ||
      source.startsWith('sso://')
      installMetadata.type !== 'git' &&
      installMetadata.type !== 'github-release'
    ) {
      installMetadata = {
        source,
        type: 'git',
        ref: args.ref,
        autoUpdate: args.autoUpdate,
      };
    } else {
      if (args.ref || args.autoUpdate) {
        throw new Error(
          '--ref and --auto-update are not applicable for local extensions.',
          '--ref and --auto-update are not applicable for marketplace extensions.',
        );
      }
      try {
        await stat(source);
        installMetadata = {
          source,
          type: 'local',
        };
      } catch {
        throw new Error('Install source not found.');
      }
    }

    const name = await installExtension(
      installMetadata,
      requestConsentNonInteractive,
    const requestConsent = args.consent
      ? () => Promise.resolve(true)
      : requestConsentNonInteractive;
    const workspaceDir = process.cwd();
    const extensionManager = new ExtensionManager({
      workspaceDir,
      isWorkspaceTrusted: !!isWorkspaceTrusted(
        loadSettings(workspaceDir).merged,
      ),
      requestConsent,
    });
    await extensionManager.refreshCache();

    const extension = await extensionManager.installExtension(
      {
        ...installMetadata,
        ref: args.ref,
        autoUpdate: args.autoUpdate,
        allowPreRelease: args.allowPreRelease,
      },
      requestConsent,
    );
    console.log(
      `Extension "${extension.name}" installed successfully and enabled.`,
    );
    console.log(`Extension "${name}" installed successfully and enabled.`);
  } catch (error) {
    console.error(getErrorMessage(error));
    process.exit(1);
@@ -65,11 +71,13 @@ export async function handleInstall(args: InstallArgs) {

export const installCommand: CommandModule = {
  command: 'install <source>',
  describe: 'Installs an extension from a git repository URL or a local path.',
  describe:
    'Installs an extension from a git repository URL, local path, or claude marketplace (marketplace-url:plugin-name).',
  builder: (yargs) =>
    yargs
      .positional('source', {
        describe: 'The github URL or local path of the extension to install.',
        describe:
          'The github URL, local path, or marketplace source (marketplace-url:plugin-name) of the extension to install.',
        type: 'string',
        demandOption: true,
      })
@@ -81,6 +89,16 @@ export const installCommand: CommandModule = {
        describe: 'Enable auto-update for this extension.',
        type: 'boolean',
      })
      .option('pre-release', {
        describe: 'Enable pre-release versions for this extension.',
        type: 'boolean',
      })
      .option('consent', {
        describe:
          'Acknowledge the security risks of installing an extension and skip the confirmation prompt.',
        type: 'boolean',
        default: false,
      })
      .check((argv) => {
        if (!argv.source) {
          throw new Error('The source argument must be provided.');
@@ -92,6 +110,8 @@ export const installCommand: CommandModule = {
      source: argv['source'] as string,
      ref: argv['ref'] as string | undefined,
      autoUpdate: argv['auto-update'] as boolean | undefined,
      allowPreRelease: argv['pre-release'] as boolean | undefined,
      consent: argv['consent'] as boolean | undefined,
    });
  },
};
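As a rough sketch of how the new flags combine on the command line, assuming the CLI binary is invoked as `qwen` and the subcommand path is `extensions install` (neither is shown in this diff):

qwen extensions install https://github.com/acme/some-extension --auto-update --pre-release
qwen extensions install ./local-extension
qwen extensions install https://example.com/marketplace.json:plugin-name --consent

The `--consent` flag replaces the Y/n prompt with an always-true consent callback, which is what the `args.consent ? () => Promise.resolve(true) : requestConsentNonInteractive` branch above implements.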
@@ -5,13 +5,10 @@
 */

import type { CommandModule } from 'yargs';
import {
  installExtension,
  requestConsentNonInteractive,
} from '../../config/extension.js';
import type { ExtensionInstallMetadata } from '@qwen-code/qwen-code-core';

import { type ExtensionInstallMetadata } from '@qwen-code/qwen-code-core';
import { getErrorMessage } from '../../utils/errors.js';
import { requestConsentNonInteractive } from './consent.js';
import { getExtensionManager } from './utils.js';

interface InstallArgs {
  path: string;
@@ -23,12 +20,14 @@ export async function handleLink(args: InstallArgs) {
      source: args.path,
      type: 'link',
    };
    const extensionName = await installExtension(
    const extensionManager = await getExtensionManager();

    const extension = await extensionManager.installExtension(
      installMetadata,
      requestConsentNonInteractive,
    );
    console.log(
      `Extension "${extensionName}" linked successfully and enabled.`,
      `Extension "${extension.name}" linked successfully and enabled.`,
    );
  } catch (error) {
    console.error(getErrorMessage(error));

@@ -5,19 +5,23 @@
 */

import type { CommandModule } from 'yargs';
import { loadUserExtensions, toOutputString } from '../../config/extension.js';
import { getErrorMessage } from '../../utils/errors.js';
import { getExtensionManager } from './utils.js';

export async function handleList() {
  try {
    const extensions = loadUserExtensions();
    const extensionManager = await getExtensionManager();
    const extensions = extensionManager.getLoadedExtensions();

    if (extensions.length === 0) {
      console.log('No extensions installed.');
      return;
    }
    console.log(
      extensions
        .map((extension, _): string => toOutputString(extension, process.cwd()))
        .map((extension, _): string =>
          extensionManager.toOutputString(extension, process.cwd()),
        )
        .join('\n\n'),
    );
  } catch (error) {

@@ -5,8 +5,11 @@
 */

import type { CommandModule } from 'yargs';
import { uninstallExtension } from '../../config/extension.js';
import { getErrorMessage } from '../../utils/errors.js';
import { ExtensionManager } from '@qwen-code/qwen-code-core';
import { requestConsentNonInteractive } from './consent.js';
import { isWorkspaceTrusted } from '../../config/trustedFolders.js';
import { loadSettings } from '../../config/settings.js';

interface UninstallArgs {
  name: string; // can be extension name or source URL.
@@ -14,7 +17,16 @@ interface UninstallArgs {

export async function handleUninstall(args: UninstallArgs) {
  try {
    await uninstallExtension(args.name);
    const workspaceDir = process.cwd();
    const extensionManager = new ExtensionManager({
      workspaceDir,
      requestConsent: requestConsentNonInteractive,
      isWorkspaceTrusted: !!isWorkspaceTrusted(
        loadSettings(workspaceDir).merged,
      ),
    });
    await extensionManager.refreshCache();
    await extensionManager.uninstallExtension(args.name, false);
    console.log(`Extension "${args.name}" successfully uninstalled.`);
  } catch (error) {
    console.error(getErrorMessage(error));

@@ -5,22 +5,13 @@
 */

import type { CommandModule } from 'yargs';
import {
  loadExtensions,
  annotateActiveExtensions,
  ExtensionStorage,
  requestConsentNonInteractive,
} from '../../config/extension.js';
import {
  updateAllUpdatableExtensions,
  type ExtensionUpdateInfo,
  checkForAllExtensionUpdates,
  updateExtension,
} from '../../config/extensions/update.js';
import { checkForExtensionUpdate } from '../../config/extensions/github.js';
import { getErrorMessage } from '../../utils/errors.js';
import { ExtensionUpdateState } from '../../ui/state/extensions.js';
import { ExtensionEnablementManager } from '../../config/extensions/extensionEnablement.js';
import {
  checkForExtensionUpdate,
  type ExtensionUpdateInfo,
} from '@qwen-code/qwen-code-core';
import { getExtensionManager } from './utils.js';

interface UpdateArgs {
  name?: string;
@@ -31,19 +22,9 @@ const updateOutput = (info: ExtensionUpdateInfo) =>
  `Extension "${info.name}" successfully updated: ${info.originalVersion} → ${info.updatedVersion}.`;

export async function handleUpdate(args: UpdateArgs) {
  const workingDir = process.cwd();
  const extensionEnablementManager = new ExtensionEnablementManager(
    ExtensionStorage.getUserExtensionsDir(),
    // Force enable named extensions, otherwise we will only update the enabled
    // ones.
    args.name ? [args.name] : [],
  );
  const allExtensions = loadExtensions(extensionEnablementManager);
  const extensions = annotateActiveExtensions(
    allExtensions,
    workingDir,
    extensionEnablementManager,
  );
  const extensionManager = await getExtensionManager();
  const extensions = extensionManager.getLoadedExtensions();

  if (args.name) {
    try {
      const extension = extensions.find(
@@ -53,25 +34,23 @@ export async function handleUpdate(args: UpdateArgs) {
        console.log(`Extension "${args.name}" not found.`);
        return;
      }
      let updateState: ExtensionUpdateState | undefined;
      if (!extension.installMetadata) {
        console.log(
          `Unable to install extension "${args.name}" due to missing install metadata`,
        );
        return;
      }
      await checkForExtensionUpdate(extension, (newState) => {
        updateState = newState;
      });
      const updateState = await checkForExtensionUpdate(
        extension,
        extensionManager,
      );
      if (updateState !== ExtensionUpdateState.UPDATE_AVAILABLE) {
        console.log(`Extension "${args.name}" is already up to date.`);
        return;
      }
      // TODO(chrstnb): we should list extensions if the requested extension is not installed.
      const updatedExtensionInfo = (await updateExtension(
      const updatedExtensionInfo = (await extensionManager.updateExtension(
        extension,
        workingDir,
        requestConsentNonInteractive,
        updateState,
        () => {},
      ))!;
@@ -92,18 +71,15 @@ export async function handleUpdate(args: UpdateArgs) {
  if (args.all) {
    try {
      const extensionState = new Map();
      await checkForAllExtensionUpdates(extensions, (action) => {
        if (action.type === 'SET_STATE') {
          extensionState.set(action.payload.name, {
            status: action.payload.state,
      await extensionManager.checkForAllExtensionUpdates(
        (extensionName, state) => {
          extensionState.set(extensionName, {
            status: state,
            processed: true, // No need to process as we will force the update.
          });
        }
      });
      let updateInfos = await updateAllUpdatableExtensions(
        workingDir,
        requestConsentNonInteractive,
        extensions,
        },
      );
      let updateInfos = await extensionManager.updateAllUpdatableExtensions(
        extensionState,
        () => {},
      );
packages/cli/src/commands/extensions/utils.ts (new file, 21 lines)
@@ -0,0 +1,21 @@
/**
 * @license
 * Copyright 2025 Google LLC
 * SPDX-License-Identifier: Apache-2.0
 */

import { ExtensionManager } from '@qwen-code/qwen-code-core';
import { loadSettings } from '../../config/settings.js';
import { requestConsentNonInteractive } from './consent.js';
import { isWorkspaceTrusted } from '../../config/trustedFolders.js';

export async function getExtensionManager(): Promise<ExtensionManager> {
  const workspaceDir = process.cwd();
  const extensionManager = new ExtensionManager({
    workspaceDir,
    requestConsent: requestConsentNonInteractive,
    isWorkspaceTrusted: !!isWorkspaceTrusted(loadSettings(workspaceDir).merged),
  });
  await extensionManager.refreshCache();
  return extensionManager;
}
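As a rough illustration of the pattern the command handlers above follow, a new subcommand would obtain a manager once and then call into it. The handler name below is invented for illustration; the property access mirrors what the mcp.ts changes later in this diff use:

import { getExtensionManager } from './utils.js';

// Hypothetical handler: print the names of all loaded extensions.
export async function handleNames(): Promise<void> {
  const extensionManager = await getExtensionManager();
  for (const extension of extensionManager.getLoadedExtensions()) {
    console.log(extension.config.name);
  }
}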
@@ -7,7 +7,8 @@
import { vi, describe, it, expect, beforeEach, afterEach } from 'vitest';
import { listMcpServers } from './list.js';
import { loadSettings } from '../../config/settings.js';
import { ExtensionStorage, loadExtensions } from '../../config/extension.js';
import { loadExtensions } from '../../config/extension.js';
import { ExtensionStorage } from '../../config/extensions/storage.js';
import { createTransport } from '@qwen-code/qwen-code-core';
import { Client } from '@modelcontextprotocol/sdk/client/index.js';

@@ -8,10 +8,13 @@
import type { CommandModule } from 'yargs';
import { loadSettings } from '../../config/settings.js';
import type { MCPServerConfig } from '@qwen-code/qwen-code-core';
import { MCPServerStatus, createTransport } from '@qwen-code/qwen-code-core';
import {
  MCPServerStatus,
  createTransport,
  ExtensionManager,
} from '@qwen-code/qwen-code-core';
import { Client } from '@modelcontextprotocol/sdk/client/index.js';
import { ExtensionStorage, loadExtensions } from '../../config/extension.js';
import { ExtensionEnablementManager } from '../../config/extensions/extensionEnablement.js';
import { isWorkspaceTrusted } from '../../config/trustedFolders.js';

const COLOR_GREEN = '\u001b[32m';
const COLOR_YELLOW = '\u001b[33m';
@@ -22,22 +25,27 @@ async function getMcpServersFromConfig(): Promise<
  Record<string, MCPServerConfig>
> {
  const settings = loadSettings();
  const extensions = loadExtensions(
    new ExtensionEnablementManager(ExtensionStorage.getUserExtensionsDir()),
  );
  const extensionManager = new ExtensionManager({
    isWorkspaceTrusted: !!isWorkspaceTrusted(settings.merged),
    telemetrySettings: settings.merged.telemetry,
  });
  await extensionManager.refreshCache();
  const extensions = extensionManager.getLoadedExtensions();
  const mcpServers = { ...(settings.merged.mcpServers || {}) };
  for (const extension of extensions) {
    Object.entries(extension.config.mcpServers || {}).forEach(
      ([key, server]) => {
        if (mcpServers[key]) {
          return;
        }
        mcpServers[key] = {
          ...server,
          extensionName: extension.config.name,
        };
      },
    );
    if (extension.isActive) {
      Object.entries(extension.config.mcpServers || {}).forEach(
        ([key, server]) => {
          if (mcpServers[key]) {
            return;
          }
          mcpServers[key] = {
            ...server,
            extensionName: extension.config.name,
          };
        },
      );
    }
  }
  return mcpServers;
}
@@ -26,5 +26,37 @@ export function validateAuthMethod(authMethod: string): string | null {
    return null;
  }

  if (authMethod === AuthType.USE_ANTHROPIC) {
    const hasApiKey = process.env['ANTHROPIC_API_KEY'];
    if (!hasApiKey) {
      return 'ANTHROPIC_API_KEY environment variable not found.';
    }

    const hasBaseUrl = process.env['ANTHROPIC_BASE_URL'];
    if (!hasBaseUrl) {
      return 'ANTHROPIC_BASE_URL environment variable not found.';
    }

    return null;
  }

  if (authMethod === AuthType.USE_GEMINI) {
    const hasApiKey = process.env['GEMINI_API_KEY'];
    if (!hasApiKey) {
      return 'GEMINI_API_KEY environment variable not found. Please set it in your .env file or environment variables.';
    }
    return null;
  }

  if (authMethod === AuthType.USE_VERTEX_AI) {
    const hasApiKey = process.env['GOOGLE_API_KEY'];
    if (!hasApiKey) {
      return 'GOOGLE_API_KEY environment variable not found. Please set it in your .env file or environment variables.';
    }

    process.env['GOOGLE_GENAI_USE_VERTEXAI'] = 'true';
    return null;
  }

  return 'Invalid auth method selected.';
}
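A quick illustration of how the new Anthropic branch behaves; the call site is hypothetical, but the AuthType value and messages are the ones added above:

process.env['ANTHROPIC_API_KEY'] = 'sk-ant-...';
// ANTHROPIC_BASE_URL is still unset, so validation reports the missing variable.
console.log(validateAuthMethod(AuthType.USE_ANTHROPIC));
// -> 'ANTHROPIC_BASE_URL environment variable not found.'

process.env['ANTHROPIC_BASE_URL'] = 'https://api.anthropic.com';
console.log(validateAuthMethod(AuthType.USE_ANTHROPIC));
// -> null (both variables present, so the method is accepted)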
@@ -16,7 +16,8 @@ import {
} from '@qwen-code/qwen-code-core';
import { loadCliConfig, parseArguments, type CliArgs } from './config.js';
import type { Settings } from './settings.js';
import { ExtensionStorage, type Extension } from './extension.js';
import type { Extension } from './extension.js';
import { ExtensionStorage } from './extensions/storage.js';
import * as ServerConfig from '@qwen-code/qwen-code-core';
import { isWorkspaceTrusted } from './trustedFolders.js';
import { ExtensionEnablementManager } from './extensions/extensionEnablement.js';
@@ -2114,7 +2115,14 @@ describe('loadCliConfig model selection', () => {
  });

  it('always prefers model from argvs', async () => {
    process.argv = ['node', 'script.js', '--model', 'qwen3-coder-plus'];
    process.argv = [
      'node',
      'script.js',
      '--auth-type',
      'openai',
      '--model',
      'qwen3-coder-plus',
    ];
    const argv = await parseArguments({} as Settings);
    const config = await loadCliConfig(
      {
@@ -2134,7 +2142,14 @@ describe('loadCliConfig model selection', () => {
  });

  it('selects the model from argvs if provided', async () => {
    process.argv = ['node', 'script.js', '--model', 'qwen3-coder-plus'];
    process.argv = [
      'node',
      'script.js',
      '--auth-type',
      'openai',
      '--model',
      'qwen3-coder-plus',
    ];
    const argv = await parseArguments({} as Settings);
    const config = await loadCliConfig(
      {
@@ -9,7 +9,6 @@ import {
  AuthType,
  Config,
  DEFAULT_QWEN_EMBEDDING_MODEL,
  DEFAULT_MEMORY_FILE_FILTERING_OPTIONS,
  EditTool,
  FileDiscoveryService,
  getCurrentGeminiMdFilename,
@@ -25,7 +24,6 @@ import {
  SessionService,
  type ResumedSessionData,
  type FileFilteringOptions,
  type MCPServerConfig,
} from '@qwen-code/qwen-code-core';
import { extensionsCommand } from '../commands/extensions.js';
import type { Settings } from './settings.js';
@@ -37,14 +35,11 @@ import { homedir } from 'node:os';

import { resolvePath } from '../utils/resolvePath.js';
import { getCliVersion } from '../utils/version.js';
import type { Extension } from './extension.js';
import { annotateActiveExtensions } from './extension.js';
import { loadSandboxConfig } from './sandboxConfig.js';
import { appEvents } from '../utils/events.js';
import { mcpCommand } from '../commands/mcp.js';

import { isWorkspaceTrusted } from './trustedFolders.js';
import type { ExtensionEnablementManager } from './extensions/extensionEnablement.js';
import { buildWebSearchConfig } from './webSearch.js';

// Simple console logger for now - replace with actual logger if available
@@ -112,6 +107,7 @@ export interface CliArgs {
  allowedMcpServerNames: string[] | undefined;
  allowedTools: string[] | undefined;
  experimentalAcp: boolean | undefined;
  experimentalSkills: boolean | undefined;
  extensions: string[] | undefined;
  listExtensions: boolean | undefined;
  openaiLogging: boolean | undefined;
@@ -161,7 +157,7 @@ function normalizeOutputFormat(
  return OutputFormat.TEXT;
}

export async function parseArguments(settings: Settings): Promise<CliArgs> {
export async function parseArguments(): Promise<CliArgs> {
  const rawArgv = hideBin(process.argv);
  const yargsInstance = yargs(rawArgv)
    .locale('en')
@@ -307,6 +303,11 @@ export async function parseArguments(settings: Settings): Promise<CliArgs> {
      type: 'boolean',
      description: 'Starts the agent in ACP mode',
    })
    .option('experimental-skills', {
      type: 'boolean',
      description: 'Enable experimental Skills feature',
      default: false,
    })
    .option('channel', {
      type: 'string',
      choices: ['VSCode', 'ACP', 'SDK', 'CI'],
@@ -460,7 +461,13 @@ export async function parseArguments(settings: Settings): Promise<CliArgs> {
    })
    .option('auth-type', {
      type: 'string',
      choices: [AuthType.USE_OPENAI, AuthType.QWEN_OAUTH],
      choices: [
        AuthType.USE_OPENAI,
        AuthType.USE_ANTHROPIC,
        AuthType.QWEN_OAUTH,
        AuthType.USE_GEMINI,
        AuthType.USE_VERTEX_AI,
      ],
      description: 'Authentication type',
    })
    .deprecateOption(
@@ -525,11 +532,9 @@ export async function parseArguments(settings: Settings): Promise<CliArgs> {
      }),
    )
    // Register MCP subcommands
    .command(mcpCommand);

  if (settings?.experimental?.extensionManagement ?? true) {
    yargsInstance.command(extensionsCommand);
  }
    .command(mcpCommand)
    // Register Extension subcommands
    .command(extensionsCommand);

  yargsInstance
    .version(await getCliVersion()) // This will enable the --version flag based on package.json
@@ -593,11 +598,11 @@ export async function loadHierarchicalGeminiMemory(
  includeDirectoriesToReadGemini: readonly string[] = [],
  debugMode: boolean,
  fileService: FileDiscoveryService,
  settings: Settings,
  extensionContextFilePaths: string[] = [],
  folderTrust: boolean,
  memoryImportFormat: 'flat' | 'tree' = 'tree',
  fileFilteringOptions?: FileFilteringOptions,
  maxDirs: number = 200,
): Promise<{ memoryContent: string; fileCount: number }> {
  // FIX: Use real, canonical paths for a reliable comparison to handle symlinks.
  const realCwd = fs.realpathSync(path.resolve(currentWorkingDirectory));
@@ -624,7 +629,7 @@ export async function loadHierarchicalGeminiMemory(
    folderTrust,
    memoryImportFormat,
    fileFilteringOptions,
    settings.context?.discoveryMaxDirs,
    maxDirs,
  );
}

@@ -639,30 +644,17 @@ export function isDebugMode(argv: CliArgs): boolean {

export async function loadCliConfig(
  settings: Settings,
  extensions: Extension[],
  extensionEnablementManager: ExtensionEnablementManager,
  argv: CliArgs,
  cwd: string = process.cwd(),
  overrideExtensions?: string[],
): Promise<Config> {
  const debugMode = isDebugMode(argv);

  const memoryImportFormat = settings.context?.importFormat || 'tree';

  const ideMode = settings.ide?.enabled ?? false;

  const folderTrust = settings.security?.folderTrust?.enabled ?? false;
  const trustedFolder = isWorkspaceTrusted(settings)?.isTrusted ?? true;

  const allExtensions = annotateActiveExtensions(
    extensions,
    cwd,
    extensionEnablementManager,
  );

  const activeExtensions = extensions.filter(
    (_, i) => allExtensions[i].isActive,
  );

  // Set the context filename in the server's memoryTool module BEFORE loading memory
  // TODO(b/343434939): This is a bit of a hack. The contextFileName should ideally be passed
  // directly to the Config constructor in core, and have core handle setGeminiMdFilename.
@@ -674,51 +666,27 @@ export async function loadCliConfig(
    setServerGeminiMdFilename(getCurrentGeminiMdFilename());
  }

  const extensionContextFilePaths = activeExtensions.flatMap(
    (e) => e.contextFiles,
  );

  // Automatically load output-language.md if it exists
  const outputLanguageFilePath = path.join(
  let outputLanguageFilePath: string | undefined = path.join(
    Storage.getGlobalQwenDir(),
    'output-language.md',
  );
  if (fs.existsSync(outputLanguageFilePath)) {
    extensionContextFilePaths.push(outputLanguageFilePath);
    if (debugMode) {
      logger.debug(
        `Found output-language.md, adding to context files: ${outputLanguageFilePath}`,
      );
    }
  } else {
    outputLanguageFilePath = undefined;
  }

  const fileService = new FileDiscoveryService(cwd);

  const fileFiltering = {
    ...DEFAULT_MEMORY_FILE_FILTERING_OPTIONS,
    ...settings.context?.fileFiltering,
  };

  const includeDirectories = (settings.context?.includeDirectories || [])
    .map(resolvePath)
    .concat((argv.includeDirectories || []).map(resolvePath));

  // Call the (now wrapper) loadHierarchicalGeminiMemory which calls the server's version
  const { memoryContent, fileCount } = await loadHierarchicalGeminiMemory(
    cwd,
    settings.context?.loadMemoryFromIncludeDirectories
      ? includeDirectories
      : [],
    debugMode,
    fileService,
    settings,
    extensionContextFilePaths,
    trustedFolder,
    memoryImportFormat,
    fileFiltering,
  );

  let mcpServers = mergeMcpServers(settings, activeExtensions);
  const question = argv.promptInteractive || argv.prompt || '';
  const inputFormat: InputFormat =
    (argv.inputFormat as InputFormat | undefined) ?? InputFormat.TEXT;
@@ -832,44 +800,43 @@ export async function loadCliConfig(

  const excludeTools = mergeExcludeTools(
    settings,
    activeExtensions,
    extraExcludes.length > 0 ? extraExcludes : undefined,
    argv.excludeTools,
  );
  const blockedMcpServers: Array<{ name: string; extensionName: string }> = [];

  if (!argv.allowedMcpServerNames) {
    if (settings.mcp?.allowed) {
      mcpServers = allowedMcpServers(
        mcpServers,
        settings.mcp.allowed,
        blockedMcpServers,
      );
    }
  const allowedMcpServers = argv.allowedMcpServerNames
    ? new Set(argv.allowedMcpServerNames.filter(Boolean))
    : settings.mcp?.allowed
      ? new Set(settings.mcp.allowed.filter(Boolean))
      : undefined;
  const excludedMcpServers = settings.mcp?.excluded
    ? new Set(settings.mcp.excluded.filter(Boolean))
    : undefined;

    if (settings.mcp?.excluded) {
      const excludedNames = new Set(settings.mcp.excluded.filter(Boolean));
      if (excludedNames.size > 0) {
        mcpServers = Object.fromEntries(
          Object.entries(mcpServers).filter(([key]) => !excludedNames.has(key)),
        );
      }
    }
  }

  if (argv.allowedMcpServerNames) {
    mcpServers = allowedMcpServers(
      mcpServers,
      argv.allowedMcpServerNames,
      blockedMcpServers,
    );
  }
  const selectedAuthType =
    (argv.authType as AuthType | undefined) ||
    settings.security?.auth?.selectedType;

  const apiKey =
    (selectedAuthType === AuthType.USE_OPENAI
      ? argv.openaiApiKey ||
        process.env['OPENAI_API_KEY'] ||
        settings.security?.auth?.apiKey
      : '') || '';
  const baseUrl =
    (selectedAuthType === AuthType.USE_OPENAI
      ? argv.openaiBaseUrl ||
        process.env['OPENAI_BASE_URL'] ||
        settings.security?.auth?.baseUrl
      : '') || '';
  const resolvedModel =
    argv.model ||
    process.env['OPENAI_MODEL'] ||
    process.env['QWEN_MODEL'] ||
    settings.model?.name;
    (selectedAuthType === AuthType.USE_OPENAI
      ? process.env['OPENAI_MODEL'] ||
        process.env['QWEN_MODEL'] ||
        settings.model?.name
      : '') ||
    '';

  const sandboxConfig = await loadSandboxConfig(settings, argv);
  const screenReader =
@@ -912,6 +879,8 @@ export async function loadCliConfig(
    includeDirectories,
    loadMemoryFromIncludeDirectories:
      settings.context?.loadMemoryFromIncludeDirectories || false,
    importFormat: settings.context?.importFormat || 'tree',
    discoveryMaxDirs: settings.context?.discoveryMaxDirs || 200,
    debugMode,
    question,
    fullContext: argv.allFiles || false,
@@ -921,9 +890,13 @@ export async function loadCliConfig(
    toolDiscoveryCommand: settings.tools?.discoveryCommand,
    toolCallCommand: settings.tools?.callCommand,
    mcpServerCommand: settings.mcp?.serverCommand,
    mcpServers,
    userMemory: memoryContent,
    geminiMdFileCount: fileCount,
    mcpServers: settings.mcpServers || {},
    allowedMcpServers: allowedMcpServers
      ? Array.from(allowedMcpServers)
      : undefined,
    excludedMcpServers: excludedMcpServers
      ? Array.from(excludedMcpServers)
      : undefined,
    approvalMode,
    showMemoryUsage:
      argv.showMemoryUsage || settings.ui?.showMemoryUsage || false,
@@ -946,32 +919,24 @@ export async function loadCliConfig(
    fileDiscoveryService: fileService,
    bugCommand: settings.advanced?.bugCommand,
    model: resolvedModel,
    extensionContextFilePaths,
    outputLanguageFilePath,
    sessionTokenLimit: settings.model?.sessionTokenLimit ?? -1,
    maxSessionTurns:
      argv.maxSessionTurns ?? settings.model?.maxSessionTurns ?? -1,
    experimentalZedIntegration: argv.experimentalAcp || false,
    experimentalSkills: argv.experimentalSkills || false,
    listExtensions: argv.listExtensions || false,
    extensions: allExtensions,
    blockedMcpServers,
    overrideExtensions: overrideExtensions || argv.extensions,
    noBrowser: !!process.env['NO_BROWSER'],
    authType:
      (argv.authType as AuthType | undefined) ||
      settings.security?.auth?.selectedType,
    authType: selectedAuthType,
    inputFormat,
    outputFormat,
    includePartialMessages,
    generationConfig: {
      ...(settings.model?.generationConfig || {}),
      model: resolvedModel,
      apiKey:
        argv.openaiApiKey ||
        process.env['OPENAI_API_KEY'] ||
        settings.security?.auth?.apiKey,
      baseUrl:
        argv.openaiBaseUrl ||
        process.env['OPENAI_BASE_URL'] ||
        settings.security?.auth?.baseUrl,
      apiKey,
      baseUrl,
      enableOpenAILogging:
        (typeof argv.openaiLogging === 'undefined'
          ? settings.model?.enableOpenAILogging
@@ -1016,61 +981,8 @@ export async function loadCliConfig(
  });
}

function allowedMcpServers(
  mcpServers: { [x: string]: MCPServerConfig },
  allowMCPServers: string[],
  blockedMcpServers: Array<{ name: string; extensionName: string }>,
) {
  const allowedNames = new Set(allowMCPServers.filter(Boolean));
  if (allowedNames.size > 0) {
    mcpServers = Object.fromEntries(
      Object.entries(mcpServers).filter(([key, server]) => {
        const isAllowed = allowedNames.has(key);
        if (!isAllowed) {
          blockedMcpServers.push({
            name: key,
            extensionName: server.extensionName || '',
          });
        }
        return isAllowed;
      }),
    );
  } else {
    blockedMcpServers.push(
      ...Object.entries(mcpServers).map(([key, server]) => ({
        name: key,
        extensionName: server.extensionName || '',
      })),
    );
    mcpServers = {};
  }
  return mcpServers;
}

function mergeMcpServers(settings: Settings, extensions: Extension[]) {
  const mcpServers = { ...(settings.mcpServers || {}) };
  for (const extension of extensions) {
    Object.entries(extension.config.mcpServers || {}).forEach(
      ([key, server]) => {
        if (mcpServers[key]) {
          logger.warn(
            `Skipping extension MCP config for server with key "${key}" as it already exists.`,
          );
          return;
        }
        mcpServers[key] = {
          ...server,
          extensionName: extension.config.name,
        };
      },
    );
  }
  return mcpServers;
}

function mergeExcludeTools(
  settings: Settings,
  extensions: Extension[],
  extraExcludes?: string[] | undefined,
  cliExcludeTools?: string[] | undefined,
): string[] {
@@ -1079,10 +991,5 @@ function mergeExcludeTools(
    ...(settings.tools?.exclude || []),
    ...(extraExcludes || []),
  ]);
  for (const extension of extensions) {
    for (const tool of extension.config.excludeTools || []) {
      allExcludeTools.add(tool);
    }
  }
  return [...allExcludeTools];
}
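To make the new MCP server filtering shape concrete, here is a hypothetical settings fragment and the sets it would produce; the server names are invented for illustration, and the pass-through to Config is the behavior shown in the loadCliConfig changes above:

// settings.mcp in settings.json (hypothetical values):
//   { "allowed": ["github", "filesystem"], "excluded": ["scratch"] }
const allowedMcpServers = new Set(['github', 'filesystem'].filter(Boolean));
const excludedMcpServers = new Set(['scratch'].filter(Boolean));
// loadCliConfig now forwards these as arrays instead of filtering mcpServers itself:
//   allowedMcpServers: Array.from(allowedMcpServers)   -> ['github', 'filesystem']
//   excludedMcpServers: Array.from(excludedMcpServers) -> ['scratch']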
File diff suppressed because it is too large
@@ -1,786 +0,0 @@
|
||||
/**
|
||||
* @license
|
||||
* Copyright 2025 Google LLC
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
import type {
|
||||
MCPServerConfig,
|
||||
GeminiCLIExtension,
|
||||
ExtensionInstallMetadata,
|
||||
} from '@qwen-code/qwen-code-core';
|
||||
import {
|
||||
QWEN_DIR,
|
||||
Storage,
|
||||
Config,
|
||||
ExtensionInstallEvent,
|
||||
ExtensionUninstallEvent,
|
||||
ExtensionDisableEvent,
|
||||
ExtensionEnableEvent,
|
||||
logExtensionEnable,
|
||||
logExtensionInstallEvent,
|
||||
logExtensionUninstall,
|
||||
logExtensionDisable,
|
||||
} from '@qwen-code/qwen-code-core';
|
||||
import * as fs from 'node:fs';
|
||||
import * as path from 'node:path';
|
||||
import * as os from 'node:os';
|
||||
import { SettingScope, loadSettings } from '../config/settings.js';
|
||||
import { getErrorMessage } from '../utils/errors.js';
|
||||
import { recursivelyHydrateStrings } from './extensions/variables.js';
|
||||
import { isWorkspaceTrusted } from './trustedFolders.js';
|
||||
import { resolveEnvVarsInObject } from '../utils/envVarResolver.js';
|
||||
import {
|
||||
cloneFromGit,
|
||||
downloadFromGitHubRelease,
|
||||
} from './extensions/github.js';
|
||||
import type { LoadExtensionContext } from './extensions/variableSchema.js';
|
||||
import { ExtensionEnablementManager } from './extensions/extensionEnablement.js';
|
||||
import chalk from 'chalk';
|
||||
import type { ConfirmationRequest } from '../ui/types.js';
|
||||
|
||||
export const EXTENSIONS_DIRECTORY_NAME = path.join(QWEN_DIR, 'extensions');
|
||||
|
||||
export const EXTENSIONS_CONFIG_FILENAME = 'qwen-extension.json';
|
||||
export const INSTALL_METADATA_FILENAME = '.qwen-extension-install.json';
|
||||
|
||||
export interface Extension {
|
||||
path: string;
|
||||
config: ExtensionConfig;
|
||||
contextFiles: string[];
|
||||
installMetadata?: ExtensionInstallMetadata | undefined;
|
||||
}
|
||||
|
||||
export interface ExtensionConfig {
|
||||
name: string;
|
||||
version: string;
|
||||
mcpServers?: Record<string, MCPServerConfig>;
|
||||
contextFileName?: string | string[];
|
||||
excludeTools?: string[];
|
||||
}
|
||||
|
||||
export interface ExtensionUpdateInfo {
|
||||
name: string;
|
||||
originalVersion: string;
|
||||
updatedVersion: string;
|
||||
}
|
||||
|
||||
export class ExtensionStorage {
|
||||
private readonly extensionName: string;
|
||||
|
||||
constructor(extensionName: string) {
|
||||
this.extensionName = extensionName;
|
||||
}
|
||||
|
||||
getExtensionDir(): string {
|
||||
return path.join(
|
||||
ExtensionStorage.getUserExtensionsDir(),
|
||||
this.extensionName,
|
||||
);
|
||||
}
|
||||
|
||||
getConfigPath(): string {
|
||||
return path.join(this.getExtensionDir(), EXTENSIONS_CONFIG_FILENAME);
|
||||
}
|
||||
|
||||
static getUserExtensionsDir(): string {
|
||||
const storage = new Storage(os.homedir());
|
||||
return storage.getExtensionsDir();
|
||||
}
|
||||
|
||||
static async createTmpDir(): Promise<string> {
|
||||
return await fs.promises.mkdtemp(path.join(os.tmpdir(), 'qwen-extension'));
|
||||
}
|
||||
}
|
||||
|
||||
export function getWorkspaceExtensions(workspaceDir: string): Extension[] {
|
||||
// If the workspace dir is the user extensions dir, there are no workspace extensions.
|
||||
if (path.resolve(workspaceDir) === path.resolve(os.homedir())) {
|
||||
return [];
|
||||
}
|
||||
return loadExtensionsFromDir(workspaceDir);
|
||||
}
|
||||
|
||||
export async function copyExtension(
|
||||
source: string,
|
||||
destination: string,
|
||||
): Promise<void> {
|
||||
await fs.promises.cp(source, destination, { recursive: true });
|
||||
}
|
||||
|
||||
export async function performWorkspaceExtensionMigration(
|
||||
extensions: Extension[],
|
||||
requestConsent: (consent: string) => Promise<boolean>,
|
||||
): Promise<string[]> {
|
||||
const failedInstallNames: string[] = [];
|
||||
|
||||
for (const extension of extensions) {
|
||||
try {
|
||||
const installMetadata: ExtensionInstallMetadata = {
|
||||
source: extension.path,
|
||||
type: 'local',
|
||||
};
|
||||
await installExtension(installMetadata, requestConsent);
|
||||
} catch (_) {
|
||||
failedInstallNames.push(extension.config.name);
|
||||
}
|
||||
}
|
||||
return failedInstallNames;
|
||||
}
|
||||
|
||||
function getTelemetryConfig(cwd: string) {
|
||||
const settings = loadSettings(cwd);
|
||||
const config = new Config({
|
||||
telemetry: settings.merged.telemetry,
|
||||
interactive: false,
|
||||
targetDir: cwd,
|
||||
cwd,
|
||||
model: '',
|
||||
debugMode: false,
|
||||
});
|
||||
return config;
|
||||
}
|
||||
|
||||
export function loadExtensions(
|
||||
extensionEnablementManager: ExtensionEnablementManager,
|
||||
workspaceDir: string = process.cwd(),
|
||||
): Extension[] {
|
||||
const settings = loadSettings(workspaceDir).merged;
|
||||
const allExtensions = [...loadUserExtensions()];
|
||||
|
||||
if (
|
||||
(isWorkspaceTrusted(settings) ?? true) &&
|
||||
// Default management setting to true
|
||||
!(settings.experimental?.extensionManagement ?? true)
|
||||
) {
|
||||
allExtensions.push(...getWorkspaceExtensions(workspaceDir));
|
||||
}
|
||||
|
||||
const uniqueExtensions = new Map<string, Extension>();
|
||||
|
||||
for (const extension of allExtensions) {
|
||||
if (
|
||||
!uniqueExtensions.has(extension.config.name) &&
|
||||
extensionEnablementManager.isEnabled(extension.config.name, workspaceDir)
|
||||
) {
|
||||
uniqueExtensions.set(extension.config.name, extension);
|
||||
}
|
||||
}
|
||||
|
||||
return Array.from(uniqueExtensions.values());
|
||||
}
|
||||
|
||||
export function loadUserExtensions(): Extension[] {
|
||||
const userExtensions = loadExtensionsFromDir(os.homedir());
|
||||
|
||||
const uniqueExtensions = new Map<string, Extension>();
|
||||
for (const extension of userExtensions) {
|
||||
if (!uniqueExtensions.has(extension.config.name)) {
|
||||
uniqueExtensions.set(extension.config.name, extension);
|
||||
}
|
||||
}
|
||||
|
||||
return Array.from(uniqueExtensions.values());
|
||||
}
|
||||
|
||||
export function loadExtensionsFromDir(dir: string): Extension[] {
|
||||
const storage = new Storage(dir);
|
||||
const extensionsDir = storage.getExtensionsDir();
|
||||
if (!fs.existsSync(extensionsDir)) {
|
||||
return [];
|
||||
}
|
||||
|
||||
const extensions: Extension[] = [];
|
||||
for (const subdir of fs.readdirSync(extensionsDir)) {
|
||||
const extensionDir = path.join(extensionsDir, subdir);
|
||||
|
||||
const extension = loadExtension({ extensionDir, workspaceDir: dir });
|
||||
if (extension != null) {
|
||||
extensions.push(extension);
|
||||
}
|
||||
}
|
||||
return extensions;
|
||||
}
|
||||
|
||||
export function loadExtension(context: LoadExtensionContext): Extension | null {
|
||||
const { extensionDir, workspaceDir } = context;
|
||||
if (!fs.statSync(extensionDir).isDirectory()) {
|
||||
return null;
|
||||
}
|
||||
|
||||
const installMetadata = loadInstallMetadata(extensionDir);
|
||||
let effectiveExtensionPath = extensionDir;
|
||||
|
||||
if (installMetadata?.type === 'link') {
|
||||
effectiveExtensionPath = installMetadata.source;
|
||||
}
|
||||
|
||||
try {
|
||||
let config = loadExtensionConfig({
|
||||
extensionDir: effectiveExtensionPath,
|
||||
workspaceDir,
|
||||
});
|
||||
|
||||
config = resolveEnvVarsInObject(config);
|
||||
|
||||
if (config.mcpServers) {
|
||||
config.mcpServers = Object.fromEntries(
|
||||
Object.entries(config.mcpServers).map(([key, value]) => [
|
||||
key,
|
||||
filterMcpConfig(value),
|
||||
]),
|
||||
);
|
||||
}
|
||||
|
||||
const contextFiles = getContextFileNames(config)
|
||||
.map((contextFileName) =>
|
||||
path.join(effectiveExtensionPath, contextFileName),
|
||||
)
|
||||
.filter((contextFilePath) => fs.existsSync(contextFilePath));
|
||||
|
||||
return {
|
||||
path: effectiveExtensionPath,
|
||||
config,
|
||||
contextFiles,
|
||||
installMetadata,
|
||||
};
|
||||
} catch (e) {
|
||||
console.error(
|
||||
`Warning: Skipping extension in ${effectiveExtensionPath}: ${getErrorMessage(
|
||||
e,
|
||||
)}`,
|
||||
);
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
export function loadExtensionByName(
|
||||
name: string,
|
||||
workspaceDir: string = process.cwd(),
|
||||
): Extension | null {
|
||||
const userExtensionsDir = ExtensionStorage.getUserExtensionsDir();
|
||||
if (!fs.existsSync(userExtensionsDir)) {
|
||||
return null;
|
||||
}
|
||||
|
||||
for (const subdir of fs.readdirSync(userExtensionsDir)) {
|
||||
const extensionDir = path.join(userExtensionsDir, subdir);
|
||||
if (!fs.statSync(extensionDir).isDirectory()) {
|
||||
continue;
|
||||
}
|
||||
const extension = loadExtension({ extensionDir, workspaceDir });
|
||||
if (
|
||||
extension &&
|
||||
extension.config.name.toLowerCase() === name.toLowerCase()
|
||||
) {
|
||||
return extension;
|
||||
}
|
||||
}
|
||||
|
||||
return null;
|
||||
}
|
||||
|
||||
function filterMcpConfig(original: MCPServerConfig): MCPServerConfig {
|
||||
// eslint-disable-next-line @typescript-eslint/no-unused-vars
|
||||
const { trust, ...rest } = original;
|
||||
return Object.freeze(rest);
|
||||
}
|
||||
|
||||
export function loadInstallMetadata(
|
||||
extensionDir: string,
|
||||
): ExtensionInstallMetadata | undefined {
|
||||
const metadataFilePath = path.join(extensionDir, INSTALL_METADATA_FILENAME);
|
||||
try {
|
||||
const configContent = fs.readFileSync(metadataFilePath, 'utf-8');
|
||||
const metadata = JSON.parse(configContent) as ExtensionInstallMetadata;
|
||||
return metadata;
|
||||
} catch (_e) {
|
||||
return undefined;
|
||||
}
|
||||
}
|
||||
|
||||
function getContextFileNames(config: ExtensionConfig): string[] {
|
||||
if (!config.contextFileName) {
|
||||
return ['QWEN.md'];
|
||||
} else if (!Array.isArray(config.contextFileName)) {
|
||||
return [config.contextFileName];
|
||||
}
|
||||
return config.contextFileName;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns an annotated list of extensions. If an extension is listed in enabledExtensionNames, it will be active.
|
||||
* If enabledExtensionNames is empty, an extension is active unless it is disabled.
|
||||
* @param extensions The base list of extensions.
|
||||
* @param enabledExtensionNames The names of explicitly enabled extensions.
|
||||
* @param workspaceDir The current workspace directory.
|
||||
*/
|
||||
export function annotateActiveExtensions(
|
||||
extensions: Extension[],
|
||||
workspaceDir: string,
|
||||
manager: ExtensionEnablementManager,
|
||||
): GeminiCLIExtension[] {
|
||||
manager.validateExtensionOverrides(extensions);
|
||||
return extensions.map((extension) => ({
|
||||
name: extension.config.name,
|
||||
version: extension.config.version,
|
||||
isActive: manager.isEnabled(extension.config.name, workspaceDir),
|
||||
path: extension.path,
|
||||
installMetadata: extension.installMetadata,
|
||||
}));
|
||||
}
|
||||
|
||||
/**
|
||||
* Requests consent from the user to perform an action, by reading a Y/n
|
||||
* character from stdin.
|
||||
*
|
||||
* This should not be called from interactive mode as it will break the CLI.
|
||||
*
|
||||
* @param consentDescription The description of the thing they will be consenting to.
|
||||
* @returns boolean, whether they consented or not.
|
||||
*/
|
||||
export async function requestConsentNonInteractive(
|
||||
consentDescription: string,
|
||||
): Promise<boolean> {
|
||||
console.info(consentDescription);
|
||||
const result = await promptForConsentNonInteractive(
|
||||
'Do you want to continue? [Y/n]: ',
|
||||
);
|
||||
return result;
|
||||
}
|
||||
|
||||
/**
|
||||
* Requests consent from the user to perform an action, in interactive mode.
|
||||
*
|
||||
* This should not be called from non-interactive mode as it will not work.
|
||||
*
|
||||
* @param consentDescription The description of the thing they will be consenting to.
|
||||
* @param setExtensionUpdateConfirmationRequest A function to actually add a prompt to the UI.
|
||||
* @returns boolean, whether they consented or not.
|
||||
*/
|
||||
export async function requestConsentInteractive(
|
||||
consentDescription: string,
|
||||
addExtensionUpdateConfirmationRequest: (value: ConfirmationRequest) => void,
|
||||
): Promise<boolean> {
|
||||
return await promptForConsentInteractive(
|
||||
consentDescription + '\n\nDo you want to continue?',
|
||||
addExtensionUpdateConfirmationRequest,
|
||||
);
|
||||
}
|
||||
|
||||
/**
|
||||
* Asks users a prompt and awaits for a y/n response on stdin.
|
||||
*
|
||||
* This should not be called from interactive mode as it will break the CLI.
|
||||
*
|
||||
* @param prompt A yes/no prompt to ask the user
|
||||
* @returns Whether or not the user answers 'y' (yes). Defaults to 'yes' on enter.
|
||||
*/
|
||||
async function promptForConsentNonInteractive(
|
||||
prompt: string,
|
||||
): Promise<boolean> {
|
||||
const readline = await import('node:readline');
|
||||
const rl = readline.createInterface({
|
||||
input: process.stdin,
|
||||
output: process.stdout,
|
||||
});
|
||||
|
||||
return new Promise((resolve) => {
|
||||
rl.question(prompt, (answer) => {
|
||||
rl.close();
|
||||
resolve(['y', ''].includes(answer.trim().toLowerCase()));
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Asks users an interactive yes/no prompt.
|
||||
*
|
||||
* This should not be called from non-interactive mode as it will break the CLI.
|
||||
*
|
||||
* @param prompt A markdown prompt to ask the user
|
||||
* @param setExtensionUpdateConfirmationRequest Function to update the UI state with the confirmation request.
|
||||
* @returns Whether or not the user answers yes.
|
||||
*/
|
||||
async function promptForConsentInteractive(
|
||||
prompt: string,
|
||||
addExtensionUpdateConfirmationRequest: (value: ConfirmationRequest) => void,
|
||||
): Promise<boolean> {
|
||||
return await new Promise<boolean>((resolve) => {
|
||||
addExtensionUpdateConfirmationRequest({
|
||||
prompt,
|
||||
onConfirm: (resolvedConfirmed) => {
|
||||
resolve(resolvedConfirmed);
|
||||
},
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
export async function installExtension(
|
||||
installMetadata: ExtensionInstallMetadata,
|
||||
requestConsent: (consent: string) => Promise<boolean>,
|
||||
cwd: string = process.cwd(),
|
||||
previousExtensionConfig?: ExtensionConfig,
|
||||
): Promise<string> {
|
||||
const telemetryConfig = getTelemetryConfig(cwd);
|
||||
let newExtensionConfig: ExtensionConfig | null = null;
|
||||
let localSourcePath: string | undefined;
|
||||
|
||||
try {
|
||||
const settings = loadSettings(cwd).merged;
|
||||
if (!isWorkspaceTrusted(settings)) {
|
||||
throw new Error(
|
||||
`Could not install extension from untrusted folder at ${installMetadata.source}`,
|
||||
);
|
||||
}
|
||||
|
||||
const extensionsDir = ExtensionStorage.getUserExtensionsDir();
|
||||
await fs.promises.mkdir(extensionsDir, { recursive: true });
|
||||
|
||||
if (
|
||||
!path.isAbsolute(installMetadata.source) &&
|
||||
(installMetadata.type === 'local' || installMetadata.type === 'link')
|
||||
) {
|
||||
installMetadata.source = path.resolve(cwd, installMetadata.source);
|
||||
}
|
||||
|
||||
let tempDir: string | undefined;
|
||||
|
||||
if (
|
||||
installMetadata.type === 'git' ||
|
||||
installMetadata.type === 'github-release'
|
||||
) {
|
||||
tempDir = await ExtensionStorage.createTmpDir();
|
||||
try {
|
||||
const result = await downloadFromGitHubRelease(
|
||||
installMetadata,
|
||||
tempDir,
|
||||
);
|
||||
installMetadata.type = result.type;
|
||||
installMetadata.releaseTag = result.tagName;
|
||||
} catch (_error) {
|
||||
await cloneFromGit(installMetadata, tempDir);
|
||||
installMetadata.type = 'git';
|
||||
}
|
||||
localSourcePath = tempDir;
|
||||
} else if (
|
||||
installMetadata.type === 'local' ||
|
||||
installMetadata.type === 'link'
|
||||
) {
|
||||
localSourcePath = installMetadata.source;
|
||||
} else {
|
||||
throw new Error(`Unsupported install type: ${installMetadata.type}`);
|
||||
}
|
||||
|
||||
try {
|
||||
newExtensionConfig = loadExtensionConfig({
|
||||
extensionDir: localSourcePath,
|
||||
workspaceDir: cwd,
|
||||
});
|
||||
|
||||
const newExtensionName = newExtensionConfig.name;
|
||||
const extensionStorage = new ExtensionStorage(newExtensionName);
|
||||
const destinationPath = extensionStorage.getExtensionDir();
|
||||
|
||||
const installedExtensions = loadUserExtensions();
|
||||
if (
|
||||
installedExtensions.some(
|
||||
(installed) => installed.config.name === newExtensionName,
|
||||
)
|
||||
) {
|
||||
throw new Error(
|
||||
`Extension "${newExtensionName}" is already installed. Please uninstall it first.`,
|
||||
);
|
||||
}
|
||||
await maybeRequestConsentOrFail(
|
||||
newExtensionConfig,
|
||||
requestConsent,
|
||||
previousExtensionConfig,
|
||||
);
|
||||
await fs.promises.mkdir(destinationPath, { recursive: true });
|
||||
|
||||
if (
|
||||
installMetadata.type === 'local' ||
|
||||
installMetadata.type === 'git' ||
|
||||
installMetadata.type === 'github-release'
|
||||
) {
|
||||
await copyExtension(localSourcePath, destinationPath);
|
||||
}
|
||||
|
||||
const metadataString = JSON.stringify(installMetadata, null, 2);
|
||||
const metadataPath = path.join(
|
||||
destinationPath,
|
||||
INSTALL_METADATA_FILENAME,
|
||||
);
|
||||
await fs.promises.writeFile(metadataPath, metadataString);
|
||||
} finally {
|
||||
if (tempDir) {
|
||||
await fs.promises.rm(tempDir, { recursive: true, force: true });
|
||||
}
|
||||
}
|
||||
|
||||
logExtensionInstallEvent(
|
||||
telemetryConfig,
|
||||
new ExtensionInstallEvent(
|
||||
newExtensionConfig!.name,
|
||||
newExtensionConfig!.version,
|
||||
installMetadata.source,
|
||||
'success',
|
||||
),
|
||||
);
|
||||
|
||||
enableExtension(newExtensionConfig!.name, SettingScope.User);
|
||||
return newExtensionConfig!.name;
|
||||
} catch (error) {
|
||||
// Attempt to load config from the source path even if installation fails
|
||||
// to get the name and version for logging.
|
||||
if (!newExtensionConfig && localSourcePath) {
|
||||
try {
|
||||
newExtensionConfig = loadExtensionConfig({
|
||||
extensionDir: localSourcePath,
|
||||
workspaceDir: cwd,
|
||||
});
|
||||
} catch {
|
||||
// Ignore error, this is just for logging.
|
||||
}
|
||||
}
|
||||
logExtensionInstallEvent(
|
||||
telemetryConfig,
|
||||
new ExtensionInstallEvent(
|
||||
newExtensionConfig?.name ?? '',
|
||||
newExtensionConfig?.version ?? '',
|
||||
installMetadata.source,
|
||||
'error',
|
||||
),
|
||||
);
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
|
||||

/**
 * Builds a consent string for installing an extension based on its
 * extensionConfig.
 */
function extensionConsentString(extensionConfig: ExtensionConfig): string {
  const output: string[] = [];
  const mcpServerEntries = Object.entries(extensionConfig.mcpServers || {});
  output.push(`Installing extension "${extensionConfig.name}".`);
  output.push(
    '**Extensions may introduce unexpected behavior. Ensure you have investigated the extension source and trust the author.**',
  );

  if (mcpServerEntries.length) {
    output.push('This extension will run the following MCP servers:');
    for (const [key, mcpServer] of mcpServerEntries) {
      const isLocal = !!mcpServer.command;
      const source =
        mcpServer.httpUrl ??
        `${mcpServer.command || ''}${mcpServer.args ? ' ' + mcpServer.args.join(' ') : ''}`;
      output.push(`  * ${key} (${isLocal ? 'local' : 'remote'}): ${source}`);
    }
  }
  if (extensionConfig.contextFileName) {
    output.push(
      `This extension will append info to your QWEN.md context using ${extensionConfig.contextFileName}`,
    );
  }
  if (extensionConfig.excludeTools) {
    output.push(
      `This extension will exclude the following core tools: ${extensionConfig.excludeTools}`,
    );
  }
  return output.join('\n');
}

/**
 * Requests consent from the user to install an extension (extensionConfig), if
 * there is any difference between the consent string for `extensionConfig` and
 * `previousExtensionConfig`.
 *
 * Always requests consent if previousExtensionConfig is null.
 *
 * Throws if the user does not consent.
 */
async function maybeRequestConsentOrFail(
  extensionConfig: ExtensionConfig,
  requestConsent: (consent: string) => Promise<boolean>,
  previousExtensionConfig?: ExtensionConfig,
) {
  const extensionConsent = extensionConsentString(extensionConfig);
  if (previousExtensionConfig) {
    const previousExtensionConsent = extensionConsentString(
      previousExtensionConfig,
    );
    if (previousExtensionConsent === extensionConsent) {
      return;
    }
  }
  if (!(await requestConsent(extensionConsent))) {
    throw new Error(`Installation cancelled for "${extensionConfig.name}".`);
  }
}
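
A minimal sketch of how a caller might supply the requestConsent callback (not part of the original change; promptYesNo is a hypothetical stand-in for whatever prompt the CLI actually uses). Note the prompt is skipped on update only when the regenerated consent string is identical to the one derived from the previously installed config.

const requestConsent = async (consent: string): Promise<boolean> => {
  console.log(consent); // shows MCP servers, context file, and excluded tools
  return promptYesNo('Install this extension?'); // hypothetical prompt helper
};
await maybeRequestConsentOrFail(newConfig, requestConsent, previousConfig);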

export function validateName(name: string) {
  if (!/^[a-zA-Z0-9-]+$/.test(name)) {
    throw new Error(
      `Invalid extension name: "${name}". Only letters (a-z, A-Z), numbers (0-9), and dashes (-) are allowed.`,
    );
  }
}

export function loadExtensionConfig(
  context: LoadExtensionContext,
): ExtensionConfig {
  const { extensionDir, workspaceDir } = context;
  const configFilePath = path.join(extensionDir, EXTENSIONS_CONFIG_FILENAME);
  if (!fs.existsSync(configFilePath)) {
    throw new Error(`Configuration file not found at ${configFilePath}`);
  }
  try {
    const configContent = fs.readFileSync(configFilePath, 'utf-8');
    const config = recursivelyHydrateStrings(JSON.parse(configContent), {
      extensionPath: extensionDir,
      workspacePath: workspaceDir,
      '/': path.sep,
      pathSeparator: path.sep,
    }) as unknown as ExtensionConfig;
    if (!config.name || !config.version) {
      throw new Error(
        `Invalid configuration in ${configFilePath}: missing ${!config.name ? '"name"' : '"version"'}`,
      );
    }
    validateName(config.name);
    return config;
  } catch (e) {
    throw new Error(
      `Failed to load extension config from ${configFilePath}: ${getErrorMessage(
        e,
      )}`,
    );
  }
}
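
For orientation, a sketch of the kind of parsed config loadExtensionConfig produces (field values are illustrative; the on-disk filename comes from EXTENSIONS_CONFIG_FILENAME). Placeholders such as ${extensionPath} are substituted by recursivelyHydrateStrings before validation.

// Hypothetical parsed extension config, after hydration:
const exampleConfig = {
  name: 'my-extension', // must match /^[a-zA-Z0-9-]+$/
  version: '1.0.0', // required alongside name
  mcpServers: {
    docs: { command: '${extensionPath}${/}server.js' }, // '${/}' becomes path.sep
  },
};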

export async function uninstallExtension(
  extensionIdentifier: string,
  cwd: string = process.cwd(),
): Promise<void> {
  const telemetryConfig = getTelemetryConfig(cwd);
  const installedExtensions = loadUserExtensions();
  const extensionName = installedExtensions.find(
    (installed) =>
      installed.config.name.toLowerCase() ===
        extensionIdentifier.toLowerCase() ||
      installed.installMetadata?.source.toLowerCase() ===
        extensionIdentifier.toLowerCase(),
  )?.config.name;
  if (!extensionName) {
    throw new Error(`Extension not found.`);
  }
  const manager = new ExtensionEnablementManager(
    ExtensionStorage.getUserExtensionsDir(),
    [extensionName],
  );
  manager.remove(extensionName);
  const storage = new ExtensionStorage(extensionName);

  await fs.promises.rm(storage.getExtensionDir(), {
    recursive: true,
    force: true,
  });
  logExtensionUninstall(
    telemetryConfig,
    new ExtensionUninstallEvent(extensionName, 'success'),
  );
}

export function toOutputString(
  extension: Extension,
  workspaceDir: string,
): string {
  const manager = new ExtensionEnablementManager(
    ExtensionStorage.getUserExtensionsDir(),
  );
  const userEnabled = manager.isEnabled(extension.config.name, os.homedir());
  const workspaceEnabled = manager.isEnabled(
    extension.config.name,
    workspaceDir,
  );

  const status = workspaceEnabled ? chalk.green('✓') : chalk.red('✗');
  let output = `${status} ${extension.config.name} (${extension.config.version})`;
  output += `\n Path: ${extension.path}`;
  if (extension.installMetadata) {
    output += `\n Source: ${extension.installMetadata.source} (Type: ${extension.installMetadata.type})`;
    if (extension.installMetadata.ref) {
      output += `\n Ref: ${extension.installMetadata.ref}`;
    }
    if (extension.installMetadata.releaseTag) {
      output += `\n Release tag: ${extension.installMetadata.releaseTag}`;
    }
  }
  output += `\n Enabled (User): ${userEnabled}`;
  output += `\n Enabled (Workspace): ${workspaceEnabled}`;
  if (extension.contextFiles.length > 0) {
    output += `\n Context files:`;
    extension.contextFiles.forEach((contextFile) => {
      output += `\n  ${contextFile}`;
    });
  }
  if (extension.config.mcpServers) {
    output += `\n MCP servers:`;
    Object.keys(extension.config.mcpServers).forEach((key) => {
      output += `\n  ${key}`;
    });
  }
  if (extension.config.excludeTools) {
    output += `\n Excluded tools:`;
    extension.config.excludeTools.forEach((tool) => {
      output += `\n  ${tool}`;
    });
  }
  return output;
}

export function disableExtension(
  name: string,
  scope: SettingScope,
  cwd: string = process.cwd(),
) {
  const config = getTelemetryConfig(cwd);
  if (scope === SettingScope.System || scope === SettingScope.SystemDefaults) {
    throw new Error('System and SystemDefaults scopes are not supported.');
  }
  const extension = loadExtensionByName(name, cwd);
  if (!extension) {
    throw new Error(`Extension with name ${name} does not exist.`);
  }

  const manager = new ExtensionEnablementManager(
    ExtensionStorage.getUserExtensionsDir(),
    [name],
  );
  const scopePath = scope === SettingScope.Workspace ? cwd : os.homedir();
  manager.disable(name, true, scopePath);
  logExtensionDisable(config, new ExtensionDisableEvent(name, scope));
}

export function enableExtension(
  name: string,
  scope: SettingScope,
  cwd: string = process.cwd(),
) {
  if (scope === SettingScope.System || scope === SettingScope.SystemDefaults) {
    throw new Error('System and SystemDefaults scopes are not supported.');
  }
  const extension = loadExtensionByName(name, cwd);
  if (!extension) {
    throw new Error(`Extension with name ${name} does not exist.`);
  }
  const manager = new ExtensionEnablementManager(
    ExtensionStorage.getUserExtensionsDir(),
  );
  const scopePath = scope === SettingScope.Workspace ? cwd : os.homedir();
  manager.enable(name, true, scopePath);
  const config = getTelemetryConfig(cwd);
  logExtensionEnable(config, new ExtensionEnableEvent(name, scope));
}
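
Sketch of how these two entry points compose (illustrative only; the workspace path is a made-up example). The scope decides whether the override is recorded against the home directory or the given workspace, and later overrides win because they are appended in order.

// Enable everywhere under the user's home, then opt one workspace back out.
enableExtension('my-extension', SettingScope.User);
disableExtension('my-extension', SettingScope.Workspace, '/repos/scratch');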
@@ -1,424 +0,0 @@
/**
 * @license
 * Copyright 2025 Google LLC
 * SPDX-License-Identifier: Apache-2.0
 */

import * as path from 'node:path';
import fs from 'node:fs';
import os from 'node:os';
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
import { ExtensionEnablementManager, Override } from './extensionEnablement.js';
import type { Extension } from '../extension.js';

// Helper to create a temporary directory for testing
function createTestDir() {
  const dirPath = fs.mkdtempSync(path.join(os.tmpdir(), 'gemini-test-'));
  return {
    path: dirPath,
    cleanup: () => fs.rmSync(dirPath, { recursive: true, force: true }),
  };
}

let testDir: { path: string; cleanup: () => void };
let configDir: string;
let manager: ExtensionEnablementManager;

describe('ExtensionEnablementManager', () => {
  beforeEach(() => {
    testDir = createTestDir();
    configDir = path.join(testDir.path, '.gemini');
    manager = new ExtensionEnablementManager(configDir);
  });

  afterEach(() => {
    testDir.cleanup();
    // Reset the singleton instance for test isolation
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    (ExtensionEnablementManager as any).instance = undefined;
  });

  describe('isEnabled', () => {
    it('should return true if extension is not configured', () => {
      expect(manager.isEnabled('ext-test', '/any/path')).toBe(true);
    });

    it('should return true if no overrides match', () => {
      manager.disable('ext-test', false, '/another/path');
      expect(manager.isEnabled('ext-test', '/any/path')).toBe(true);
    });

    it('should enable a path based on an override rule', () => {
      manager.disable('ext-test', true, '/');
      manager.enable('ext-test', true, '/home/user/projects/');
      expect(manager.isEnabled('ext-test', '/home/user/projects/my-app')).toBe(
        true,
      );
    });

    it('should disable a path based on a disable override rule', () => {
      manager.enable('ext-test', true, '/');
      manager.disable('ext-test', true, '/home/user/projects/');
      expect(manager.isEnabled('ext-test', '/home/user/projects/my-app')).toBe(
        false,
      );
    });

    it('should respect the last matching rule (enable wins)', () => {
      manager.disable('ext-test', true, '/home/user/projects/');
      manager.enable('ext-test', false, '/home/user/projects/my-app');
      expect(manager.isEnabled('ext-test', '/home/user/projects/my-app')).toBe(
        true,
      );
    });

    it('should respect the last matching rule (disable wins)', () => {
      manager.enable('ext-test', true, '/home/user/projects/');
      manager.disable('ext-test', false, '/home/user/projects/my-app');
      expect(manager.isEnabled('ext-test', '/home/user/projects/my-app')).toBe(
        false,
      );
    });

    it('should apply a non-subdir disable only to that exact path', () => {
      manager.enable('ext-test', true, '/home/user/projects');
      manager.disable('ext-test', false, '/home/user/projects/my-app');
      expect(manager.isEnabled('ext-test', '/home/user/projects/my-app')).toBe(
        false,
      );
      expect(
        manager.isEnabled('ext-test', '/home/user/projects/something-else'),
      ).toBe(true);
    });
  });

  describe('includeSubdirs', () => {
    it('should add a glob when enabling with includeSubdirs', () => {
      manager.enable('ext-test', true, '/path/to/dir');
      const config = manager.readConfig();
      expect(config['ext-test'].overrides).toContain('/path/to/dir/*');
    });

    it('should not add a glob when enabling without includeSubdirs', () => {
      manager.enable('ext-test', false, '/path/to/dir');
      const config = manager.readConfig();
      expect(config['ext-test'].overrides).toContain('/path/to/dir/');
      expect(config['ext-test'].overrides).not.toContain('/path/to/dir/*');
    });

    it('should add a glob when disabling with includeSubdirs', () => {
      manager.disable('ext-test', true, '/path/to/dir');
      const config = manager.readConfig();
      expect(config['ext-test'].overrides).toContain('!/path/to/dir/*');
    });

    it('should remove conflicting glob rule when enabling without subdirs', () => {
      manager.enable('ext-test', true, '/path/to/dir'); // Adds /path/to/dir*
      manager.enable('ext-test', false, '/path/to/dir'); // Should remove the glob
      const config = manager.readConfig();
      expect(config['ext-test'].overrides).toContain('/path/to/dir/');
      expect(config['ext-test'].overrides).not.toContain('/path/to/dir/*');
    });

    it('should remove conflicting non-glob rule when enabling with subdirs', () => {
      manager.enable('ext-test', false, '/path/to/dir'); // Adds /path/to/dir
      manager.enable('ext-test', true, '/path/to/dir'); // Should remove the non-glob
      const config = manager.readConfig();
      expect(config['ext-test'].overrides).toContain('/path/to/dir/*');
      expect(config['ext-test'].overrides).not.toContain('/path/to/dir/');
    });

    it('should remove conflicting rules when disabling', () => {
      manager.enable('ext-test', true, '/path/to/dir'); // enabled with glob
      manager.disable('ext-test', false, '/path/to/dir'); // disabled without
      const config = manager.readConfig();
      expect(config['ext-test'].overrides).toContain('!/path/to/dir/');
      expect(config['ext-test'].overrides).not.toContain('/path/to/dir/*');
    });

    it('should correctly evaluate isEnabled with subdirs', () => {
      manager.disable('ext-test', true, '/');
      manager.enable('ext-test', true, '/path/to/dir');
      expect(manager.isEnabled('ext-test', '/path/to/dir/')).toBe(true);
      expect(manager.isEnabled('ext-test', '/path/to/dir/sub/')).toBe(true);
      expect(manager.isEnabled('ext-test', '/path/to/another/')).toBe(false);
    });

    it('should correctly evaluate isEnabled without subdirs', () => {
      manager.disable('ext-test', true, '/*');
      manager.enable('ext-test', false, '/path/to/dir');
      expect(manager.isEnabled('ext-test', '/path/to/dir')).toBe(true);
      expect(manager.isEnabled('ext-test', '/path/to/dir/sub')).toBe(false);
    });
  });

  describe('pruning child rules', () => {
    it('should remove child rules when enabling a parent with subdirs', () => {
      // Pre-existing rules for children
      manager.enable('ext-test', false, '/path/to/dir/subdir1');
      manager.disable('ext-test', true, '/path/to/dir/subdir2');
      manager.enable('ext-test', false, '/path/to/another/dir');

      // Enable the parent directory
      manager.enable('ext-test', true, '/path/to/dir');

      const config = manager.readConfig();
      const overrides = config['ext-test'].overrides;

      // The new parent rule should be present
      expect(overrides).toContain(`/path/to/dir/*`);

      // Child rules should be removed
      expect(overrides).not.toContain('/path/to/dir/subdir1/');
      expect(overrides).not.toContain(`!/path/to/dir/subdir2/*`);

      // Unrelated rules should remain
      expect(overrides).toContain('/path/to/another/dir/');
    });

    it('should remove child rules when disabling a parent with subdirs', () => {
      // Pre-existing rules for children
      manager.enable('ext-test', false, '/path/to/dir/subdir1');
      manager.disable('ext-test', true, '/path/to/dir/subdir2');
      manager.enable('ext-test', false, '/path/to/another/dir');

      // Disable the parent directory
      manager.disable('ext-test', true, '/path/to/dir');

      const config = manager.readConfig();
      const overrides = config['ext-test'].overrides;

      // The new parent rule should be present
      expect(overrides).toContain(`!/path/to/dir/*`);

      // Child rules should be removed
      expect(overrides).not.toContain('/path/to/dir/subdir1/');
      expect(overrides).not.toContain(`!/path/to/dir/subdir2/*`);

      // Unrelated rules should remain
      expect(overrides).toContain('/path/to/another/dir/');
    });

    it('should not remove child rules if includeSubdirs is false', () => {
      manager.enable('ext-test', false, '/path/to/dir/subdir1');
      manager.enable('ext-test', false, '/path/to/dir'); // Not including subdirs

      const config = manager.readConfig();
      const overrides = config['ext-test'].overrides;

      expect(overrides).toContain('/path/to/dir/subdir1/');
      expect(overrides).toContain('/path/to/dir/');
    });
  });

  it('should enable a path based on an enable override', () => {
    manager.disable('ext-test', true, '/Users/chrstn');
    manager.enable('ext-test', true, '/Users/chrstn/gemini-cli');

    expect(manager.isEnabled('ext-test', '/Users/chrstn/gemini-cli')).toBe(
      true,
    );
  });

  it('should ignore subdirs', () => {
    manager.disable('ext-test', false, '/Users/chrstn');
    expect(manager.isEnabled('ext-test', '/Users/chrstn/gemini-cli')).toBe(
      true,
    );
  });

  describe('extension overrides (-e <name>)', () => {
    beforeEach(() => {
      manager = new ExtensionEnablementManager(configDir, ['ext-test']);
    });

    it('can enable extensions, case-insensitive', () => {
      manager.disable('ext-test', true, '/');
      expect(manager.isEnabled('ext-test', '/')).toBe(true);
      expect(manager.isEnabled('Ext-Test', '/')).toBe(true);
      // Double check that it would have been disabled otherwise
      expect(
        new ExtensionEnablementManager(configDir).isEnabled('ext-test', '/'),
      ).toBe(false);
    });

    it('disable all other extensions', () => {
      manager = new ExtensionEnablementManager(configDir, ['ext-test']);
      manager.enable('ext-test-2', true, '/');
      expect(manager.isEnabled('ext-test-2', '/')).toBe(false);
      // Double check that it would have been enabled otherwise
      expect(
        new ExtensionEnablementManager(configDir).isEnabled('ext-test-2', '/'),
      ).toBe(true);
    });

    it('none disables all extensions', () => {
      manager = new ExtensionEnablementManager(configDir, ['none']);
      manager.enable('ext-test', true, '/');
      expect(manager.isEnabled('ext-test', '/path/to/dir')).toBe(false);
      // Double check that it would have been enabled otherwise
      expect(
        new ExtensionEnablementManager(configDir).isEnabled('ext-test', '/'),
      ).toBe(true);
    });
  });

  describe('validateExtensionOverrides', () => {
    let consoleErrorSpy: ReturnType<typeof vi.spyOn>;

    beforeEach(() => {
      consoleErrorSpy = vi.spyOn(console, 'error').mockImplementation(() => {});
    });

    afterEach(() => {
      consoleErrorSpy.mockRestore();
    });

    it('should not log an error if enabledExtensionNamesOverride is empty', () => {
      const manager = new ExtensionEnablementManager(configDir, []);
      manager.validateExtensionOverrides([]);
      expect(consoleErrorSpy).not.toHaveBeenCalled();
    });

    it('should not log an error if all enabledExtensionNamesOverride are valid', () => {
      const manager = new ExtensionEnablementManager(configDir, [
        'ext-one',
        'ext-two',
      ]);
      const extensions = [
        { config: { name: 'ext-one' } },
        { config: { name: 'ext-two' } },
      ] as Extension[];
      manager.validateExtensionOverrides(extensions);
      expect(consoleErrorSpy).not.toHaveBeenCalled();
    });

    it('should log an error for each invalid extension name in enabledExtensionNamesOverride', () => {
      const manager = new ExtensionEnablementManager(configDir, [
        'ext-one',
        'ext-invalid',
        'ext-another-invalid',
      ]);
      const extensions = [
        { config: { name: 'ext-one' } },
        { config: { name: 'ext-two' } },
      ] as Extension[];
      manager.validateExtensionOverrides(extensions);
      expect(consoleErrorSpy).toHaveBeenCalledTimes(2);
      expect(consoleErrorSpy).toHaveBeenCalledWith(
        'Extension not found: ext-invalid',
      );
      expect(consoleErrorSpy).toHaveBeenCalledWith(
        'Extension not found: ext-another-invalid',
      );
    });

    it('should not log an error if "none" is in enabledExtensionNamesOverride', () => {
      const manager = new ExtensionEnablementManager(configDir, ['none']);
      manager.validateExtensionOverrides([]);
      expect(consoleErrorSpy).not.toHaveBeenCalled();
    });
  });
});

describe('Override', () => {
  it('should create an override from input', () => {
    const override = Override.fromInput('/path/to/dir', true);
    expect(override.baseRule).toBe(`/path/to/dir/`);
    expect(override.isDisable).toBe(false);
    expect(override.includeSubdirs).toBe(true);
  });

  it('should create a disable override from input', () => {
    const override = Override.fromInput('!/path/to/dir', false);
    expect(override.baseRule).toBe(`/path/to/dir/`);
    expect(override.isDisable).toBe(true);
    expect(override.includeSubdirs).toBe(false);
  });

  it('should create an override from a file rule', () => {
    const override = Override.fromFileRule('/path/to/dir');
    expect(override.baseRule).toBe('/path/to/dir');
    expect(override.isDisable).toBe(false);
    expect(override.includeSubdirs).toBe(false);
  });

  it('should create a disable override from a file rule', () => {
    const override = Override.fromFileRule('!/path/to/dir/');
    expect(override.isDisable).toBe(true);
    expect(override.baseRule).toBe('/path/to/dir/');
    expect(override.includeSubdirs).toBe(false);
  });

  it('should create an override with subdirs from a file rule', () => {
    const override = Override.fromFileRule('/path/to/dir/*');
    expect(override.baseRule).toBe('/path/to/dir/');
    expect(override.isDisable).toBe(false);
    expect(override.includeSubdirs).toBe(true);
  });

  it('should correctly identify conflicting overrides', () => {
    const override1 = Override.fromInput('/path/to/dir', true);
    const override2 = Override.fromInput('/path/to/dir', false);
    expect(override1.conflictsWith(override2)).toBe(true);
  });

  it('should correctly identify non-conflicting overrides', () => {
    const override1 = Override.fromInput('/path/to/dir', true);
    const override2 = Override.fromInput('/path/to/another/dir', true);
    expect(override1.conflictsWith(override2)).toBe(false);
  });

  it('should correctly identify equal overrides', () => {
    const override1 = Override.fromInput('/path/to/dir', true);
    const override2 = Override.fromInput('/path/to/dir', true);
    expect(override1.isEqualTo(override2)).toBe(true);
  });

  it('should correctly identify unequal overrides', () => {
    const override1 = Override.fromInput('/path/to/dir', true);
    const override2 = Override.fromInput('!/path/to/dir', true);
    expect(override1.isEqualTo(override2)).toBe(false);
  });

  it('should generate the correct regex', () => {
    const override = Override.fromInput('/path/to/dir', true);
    const regex = override.asRegex();
    expect(regex.test('/path/to/dir/')).toBe(true);
    expect(regex.test('/path/to/dir/subdir')).toBe(true);
    expect(regex.test('/path/to/another/dir')).toBe(false);
  });

  it('should correctly identify child overrides', () => {
    const parent = Override.fromInput('/path/to/dir', true);
    const child = Override.fromInput('/path/to/dir/subdir', false);
    expect(child.isChildOf(parent)).toBe(true);
  });

  it('should correctly identify child overrides with glob', () => {
    const parent = Override.fromInput('/path/to/dir/*', true);
    const child = Override.fromInput('/path/to/dir/subdir', false);
    expect(child.isChildOf(parent)).toBe(true);
  });

  it('should correctly identify non-child overrides', () => {
    const parent = Override.fromInput('/path/to/dir', true);
    const other = Override.fromInput('/path/to/another/dir', false);
    expect(other.isChildOf(parent)).toBe(false);
  });

  it('should generate the correct output string', () => {
    const override = Override.fromInput('/path/to/dir', true);
    expect(override.output()).toBe(`/path/to/dir/*`);
  });

  it('should generate the correct output string for a disable override', () => {
    const override = Override.fromInput('!/path/to/dir', false);
    expect(override.output()).toBe(`!/path/to/dir/`);
  });

  it('should disable a path based on a disable override rule', () => {
    const override = Override.fromInput('!/path/to/dir', false);
    expect(override.output()).toBe(`!/path/to/dir/`);
  });
});
@@ -1,239 +0,0 @@
/**
 * @license
 * Copyright 2025 Google LLC
 * SPDX-License-Identifier: Apache-2.0
 */

import fs from 'node:fs';
import path from 'node:path';
import { type Extension } from '../extension.js';

export interface ExtensionEnablementConfig {
  overrides: string[];
}

export interface AllExtensionsEnablementConfig {
  [extensionName: string]: ExtensionEnablementConfig;
}

export class Override {
  constructor(
    public baseRule: string,
    public isDisable: boolean,
    public includeSubdirs: boolean,
  ) {}

  static fromInput(inputRule: string, includeSubdirs: boolean): Override {
    const isDisable = inputRule.startsWith('!');
    let baseRule = isDisable ? inputRule.substring(1) : inputRule;
    baseRule = ensureLeadingAndTrailingSlash(baseRule);
    return new Override(baseRule, isDisable, includeSubdirs);
  }

  static fromFileRule(fileRule: string): Override {
    const isDisable = fileRule.startsWith('!');
    let baseRule = isDisable ? fileRule.substring(1) : fileRule;
    const includeSubdirs = baseRule.endsWith('*');
    baseRule = includeSubdirs
      ? baseRule.substring(0, baseRule.length - 1)
      : baseRule;
    return new Override(baseRule, isDisable, includeSubdirs);
  }

  conflictsWith(other: Override): boolean {
    if (this.baseRule === other.baseRule) {
      return (
        this.includeSubdirs !== other.includeSubdirs ||
        this.isDisable !== other.isDisable
      );
    }
    return false;
  }

  isEqualTo(other: Override): boolean {
    return (
      this.baseRule === other.baseRule &&
      this.includeSubdirs === other.includeSubdirs &&
      this.isDisable === other.isDisable
    );
  }

  asRegex(): RegExp {
    return globToRegex(`${this.baseRule}${this.includeSubdirs ? '*' : ''}`);
  }

  isChildOf(parent: Override) {
    if (!parent.includeSubdirs) {
      return false;
    }
    return parent.asRegex().test(this.baseRule);
  }

  output(): string {
    return `${this.isDisable ? '!' : ''}${this.baseRule}${this.includeSubdirs ? '*' : ''}`;
  }

  matchesPath(path: string) {
    return this.asRegex().test(path);
  }
}

const ensureLeadingAndTrailingSlash = function (dirPath: string): string {
  // Normalize separators to forward slashes for consistent matching across platforms.
  let result = dirPath.replace(/\\/g, '/');
  if (result.charAt(0) !== '/') {
    result = '/' + result;
  }
  if (result.charAt(result.length - 1) !== '/') {
    result = result + '/';
  }
  return result;
};

/**
 * Converts a glob pattern to a RegExp object.
 * This is a simplified implementation that supports `*`.
 *
 * @param glob The glob pattern to convert.
 * @returns A RegExp object.
 */
function globToRegex(glob: string): RegExp {
  const regexString = glob
    .replace(/[.+?^${}()|[\]\\]/g, '\\$&') // Escape special regex characters
    .replace(/(\/?)\*/g, '($1.*)?'); // Convert * to optional group

  return new RegExp(`^${regexString}$`);
}
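
To make the conversion concrete, two inputs and the patterns they produce, worked through the two replace calls above (shown as a sketch, since globToRegex is module-private):

globToRegex('/path/to/dir/'); // new RegExp('^/path/to/dir/$') — exact directory only
globToRegex('/path/to/dir/*'); // new RegExp('^/path/to/dir(/.*)?$') — directory and all subdirectories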

export class ExtensionEnablementManager {
  private configFilePath: string;
  private configDir: string;
  // If non-empty, this overrides all other extension configuration and enables
  // only the ones in this list.
  private enabledExtensionNamesOverride: string[];

  constructor(configDir: string, enabledExtensionNames?: string[]) {
    this.configDir = configDir;
    this.configFilePath = path.join(configDir, 'extension-enablement.json');
    this.enabledExtensionNamesOverride =
      enabledExtensionNames?.map((name) => name.toLowerCase()) ?? [];
  }

  validateExtensionOverrides(extensions: Extension[]) {
    for (const name of this.enabledExtensionNamesOverride) {
      if (name === 'none') continue;
      if (
        !extensions.some(
          (ext) => ext.config.name.toLowerCase() === name.toLowerCase(),
        )
      ) {
        console.error(`Extension not found: ${name}`);
      }
    }
  }

  /**
   * Determines if an extension is enabled based on its name and the current
   * path. The last matching rule in the overrides list wins.
   *
   * @param extensionName The name of the extension.
   * @param currentPath The absolute path of the current working directory.
   * @returns True if the extension is enabled, false otherwise.
   */
  isEnabled(extensionName: string, currentPath: string): boolean {
    // If we have a single override called 'none', this disables all extensions.
    // Typically, this comes from the user passing `-e none`.
    if (
      this.enabledExtensionNamesOverride.length === 1 &&
      this.enabledExtensionNamesOverride[0] === 'none'
    ) {
      return false;
    }

    // If we have explicit overrides, only enable those extensions.
    if (this.enabledExtensionNamesOverride.length > 0) {
      // When checking against overrides ONLY, we use a case insensitive match.
      // The override names are already lowercased in the constructor.
      return this.enabledExtensionNamesOverride.includes(
        extensionName.toLocaleLowerCase(),
      );
    }

    // Otherwise, we use the configuration settings
    const config = this.readConfig();
    const extensionConfig = config[extensionName];
    // Extensions are enabled by default.
    let enabled = true;
    const allOverrides = extensionConfig?.overrides ?? [];
    for (const rule of allOverrides) {
      const override = Override.fromFileRule(rule);
      if (override.matchesPath(ensureLeadingAndTrailingSlash(currentPath))) {
        enabled = !override.isDisable;
      }
    }
    return enabled;
  }
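
  // A small worked example of the last-match-wins evaluation above (paths are
  // illustrative, not taken from the diff):
  //   overrides for 'ext-test': ['!/*', '/home/user/projects/*']
  //   isEnabled('ext-test', '/home/user/projects/my-app')
  //     '!/*'                    matches -> enabled = false
  //     '/home/user/projects/*'  matches -> enabled = true   (last match wins)
  //   isEnabled('ext-test', '/tmp/other')
  //     only '!/*' matches               -> disabled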

  readConfig(): AllExtensionsEnablementConfig {
    try {
      const content = fs.readFileSync(this.configFilePath, 'utf-8');
      return JSON.parse(content);
    } catch (error) {
      if (
        error instanceof Error &&
        'code' in error &&
        error.code === 'ENOENT'
      ) {
        return {};
      }
      console.error('Error reading extension enablement config:', error);
      return {};
    }
  }

  writeConfig(config: AllExtensionsEnablementConfig): void {
    fs.mkdirSync(this.configDir, { recursive: true });
    fs.writeFileSync(this.configFilePath, JSON.stringify(config, null, 2));
  }

  enable(
    extensionName: string,
    includeSubdirs: boolean,
    scopePath: string,
  ): void {
    const config = this.readConfig();
    if (!config[extensionName]) {
      config[extensionName] = { overrides: [] };
    }
    const override = Override.fromInput(scopePath, includeSubdirs);
    const overrides = config[extensionName].overrides.filter((rule) => {
      const fileOverride = Override.fromFileRule(rule);
      if (
        fileOverride.conflictsWith(override) ||
        fileOverride.isEqualTo(override)
      ) {
        return false; // Remove conflicts and equivalent values.
      }
      return !fileOverride.isChildOf(override);
    });
    overrides.push(override.output());
    config[extensionName].overrides = overrides;
    this.writeConfig(config);
  }

  disable(
    extensionName: string,
    includeSubdirs: boolean,
    scopePath: string,
  ): void {
    this.enable(extensionName, includeSubdirs, `!${scopePath}`);
  }

  remove(extensionName: string): void {
    const config = this.readConfig();
    if (config[extensionName]) {
      delete config[extensionName];
      this.writeConfig(config);
    }
  }
}
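
For orientation, the shape that readConfig/writeConfig round-trip to <configDir>/extension-enablement.json, expressed with the exported interface (extension name and paths are illustrative):

const exampleEnablementConfig: AllExtensionsEnablementConfig = {
  'ext-test': { overrides: ['!/*', '/home/user/projects/*'] },
};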
@@ -1,457 +0,0 @@
/**
 * @license
 * Copyright 2025 Google LLC
 * SPDX-License-Identifier: Apache-2.0
 */

import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
import * as fs from 'node:fs';
import * as os from 'node:os';
import * as path from 'node:path';
import {
  EXTENSIONS_CONFIG_FILENAME,
  ExtensionStorage,
  INSTALL_METADATA_FILENAME,
  annotateActiveExtensions,
  loadExtension,
} from '../extension.js';
import { checkForAllExtensionUpdates, updateExtension } from './update.js';
import { QWEN_DIR } from '@qwen-code/qwen-code-core';
import { isWorkspaceTrusted } from '../trustedFolders.js';
import { ExtensionUpdateState } from '../../ui/state/extensions.js';
import { createExtension } from '../../test-utils/createExtension.js';
import { ExtensionEnablementManager } from './extensionEnablement.js';

const mockGit = {
  clone: vi.fn(),
  getRemotes: vi.fn(),
  fetch: vi.fn(),
  checkout: vi.fn(),
  listRemote: vi.fn(),
  revparse: vi.fn(),
  // Not a part of the actual API, but we need to use this to do the correct
  // file system interactions.
  path: vi.fn(),
};

vi.mock('simple-git', () => ({
  simpleGit: vi.fn((path: string) => {
    mockGit.path.mockReturnValue(path);
    return mockGit;
  }),
}));

vi.mock('os', async (importOriginal) => {
  const mockedOs = await importOriginal<typeof os>();
  return {
    ...mockedOs,
    homedir: vi.fn(),
  };
});

vi.mock('../trustedFolders.js', async (importOriginal) => {
  const actual = await importOriginal<typeof import('../trustedFolders.js')>();
  return {
    ...actual,
    isWorkspaceTrusted: vi.fn(),
  };
});

const mockLogExtensionInstallEvent = vi.hoisted(() => vi.fn());
const mockLogExtensionUninstall = vi.hoisted(() => vi.fn());

vi.mock('@qwen-code/qwen-code-core', async (importOriginal) => {
  const actual =
    await importOriginal<typeof import('@qwen-code/qwen-code-core')>();
  return {
    ...actual,
    logExtensionInstallEvent: mockLogExtensionInstallEvent,
    logExtensionUninstall: mockLogExtensionUninstall,
    ExtensionInstallEvent: vi.fn(),
    ExtensionUninstallEvent: vi.fn(),
  };
});

describe('update tests', () => {
  let tempHomeDir: string;
  let tempWorkspaceDir: string;
  let userExtensionsDir: string;

  beforeEach(() => {
    tempHomeDir = fs.mkdtempSync(
      path.join(os.tmpdir(), 'qwen-code-test-home-'),
    );
    tempWorkspaceDir = fs.mkdtempSync(
      path.join(tempHomeDir, 'qwen-code-test-workspace-'),
    );
    vi.mocked(os.homedir).mockReturnValue(tempHomeDir);
    userExtensionsDir = path.join(tempHomeDir, QWEN_DIR, 'extensions');
    // Clean up before each test
    fs.rmSync(userExtensionsDir, { recursive: true, force: true });
    fs.mkdirSync(userExtensionsDir, { recursive: true });
    vi.mocked(isWorkspaceTrusted).mockReturnValue({
      isTrusted: true,
      source: 'file',
    });
    vi.spyOn(process, 'cwd').mockReturnValue(tempWorkspaceDir);
    Object.values(mockGit).forEach((fn) => fn.mockReset());
  });

  afterEach(() => {
    fs.rmSync(tempHomeDir, { recursive: true, force: true });
    fs.rmSync(tempWorkspaceDir, { recursive: true, force: true });
  });

  describe('updateExtension', () => {
    it('should update a git-installed extension', async () => {
      const gitUrl = 'https://github.com/google/gemini-extensions.git';
      const extensionName = 'qwen-extensions';
      const targetExtDir = path.join(userExtensionsDir, extensionName);
      const metadataPath = path.join(targetExtDir, INSTALL_METADATA_FILENAME);

      fs.mkdirSync(targetExtDir, { recursive: true });
      fs.writeFileSync(
        path.join(targetExtDir, EXTENSIONS_CONFIG_FILENAME),
        JSON.stringify({ name: extensionName, version: '1.0.0' }),
      );
      fs.writeFileSync(
        metadataPath,
        JSON.stringify({ source: gitUrl, type: 'git' }),
      );

      mockGit.clone.mockImplementation(async (_, destination) => {
        fs.mkdirSync(path.join(mockGit.path(), destination), {
          recursive: true,
        });
        fs.writeFileSync(
          path.join(mockGit.path(), destination, EXTENSIONS_CONFIG_FILENAME),
          JSON.stringify({ name: extensionName, version: '1.1.0' }),
        );
      });
      mockGit.getRemotes.mockResolvedValue([{ name: 'origin' }]);
      const extension = annotateActiveExtensions(
        [
          loadExtension({
            extensionDir: targetExtDir,
            workspaceDir: tempWorkspaceDir,
          })!,
        ],
        process.cwd(),
        new ExtensionEnablementManager(ExtensionStorage.getUserExtensionsDir()),
      )[0];
      const updateInfo = await updateExtension(
        extension,
        tempHomeDir,
        async (_) => true,
        ExtensionUpdateState.UPDATE_AVAILABLE,
        () => {},
      );

      expect(updateInfo).toEqual({
        name: 'qwen-extensions',
        originalVersion: '1.0.0',
        updatedVersion: '1.1.0',
      });

      const updatedConfig = JSON.parse(
        fs.readFileSync(
          path.join(targetExtDir, EXTENSIONS_CONFIG_FILENAME),
          'utf-8',
        ),
      );
      expect(updatedConfig.version).toBe('1.1.0');
    });

    it('should call setExtensionUpdateState with UPDATING and then UPDATED_NEEDS_RESTART on success', async () => {
      const extensionName = 'test-extension';
      const extensionDir = createExtension({
        extensionsDir: userExtensionsDir,
        name: extensionName,
        version: '1.0.0',
        installMetadata: {
          source: 'https://some.git/repo',
          type: 'git',
        },
      });

      mockGit.clone.mockImplementation(async (_, destination) => {
        fs.mkdirSync(path.join(mockGit.path(), destination), {
          recursive: true,
        });
        fs.writeFileSync(
          path.join(mockGit.path(), destination, EXTENSIONS_CONFIG_FILENAME),
          JSON.stringify({ name: extensionName, version: '1.1.0' }),
        );
      });
      mockGit.getRemotes.mockResolvedValue([{ name: 'origin' }]);

      const dispatch = vi.fn();
      const extension = annotateActiveExtensions(
        [
          loadExtension({
            extensionDir,
            workspaceDir: tempWorkspaceDir,
          })!,
        ],
        process.cwd(),
        new ExtensionEnablementManager(ExtensionStorage.getUserExtensionsDir()),
      )[0];
      await updateExtension(
        extension,
        tempHomeDir,
        async (_) => true,
        ExtensionUpdateState.UPDATE_AVAILABLE,
        dispatch,
      );

      expect(dispatch).toHaveBeenCalledWith({
        type: 'SET_STATE',
        payload: {
          name: extensionName,
          state: ExtensionUpdateState.UPDATING,
        },
      });
      expect(dispatch).toHaveBeenCalledWith({
        type: 'SET_STATE',
        payload: {
          name: extensionName,
          state: ExtensionUpdateState.UPDATED_NEEDS_RESTART,
        },
      });
    });

    it('should call setExtensionUpdateState with ERROR on failure', async () => {
      const extensionName = 'test-extension';
      const extensionDir = createExtension({
        extensionsDir: userExtensionsDir,
        name: extensionName,
        version: '1.0.0',
        installMetadata: {
          source: 'https://some.git/repo',
          type: 'git',
        },
      });

      mockGit.clone.mockRejectedValue(new Error('Git clone failed'));
      mockGit.getRemotes.mockResolvedValue([{ name: 'origin' }]);

      const dispatch = vi.fn();
      const extension = annotateActiveExtensions(
        [
          loadExtension({
            extensionDir,
            workspaceDir: tempWorkspaceDir,
          })!,
        ],
        process.cwd(),
        new ExtensionEnablementManager(ExtensionStorage.getUserExtensionsDir()),
      )[0];
      await expect(
        updateExtension(
          extension,
          tempHomeDir,
          async (_) => true,
          ExtensionUpdateState.UPDATE_AVAILABLE,
          dispatch,
        ),
      ).rejects.toThrow();

      expect(dispatch).toHaveBeenCalledWith({
        type: 'SET_STATE',
        payload: {
          name: extensionName,
          state: ExtensionUpdateState.UPDATING,
        },
      });
      expect(dispatch).toHaveBeenCalledWith({
        type: 'SET_STATE',
        payload: {
          name: extensionName,
          state: ExtensionUpdateState.ERROR,
        },
      });
    });
  });

  describe('checkForAllExtensionUpdates', () => {
    it('should return UpdateAvailable for a git extension with updates', async () => {
      const extensionDir = createExtension({
        extensionsDir: userExtensionsDir,
        name: 'test-extension',
        version: '1.0.0',
        installMetadata: {
          source: 'https://some.git/repo',
          type: 'git',
        },
      });
      const extension = annotateActiveExtensions(
        [
          loadExtension({
            extensionDir,
            workspaceDir: tempWorkspaceDir,
          })!,
        ],
        process.cwd(),
        new ExtensionEnablementManager(ExtensionStorage.getUserExtensionsDir()),
      )[0];

      mockGit.getRemotes.mockResolvedValue([
        { name: 'origin', refs: { fetch: 'https://some.git/repo' } },
      ]);
      mockGit.listRemote.mockResolvedValue('remoteHash HEAD');
      mockGit.revparse.mockResolvedValue('localHash');

      const dispatch = vi.fn();
      await checkForAllExtensionUpdates([extension], dispatch);
      expect(dispatch).toHaveBeenCalledWith({
        type: 'SET_STATE',
        payload: {
          name: 'test-extension',
          state: ExtensionUpdateState.UPDATE_AVAILABLE,
        },
      });
    });

    it('should return UpToDate for a git extension with no updates', async () => {
      const extensionDir = createExtension({
        extensionsDir: userExtensionsDir,
        name: 'test-extension',
        version: '1.0.0',
        installMetadata: {
          source: 'https://some.git/repo',
          type: 'git',
        },
      });
      const extension = annotateActiveExtensions(
        [
          loadExtension({
            extensionDir,
            workspaceDir: tempWorkspaceDir,
          })!,
        ],
        process.cwd(),
        new ExtensionEnablementManager(ExtensionStorage.getUserExtensionsDir()),
      )[0];

      mockGit.getRemotes.mockResolvedValue([
        { name: 'origin', refs: { fetch: 'https://some.git/repo' } },
      ]);
      mockGit.listRemote.mockResolvedValue('sameHash HEAD');
      mockGit.revparse.mockResolvedValue('sameHash');

      const dispatch = vi.fn();
      await checkForAllExtensionUpdates([extension], dispatch);
      expect(dispatch).toHaveBeenCalledWith({
        type: 'SET_STATE',
        payload: {
          name: 'test-extension',
          state: ExtensionUpdateState.UP_TO_DATE,
        },
      });
    });

    it('should return UpToDate for a local extension with no updates', async () => {
      const localExtensionSourcePath = path.join(tempHomeDir, 'local-source');
      const sourceExtensionDir = createExtension({
        extensionsDir: localExtensionSourcePath,
        name: 'my-local-ext',
        version: '1.0.0',
      });

      const installedExtensionDir = createExtension({
        extensionsDir: userExtensionsDir,
        name: 'local-extension',
        version: '1.0.0',
        installMetadata: { source: sourceExtensionDir, type: 'local' },
      });
      const extension = annotateActiveExtensions(
        [
          loadExtension({
            extensionDir: installedExtensionDir,
            workspaceDir: tempWorkspaceDir,
          })!,
        ],
        process.cwd(),
        new ExtensionEnablementManager(ExtensionStorage.getUserExtensionsDir()),
      )[0];
      const dispatch = vi.fn();
      await checkForAllExtensionUpdates([extension], dispatch);
      expect(dispatch).toHaveBeenCalledWith({
        type: 'SET_STATE',
        payload: {
          name: 'local-extension',
          state: ExtensionUpdateState.UP_TO_DATE,
        },
      });
    });

    it('should return UpdateAvailable for a local extension with updates', async () => {
      const localExtensionSourcePath = path.join(tempHomeDir, 'local-source');
      const sourceExtensionDir = createExtension({
        extensionsDir: localExtensionSourcePath,
        name: 'my-local-ext',
        version: '1.1.0',
      });

      const installedExtensionDir = createExtension({
        extensionsDir: userExtensionsDir,
        name: 'local-extension',
        version: '1.0.0',
        installMetadata: { source: sourceExtensionDir, type: 'local' },
      });
      const extension = annotateActiveExtensions(
        [
          loadExtension({
            extensionDir: installedExtensionDir,
            workspaceDir: tempWorkspaceDir,
          })!,
        ],
        process.cwd(),
        new ExtensionEnablementManager(ExtensionStorage.getUserExtensionsDir()),
      )[0];
      const dispatch = vi.fn();
      await checkForAllExtensionUpdates([extension], dispatch);
      expect(dispatch).toHaveBeenCalledWith({
        type: 'SET_STATE',
        payload: {
          name: 'local-extension',
          state: ExtensionUpdateState.UPDATE_AVAILABLE,
        },
      });
    });

    it('should return Error when git check fails', async () => {
      const extensionDir = createExtension({
        extensionsDir: userExtensionsDir,
        name: 'error-extension',
        version: '1.0.0',
        installMetadata: {
          source: 'https://some.git/repo',
          type: 'git',
        },
      });
      const extension = annotateActiveExtensions(
        [
          loadExtension({
            extensionDir,
            workspaceDir: tempWorkspaceDir,
          })!,
        ],
        process.cwd(),
        new ExtensionEnablementManager(ExtensionStorage.getUserExtensionsDir()),
      )[0];

      mockGit.getRemotes.mockRejectedValue(new Error('Git error'));

      const dispatch = vi.fn();
      await checkForAllExtensionUpdates([extension], dispatch);
      expect(dispatch).toHaveBeenCalledWith({
        type: 'SET_STATE',
        payload: {
          name: 'error-extension',
          state: ExtensionUpdateState.ERROR,
        },
      });
    });
  });
});
@@ -1,182 +0,0 @@
/**
 * @license
 * Copyright 2025 Google LLC
 * SPDX-License-Identifier: Apache-2.0
 */

import {
  type ExtensionUpdateAction,
  ExtensionUpdateState,
  type ExtensionUpdateStatus,
} from '../../ui/state/extensions.js';
import {
  copyExtension,
  installExtension,
  uninstallExtension,
  loadExtension,
  loadInstallMetadata,
  ExtensionStorage,
  loadExtensionConfig,
} from '../extension.js';
import { checkForExtensionUpdate } from './github.js';
import type { GeminiCLIExtension } from '@qwen-code/qwen-code-core';
import * as fs from 'node:fs';
import { getErrorMessage } from '../../utils/errors.js';

export interface ExtensionUpdateInfo {
  name: string;
  originalVersion: string;
  updatedVersion: string;
}

export async function updateExtension(
  extension: GeminiCLIExtension,
  cwd: string = process.cwd(),
  requestConsent: (consent: string) => Promise<boolean>,
  currentState: ExtensionUpdateState,
  dispatchExtensionStateUpdate: (action: ExtensionUpdateAction) => void,
): Promise<ExtensionUpdateInfo | undefined> {
  if (currentState === ExtensionUpdateState.UPDATING) {
    return undefined;
  }
  dispatchExtensionStateUpdate({
    type: 'SET_STATE',
    payload: { name: extension.name, state: ExtensionUpdateState.UPDATING },
  });
  const installMetadata = loadInstallMetadata(extension.path);

  if (!installMetadata?.type) {
    dispatchExtensionStateUpdate({
      type: 'SET_STATE',
      payload: { name: extension.name, state: ExtensionUpdateState.ERROR },
    });
    throw new Error(
      `Extension ${extension.name} cannot be updated, type is unknown.`,
    );
  }
  if (installMetadata?.type === 'link') {
    dispatchExtensionStateUpdate({
      type: 'SET_STATE',
      payload: { name: extension.name, state: ExtensionUpdateState.UP_TO_DATE },
    });
    throw new Error(`Extension is linked so does not need to be updated`);
  }
  const originalVersion = extension.version;

  const tempDir = await ExtensionStorage.createTmpDir();
  try {
    await copyExtension(extension.path, tempDir);
    const previousExtensionConfig = await loadExtensionConfig({
      extensionDir: extension.path,
      workspaceDir: cwd,
    });
    await uninstallExtension(extension.name, cwd);
    await installExtension(
      installMetadata,
      requestConsent,
      cwd,
      previousExtensionConfig,
    );

    const updatedExtensionStorage = new ExtensionStorage(extension.name);
    const updatedExtension = loadExtension({
      extensionDir: updatedExtensionStorage.getExtensionDir(),
      workspaceDir: cwd,
    });
    if (!updatedExtension) {
      dispatchExtensionStateUpdate({
        type: 'SET_STATE',
        payload: { name: extension.name, state: ExtensionUpdateState.ERROR },
      });
      throw new Error('Updated extension not found after installation.');
    }
    const updatedVersion = updatedExtension.config.version;
    dispatchExtensionStateUpdate({
      type: 'SET_STATE',
      payload: {
        name: extension.name,
        state: ExtensionUpdateState.UPDATED_NEEDS_RESTART,
      },
    });
    return {
      name: extension.name,
      originalVersion,
      updatedVersion,
    };
  } catch (e) {
    console.error(
      `Error updating extension, rolling back. ${getErrorMessage(e)}`,
    );
    dispatchExtensionStateUpdate({
      type: 'SET_STATE',
      payload: { name: extension.name, state: ExtensionUpdateState.ERROR },
    });
    await copyExtension(tempDir, extension.path);
    throw e;
  } finally {
    await fs.promises.rm(tempDir, { recursive: true, force: true });
  }
}
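
Sketch of a call site (illustrative; `extension` and `dispatch` are assumed to already exist in the caller, and the auto-consent callback stands in for an interactive prompt):

const info = await updateExtension(
  extension,
  process.cwd(),
  async () => true, // auto-consent, e.g. for a non-interactive flow
  ExtensionUpdateState.UPDATE_AVAILABLE,
  dispatch,
);
// On success, info is { name, originalVersion, updatedVersion }. On failure the
// previous install is restored from the temp copy and the error is rethrown.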

export async function updateAllUpdatableExtensions(
  cwd: string = process.cwd(),
  requestConsent: (consent: string) => Promise<boolean>,
  extensions: GeminiCLIExtension[],
  extensionsState: Map<string, ExtensionUpdateStatus>,
  dispatch: (action: ExtensionUpdateAction) => void,
): Promise<ExtensionUpdateInfo[]> {
  return (
    await Promise.all(
      extensions
        .filter(
          (extension) =>
            extensionsState.get(extension.name)?.status ===
            ExtensionUpdateState.UPDATE_AVAILABLE,
        )
        .map((extension) =>
          updateExtension(
            extension,
            cwd,
            requestConsent,
            extensionsState.get(extension.name)!.status,
            dispatch,
          ),
        ),
    )
  ).filter((updateInfo) => !!updateInfo);
}

export interface ExtensionUpdateCheckResult {
  state: ExtensionUpdateState;
  error?: string;
}

export async function checkForAllExtensionUpdates(
  extensions: GeminiCLIExtension[],
  dispatch: (action: ExtensionUpdateAction) => void,
): Promise<void> {
  dispatch({ type: 'BATCH_CHECK_START' });
  const promises: Array<Promise<void>> = [];
  for (const extension of extensions) {
    if (!extension.installMetadata) {
      dispatch({
        type: 'SET_STATE',
        payload: {
          name: extension.name,
          state: ExtensionUpdateState.NOT_UPDATABLE,
        },
      });
      continue;
    }
    promises.push(
      checkForExtensionUpdate(extension, (updatedState) => {
        dispatch({
          type: 'SET_STATE',
          payload: { name: extension.name, state: updatedState },
        });
      }),
    );
  }
  await Promise.all(promises);
  dispatch({ type: 'BATCH_CHECK_END' });
}
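
A rough usage sketch for the two batch helpers (the Map mirrors what the UI reducer records after a check pass; all variable names here are illustrative):

await checkForAllExtensionUpdates(extensions, dispatch);
// ...later, once the reducer has recorded UPDATE_AVAILABLE entries:
const updated = await updateAllUpdatableExtensions(
  process.cwd(),
  async () => true,
  extensions,
  extensionsState, // Map<string, ExtensionUpdateStatus> from the UI state
  dispatch,
);
console.log(`${updated.length} extension(s) updated`);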
|
||||
@@ -51,7 +51,6 @@ import {
|
||||
import * as fs from 'node:fs'; // fs will be mocked separately
|
||||
import stripJsonComments from 'strip-json-comments'; // Will be mocked separately
|
||||
import { isWorkspaceTrusted } from './trustedFolders.js';
|
||||
import { disableExtension } from './extension.js';
|
||||
|
||||
// These imports will get the versions from the vi.mock('./settings.js', ...) factory.
|
||||
import {
|
||||
@@ -64,8 +63,6 @@ import {
|
||||
needsMigration,
|
||||
type Settings,
|
||||
loadEnvironment,
|
||||
migrateDeprecatedSettings,
|
||||
SettingScope,
|
||||
SETTINGS_VERSION,
|
||||
SETTINGS_VERSION_KEY,
|
||||
} from './settings.js';
|
||||
@@ -2649,122 +2646,4 @@ describe('Settings Loading and Merging', () => {
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
describe('migrateDeprecatedSettings', () => {
|
||||
let mockFsExistsSync: Mocked<typeof fs.existsSync>;
|
||||
let mockFsReadFileSync: Mocked<typeof fs.readFileSync>;
|
||||
let mockDisableExtension: Mocked<typeof disableExtension>;
|
||||
|
||||
beforeEach(() => {
|
||||
vi.resetAllMocks();
|
||||
|
||||
mockFsExistsSync = vi.mocked(fs.existsSync);
|
||||
mockFsReadFileSync = vi.mocked(fs.readFileSync);
|
||||
mockDisableExtension = vi.mocked(disableExtension);
|
||||
|
||||
(mockFsExistsSync as Mock).mockReturnValue(true);
|
||||
vi.mocked(isWorkspaceTrusted).mockReturnValue(true);
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
vi.restoreAllMocks();
|
||||
});
|
||||
|
||||
it('should migrate disabled extensions from user and workspace settings', () => {
|
||||
const userSettingsContent = {
|
||||
extensions: {
|
||||
disabled: ['user-ext-1', 'shared-ext'],
|
||||
},
|
||||
};
|
||||
const workspaceSettingsContent = {
|
||||
extensions: {
|
||||
disabled: ['workspace-ext-1', 'shared-ext'],
|
||||
},
|
||||
};
|
||||
|
||||
(mockFsReadFileSync as Mock).mockImplementation(
|
||||
(p: fs.PathOrFileDescriptor) => {
|
||||
if (p === USER_SETTINGS_PATH)
|
||||
return JSON.stringify(userSettingsContent);
|
||||
if (p === MOCK_WORKSPACE_SETTINGS_PATH)
|
||||
return JSON.stringify(workspaceSettingsContent);
|
||||
return '{}';
|
||||
},
|
||||
);
|
||||
|
||||
const loadedSettings = loadSettings(MOCK_WORKSPACE_DIR);
|
||||
const setValueSpy = vi.spyOn(loadedSettings, 'setValue');
|
||||
|
||||
migrateDeprecatedSettings(loadedSettings, MOCK_WORKSPACE_DIR);
|
||||
|
||||
// Check user settings migration
|
||||
expect(mockDisableExtension).toHaveBeenCalledWith(
|
||||
'user-ext-1',
|
||||
SettingScope.User,
|
||||
MOCK_WORKSPACE_DIR,
|
||||
);
|
||||
expect(mockDisableExtension).toHaveBeenCalledWith(
|
||||
'shared-ext',
|
||||
SettingScope.User,
|
||||
MOCK_WORKSPACE_DIR,
|
||||
);
|
||||
|
||||
// Check workspace settings migration
|
||||
expect(mockDisableExtension).toHaveBeenCalledWith(
|
||||
'workspace-ext-1',
|
||||
SettingScope.Workspace,
|
||||
MOCK_WORKSPACE_DIR,
|
||||
);
|
||||
expect(mockDisableExtension).toHaveBeenCalledWith(
|
||||
'shared-ext',
|
||||
SettingScope.Workspace,
|
||||
MOCK_WORKSPACE_DIR,
|
||||
);
|
||||
|
||||
// Check that setValue was called to remove the deprecated setting
|
||||
expect(setValueSpy).toHaveBeenCalledWith(
|
||||
SettingScope.User,
|
||||
'extensions',
|
||||
{
|
||||
disabled: undefined,
|
||||
},
|
||||
);
|
||||
expect(setValueSpy).toHaveBeenCalledWith(
|
||||
SettingScope.Workspace,
|
||||
'extensions',
|
||||
{
|
||||
disabled: undefined,
|
||||
},
|
||||
);
|
||||
});
|
||||
|
||||
it('should not do anything if there are no deprecated settings', () => {
|
||||
const userSettingsContent = {
|
||||
extensions: {
|
||||
enabled: ['user-ext-1'],
|
||||
},
|
||||
};
|
||||
const workspaceSettingsContent = {
|
||||
someOtherSetting: 'value',
|
||||
};
|
||||
|
||||
(mockFsReadFileSync as Mock).mockImplementation(
|
||||
(p: fs.PathOrFileDescriptor) => {
|
||||
if (p === USER_SETTINGS_PATH)
|
||||
return JSON.stringify(userSettingsContent);
|
||||
if (p === MOCK_WORKSPACE_SETTINGS_PATH)
|
||||
return JSON.stringify(workspaceSettingsContent);
|
||||
return '{}';
|
||||
},
|
||||
);
|
||||
|
||||
const loadedSettings = loadSettings(MOCK_WORKSPACE_DIR);
|
||||
const setValueSpy = vi.spyOn(loadedSettings, 'setValue');
|
||||
|
||||
migrateDeprecatedSettings(loadedSettings, MOCK_WORKSPACE_DIR);
|
||||
|
||||
expect(mockDisableExtension).not.toHaveBeenCalled();
|
||||
expect(setValueSpy).not.toHaveBeenCalled();
|
||||
});
|
||||
});
|
||||
});
|
||||

@@ -30,7 +30,6 @@ import {
import { resolveEnvVarsInObject } from '../utils/envVarResolver.js';
import { customDeepMerge, type MergeableObject } from '../utils/deepMerge.js';
import { updateSettingsFilePreservingFormat } from '../utils/commentJson.js';
import { disableExtension } from './extension.js';

function getMergeStrategyForPath(path: string[]): MergeStrategy | undefined {
  let current: SettingDefinition | undefined = undefined;
@@ -81,7 +80,6 @@ const MIGRATION_MAP: Record<string, string> = {
  excludeTools: 'tools.exclude',
  excludeMCPServers: 'mcp.excluded',
  excludedProjectEnvVars: 'advanced.excludedEnvVars',
  extensionManagement: 'experimental.extensionManagement',
  extensions: 'extensions',
  fileFiltering: 'context.fileFiltering',
  folderTrustFeature: 'security.folderTrust.featureEnabled',
@@ -812,31 +810,6 @@ export function loadSettings(
  );
}

export function migrateDeprecatedSettings(
  loadedSettings: LoadedSettings,
  workspaceDir: string = process.cwd(),
): void {
  const processScope = (scope: SettingScope) => {
    const settings = loadedSettings.forScope(scope).settings;
    if (settings.extensions?.disabled) {
      console.log(
        `Migrating deprecated extensions.disabled settings from ${scope} settings...`,
      );
      for (const extension of settings.extensions.disabled ?? []) {
        disableExtension(extension, scope, workspaceDir);
      }

      const newExtensionsValue = { ...settings.extensions };
      newExtensionsValue.disabled = undefined;

      loadedSettings.setValue(scope, 'extensions', newExtensionsValue);
    }
  };

  processScope(SettingScope.User);
  processScope(SettingScope.Workspace);
}

export function saveSettings(settingsFile: SettingsFile): void {
  try {
    // Ensure the directory exists

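Note: an illustration of what the (now removed) migrateDeprecatedSettings pass did, reconstructed from the function body above; it is not part of the diff, and the extension names are made up.

// Deprecated user-scope value before migration:
const before = { extensions: { disabled: ['ext-a', 'ext-b'] } };

// For that scope the migration called disableExtension() once per listed name
// (disableExtension('ext-a', SettingScope.User, workspaceDir), then 'ext-b')
// and then wrote the scoped value back with the deprecated key cleared:
const after = { extensions: { ...before.extensions, disabled: undefined } };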
@@ -1192,15 +1192,6 @@ const SETTINGS_SCHEMA = {
|
||||
description: 'Setting to enable experimental features',
|
||||
showInDialog: false,
|
||||
properties: {
|
||||
extensionManagement: {
|
||||
type: 'boolean',
|
||||
label: 'Extension Management',
|
||||
category: 'Experimental',
|
||||
requiresRestart: true,
|
||||
default: true,
|
||||
description: 'Enable extension management features.',
|
||||
showInDialog: false,
|
||||
},
|
||||
visionModelPreview: {
|
||||
type: 'boolean',
|
||||
label: 'Vision Model Preview',
|
||||
@@ -1223,39 +1214,6 @@ const SETTINGS_SCHEMA = {
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
extensions: {
|
||||
type: 'object',
|
||||
label: 'Extensions',
|
||||
category: 'Extensions',
|
||||
requiresRestart: true,
|
||||
default: {},
|
||||
description: 'Settings for extensions.',
|
||||
showInDialog: false,
|
||||
properties: {
|
||||
disabled: {
|
||||
type: 'array',
|
||||
label: 'Disabled Extensions',
|
||||
category: 'Extensions',
|
||||
requiresRestart: true,
|
||||
default: [] as string[],
|
||||
description: 'List of disabled extensions.',
|
||||
showInDialog: false,
|
||||
mergeStrategy: MergeStrategy.UNION,
|
||||
},
|
||||
workspacesWithMigrationNudge: {
|
||||
type: 'array',
|
||||
label: 'Workspaces with Migration Nudge',
|
||||
category: 'Extensions',
|
||||
requiresRestart: false,
|
||||
default: [] as string[],
|
||||
description:
|
||||
'List of workspaces for which the migration nudge has been shown.',
|
||||
showInDialog: false,
|
||||
mergeStrategy: MergeStrategy.UNION,
|
||||
},
|
||||
},
|
||||
},
|
||||
} as const satisfies SettingsSchema;
|
||||
|
||||
export type SettingsSchemaType = typeof SETTINGS_SCHEMA;
|
||||
|
||||
@@ -15,6 +15,7 @@ import { type LoadedSettings, SettingScope } from '../config/settings.js';
|
||||
import { performInitialAuth } from './auth.js';
|
||||
import { validateTheme } from './theme.js';
|
||||
import { initializeI18n } from '../i18n/index.js';
|
||||
import { initializeLlmOutputLanguage } from '../ui/commands/languageCommand.js';
|
||||
|
||||
export interface InitializationResult {
|
||||
authError: string | null;
|
||||
@@ -41,6 +42,9 @@ export async function initializeApp(
|
||||
'auto';
|
||||
await initializeI18n(languageSetting);
|
||||
|
||||
// Auto-detect and set LLM output language on first use
|
||||
initializeLlmOutputLanguage();
|
||||
|
||||
const authType = settings.merged.security?.auth?.selectedType;
|
||||
const authError = await performInitialAuth(config, authType);
|
||||
|
||||
|
||||
@@ -262,7 +262,6 @@ describe('gemini.tsx main function', () => {
|
||||
);
|
||||
const { loadSettings } = await import('./config/settings.js');
|
||||
const cleanupModule = await import('./utils/cleanup.js');
|
||||
const extensionModule = await import('./config/extension.js');
|
||||
const validatorModule = await import('./validateNonInterActiveAuth.js');
|
||||
const streamJsonModule = await import('./nonInteractive/session.js');
|
||||
const initializerModule = await import('./core/initializer.js');
|
||||
@@ -275,11 +274,6 @@ describe('gemini.tsx main function', () => {
|
||||
vi.mocked(cleanupModule.registerCleanup).mockImplementation(() => {});
|
||||
const runExitCleanupMock = vi.mocked(cleanupModule.runExitCleanup);
|
||||
runExitCleanupMock.mockResolvedValue(undefined);
|
||||
vi.spyOn(extensionModule, 'loadExtensions').mockReturnValue([]);
|
||||
vi.spyOn(
|
||||
extensionModule.ExtensionStorage,
|
||||
'getUserExtensionsDir',
|
||||
).mockReturnValue('/tmp/extensions');
|
||||
vi.spyOn(initializerModule, 'initializeApp').mockResolvedValue({
|
||||
authError: null,
|
||||
themeError: null,
|
||||
@@ -461,6 +455,7 @@ describe('gemini.tsx main function kitty protocol', () => {
|
||||
allowedMcpServerNames: undefined,
|
||||
allowedTools: undefined,
|
||||
experimentalAcp: undefined,
|
||||
experimentalSkills: undefined,
|
||||
extensions: undefined,
|
||||
listExtensions: undefined,
|
||||
openaiLogging: undefined,
|
||||
|
||||
@@ -4,13 +4,8 @@
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
import type { Config } from '@qwen-code/qwen-code-core';
|
||||
import {
|
||||
AuthType,
|
||||
getOauthClient,
|
||||
InputFormat,
|
||||
logUserPrompt,
|
||||
} from '@qwen-code/qwen-code-core';
|
||||
import type { Config, AuthType } from '@qwen-code/qwen-code-core';
|
||||
import { InputFormat, logUserPrompt } from '@qwen-code/qwen-code-core';
|
||||
import { render } from 'ink';
|
||||
import dns from 'node:dns';
|
||||
import os from 'node:os';
|
||||
@@ -20,9 +15,8 @@ import React from 'react';
|
||||
import { validateAuthMethod } from './config/auth.js';
|
||||
import * as cliConfig from './config/config.js';
|
||||
import { loadCliConfig, parseArguments } from './config/config.js';
|
||||
import { ExtensionStorage, loadExtensions } from './config/extension.js';
|
||||
import type { DnsResolutionOrder, LoadedSettings } from './config/settings.js';
|
||||
import { loadSettings, migrateDeprecatedSettings } from './config/settings.js';
|
||||
import { loadSettings } from './config/settings.js';
|
||||
import {
|
||||
initializeApp,
|
||||
type InitializationResult,
|
||||
@@ -108,7 +102,6 @@ function getNodeMemoryArgs(isDebugMode: boolean): string[] {
|
||||
return [];
|
||||
}
|
||||
|
||||
import { ExtensionEnablementManager } from './config/extensions/extensionEnablement.js';
|
||||
import { loadSandboxConfig } from './config/sandboxConfig.js';
|
||||
import { runAcpAgent } from './acp-integration/acpAgent.js';
|
||||
|
||||
@@ -205,10 +198,9 @@ export async function startInteractiveUI(
|
||||
export async function main() {
|
||||
setupUnhandledRejectionHandler();
|
||||
const settings = loadSettings();
|
||||
migrateDeprecatedSettings(settings);
|
||||
await cleanupCheckpoints();
|
||||
|
||||
let argv = await parseArguments(settings.merged);
|
||||
let argv = await parseArguments();
|
||||
|
||||
// Check for invalid input combinations early to prevent crashes
|
||||
if (argv.promptInteractive && !process.stdin.isTTY) {
|
||||
@@ -250,9 +242,9 @@ export async function main() {
|
||||
if (sandboxConfig) {
|
||||
const partialConfig = await loadCliConfig(
|
||||
settings.merged,
|
||||
[],
|
||||
new ExtensionEnablementManager(ExtensionStorage.getUserExtensionsDir()),
|
||||
argv,
|
||||
undefined,
|
||||
[],
|
||||
);
|
||||
|
||||
if (
|
||||
@@ -336,25 +328,21 @@ export async function main() {
|
||||
// to run Gemini CLI. It is now safe to perform expensive initialization that
|
||||
// may have side effects.
|
||||
{
|
||||
const extensionEnablementManager = new ExtensionEnablementManager(
|
||||
ExtensionStorage.getUserExtensionsDir(),
|
||||
argv.extensions,
|
||||
);
|
||||
const extensions = loadExtensions(extensionEnablementManager);
|
||||
const config = await loadCliConfig(
|
||||
settings.merged,
|
||||
extensions,
|
||||
extensionEnablementManager,
|
||||
argv,
|
||||
process.cwd(),
|
||||
argv.extensions,
|
||||
);
|
||||
|
||||
if (config.getListExtensions()) {
|
||||
console.log('Installed extensions:');
|
||||
for (const extension of extensions) {
|
||||
console.log(`- ${extension.config.name}`);
|
||||
}
|
||||
process.exit(0);
|
||||
}
|
||||
// FIXME: list extensions after the config initialize
|
||||
// if (config.getListExtensions()) {
|
||||
// console.log('Installed extensions:');
|
||||
// for (const extension of extensions) {
|
||||
// console.log(`- ${extension.config.name}`);
|
||||
// }
|
||||
// process.exit(0);
|
||||
// }
|
||||
|
||||
// Setup unified ConsolePatcher based on interactive mode
|
||||
const isInteractive = config.isInteractive();
|
||||
@@ -399,17 +387,8 @@ export async function main() {
|
||||
initializationResult = await initializeApp(config, settings);
|
||||
}
|
||||
|
||||
if (
|
||||
settings.merged.security?.auth?.selectedType ===
|
||||
AuthType.LOGIN_WITH_GOOGLE &&
|
||||
config.isBrowserLaunchSuppressed()
|
||||
) {
|
||||
// Do oauth before app renders to make copying the link possible.
|
||||
await getOauthClient(settings.merged.security.auth.selectedType, config);
|
||||
}
|
||||
|
||||
if (config.getExperimentalZedIntegration()) {
|
||||
return runAcpAgent(config, settings, extensions, argv);
|
||||
return runAcpAgent(config, settings, argv);
|
||||
}
|
||||
|
||||
let input = config.getQuestion();
|
||||
|
||||
@@ -1,6 +1,6 @@
/**
 * @license
 * Copyright 2025 Qwen
 * Copyright 2025 Qwen team
 * SPDX-License-Identifier: Apache-2.0
 */

@@ -8,15 +8,21 @@ import * as fs from 'node:fs';
import * as path from 'node:path';
import { fileURLToPath, pathToFileURL } from 'node:url';
import { homedir } from 'node:os';
import {
  type SupportedLanguage,
  getLanguageNameFromLocale,
} from './languages.js';

export type SupportedLanguage = 'en' | 'zh' | 'ru' | string; // Allow custom language codes
export type { SupportedLanguage };
export { getLanguageNameFromLocale };

// State
let currentLanguage: SupportedLanguage = 'en';
let translations: Record<string, string> = {};
let translations: Record<string, string | string[]> = {};

// Cache
type TranslationDict = Record<string, string>;
type TranslationValue = string | string[];
type TranslationDict = Record<string, TranslationValue>;
const translationCache: Record<string, TranslationDict> = {};
const loadingPromises: Record<string, Promise<TranslationDict>> = {};

@@ -52,11 +58,13 @@ export function detectSystemLanguage(): SupportedLanguage {
  if (envLang?.startsWith('zh')) return 'zh';
  if (envLang?.startsWith('en')) return 'en';
  if (envLang?.startsWith('ru')) return 'ru';
  if (envLang?.startsWith('de')) return 'de';

  try {
    const locale = Intl.DateTimeFormat().resolvedOptions().locale;
    if (locale.startsWith('zh')) return 'zh';
    if (locale.startsWith('ru')) return 'ru';
    if (locale.startsWith('de')) return 'de';
  } catch {
    // Fallback to default
  }
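Note: a small illustration of the extended detection above; not part of the diff. It assumes envLang is derived from the usual LANG/LC_ALL-style environment variables at call time, which this hunk does not show.

// A German POSIX locale now resolves to the new 'de' code:
process.env['LANG'] = 'de_DE.UTF-8';
console.log(detectSystemLanguage()); // expected: 'de' (assuming envLang reads LANG lazily)
// Unmatched values fall through to the Intl-based check and finally to 'en'.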
@@ -224,9 +232,25 @@ export function getCurrentLanguage(): SupportedLanguage {

export function t(key: string, params?: Record<string, string>): string {
  const translation = translations[key] ?? key;
  if (Array.isArray(translation)) {
    return key;
  }
  return interpolate(translation, params);
}

/**
 * Get a translation that is an array of strings.
 * @param key The translation key
 * @returns The array of strings, or an empty array if not found or not an array
 */
export function ta(key: string): string[] {
  const translation = translations[key];
  if (Array.isArray(translation)) {
    return translation;
  }
  return [];
}

export async function initializeI18n(
  lang?: SupportedLanguage | 'auto',
): Promise<void> {

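Note: a short usage sketch for the string-vs-array split introduced above; not part of the diff. It assumes a locale has already been loaded via initializeI18n() and that its dictionary contains the WITTY_LOADING_PHRASES array added to the locale files later in this change; the import path is an assumption.

import { t, ta } from './i18n/index.js'; // assumed import path

// Plain string keys keep going through t(), with {{param}} interpolation:
const msg = t('Theme "{{themeName}}" not found.', { themeName: 'dracula' });

// Array-valued keys must use ta(); t() returns the key itself for them:
const phrases = ta('WITTY_LOADING_PHRASES');
const phrase = phrases[Math.floor(Math.random() * phrases.length)] ?? 'Loading...';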
packages/cli/src/i18n/languages.ts (new file, 48 lines)
@@ -0,0 +1,48 @@
/**
 * @license
 * Copyright 2025 Qwen team
 * SPDX-License-Identifier: Apache-2.0
 */

export type SupportedLanguage = 'en' | 'zh' | 'ru' | 'de' | string;

export interface LanguageDefinition {
  /** The internal locale code used by the i18n system (e.g., 'en', 'zh'). */
  code: SupportedLanguage;
  /** The standard name used in UI settings (e.g., 'en-US', 'zh-CN'). */
  id: string;
  /** The full English name of the language (e.g., 'English', 'Chinese'). */
  fullName: string;
}

export const SUPPORTED_LANGUAGES: readonly LanguageDefinition[] = [
  {
    code: 'en',
    id: 'en-US',
    fullName: 'English',
  },
  {
    code: 'zh',
    id: 'zh-CN',
    fullName: 'Chinese',
  },
  {
    code: 'ru',
    id: 'ru-RU',
    fullName: 'Russian',
  },
  {
    code: 'de',
    id: 'de-DE',
    fullName: 'German',
  },
];

/**
 * Maps a locale code to its English language name.
 * Used for LLM output language instructions.
 */
export function getLanguageNameFromLocale(locale: SupportedLanguage): string {
  const lang = SUPPORTED_LANGUAGES.find((l) => l.code === locale);
  return lang?.fullName || 'English';
}
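Note: a quick usage sketch for the new languages module; not part of the diff.

import {
  SUPPORTED_LANGUAGES,
  getLanguageNameFromLocale,
} from './languages.js';

// Resolve the English name used in LLM output-language instructions:
getLanguageNameFromLocale('de'); // 'German'
getLanguageNameFromLocale('fr'); // 'English' (fallback for unknown codes)

// List the UI language ids offered in settings:
const uiLanguageIds = SUPPORTED_LANGUAGES.map((l) => l.id);
// ['en-US', 'zh-CN', 'ru-RU', 'de-DE']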
@@ -102,8 +102,8 @@ export default {
|
||||
'Theme "{{themeName}}" not found.': 'Theme "{{themeName}}" not found.',
|
||||
'Theme "{{themeName}}" not found in selected scope.':
|
||||
'Theme "{{themeName}}" not found in selected scope.',
|
||||
'clear the screen and conversation history':
|
||||
'clear the screen and conversation history',
|
||||
'Clear conversation history and free up context':
|
||||
'Clear conversation history and free up context',
|
||||
'Compresses the context by replacing it with a summary.':
|
||||
'Compresses the context by replacing it with a summary.',
|
||||
'open full Qwen Code documentation in your browser':
|
||||
@@ -258,6 +258,8 @@ export default {
|
||||
', Tab to change focus': ', Tab to change focus',
|
||||
'To see changes, Qwen Code must be restarted. Press r to exit and apply changes now.':
|
||||
'To see changes, Qwen Code must be restarted. Press r to exit and apply changes now.',
|
||||
'The command "/{{command}}" is not supported in non-interactive mode.':
|
||||
'The command "/{{command}}" is not supported in non-interactive mode.',
|
||||
// ============================================================================
|
||||
// Settings Labels
|
||||
// ============================================================================
|
||||
@@ -590,6 +592,12 @@ export default {
|
||||
'No conversation found to summarize.': 'No conversation found to summarize.',
|
||||
'Failed to generate project context summary: {{error}}':
|
||||
'Failed to generate project context summary: {{error}}',
|
||||
'Saved project summary to {{filePathForDisplay}}.':
|
||||
'Saved project summary to {{filePathForDisplay}}.',
|
||||
'Saving project summary...': 'Saving project summary...',
|
||||
'Generating project summary...': 'Generating project summary...',
|
||||
'Failed to generate summary - no text content received from LLM response':
|
||||
'Failed to generate summary - no text content received from LLM response',
|
||||
|
||||
// ============================================================================
|
||||
// Commands - Model
|
||||
@@ -604,9 +612,10 @@ export default {
|
||||
// ============================================================================
|
||||
// Commands - Clear
|
||||
// ============================================================================
|
||||
'Clearing terminal and resetting chat.':
|
||||
'Clearing terminal and resetting chat.',
|
||||
'Clearing terminal.': 'Clearing terminal.',
|
||||
'Starting a new session, resetting chat, and clearing terminal.':
|
||||
'Starting a new session, resetting chat, and clearing terminal.',
|
||||
'Starting a new session and clearing.':
|
||||
'Starting a new session and clearing.',
|
||||
|
||||
// ============================================================================
|
||||
// Commands - Compress
|
||||
@@ -927,192 +936,138 @@ export default {
|
||||
// ============================================================================
|
||||
'Waiting for user confirmation...': 'Waiting for user confirmation...',
|
||||
'(esc to cancel, {{time}})': '(esc to cancel, {{time}})',
|
||||
"I'm Feeling Lucky": "I'm Feeling Lucky",
|
||||
'Shipping awesomeness... ': 'Shipping awesomeness... ',
|
||||
'Painting the serifs back on...': 'Painting the serifs back on...',
|
||||
'Navigating the slime mold...': 'Navigating the slime mold...',
|
||||
'Consulting the digital spirits...': 'Consulting the digital spirits...',
|
||||
'Reticulating splines...': 'Reticulating splines...',
|
||||
'Warming up the AI hamsters...': 'Warming up the AI hamsters...',
|
||||
'Asking the magic conch shell...': 'Asking the magic conch shell...',
|
||||
'Generating witty retort...': 'Generating witty retort...',
|
||||
'Polishing the algorithms...': 'Polishing the algorithms...',
|
||||
"Don't rush perfection (or my code)...":
|
||||
|
||||
// ============================================================================
|
||||
// Loading Phrases
|
||||
// ============================================================================
|
||||
WITTY_LOADING_PHRASES: [
|
||||
"I'm Feeling Lucky",
|
||||
'Shipping awesomeness... ',
|
||||
'Painting the serifs back on...',
|
||||
'Navigating the slime mold...',
|
||||
'Consulting the digital spirits...',
|
||||
'Reticulating splines...',
|
||||
'Warming up the AI hamsters...',
|
||||
'Asking the magic conch shell...',
|
||||
'Generating witty retort...',
|
||||
'Polishing the algorithms...',
|
||||
"Don't rush perfection (or my code)...",
|
||||
'Brewing fresh bytes...': 'Brewing fresh bytes...',
|
||||
'Counting electrons...': 'Counting electrons...',
|
||||
'Engaging cognitive processors...': 'Engaging cognitive processors...',
|
||||
'Checking for syntax errors in the universe...':
|
||||
'Brewing fresh bytes...',
|
||||
'Counting electrons...',
|
||||
'Engaging cognitive processors...',
|
||||
'Checking for syntax errors in the universe...',
|
||||
'One moment, optimizing humor...': 'One moment, optimizing humor...',
|
||||
'Shuffling punchlines...': 'Shuffling punchlines...',
|
||||
'Untangling neural nets...': 'Untangling neural nets...',
|
||||
'Compiling brilliance...': 'Compiling brilliance...',
|
||||
'Loading wit.exe...': 'Loading wit.exe...',
|
||||
'Summoning the cloud of wisdom...': 'Summoning the cloud of wisdom...',
|
||||
'Preparing a witty response...': 'Preparing a witty response...',
|
||||
"Just a sec, I'm debugging reality...":
|
||||
'One moment, optimizing humor...',
|
||||
'Shuffling punchlines...',
|
||||
'Untangling neural nets...',
|
||||
'Compiling brilliance...',
|
||||
'Loading wit.exe...',
|
||||
'Summoning the cloud of wisdom...',
|
||||
'Preparing a witty response...',
|
||||
"Just a sec, I'm debugging reality...",
|
||||
'Confuzzling the options...': 'Confuzzling the options...',
|
||||
'Tuning the cosmic frequencies...': 'Tuning the cosmic frequencies...',
|
||||
'Crafting a response worthy of your patience...':
|
||||
'Confuzzling the options...',
|
||||
'Tuning the cosmic frequencies...',
|
||||
'Crafting a response worthy of your patience...',
|
||||
'Compiling the 1s and 0s...': 'Compiling the 1s and 0s...',
|
||||
'Resolving dependencies... and existential crises...':
|
||||
'Compiling the 1s and 0s...',
|
||||
'Resolving dependencies... and existential crises...',
|
||||
'Defragmenting memories... both RAM and personal...':
|
||||
'Defragmenting memories... both RAM and personal...',
|
||||
'Rebooting the humor module...': 'Rebooting the humor module...',
|
||||
'Caching the essentials (mostly cat memes)...':
|
||||
'Rebooting the humor module...',
|
||||
'Caching the essentials (mostly cat memes)...',
|
||||
'Optimizing for ludicrous speed': 'Optimizing for ludicrous speed',
|
||||
"Swapping bits... don't tell the bytes...":
|
||||
'Optimizing for ludicrous speed',
|
||||
"Swapping bits... don't tell the bytes...",
|
||||
'Garbage collecting... be right back...':
|
||||
'Garbage collecting... be right back...',
|
||||
'Assembling the interwebs...': 'Assembling the interwebs...',
|
||||
'Converting coffee into code...': 'Converting coffee into code...',
|
||||
'Updating the syntax for reality...': 'Updating the syntax for reality...',
|
||||
'Rewiring the synapses...': 'Rewiring the synapses...',
|
||||
'Looking for a misplaced semicolon...':
|
||||
'Assembling the interwebs...',
|
||||
'Converting coffee into code...',
|
||||
'Updating the syntax for reality...',
|
||||
'Rewiring the synapses...',
|
||||
'Looking for a misplaced semicolon...',
|
||||
"Greasin' the cogs of the machine...": "Greasin' the cogs of the machine...",
|
||||
'Pre-heating the servers...': 'Pre-heating the servers...',
|
||||
'Calibrating the flux capacitor...': 'Calibrating the flux capacitor...',
|
||||
'Engaging the improbability drive...': 'Engaging the improbability drive...',
|
||||
'Channeling the Force...': 'Channeling the Force...',
|
||||
'Aligning the stars for optimal response...':
|
||||
"Greasin' the cogs of the machine...",
|
||||
'Pre-heating the servers...',
|
||||
'Calibrating the flux capacitor...',
|
||||
'Engaging the improbability drive...',
|
||||
'Channeling the Force...',
|
||||
'Aligning the stars for optimal response...',
|
||||
'So say we all...': 'So say we all...',
|
||||
'Loading the next great idea...': 'Loading the next great idea...',
|
||||
"Just a moment, I'm in the zone...": "Just a moment, I'm in the zone...",
|
||||
'Preparing to dazzle you with brilliance...':
|
||||
'So say we all...',
|
||||
'Loading the next great idea...',
|
||||
"Just a moment, I'm in the zone...",
|
||||
'Preparing to dazzle you with brilliance...',
|
||||
"Just a tick, I'm polishing my wit...":
|
||||
"Just a tick, I'm polishing my wit...",
|
||||
"Hold tight, I'm crafting a masterpiece...":
|
||||
"Hold tight, I'm crafting a masterpiece...",
|
||||
"Just a jiffy, I'm debugging the universe...":
|
||||
"Just a jiffy, I'm debugging the universe...",
|
||||
"Just a moment, I'm aligning the pixels...":
|
||||
"Just a moment, I'm aligning the pixels...",
|
||||
"Just a sec, I'm optimizing the humor...":
|
||||
"Just a sec, I'm optimizing the humor...",
|
||||
"Just a moment, I'm tuning the algorithms...":
|
||||
"Just a moment, I'm tuning the algorithms...",
|
||||
'Warp speed engaged...': 'Warp speed engaged...',
|
||||
'Mining for more Dilithium crystals...':
|
||||
'Warp speed engaged...',
|
||||
'Mining for more Dilithium crystals...',
|
||||
"Don't panic...": "Don't panic...",
|
||||
'Following the white rabbit...': 'Following the white rabbit...',
|
||||
'The truth is in here... somewhere...':
|
||||
"Don't panic...",
|
||||
'Following the white rabbit...',
|
||||
'The truth is in here... somewhere...',
|
||||
'Blowing on the cartridge...': 'Blowing on the cartridge...',
|
||||
'Loading... Do a barrel roll!': 'Loading... Do a barrel roll!',
|
||||
'Waiting for the respawn...': 'Waiting for the respawn...',
|
||||
'Finishing the Kessel Run in less than 12 parsecs...':
|
||||
'Blowing on the cartridge...',
|
||||
'Loading... Do a barrel roll!',
|
||||
'Waiting for the respawn...',
|
||||
'Finishing the Kessel Run in less than 12 parsecs...',
|
||||
"The cake is not a lie, it's just still loading...":
|
||||
"The cake is not a lie, it's just still loading...",
|
||||
'Fiddling with the character creation screen...':
|
||||
'Fiddling with the character creation screen...',
|
||||
"Just a moment, I'm finding the right meme...":
|
||||
"Just a moment, I'm finding the right meme...",
|
||||
"Pressing 'A' to continue...": "Pressing 'A' to continue...",
|
||||
'Herding digital cats...': 'Herding digital cats...',
|
||||
'Polishing the pixels...': 'Polishing the pixels...',
|
||||
'Finding a suitable loading screen pun...':
|
||||
"Pressing 'A' to continue...",
|
||||
'Herding digital cats...',
|
||||
'Polishing the pixels...',
|
||||
'Finding a suitable loading screen pun...',
|
||||
'Distracting you with this witty phrase...':
|
||||
'Distracting you with this witty phrase...',
|
||||
'Almost there... probably...': 'Almost there... probably...',
|
||||
'Our hamsters are working as fast as they can...':
|
||||
'Almost there... probably...',
|
||||
'Our hamsters are working as fast as they can...',
|
||||
'Giving Cloudy a pat on the head...': 'Giving Cloudy a pat on the head...',
|
||||
'Petting the cat...': 'Petting the cat...',
|
||||
'Rickrolling my boss...': 'Rickrolling my boss...',
|
||||
'Never gonna give you up, never gonna let you down...':
|
||||
'Giving Cloudy a pat on the head...',
|
||||
'Petting the cat...',
|
||||
'Rickrolling my boss...',
|
||||
'Never gonna give you up, never gonna let you down...',
|
||||
'Slapping the bass...': 'Slapping the bass...',
|
||||
'Tasting the snozberries...': 'Tasting the snozberries...',
|
||||
"I'm going the distance, I'm going for speed...":
|
||||
'Slapping the bass...',
|
||||
'Tasting the snozberries...',
|
||||
"I'm going the distance, I'm going for speed...",
|
||||
'Is this the real life? Is this just fantasy?...':
|
||||
'Is this the real life? Is this just fantasy?...',
|
||||
"I've got a good feeling about this...":
|
||||
"I've got a good feeling about this...",
|
||||
'Poking the bear...': 'Poking the bear...',
|
||||
'Doing research on the latest memes...':
|
||||
'Poking the bear...',
|
||||
'Doing research on the latest memes...',
|
||||
'Figuring out how to make this more witty...':
|
||||
'Figuring out how to make this more witty...',
|
||||
'Hmmm... let me think...': 'Hmmm... let me think...',
|
||||
'What do you call a fish with no eyes? A fsh...':
|
||||
'Hmmm... let me think...',
|
||||
'What do you call a fish with no eyes? A fsh...',
|
||||
'Why did the computer go to therapy? It had too many bytes...':
|
||||
'Why did the computer go to therapy? It had too many bytes...',
|
||||
"Why don't programmers like nature? It has too many bugs...":
|
||||
"Why don't programmers like nature? It has too many bugs...",
|
||||
'Why do programmers prefer dark mode? Because light attracts bugs...':
|
||||
'Why do programmers prefer dark mode? Because light attracts bugs...',
|
||||
'Why did the developer go broke? Because they used up all their cache...':
|
||||
'Why did the developer go broke? Because they used up all their cache...',
|
||||
"What can you do with a broken pencil? Nothing, it's pointless...":
|
||||
"What can you do with a broken pencil? Nothing, it's pointless...",
|
||||
'Applying percussive maintenance...': 'Applying percussive maintenance...',
|
||||
'Searching for the correct USB orientation...':
|
||||
'Applying percussive maintenance...',
|
||||
'Searching for the correct USB orientation...',
|
||||
'Ensuring the magic smoke stays inside the wires...':
|
||||
'Ensuring the magic smoke stays inside the wires...',
|
||||
'Rewriting in Rust for no particular reason...':
|
||||
'Rewriting in Rust for no particular reason...',
|
||||
'Trying to exit Vim...': 'Trying to exit Vim...',
|
||||
'Spinning up the hamster wheel...': 'Spinning up the hamster wheel...',
|
||||
"That's not a bug, it's an undocumented feature...":
|
||||
'Trying to exit Vim...',
|
||||
'Spinning up the hamster wheel...',
|
||||
"That's not a bug, it's an undocumented feature...",
|
||||
'Engage.': 'Engage.',
|
||||
"I'll be back... with an answer.": "I'll be back... with an answer.",
|
||||
'My other process is a TARDIS...': 'My other process is a TARDIS...',
|
||||
'Communing with the machine spirit...':
|
||||
'Engage.',
|
||||
"I'll be back... with an answer.",
|
||||
'My other process is a TARDIS...',
|
||||
'Communing with the machine spirit...',
|
||||
'Letting the thoughts marinate...': 'Letting the thoughts marinate...',
|
||||
'Just remembered where I put my keys...':
|
||||
'Letting the thoughts marinate...',
|
||||
'Just remembered where I put my keys...',
|
||||
'Pondering the orb...': 'Pondering the orb...',
|
||||
"I've seen things you people wouldn't believe... like a user who reads loading messages.":
|
||||
'Pondering the orb...',
|
||||
"I've seen things you people wouldn't believe... like a user who reads loading messages.",
|
||||
'Initiating thoughtful gaze...': 'Initiating thoughtful gaze...',
|
||||
"What's a computer's favorite snack? Microchips.":
|
||||
'Initiating thoughtful gaze...',
|
||||
"What's a computer's favorite snack? Microchips.",
|
||||
"Why do Java developers wear glasses? Because they don't C#.":
|
||||
"Why do Java developers wear glasses? Because they don't C#.",
|
||||
'Charging the laser... pew pew!': 'Charging the laser... pew pew!',
|
||||
'Dividing by zero... just kidding!': 'Dividing by zero... just kidding!',
|
||||
'Looking for an adult superviso... I mean, processing.':
|
||||
'Charging the laser... pew pew!',
|
||||
'Dividing by zero... just kidding!',
|
||||
'Looking for an adult superviso... I mean, processing.',
|
||||
'Making it go beep boop.': 'Making it go beep boop.',
|
||||
'Buffering... because even AIs need a moment.':
|
||||
'Making it go beep boop.',
|
||||
'Buffering... because even AIs need a moment.',
|
||||
'Entangling quantum particles for a faster response...':
|
||||
'Entangling quantum particles for a faster response...',
|
||||
'Polishing the chrome... on the algorithms.':
|
||||
'Polishing the chrome... on the algorithms.',
|
||||
'Are you not entertained? (Working on it!)':
|
||||
'Are you not entertained? (Working on it!)',
|
||||
'Summoning the code gremlins... to help, of course.':
|
||||
'Summoning the code gremlins... to help, of course.',
|
||||
'Just waiting for the dial-up tone to finish...':
|
||||
'Just waiting for the dial-up tone to finish...',
|
||||
'Recalibrating the humor-o-meter.': 'Recalibrating the humor-o-meter.',
|
||||
'My other loading screen is even funnier.':
|
||||
'Recalibrating the humor-o-meter.',
|
||||
'My other loading screen is even funnier.',
|
||||
"Pretty sure there's a cat walking on the keyboard somewhere...":
|
||||
"Pretty sure there's a cat walking on the keyboard somewhere...",
|
||||
'Enhancing... Enhancing... Still loading.':
|
||||
'Enhancing... Enhancing... Still loading.',
|
||||
"It's not a bug, it's a feature... of this loading screen.":
|
||||
"It's not a bug, it's a feature... of this loading screen.",
|
||||
'Have you tried turning it off and on again? (The loading screen, not me.)':
|
||||
'Have you tried turning it off and on again? (The loading screen, not me.)',
|
||||
'Constructing additional pylons...': 'Constructing additional pylons...',
|
||||
'Constructing additional pylons...',
|
||||
],
|
||||
};
|
||||
|
||||
@@ -103,8 +103,8 @@ export default {
|
||||
'Theme "{{themeName}}" not found.': 'Тема "{{themeName}}" не найдена.',
|
||||
'Theme "{{themeName}}" not found in selected scope.':
|
||||
'Тема "{{themeName}}" не найдена в выбранной области.',
|
||||
'clear the screen and conversation history':
|
||||
'Очистка экрана и истории диалога',
|
||||
'Clear conversation history and free up context':
|
||||
'Очистить историю диалога и освободить контекст',
|
||||
'Compresses the context by replacing it with a summary.':
|
||||
'Сжатие контекста заменой на краткую сводку',
|
||||
'open full Qwen Code documentation in your browser':
|
||||
@@ -260,7 +260,8 @@ export default {
|
||||
', Tab to change focus': ', Tab для смены фокуса',
|
||||
'To see changes, Qwen Code must be restarted. Press r to exit and apply changes now.':
|
||||
'Для применения изменений необходимо перезапустить Qwen Code. Нажмите r для выхода и применения изменений.',
|
||||
|
||||
'The command "/{{command}}" is not supported in non-interactive mode.':
|
||||
'Команда "/{{command}}" не поддерживается в неинтерактивном режиме.',
|
||||
// ============================================================================
|
||||
// Метки настроек
|
||||
// ============================================================================
|
||||
@@ -313,6 +314,7 @@ export default {
|
||||
'Tool Output Truncation Lines': 'Лимит строк вывода инструментов',
|
||||
'Folder Trust': 'Доверие к папке',
|
||||
'Vision Model Preview': 'Визуальная модель (предпросмотр)',
|
||||
'Tool Schema Compliance': 'Соответствие схеме инструмента',
|
||||
// Варианты перечислений настроек
|
||||
'Auto (detect from system)': 'Авто (определить из системы)',
|
||||
Text: 'Текст',
|
||||
@@ -341,8 +343,8 @@ export default {
|
||||
'Установка предпочитаемого внешнего редактора',
|
||||
'Manage extensions': 'Управление расширениями',
|
||||
'List active extensions': 'Показать активные расширения',
|
||||
'Update extensions. Usage: update |--all':
|
||||
'Обновить расширения. Использование: update |--all',
|
||||
'Update extensions. Usage: update <extension-names>|--all':
|
||||
'Обновить расширения. Использование: update <extension-names>|--all',
|
||||
'manage IDE integration': 'Управление интеграцией с IDE',
|
||||
'check status of IDE integration': 'Проверить статус интеграции с IDE',
|
||||
'install required IDE companion for {{ideName}}':
|
||||
@@ -400,7 +402,8 @@ export default {
|
||||
'Set LLM output language': 'Установка языка вывода LLM',
|
||||
'Usage: /language ui [zh-CN|en-US]':
|
||||
'Использование: /language ui [zh-CN|en-US|ru-RU]',
|
||||
'Usage: /language output ': 'Использование: /language output ',
|
||||
'Usage: /language output <language>':
|
||||
'Использование: /language output <language>',
|
||||
'Example: /language output 中文': 'Пример: /language output 中文',
|
||||
'Example: /language output English': 'Пример: /language output English',
|
||||
'Example: /language output 日本語': 'Пример: /language output 日本語',
|
||||
@@ -417,9 +420,8 @@ export default {
|
||||
'To request additional UI language packs, please open an issue on GitHub.':
|
||||
'Для запроса дополнительных языковых пакетов интерфейса, пожалуйста, создайте обращение на GitHub.',
|
||||
'Available options:': 'Доступные варианты:',
|
||||
' - zh-CN: Simplified Chinese': ' - zh-CN: Упрощенный китайский',
|
||||
' - en-US: English': ' - en-US: Английский',
|
||||
' - ru-RU: Russian': ' - ru-RU: Русский',
|
||||
' - zh-CN: Simplified Chinese': ' - zh-CN: Упрощенный китайский',
|
||||
' - en-US: English': ' - en-US: Английский',
|
||||
'Set UI language to Simplified Chinese (zh-CN)':
|
||||
'Установить язык интерфейса на упрощенный китайский (zh-CN)',
|
||||
'Set UI language to English (en-US)':
|
||||
@@ -435,8 +437,8 @@ export default {
|
||||
'Режим подтверждения изменен на: {{mode}}',
|
||||
'Approval mode changed to: {{mode}} (saved to {{scope}} settings{{location}})':
|
||||
'Режим подтверждения изменен на: {{mode}} (сохранено в настройках {{scope}}{{location}})',
|
||||
'Usage: /approval-mode [--session|--user|--project]':
|
||||
'Использование: /approval-mode [--session|--user|--project]',
|
||||
'Usage: /approval-mode <mode> [--session|--user|--project]':
|
||||
'Использование: /approval-mode <mode> [--session|--user|--project]',
|
||||
'Scope subcommands do not accept additional arguments.':
|
||||
'Подкоманды области не принимают дополнительных аргументов.',
|
||||
'Plan mode - Analyze only, do not modify files or execute commands':
|
||||
@@ -588,8 +590,8 @@ export default {
|
||||
'Ошибка при экспорте диалога: {{error}}',
|
||||
'Conversation shared to {{filePath}}': 'Диалог экспортирован в {{filePath}}',
|
||||
'No conversation found to share.': 'Нет диалога для экспорта.',
|
||||
'Share the current conversation to a markdown or json file. Usage: /chat share <путь-к-файлу>':
|
||||
'Экспортировать текущий диалог в markdown или json файл. Использование: /chat share <путь-к-файлу>',
|
||||
'Share the current conversation to a markdown or json file. Usage: /chat share <file>':
|
||||
'Экспортировать текущий диалог в markdown или json файл. Использование: /chat share <файл>',
|
||||
|
||||
// ============================================================================
|
||||
// Команды - Резюме
|
||||
@@ -604,6 +606,12 @@ export default {
|
||||
'Не найдено диалогов для создания сводки.',
|
||||
'Failed to generate project context summary: {{error}}':
|
||||
'Не удалось сгенерировать сводку контекста проекта: {{error}}',
|
||||
'Saved project summary to {{filePathForDisplay}}.':
|
||||
'Сводка проекта сохранена в {{filePathForDisplay}}',
|
||||
'Saving project summary...': 'Сохранение сводки проекта...',
|
||||
'Generating project summary...': 'Генерация сводки проекта...',
|
||||
'Failed to generate summary - no text content received from LLM response':
|
||||
'Не удалось сгенерировать сводку - не получен текстовый контент из ответа LLM',
|
||||
|
||||
// ============================================================================
|
||||
// Команды - Модель
|
||||
@@ -618,8 +626,9 @@ export default {
|
||||
// ============================================================================
|
||||
// Команды - Очистка
|
||||
// ============================================================================
|
||||
'Clearing terminal and resetting chat.': 'Очистка терминала и сброс чата.',
|
||||
'Clearing terminal.': 'Очистка терминала.',
|
||||
'Starting a new session, resetting chat, and clearing terminal.':
|
||||
'Начало новой сессии, сброс чата и очистка терминала.',
|
||||
'Starting a new session and clearing.': 'Начало новой сессии и очистка.',
|
||||
|
||||
// ============================================================================
|
||||
// Команды - Сжатие
|
||||
@@ -650,8 +659,8 @@ export default {
|
||||
'Команда /directory add не поддерживается в ограничительных профилях песочницы. Пожалуйста, используйте --include-directories при запуске сессии.',
|
||||
"Error adding '{{path}}': {{error}}":
|
||||
"Ошибка при добавлении '{{path}}': {{error}}",
|
||||
'Successfully added GEMINI.md files from the following directories if there are:\n- {{directories}}':
|
||||
'Успешно добавлены файлы GEMINI.md из следующих директорий (если они есть):\n- {{directories}}',
|
||||
'Successfully added QWEN.md files from the following directories if there are:\n- {{directories}}':
|
||||
'Успешно добавлены файлы QWEN.md из следующих директорий (если они есть):\n- {{directories}}',
|
||||
'Error refreshing memory: {{error}}':
|
||||
'Ошибка при обновлении памяти: {{error}}',
|
||||
'Successfully added directories:\n- {{directories}}':
|
||||
@@ -884,6 +893,7 @@ export default {
|
||||
// Экран выхода / Статистика
|
||||
// ============================================================================
|
||||
'Agent powering down. Goodbye!': 'Агент завершает работу. До свидания!',
|
||||
'To continue this session, run': 'Для продолжения этой сессии, выполните',
|
||||
'Interaction Summary': 'Сводка взаимодействия',
|
||||
'Session ID:': 'ID сессии:',
|
||||
'Tool Calls:': 'Вызовы инструментов:',
|
||||
@@ -943,179 +953,140 @@ export default {
|
||||
'Waiting for user confirmation...':
|
||||
'Ожидание подтверждения от пользователя...',
|
||||
'(esc to cancel, {{time}})': '(esc для отмены, {{time}})',
|
||||
"I'm Feeling Lucky": 'Мне повезёт!',
|
||||
'Shipping awesomeness... ': 'Доставляем крутизну... ',
|
||||
'Painting the serifs back on...': 'Рисуем засечки на буквах...',
|
||||
'Navigating the slime mold...': 'Пробираемся через слизевиков..',
|
||||
'Consulting the digital spirits...': 'Советуемся с цифровыми духами...',
|
||||
'Reticulating splines...': 'Сглаживание сплайнов...',
|
||||
'Warming up the AI hamsters...': 'Разогреваем ИИ-хомячков...',
|
||||
'Asking the magic conch shell...': 'Спрашиваем волшебную ракушку...',
|
||||
'Generating witty retort...': 'Генерируем остроумный ответ...',
|
||||
'Polishing the algorithms...': 'Полируем алгоритмы...',
|
||||
"Don't rush perfection (or my code)...":
|
||||
|
||||
// ============================================================================
|
||||
|
||||
// ============================================================================
|
||||
// Loading Phrases
|
||||
// ============================================================================
|
||||
WITTY_LOADING_PHRASES: [
|
||||
'Мне повезёт!',
|
||||
'Доставляем крутизну... ',
|
||||
'Рисуем засечки на буквах...',
|
||||
'Пробираемся через слизевиков..',
|
||||
'Советуемся с цифровыми духами...',
|
||||
'Сглаживание сплайнов...',
|
||||
'Разогреваем ИИ-хомячков...',
|
||||
'Спрашиваем волшебную ракушку...',
|
||||
'Генерируем остроумный ответ...',
|
||||
'Полируем алгоритмы...',
|
||||
'Не торопите совершенство (или мой код)...',
|
||||
'Brewing fresh bytes...': 'Завариваем свежие байты...',
|
||||
'Counting electrons...': 'Пересчитываем электроны...',
|
||||
'Engaging cognitive processors...': 'Задействуем когнитивные процессоры...',
|
||||
'Checking for syntax errors in the universe...':
|
||||
'Завариваем свежие байты...',
|
||||
'Пересчитываем электроны...',
|
||||
'Задействуем когнитивные процессоры...',
|
||||
'Ищем синтаксические ошибки во вселенной...',
|
||||
'One moment, optimizing humor...': 'Секундочку, оптимизируем юмор...',
|
||||
'Shuffling punchlines...': 'Перетасовываем панчлайны...',
|
||||
'Untangling neural nets...': 'Распутаваем нейросети...',
|
||||
'Compiling brilliance...': 'Компилируем гениальность...',
|
||||
'Loading wit.exe...': 'Загружаем yumor.exe...',
|
||||
'Summoning the cloud of wisdom...': 'Призываем облако мудрости...',
|
||||
'Preparing a witty response...': 'Готовим остроумный ответ...',
|
||||
"Just a sec, I'm debugging reality...": 'Секунду, идёт отладка реальности...',
|
||||
'Confuzzling the options...': 'Запутываем варианты...',
|
||||
'Tuning the cosmic frequencies...': 'Настраиваем космические частоты...',
|
||||
'Crafting a response worthy of your patience...':
|
||||
'Секундочку, оптимизируем юмор...',
|
||||
'Перетасовываем панчлайны...',
|
||||
'Распутаваем нейросети...',
|
||||
'Компилируем гениальность...',
|
||||
'Загружаем yumor.exe...',
|
||||
'Призываем облако мудрости...',
|
||||
'Готовим остроумный ответ...',
|
||||
'Секунду, идёт отладка реальности...',
|
||||
'Запутываем варианты...',
|
||||
'Настраиваем космические частоты...',
|
||||
'Создаем ответ, достойный вашего терпения...',
|
||||
'Compiling the 1s and 0s...': 'Компилируем единички и нолики...',
|
||||
'Resolving dependencies... and existential crises...':
|
||||
'Компилируем единички и нолики...',
|
||||
'Разрешаем зависимости... и экзистенциальные кризисы...',
|
||||
'Defragmenting memories... both RAM and personal...':
|
||||
'Дефрагментация памяти... и оперативной, и личной...',
|
||||
'Rebooting the humor module...': 'Перезагрузка модуля юмора...',
|
||||
'Caching the essentials (mostly cat memes)...':
|
||||
'Перезагрузка модуля юмора...',
|
||||
'Кэшируем самое важное (в основном мемы с котиками)...',
|
||||
'Optimizing for ludicrous speed': 'Оптимизация для безумной скорости',
|
||||
"Swapping bits... don't tell the bytes...":
|
||||
'Оптимизация для безумной скорости',
|
||||
'Меняем биты... только байтам не говорите...',
|
||||
'Garbage collecting... be right back...': 'Сборка мусора... скоро вернусь...',
|
||||
'Assembling the interwebs...': 'Сборка интернетов...',
|
||||
'Converting coffee into code...': 'Превращаем кофе в код...',
|
||||
'Updating the syntax for reality...': 'Обновляем синтаксис реальности...',
|
||||
'Rewiring the synapses...': 'Переподключаем синапсы...',
|
||||
'Looking for a misplaced semicolon...': 'Ищем лишнюю точку с запятой...',
|
||||
"Greasin' the cogs of the machine...": 'Смазываем шестерёнки машины...',
|
||||
'Pre-heating the servers...': 'Разогреваем серверы...',
|
||||
'Calibrating the flux capacitor...': 'Калибруем потоковый накопитель...',
|
||||
'Engaging the improbability drive...': 'Включаем двигатель невероятности...',
|
||||
'Channeling the Force...': 'Направляем Силу...',
|
||||
'Aligning the stars for optimal response...':
|
||||
'Сборка мусора... скоро вернусь...',
|
||||
'Сборка интернетов...',
|
||||
'Превращаем кофе в код...',
|
||||
'Обновляем синтаксис реальности...',
|
||||
'Переподключаем синапсы...',
|
||||
'Ищем лишнюю точку с запятой...',
|
||||
'Смазываем шестерёнки машины...',
|
||||
'Разогреваем серверы...',
|
||||
'Калибруем потоковый накопитель...',
|
||||
'Включаем двигатель невероятности...',
|
||||
'Направляем Силу...',
|
||||
'Выравниваем звёзды для оптимального ответа...',
|
||||
'So say we all...': 'Так скажем мы все...',
|
||||
'Loading the next great idea...': 'Загрузка следующей великой идеи...',
|
||||
"Just a moment, I'm in the zone...": 'Минутку, я в потоке...',
|
||||
'Preparing to dazzle you with brilliance...':
|
||||
'Так скажем мы все...',
|
||||
'Загрузка следующей великой идеи...',
|
||||
'Минутку, я в потоке...',
|
||||
'Готовлюсь ослепить вас гениальностью...',
|
||||
"Just a tick, I'm polishing my wit...": 'Секунду, полирую остроумие...',
|
||||
"Hold tight, I'm crafting a masterpiece...": 'Держитесь, создаю шедевр...',
|
||||
"Just a jiffy, I'm debugging the universe...":
|
||||
'Секунду, полирую остроумие...',
|
||||
'Держитесь, создаю шедевр...',
|
||||
'Мигом, отлаживаю вселенную...',
|
||||
"Just a moment, I'm aligning the pixels...": 'Момент, выравниваю пиксели...',
|
||||
"Just a sec, I'm optimizing the humor...": 'Секунду, оптимизирую юмор...',
|
||||
"Just a moment, I'm tuning the algorithms...":
|
||||
'Момент, выравниваю пиксели...',
|
||||
'Секунду, оптимизирую юмор...',
|
||||
'Момент, настраиваю алгоритмы...',
|
||||
'Warp speed engaged...': 'Варп-скорость включена...',
|
||||
'Mining for more Dilithium crystals...': 'Добываем кристаллы дилития...',
|
||||
"Don't panic...": 'Без паники...',
|
||||
'Following the white rabbit...': 'Следуем за белым кроликом...',
|
||||
'The truth is in here... somewhere...': 'Истина где-то здесь... внутри...',
|
||||
'Blowing on the cartridge...': 'Продуваем картридж...',
|
||||
'Loading... Do a barrel roll!': 'Загрузка... Сделай бочку!',
|
||||
'Waiting for the respawn...': 'Ждем респауна...',
|
||||
'Finishing the Kessel Run in less than 12 parsecs...':
|
||||
'Варп-прыжок активирован...',
|
||||
'Добываем кристаллы дилития...',
|
||||
'Без паники...',
|
||||
'Следуем за белым кроликом...',
|
||||
'Истина где-то здесь... внутри...',
|
||||
'Продуваем картридж...',
|
||||
'Загрузка... Сделай бочку!',
|
||||
'Ждем респауна...',
|
||||
'Делаем Дугу Кесселя менее чем за 12 парсеков...',
|
||||
"The cake is not a lie, it's just still loading...":
|
||||
'Тортик — не ложь, он просто ещё грузится...',
|
||||
'Fiddling with the character creation screen...':
|
||||
'Возимся с экраном создания персонажа...',
|
||||
"Just a moment, I'm finding the right meme...":
|
||||
'Минутку, ищу подходящий мем...',
|
||||
"Pressing 'A' to continue...": "Нажимаем 'A' для продолжения...",
|
||||
'Herding digital cats...': 'Пасём цифровых котов...',
|
||||
'Polishing the pixels...': 'Полируем пиксели...',
|
||||
'Finding a suitable loading screen pun...':
|
||||
"Нажимаем 'A' для продолжения...",
|
||||
'Пасём цифровых котов...',
|
||||
'Полируем пиксели...',
|
||||
'Ищем подходящий каламбур для экрана загрузки...',
|
||||
'Distracting you with this witty phrase...':
|
||||
'Отвлекаем вас этой остроумной фразой...',
|
||||
'Almost there... probably...': 'Почти готово... вроде...',
|
||||
'Our hamsters are working as fast as they can...':
|
||||
'Почти готово... вроде...',
|
||||
'Наши хомячки работают изо всех сил...',
|
||||
'Giving Cloudy a pat on the head...': 'Гладим Облачко по голове...',
|
||||
'Petting the cat...': 'Гладим кота...',
|
||||
'Rickrolling my boss...': 'Рикроллим начальника...',
|
||||
'Never gonna give you up, never gonna let you down...':
|
||||
'Гладим Облачко по голове...',
|
||||
'Гладим кота...',
|
||||
'Рикроллим начальника...',
|
||||
'Never gonna give you up, never gonna let you down...',
|
||||
'Slapping the bass...': 'Лабаем бас-гитару...',
|
||||
'Tasting the snozberries...': 'Пробуем снузберри на вкус...',
|
||||
"I'm going the distance, I'm going for speed...":
|
||||
'Лабаем бас-гитару...',
|
||||
'Пробуем снузберри на вкус...',
|
||||
'Иду до конца, иду на скорость...',
|
||||
'Is this the real life? Is this just fantasy?...':
|
||||
'Is this the real life? Is this just fantasy?...',
|
||||
"I've got a good feeling about this...": 'У меня хорошее предчувствие...',
|
||||
'Poking the bear...': 'Дразним медведя... (Не лезь...)',
|
||||
'Doing research on the latest memes...': 'Изучаем свежие мемы...',
|
||||
'Figuring out how to make this more witty...':
|
||||
'У меня хорошее предчувствие...',
|
||||
'Дразним медведя... (Не лезь...)',
|
||||
'Изучаем свежие мемы...',
|
||||
'Думаем, как сделать это остроумнее...',
|
||||
'Hmmm... let me think...': 'Хмм... дайте подумать...',
|
||||
'What do you call a fish with no eyes? A fsh...':
|
||||
'Хмм... дайте подумать...',
|
||||
'Как называется бумеранг, который не возвращается? Палка...',
|
||||
'Why did the computer go to therapy? It had too many bytes...':
|
||||
'Почему компьютер простудился? Потому что оставил окна открытыми...',
|
||||
"Why don't programmers like nature? It has too many bugs...":
|
||||
'Почему программисты не любят гулять на улице? Там среда не настроена...',
|
||||
'Why do programmers prefer dark mode? Because light attracts bugs...':
|
||||
'Почему программисты предпочитают тёмную тему? Потому что в темноте не видно багов...',
|
||||
'Why did the developer go broke? Because they used up all their cache...':
|
||||
'Почему разработчик разорился? Потому что потратил весь свой кэш...',
|
||||
"What can you do with a broken pencil? Nothing, it's pointless...":
'Что можно делать со сломанным карандашом? Ничего — он тупой...',
'Applying percussive maintenance...': 'Провожу настройку методом тыка...',
'Searching for the correct USB orientation...':
'Провожу настройку методом тыка...',
'Ищем, какой стороной вставлять флешку...',
'Ensuring the magic smoke stays inside the wires...':
'Следим, чтобы волшебный дым не вышел из проводов...',
'Rewriting in Rust for no particular reason...':
'Переписываем всё на Rust без особой причины...',
'Trying to exit Vim...': 'Пытаемся выйти из Vim...',
'Spinning up the hamster wheel...': 'Раскручиваем колесо для хомяка...',
"That's not a bug, it's an undocumented feature...": 'Это не баг, а фича...',
'Engage.': 'Поехали!',
"I'll be back... with an answer.": 'Я вернусь... с ответом.',
'My other process is a TARDIS...': 'Мой другой процесс — это ТАРДИС...',
'Communing with the machine spirit...': 'Общаемся с духом машины...',
'Letting the thoughts marinate...': 'Даем мыслям замариноваться...',
'Just remembered where I put my keys...':
'Пытаемся выйти из Vim...',
'Раскручиваем колесо для хомяка...',
'Это не баг, а фича...',
'Поехали!',
'Я вернусь... с ответом.',
'Мой другой процесс — это ТАРДИС...',
'Общаемся с духом машины...',
'Даем мыслям замариноваться...',
'Только что вспомнил, куда положил ключи...',
'Pondering the orb...': 'Размышляю над сферой...',
"I've seen things you people wouldn't believe... like a user who reads loading messages.":
'Я видел такое, во что вы, люди, просто не поверите... например, пользователя, читающего сообщения загрузки.',
'Initiating thoughtful gaze...': 'Инициируем задумчивый взгляд...',
"What's a computer's favorite snack? Microchips.":
'Размышляю над сферой...',
'Я видел такое, что вам, людям, и не снилось... пользователя, читающего эти сообщения.',
'Инициируем задумчивый взгляд...',
'Что сервер заказывает в баре? Пинг-коладу.',
"Why do Java developers wear glasses? Because they don't C#.":
'Почему Java-разработчики не убираются дома? Они ждут сборщик мусора...',
'Charging the laser... pew pew!': 'Заряжаем лазер... пиу-пиу!',
'Dividing by zero... just kidding!': 'Делим на ноль... шучу!',
'Looking for an adult superviso... I mean, processing.':
'Заряжаем лазер... пиу-пиу!',
'Делим на ноль... шучу!',
'Ищу взрослых для присмот... в смысле, обрабатываю.',
'Making it go beep boop.': 'Делаем бип-буп.',
'Buffering... because even AIs need a moment.':
'Буферизация... даже ИИ нужно мгновение.',
'Entangling quantum particles for a faster response...':
'Делаем бип-буп.',
'Буферизация... даже ИИ нужно время подумать.',
'Запутываем квантовые частицы для быстрого ответа...',
'Polishing the chrome... on the algorithms.':
'Полируем хром... на алгоритмах.',
'Are you not entertained? (Working on it!)':
'Вы ещё не развлеклись?! Разве вы не за этим сюда пришли?!',
'Summoning the code gremlins... to help, of course.':
'Призываем гремлинов кода... для помощи, конечно же.',
'Just waiting for the dial-up tone to finish...':
'Ждем, пока закончится звук dial-up модема...',
'Recalibrating the humor-o-meter.': 'Перекалибровка юморометра.',
'My other loading screen is even funnier.':
'Перекалибровка юморометра.',
'Мой другой экран загрузки ещё смешнее.',
"Pretty sure there's a cat walking on the keyboard somewhere...":
'Кажется, где-то по клавиатуре гуляет кот...',
'Enhancing... Enhancing... Still loading.':
'Улучшаем... Ещё улучшаем... Всё ещё грузится.',
"It's not a bug, it's a feature... of this loading screen.":
'Это не баг, это фича... экрана загрузки.',
'Have you tried turning it off and on again? (The loading screen, not me.)':
'Пробовали выключить и включить снова? (Экран загрузки, не меня!)',
'Constructing additional pylons...': 'Нужно построить больше пилонов...',
'Нужно построить больше пилонов...',
],
};

@@ -101,7 +101,7 @@ export default {
'Theme "{{themeName}}" not found.': '未找到主题 "{{themeName}}"。',
'Theme "{{themeName}}" not found in selected scope.':
'在所选作用域中未找到主题 "{{themeName}}"。',
'clear the screen and conversation history': '清屏并清除对话历史',
'Clear conversation history and free up context': '清除对话历史并释放上下文',
'Compresses the context by replacing it with a summary.':
'通过用摘要替换来压缩上下文',
'open full Qwen Code documentation in your browser':
@@ -249,6 +249,8 @@ export default {
', Tab to change focus': ',Tab 切换焦点',
'To see changes, Qwen Code must be restarted. Press r to exit and apply changes now.':
'要查看更改,必须重启 Qwen Code。按 r 退出并立即应用更改。',
'The command "/{{command}}" is not supported in non-interactive mode.':
'不支持在非交互模式下使用命令 "/{{command}}"。',
// ============================================================================
// Settings Labels
// ============================================================================
@@ -560,6 +562,12 @@ export default {
'No conversation found to summarize.': '未找到要总结的对话',
'Failed to generate project context summary: {{error}}':
'生成项目上下文摘要失败:{{error}}',
'Saved project summary to {{filePathForDisplay}}.':
'项目摘要已保存到 {{filePathForDisplay}}',
'Saving project summary...': '正在保存项目摘要...',
'Generating project summary...': '正在生成项目摘要...',
'Failed to generate summary - no text content received from LLM response':
'生成摘要失败 - 未从 LLM 响应中接收到文本内容',

// ============================================================================
// Commands - Model
@@ -573,8 +581,9 @@
// ============================================================================
// Commands - Clear
// ============================================================================
'Clearing terminal and resetting chat.': '正在清屏并重置聊天',
'Clearing terminal.': '正在清屏',
'Starting a new session, resetting chat, and clearing terminal.':
'正在开始新会话,重置聊天并清屏。',
'Starting a new session and clearing.': '正在开始新会话并清屏。',

// ============================================================================
// Commands - Compress
@@ -880,165 +889,39 @@ export default {
|
||||
// ============================================================================
|
||||
'Waiting for user confirmation...': '等待用户确认...',
|
||||
'(esc to cancel, {{time}})': '(按 esc 取消,{{time}})',
|
||||
"I'm Feeling Lucky": '我感觉很幸运',
|
||||
'Shipping awesomeness... ': '正在运送精彩内容... ',
|
||||
'Painting the serifs back on...': '正在重新绘制衬线...',
|
||||
'Navigating the slime mold...': '正在导航粘液霉菌...',
|
||||
'Consulting the digital spirits...': '正在咨询数字精灵...',
|
||||
'Reticulating splines...': '正在网格化样条曲线...',
|
||||
'Warming up the AI hamsters...': '正在预热 AI 仓鼠...',
|
||||
'Asking the magic conch shell...': '正在询问魔法海螺壳...',
|
||||
'Generating witty retort...': '正在生成机智的反驳...',
|
||||
'Polishing the algorithms...': '正在打磨算法...',
|
||||
"Don't rush perfection (or my code)...": '不要急于追求完美(或我的代码)...',
|
||||
'Brewing fresh bytes...': '正在酿造新鲜字节...',
|
||||
'Counting electrons...': '正在计算电子...',
|
||||
'Engaging cognitive processors...': '正在启动认知处理器...',
|
||||
'Checking for syntax errors in the universe...':
|
||||
'正在检查宇宙中的语法错误...',
|
||||
'One moment, optimizing humor...': '稍等片刻,正在优化幽默感...',
|
||||
'Shuffling punchlines...': '正在洗牌笑点...',
|
||||
'Untangling neural nets...': '正在解开神经网络...',
|
||||
'Compiling brilliance...': '正在编译智慧...',
|
||||
'Loading wit.exe...': '正在加载 wit.exe...',
|
||||
'Summoning the cloud of wisdom...': '正在召唤智慧云...',
|
||||
'Preparing a witty response...': '正在准备机智的回复...',
|
||||
"Just a sec, I'm debugging reality...": '稍等片刻,我正在调试现实...',
|
||||
'Confuzzling the options...': '正在混淆选项...',
|
||||
'Tuning the cosmic frequencies...': '正在调谐宇宙频率...',
|
||||
'Crafting a response worthy of your patience...':
|
||||
'正在制作值得您耐心等待的回复...',
|
||||
'Compiling the 1s and 0s...': '正在编译 1 和 0...',
|
||||
'Resolving dependencies... and existential crises...':
|
||||
'正在解决依赖关系...和存在主义危机...',
|
||||
'Defragmenting memories... both RAM and personal...':
|
||||
'正在整理记忆碎片...包括 RAM 和个人记忆...',
|
||||
'Rebooting the humor module...': '正在重启幽默模块...',
|
||||
'Caching the essentials (mostly cat memes)...':
|
||||
'正在缓存必需品(主要是猫咪表情包)...',
|
||||
'Optimizing for ludicrous speed': '正在优化到荒谬的速度',
|
||||
"Swapping bits... don't tell the bytes...": '正在交换位...不要告诉字节...',
|
||||
'Garbage collecting... be right back...': '正在垃圾回收...马上回来...',
|
||||
'Assembling the interwebs...': '正在组装互联网...',
|
||||
'Converting coffee into code...': '正在将咖啡转换为代码...',
|
||||
'Updating the syntax for reality...': '正在更新现实的语法...',
|
||||
'Rewiring the synapses...': '正在重新连接突触...',
|
||||
'Looking for a misplaced semicolon...': '正在寻找放错位置的分号...',
|
||||
"Greasin' the cogs of the machine...": '正在给机器的齿轮上油...',
|
||||
'Pre-heating the servers...': '正在预热服务器...',
|
||||
'Calibrating the flux capacitor...': '正在校准通量电容器...',
|
||||
'Engaging the improbability drive...': '正在启动不可能性驱动器...',
|
||||
'Channeling the Force...': '正在引导原力...',
|
||||
'Aligning the stars for optimal response...': '正在对齐星星以获得最佳回复...',
|
||||
'So say we all...': '我们都说...',
|
||||
'Loading the next great idea...': '正在加载下一个伟大的想法...',
|
||||
"Just a moment, I'm in the zone...": '稍等片刻,我正进入状态...',
|
||||
'Preparing to dazzle you with brilliance...': '正在准备用智慧让您眼花缭乱...',
|
||||
"Just a tick, I'm polishing my wit...": '稍等片刻,我正在打磨我的智慧...',
|
||||
"Hold tight, I'm crafting a masterpiece...": '请稍等,我正在制作杰作...',
|
||||
"Just a jiffy, I'm debugging the universe...": '稍等片刻,我正在调试宇宙...',
|
||||
"Just a moment, I'm aligning the pixels...": '稍等片刻,我正在对齐像素...',
|
||||
"Just a sec, I'm optimizing the humor...": '稍等片刻,我正在优化幽默感...',
|
||||
"Just a moment, I'm tuning the algorithms...": '稍等片刻,我正在调整算法...',
|
||||
'Warp speed engaged...': '曲速已启动...',
|
||||
'Mining for more Dilithium crystals...': '正在挖掘更多二锂晶体...',
|
||||
"Don't panic...": '不要惊慌...',
|
||||
'Following the white rabbit...': '正在跟随白兔...',
|
||||
'The truth is in here... somewhere...': '真相在这里...某个地方...',
|
||||
'Blowing on the cartridge...': '正在吹卡带...',
|
||||
'Loading... Do a barrel roll!': '正在加载...做个桶滚!',
|
||||
'Waiting for the respawn...': '等待重生...',
|
||||
'Finishing the Kessel Run in less than 12 parsecs...':
|
||||
'正在以不到 12 秒差距完成凯塞尔航线...',
|
||||
"The cake is not a lie, it's just still loading...":
|
||||
'蛋糕不是谎言,只是还在加载...',
|
||||
'Fiddling with the character creation screen...': '正在摆弄角色创建界面...',
|
||||
"Just a moment, I'm finding the right meme...":
|
||||
'稍等片刻,我正在寻找合适的表情包...',
|
||||
"Pressing 'A' to continue...": "按 'A' 继续...",
|
||||
'Herding digital cats...': '正在放牧数字猫...',
|
||||
'Polishing the pixels...': '正在打磨像素...',
|
||||
'Finding a suitable loading screen pun...': '正在寻找合适的加载屏幕双关语...',
|
||||
'Distracting you with this witty phrase...':
|
||||
'正在用这个机智的短语分散您的注意力...',
|
||||
'Almost there... probably...': '快到了...可能...',
|
||||
'Our hamsters are working as fast as they can...':
|
||||
'我们的仓鼠正在尽可能快地工作...',
|
||||
'Giving Cloudy a pat on the head...': '正在拍拍 Cloudy 的头...',
|
||||
'Petting the cat...': '正在抚摸猫咪...',
|
||||
'Rickrolling my boss...': '正在 Rickroll 我的老板...',
|
||||
'Never gonna give you up, never gonna let you down...':
|
||||
'永远不会放弃你,永远不会让你失望...',
|
||||
'Slapping the bass...': '正在拍打低音...',
|
||||
'Tasting the snozberries...': '正在品尝 snozberries...',
|
||||
"I'm going the distance, I'm going for speed...":
|
||||
'我要走得更远,我要追求速度...',
|
||||
'Is this the real life? Is this just fantasy?...':
|
||||
'这是真实的生活吗?还是只是幻想?...',
|
||||
"I've got a good feeling about this...": '我对这个感觉很好...',
|
||||
'Poking the bear...': '正在戳熊...',
|
||||
'Doing research on the latest memes...': '正在研究最新的表情包...',
|
||||
'Figuring out how to make this more witty...': '正在想办法让这更有趣...',
|
||||
'Hmmm... let me think...': '嗯...让我想想...',
|
||||
'What do you call a fish with no eyes? A fsh...':
|
||||
'没有眼睛的鱼叫什么?一条鱼...',
|
||||
'Why did the computer go to therapy? It had too many bytes...':
|
||||
'为什么电脑去看心理医生?因为它有太多字节...',
|
||||
"Why don't programmers like nature? It has too many bugs...":
|
||||
'为什么程序员不喜欢大自然?因为虫子太多了...',
|
||||
'Why do programmers prefer dark mode? Because light attracts bugs...':
|
||||
'为什么程序员喜欢暗色模式?因为光会吸引虫子...',
|
||||
'Why did the developer go broke? Because they used up all their cache...':
|
||||
'为什么开发者破产了?因为他们用完了所有缓存...',
|
||||
"What can you do with a broken pencil? Nothing, it's pointless...":
|
||||
'你能用断了的铅笔做什么?什么都不能,因为它没有笔尖...',
|
||||
'Applying percussive maintenance...': '正在应用敲击维护...',
|
||||
'Searching for the correct USB orientation...': '正在寻找正确的 USB 方向...',
|
||||
'Ensuring the magic smoke stays inside the wires...':
|
||||
'确保魔法烟雾留在电线内...',
|
||||
'Rewriting in Rust for no particular reason...':
|
||||
'正在用 Rust 重写,没有特别的原因...',
|
||||
'Trying to exit Vim...': '正在尝试退出 Vim...',
|
||||
'Spinning up the hamster wheel...': '正在启动仓鼠轮...',
|
||||
"That's not a bug, it's an undocumented feature...":
|
||||
'这不是一个错误,这是一个未记录的功能...',
|
||||
'Engage.': '启动。',
|
||||
"I'll be back... with an answer.": '我会回来的...带着答案。',
|
||||
'My other process is a TARDIS...': '我的另一个进程是 TARDIS...',
|
||||
'Communing with the machine spirit...': '正在与机器精神交流...',
|
||||
'Letting the thoughts marinate...': '让想法慢慢酝酿...',
|
||||
'Just remembered where I put my keys...': '刚刚想起我把钥匙放在哪里了...',
|
||||
'Pondering the orb...': '正在思考球体...',
|
||||
"I've seen things you people wouldn't believe... like a user who reads loading messages.":
|
||||
'我见过你们不会相信的事情...比如一个阅读加载消息的用户。',
|
||||
'Initiating thoughtful gaze...': '正在启动深思凝视...',
|
||||
"What's a computer's favorite snack? Microchips.":
|
||||
'电脑最喜欢的零食是什么?微芯片。',
|
||||
"Why do Java developers wear glasses? Because they don't C#.":
|
||||
'为什么 Java 开发者戴眼镜?因为他们不会 C#。',
|
||||
'Charging the laser... pew pew!': '正在给激光充电...砰砰!',
|
||||
'Dividing by zero... just kidding!': '除以零...只是开玩笑!',
|
||||
'Looking for an adult superviso... I mean, processing.':
|
||||
'正在寻找成人监督...我是说,处理中。',
|
||||
'Making it go beep boop.': '让它发出哔哔声。',
|
||||
'Buffering... because even AIs need a moment.':
|
||||
'正在缓冲...因为即使是 AI 也需要片刻。',
|
||||
'Entangling quantum particles for a faster response...':
|
||||
'正在纠缠量子粒子以获得更快的回复...',
|
||||
'Polishing the chrome... on the algorithms.': '正在打磨铬...在算法上。',
|
||||
'Are you not entertained? (Working on it!)': '你不觉得有趣吗?(正在努力!)',
|
||||
'Summoning the code gremlins... to help, of course.':
|
||||
'正在召唤代码小精灵...当然是来帮忙的。',
|
||||
'Just waiting for the dial-up tone to finish...': '只是等待拨号音结束...',
|
||||
'Recalibrating the humor-o-meter.': '正在重新校准幽默计。',
|
||||
'My other loading screen is even funnier.': '我的另一个加载屏幕更有趣。',
|
||||
"Pretty sure there's a cat walking on the keyboard somewhere...":
|
||||
'很确定有只猫在某个地方键盘上走...',
|
||||
'Enhancing... Enhancing... Still loading.':
|
||||
'正在增强...正在增强...仍在加载。',
|
||||
"It's not a bug, it's a feature... of this loading screen.":
|
||||
'这不是一个错误,这是一个功能...这个加载屏幕的功能。',
|
||||
'Have you tried turning it off and on again? (The loading screen, not me.)':
|
||||
'你试过把它关掉再打开吗?(加载屏幕,不是我。)',
|
||||
'Constructing additional pylons...': '正在建造额外的能量塔...',
|
||||
WITTY_LOADING_PHRASES: [
|
||||
// --- 职场搬砖系列 ---
|
||||
'正在努力搬砖,请稍候...',
|
||||
'老板在身后,快加载啊!',
|
||||
'头发掉光前,一定能加载完...',
|
||||
'服务器正在深呼吸,准备放大招...',
|
||||
'正在向服务器投喂咖啡...',
|
||||
|
||||
// --- 大厂黑话系列 ---
|
||||
'正在赋能全链路,寻找关键抓手...',
|
||||
'正在降本增效,优化加载路径...',
|
||||
'正在打破部门壁垒,沉淀方法论...',
|
||||
'正在拥抱变化,迭代核心价值...',
|
||||
'正在对齐颗粒度,打磨底层逻辑...',
|
||||
'大力出奇迹,正在强行加载...',
|
||||
|
||||
// --- 程序员自嘲系列 ---
|
||||
'只要我不写代码,代码就没有 Bug...',
|
||||
'正在把 Bug 转化为 Feature...',
|
||||
'只要我不尴尬,Bug 就追不上我...',
|
||||
'正在试图理解去年的自己写了什么...',
|
||||
'正在猿力觉醒中,请耐心等待...',
|
||||
|
||||
// --- 合作愉快系列 ---
|
||||
'正在询问产品经理:这需求是真的吗?',
|
||||
'正在给产品经理画饼,请稍等...',
|
||||
|
||||
// --- 温暖治愈系列 ---
|
||||
'每一行代码,都在努力让世界变得更好一点点...',
|
||||
'每一个伟大的想法,都值得这份耐心的等待...',
|
||||
'别急,美好的事物总是需要一点时间去酝酿...',
|
||||
'愿你的代码永无 Bug,愿你的梦想终将成真...',
|
||||
'哪怕只有 0.1% 的进度,也是在向目标靠近...',
|
||||
'加载的是字节,承载的是对技术的热爱...',
|
||||
],
|
||||
};
|
||||
|
||||
@@ -20,8 +20,7 @@ import type {
  CLIControlSetModelRequest,
  CLIMcpServerConfig,
} from '../../types.js';
import { CommandService } from '../../../services/CommandService.js';
import { BuiltinCommandLoader } from '../../../services/BuiltinCommandLoader.js';
import { getAvailableCommands } from '../../../nonInteractiveCliCommands.js';
import {
  MCPServerConfig,
  AuthProviderType,
@@ -407,7 +406,7 @@ export class SystemController extends BaseController {
  }

  /**
   * Load slash command names using CommandService
   * Load slash command names using getAvailableCommands
   *
   * @param signal - AbortSignal to respect for cancellation
   * @returns Promise resolving to array of slash command names
@@ -418,21 +417,14 @@
    }

    try {
      const service = await CommandService.create(
        [new BuiltinCommandLoader(this.context.config)],
        signal,
      );
      const commands = await getAvailableCommands(this.context.config, signal);

      if (signal.aborted) {
        return [];
      }

      const names = new Set<string>();
      const commands = service.getCommands();
      for (const command of commands) {
        names.add(command.name);
      }
      return Array.from(names).sort();
      // Extract command names and sort
      return commands.map((cmd) => cmd.name).sort();
    } catch (error) {
      // Check if the error is due to abort
      if (signal.aborted) {

@@ -610,8 +610,6 @@ export abstract class BaseJsonOutputAdapter {
        const errorText = parseAndFormatApiError(
          event.value.error,
          this.config.getContentGeneratorConfig()?.authType,
          undefined,
          this.config.getModel(),
        );
        this.appendText(state, errorText, null);
        break;

@@ -68,6 +68,7 @@ describe('runNonInteractive', () => {
  let mockShutdownTelemetry: Mock;
  let consoleErrorSpy: MockInstance;
  let processStdoutSpy: MockInstance;
  let processStderrSpy: MockInstance;
  let mockGeminiClient: {
    sendMessageStream: Mock;
    getChatRecordingService: Mock;
@@ -86,6 +87,9 @@ describe('runNonInteractive', () => {
    processStdoutSpy = vi
      .spyOn(process.stdout, 'write')
      .mockImplementation(() => true);
    processStderrSpy = vi
      .spyOn(process.stderr, 'write')
      .mockImplementation(() => true);
    vi.spyOn(process, 'exit').mockImplementation((code) => {
      throw new Error(`process.exit(${code}) called`);
    });
@@ -139,6 +143,8 @@ describe('runNonInteractive', () => {
      setModel: vi.fn(async (model: string) => {
        currentModel = model;
      }),
      getExperimentalZedIntegration: vi.fn().mockReturnValue(false),
      isInteractive: vi.fn().mockReturnValue(false),
    } as unknown as Config;

    mockSettings = {
@@ -852,7 +858,7 @@ describe('runNonInteractive', () => {
    expect(processStdoutSpy).toHaveBeenCalledWith('Response from command');
  });

  it('should throw FatalInputError if a command requires confirmation', async () => {
  it('should handle command that requires confirmation by returning early', async () => {
    const mockCommand = {
      name: 'confirm',
      description: 'a command that needs confirmation',
@@ -864,15 +870,16 @@ describe('runNonInteractive', () => {
    };
    mockGetCommands.mockReturnValue([mockCommand]);

    await expect(
      runNonInteractive(
        mockConfig,
        mockSettings,
        '/confirm',
        'prompt-id-confirm',
      ),
    ).rejects.toThrow(
      'Exiting due to a confirmation prompt requested by the command.',
    await runNonInteractive(
      mockConfig,
      mockSettings,
      '/confirm',
      'prompt-id-confirm',
    );

    // Should write error message to stderr
    expect(processStderrSpy).toHaveBeenCalledWith(
      'Shell command confirmation is not supported in non-interactive mode. Use YOLO mode or pre-approve commands.\n',
    );
  });

@@ -909,7 +916,30 @@ describe('runNonInteractive', () => {
    expect(processStdoutSpy).toHaveBeenCalledWith('Response to unknown');
  });

  it('should throw for unhandled command result types', async () => {
  it('should handle known but unsupported slash commands like /help by returning early', async () => {
    // Mock a built-in command that exists but is not in the allowed list
    const mockHelpCommand = {
      name: 'help',
      description: 'Show help',
      kind: CommandKind.BUILT_IN,
      action: vi.fn(),
    };
    mockGetCommands.mockReturnValue([mockHelpCommand]);

    await runNonInteractive(
      mockConfig,
      mockSettings,
      '/help',
      'prompt-id-help',
    );

    // Should write error message to stderr
    expect(processStderrSpy).toHaveBeenCalledWith(
      'The command "/help" is not supported in non-interactive mode.\n',
    );
  });

  it('should handle unhandled command result types by returning early with error', async () => {
    const mockCommand = {
      name: 'noaction',
      description: 'unhandled type',
@@ -920,15 +950,16 @@ describe('runNonInteractive', () => {
    };
    mockGetCommands.mockReturnValue([mockCommand]);

    await expect(
      runNonInteractive(
        mockConfig,
        mockSettings,
        '/noaction',
        'prompt-id-unhandled',
      ),
    ).rejects.toThrow(
      'Exiting due to command result that is not supported in non-interactive mode.',
    await runNonInteractive(
      mockConfig,
      mockSettings,
      '/noaction',
      'prompt-id-unhandled',
    );

    // Should write error message to stderr
    expect(processStderrSpy).toHaveBeenCalledWith(
      'Unknown command result type: unhandled\n',
    );
  });

@@ -42,6 +42,55 @@ import {
  computeUsageFromMetrics,
} from './utils/nonInteractiveHelpers.js';

/**
 * Emits a final message for slash command results.
 * Note: systemMessage should already be emitted before calling this function.
 */
async function emitNonInteractiveFinalMessage(params: {
  message: string;
  isError: boolean;
  adapter?: JsonOutputAdapterInterface;
  config: Config;
  startTimeMs: number;
}): Promise<void> {
  const { message, isError, adapter, config } = params;

  if (!adapter) {
    // Text output mode: write directly to stdout/stderr
    const target = isError ? process.stderr : process.stdout;
    target.write(`${message}\n`);
    return;
  }

  // JSON output mode: emit assistant message and result
  // (systemMessage should already be emitted by caller)
  adapter.startAssistantMessage();
  adapter.processEvent({
    type: GeminiEventType.Content,
    value: message,
  } as unknown as Parameters<JsonOutputAdapterInterface['processEvent']>[0]);
  adapter.finalizeAssistantMessage();

  const metrics = uiTelemetryService.getMetrics();
  const usage = computeUsageFromMetrics(metrics);
  const outputFormat = config.getOutputFormat();
  const stats =
    outputFormat === OutputFormat.JSON
      ? uiTelemetryService.getMetrics()
      : undefined;

  adapter.emitResult({
    isError,
    durationMs: Date.now() - params.startTimeMs,
    apiDurationMs: 0,
    numTurns: 0,
    errorMessage: isError ? message : undefined,
    usage,
    stats,
    summary: message,
  });
}

/**
 * Provides optional overrides for `runNonInteractive` execution.
 *
@@ -115,6 +164,16 @@ export async function runNonInteractive(
  process.on('SIGINT', shutdownHandler);
  process.on('SIGTERM', shutdownHandler);

  // Emit systemMessage first (always the first message in JSON mode)
  if (adapter) {
    const systemMessage = await buildSystemMessage(
      config,
      sessionId,
      permissionMode,
    );
    adapter.emitMessage(systemMessage);
  }

  let initialPartList: PartListUnion | null = extractPartsFromUserMessage(
    options.userMessage,
  );
@@ -128,10 +187,45 @@
      config,
      settings,
    );
    if (slashCommandResult) {
      // A slash command can replace the prompt entirely; fall back to @-command processing otherwise.
      initialPartList = slashCommandResult as PartListUnion;
      slashHandled = true;
      switch (slashCommandResult.type) {
        case 'submit_prompt':
          // A slash command can replace the prompt entirely; fall back to @-command processing otherwise.
          initialPartList = slashCommandResult.content;
          slashHandled = true;
          break;
        case 'message': {
          // systemMessage already emitted above
          await emitNonInteractiveFinalMessage({
            message: slashCommandResult.content,
            isError: slashCommandResult.messageType === 'error',
            adapter,
            config,
            startTimeMs: startTime,
          });
          return;
        }
        case 'stream_messages':
          throw new FatalInputError(
            'Stream messages mode is not supported in non-interactive CLI',
          );
        case 'unsupported': {
          await emitNonInteractiveFinalMessage({
            message: slashCommandResult.reason,
            isError: true,
            adapter,
            config,
            startTimeMs: startTime,
          });
          return;
        }
        case 'no_command':
          break;
        default: {
          const _exhaustive: never = slashCommandResult;
          throw new FatalInputError(
            `Unhandled slash command result type: ${(_exhaustive as { type: string }).type}`,
          );
        }
      }
    }

@@ -163,15 +257,6 @@
  const initialParts = normalizePartList(initialPartList);
  let currentMessages: Content[] = [{ role: 'user', parts: initialParts }];

  if (adapter) {
    const systemMessage = await buildSystemMessage(
      config,
      sessionId,
      permissionMode,
    );
    adapter.emitMessage(systemMessage);
  }

  let isFirstTurn = true;
  while (true) {
    turnCount++;
@@ -221,8 +306,6 @@
        const errorText = parseAndFormatApiError(
          event.value.error,
          config.getContentGeneratorConfig()?.authType,
          undefined,
          config.getModel(),
        );
        process.stderr.write(`${errorText}\n`);
      }

packages/cli/src/nonInteractiveCliCommands.test.ts (new file, +242)
@@ -0,0 +1,242 @@
/**
|
||||
* @license
|
||||
* Copyright 2025 Qwen Team
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
import { describe, it, expect, vi, beforeEach } from 'vitest';
|
||||
import { handleSlashCommand } from './nonInteractiveCliCommands.js';
|
||||
import type { Config } from '@qwen-code/qwen-code-core';
|
||||
import type { LoadedSettings } from './config/settings.js';
|
||||
import { CommandKind } from './ui/commands/types.js';
|
||||
|
||||
// Mock the CommandService
|
||||
const mockGetCommands = vi.hoisted(() => vi.fn());
|
||||
const mockCommandServiceCreate = vi.hoisted(() => vi.fn());
|
||||
vi.mock('./services/CommandService.js', () => ({
|
||||
CommandService: {
|
||||
create: mockCommandServiceCreate,
|
||||
},
|
||||
}));
|
||||
|
||||
describe('handleSlashCommand', () => {
|
||||
let mockConfig: Config;
|
||||
let mockSettings: LoadedSettings;
|
||||
let abortController: AbortController;
|
||||
|
||||
beforeEach(() => {
|
||||
mockCommandServiceCreate.mockResolvedValue({
|
||||
getCommands: mockGetCommands,
|
||||
});
|
||||
|
||||
mockConfig = {
|
||||
getExperimentalZedIntegration: vi.fn().mockReturnValue(false),
|
||||
isInteractive: vi.fn().mockReturnValue(false),
|
||||
getSessionId: vi.fn().mockReturnValue('test-session'),
|
||||
getFolderTrustFeature: vi.fn().mockReturnValue(false),
|
||||
getFolderTrust: vi.fn().mockReturnValue(false),
|
||||
getProjectRoot: vi.fn().mockReturnValue('/test/project'),
|
||||
storage: {},
|
||||
} as unknown as Config;
|
||||
|
||||
mockSettings = {
|
||||
system: { path: '', settings: {} },
|
||||
systemDefaults: { path: '', settings: {} },
|
||||
user: { path: '', settings: {} },
|
||||
workspace: { path: '', settings: {} },
|
||||
} as LoadedSettings;
|
||||
|
||||
abortController = new AbortController();
|
||||
});
|
||||
|
||||
it('should return no_command for non-slash input', async () => {
|
||||
const result = await handleSlashCommand(
|
||||
'regular text',
|
||||
abortController,
|
||||
mockConfig,
|
||||
mockSettings,
|
||||
);
|
||||
|
||||
expect(result.type).toBe('no_command');
|
||||
});
|
||||
|
||||
it('should return no_command for unknown slash commands', async () => {
|
||||
mockGetCommands.mockReturnValue([]);
|
||||
|
||||
const result = await handleSlashCommand(
|
||||
'/unknowncommand',
|
||||
abortController,
|
||||
mockConfig,
|
||||
mockSettings,
|
||||
);
|
||||
|
||||
expect(result.type).toBe('no_command');
|
||||
});
|
||||
|
||||
it('should return unsupported for known built-in commands not in allowed list', async () => {
|
||||
const mockHelpCommand = {
|
||||
name: 'help',
|
||||
description: 'Show help',
|
||||
kind: CommandKind.BUILT_IN,
|
||||
action: vi.fn(),
|
||||
};
|
||||
mockGetCommands.mockReturnValue([mockHelpCommand]);
|
||||
|
||||
const result = await handleSlashCommand(
|
||||
'/help',
|
||||
abortController,
|
||||
mockConfig,
|
||||
mockSettings,
|
||||
[], // Empty allowed list
|
||||
);
|
||||
|
||||
expect(result.type).toBe('unsupported');
|
||||
if (result.type === 'unsupported') {
|
||||
expect(result.reason).toContain('/help');
|
||||
expect(result.reason).toContain('not supported');
|
||||
}
|
||||
});
|
||||
|
||||
it('should return unsupported for /help when using default allowed list', async () => {
|
||||
const mockHelpCommand = {
|
||||
name: 'help',
|
||||
description: 'Show help',
|
||||
kind: CommandKind.BUILT_IN,
|
||||
action: vi.fn(),
|
||||
};
|
||||
mockGetCommands.mockReturnValue([mockHelpCommand]);
|
||||
|
||||
const result = await handleSlashCommand(
|
||||
'/help',
|
||||
abortController,
|
||||
mockConfig,
|
||||
mockSettings,
|
||||
// Default allowed list: ['init', 'summary', 'compress']
|
||||
);
|
||||
|
||||
expect(result.type).toBe('unsupported');
|
||||
if (result.type === 'unsupported') {
|
||||
expect(result.reason).toBe(
|
||||
'The command "/help" is not supported in non-interactive mode.',
|
||||
);
|
||||
}
|
||||
});
|
||||
|
||||
it('should execute allowed built-in commands', async () => {
|
||||
const mockInitCommand = {
|
||||
name: 'init',
|
||||
description: 'Initialize project',
|
||||
kind: CommandKind.BUILT_IN,
|
||||
action: vi.fn().mockResolvedValue({
|
||||
type: 'message',
|
||||
messageType: 'info',
|
||||
content: 'Project initialized',
|
||||
}),
|
||||
};
|
||||
mockGetCommands.mockReturnValue([mockInitCommand]);
|
||||
|
||||
const result = await handleSlashCommand(
|
||||
'/init',
|
||||
abortController,
|
||||
mockConfig,
|
||||
mockSettings,
|
||||
['init'], // init is in the allowed list
|
||||
);
|
||||
|
||||
expect(result.type).toBe('message');
|
||||
if (result.type === 'message') {
|
||||
expect(result.content).toBe('Project initialized');
|
||||
}
|
||||
});
|
||||
|
||||
it('should execute file commands regardless of allowed list', async () => {
|
||||
const mockFileCommand = {
|
||||
name: 'custom',
|
||||
description: 'Custom file command',
|
||||
kind: CommandKind.FILE,
|
||||
action: vi.fn().mockResolvedValue({
|
||||
type: 'submit_prompt',
|
||||
content: [{ text: 'Custom prompt' }],
|
||||
}),
|
||||
};
|
||||
mockGetCommands.mockReturnValue([mockFileCommand]);
|
||||
|
||||
const result = await handleSlashCommand(
|
||||
'/custom',
|
||||
abortController,
|
||||
mockConfig,
|
||||
mockSettings,
|
||||
[], // Empty allowed list, but FILE commands should still work
|
||||
);
|
||||
|
||||
expect(result.type).toBe('submit_prompt');
|
||||
if (result.type === 'submit_prompt') {
|
||||
expect(result.content).toEqual([{ text: 'Custom prompt' }]);
|
||||
}
|
||||
});
|
||||
|
||||
it('should return unsupported for other built-in commands like /quit', async () => {
|
||||
const mockQuitCommand = {
|
||||
name: 'quit',
|
||||
description: 'Quit application',
|
||||
kind: CommandKind.BUILT_IN,
|
||||
action: vi.fn(),
|
||||
};
|
||||
mockGetCommands.mockReturnValue([mockQuitCommand]);
|
||||
|
||||
const result = await handleSlashCommand(
|
||||
'/quit',
|
||||
abortController,
|
||||
mockConfig,
|
||||
mockSettings,
|
||||
);
|
||||
|
||||
expect(result.type).toBe('unsupported');
|
||||
if (result.type === 'unsupported') {
|
||||
expect(result.reason).toContain('/quit');
|
||||
expect(result.reason).toContain('not supported');
|
||||
}
|
||||
});
|
||||
|
||||
it('should handle command with no action', async () => {
|
||||
const mockCommand = {
|
||||
name: 'noaction',
|
||||
description: 'Command without action',
|
||||
kind: CommandKind.FILE,
|
||||
// No action property
|
||||
};
|
||||
mockGetCommands.mockReturnValue([mockCommand]);
|
||||
|
||||
const result = await handleSlashCommand(
|
||||
'/noaction',
|
||||
abortController,
|
||||
mockConfig,
|
||||
mockSettings,
|
||||
);
|
||||
|
||||
expect(result.type).toBe('no_command');
|
||||
});
|
||||
|
||||
it('should return message when command returns void', async () => {
|
||||
const mockCommand = {
|
||||
name: 'voidcmd',
|
||||
description: 'Command that returns void',
|
||||
kind: CommandKind.FILE,
|
||||
action: vi.fn().mockResolvedValue(undefined),
|
||||
};
|
||||
mockGetCommands.mockReturnValue([mockCommand]);
|
||||
|
||||
const result = await handleSlashCommand(
|
||||
'/voidcmd',
|
||||
abortController,
|
||||
mockConfig,
|
||||
mockSettings,
|
||||
);
|
||||
|
||||
expect(result.type).toBe('message');
|
||||
if (result.type === 'message') {
|
||||
expect(result.content).toBe('Command executed successfully.');
|
||||
expect(result.messageType).toBe('info');
|
||||
}
|
||||
});
|
||||
});
|
||||
@@ -7,7 +7,6 @@
import type { PartListUnion } from '@google/genai';
import { parseSlashCommand } from './utils/commands.js';
import {
  FatalInputError,
  Logger,
  uiTelemetryService,
  type Config,
@@ -19,10 +18,164 @@
  CommandKind,
  type CommandContext,
  type SlashCommand,
  type SlashCommandActionReturn,
} from './ui/commands/types.js';
import { createNonInteractiveUI } from './ui/noninteractive/nonInteractiveUi.js';
import type { LoadedSettings } from './config/settings.js';
import type { SessionStatsState } from './ui/contexts/SessionContext.js';
import { t } from './i18n/index.js';

/**
 * Built-in commands that are allowed in non-interactive modes (CLI and ACP).
 * Only safe, read-only commands that don't require interactive UI.
 *
 * These commands are:
 * - init: Initialize project configuration
 * - summary: Generate session summary
 * - compress: Compress conversation history
 */
export const ALLOWED_BUILTIN_COMMANDS_NON_INTERACTIVE = [
  'init',
  'summary',
  'compress',
] as const;

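// Illustrative sketch (not part of the diff): the allowlist above is consumed
// as a Set further down in this file, so any built-in command outside it
// resolves to an 'unsupported' result in non-interactive mode.
// const allowed = new Set<string>(ALLOWED_BUILTIN_COMMANDS_NON_INTERACTIVE);
// allowed.has('summary'); // true  -> may run non-interactively
// allowed.has('help');    // false -> "not supported in non-interactive mode"
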
/**
 * Result of handling a slash command in non-interactive mode.
 *
 * Supported types:
 * - 'submit_prompt': Submits content to the model (supports all modes)
 * - 'message': Returns a single message (supports non-interactive JSON/text only)
 * - 'stream_messages': Streams multiple messages (supports ACP only)
 * - 'unsupported': Command cannot be executed in this mode
 * - 'no_command': No command was found or executed
 */
export type NonInteractiveSlashCommandResult =
  | {
      type: 'submit_prompt';
      content: PartListUnion;
    }
  | {
      type: 'message';
      messageType: 'info' | 'error';
      content: string;
    }
  | {
      type: 'stream_messages';
      messages: AsyncGenerator<
        { messageType: 'info' | 'error'; content: string },
        void,
        unknown
      >;
    }
  | {
      type: 'unsupported';
      reason: string;
      originalType: string;
    }
  | {
      type: 'no_command';
    };

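// Illustrative sketch (not part of the diff): callers are expected to narrow
// on `type`, mirroring what runNonInteractive does with this union.
function describeSlashResult(result: NonInteractiveSlashCommandResult): string {
  switch (result.type) {
    case 'submit_prompt':
      return 'forward result.content to the model';
    case 'message':
      return `${result.messageType}: ${result.content}`;
    case 'stream_messages':
      return 'drain result.messages (ACP only)';
    case 'unsupported':
      return `error: ${result.reason}`;
    case 'no_command':
    default:
      return 'treat the input as a plain prompt';
  }
}
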
/**
 * Converts a SlashCommandActionReturn to a NonInteractiveSlashCommandResult.
 *
 * Only the following result types are supported in non-interactive mode:
 * - submit_prompt: Submits content to the model (all modes)
 * - message: Returns a single message (non-interactive JSON/text only)
 * - stream_messages: Streams multiple messages (ACP only)
 *
 * All other result types are converted to 'unsupported'.
 *
 * @param result The result from executing a slash command action
 * @returns A NonInteractiveSlashCommandResult describing the outcome
 */
function handleCommandResult(
  result: SlashCommandActionReturn,
): NonInteractiveSlashCommandResult {
  switch (result.type) {
    case 'submit_prompt':
      return {
        type: 'submit_prompt',
        content: result.content,
      };

    case 'message':
      return {
        type: 'message',
        messageType: result.messageType,
        content: result.content,
      };

    case 'stream_messages':
      return {
        type: 'stream_messages',
        messages: result.messages,
      };

    /**
     * Currently return types below are never generated due to the
     * whitelist of allowed slash commands in ACP and non-interactive mode.
     * We'll try to add more supported return types in the future.
     */
    case 'tool':
      return {
        type: 'unsupported',
        reason:
          'Tool execution from slash commands is not supported in non-interactive mode.',
        originalType: 'tool',
      };

    case 'quit':
      return {
        type: 'unsupported',
        reason:
          'Quit command is not supported in non-interactive mode. The process will exit naturally after completion.',
        originalType: 'quit',
      };

    case 'dialog':
      return {
        type: 'unsupported',
        reason: `Dialog '${result.dialog}' cannot be opened in non-interactive mode.`,
        originalType: 'dialog',
      };

    case 'load_history':
      return {
        type: 'unsupported',
        reason:
          'Loading history is not supported in non-interactive mode. Each invocation starts with a fresh context.',
        originalType: 'load_history',
      };

    case 'confirm_shell_commands':
      return {
        type: 'unsupported',
        reason:
          'Shell command confirmation is not supported in non-interactive mode. Use YOLO mode or pre-approve commands.',
        originalType: 'confirm_shell_commands',
      };

    case 'confirm_action':
      return {
        type: 'unsupported',
        reason:
          'Action confirmation is not supported in non-interactive mode. Commands requiring confirmation cannot be executed.',
        originalType: 'confirm_action',
      };

    default: {
      // Exhaustiveness check
      const _exhaustive: never = result;
      return {
        type: 'unsupported',
        reason: `Unknown command result type: ${(_exhaustive as SlashCommandActionReturn).type}`,
        originalType: 'unknown',
      };
    }
  }
}

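// Illustrative sketch (not part of the diff): per the switch above, results the
// non-interactive path cannot honor are downgraded to 'unsupported'. For a
// hypothetical dialog result, handleCommandResult({ type: 'dialog', dialog: 'settings' })
// would yield { type: 'unsupported', reason: "Dialog 'settings' cannot be opened
// in non-interactive mode.", originalType: 'dialog' }.
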
/**
 * Filters commands based on the allowed built-in command names.
@@ -62,122 +215,146 @@ function filterCommandsForNonInteractive(
 * @param config The configuration object
 * @param settings The loaded settings
 * @param allowedBuiltinCommandNames Optional array of built-in command names that are
 *   allowed. If not provided or empty, only file commands are available.
 * @returns A Promise that resolves to `PartListUnion` if a valid command is
 *   found and results in a prompt, or `undefined` otherwise.
 * @throws {FatalInputError} if the command result is not supported in
 *   non-interactive mode.
 *   allowed. Defaults to ALLOWED_BUILTIN_COMMANDS_NON_INTERACTIVE (init, summary, compress).
 *   Pass an empty array to only allow file commands.
 * @returns A Promise that resolves to a `NonInteractiveSlashCommandResult` describing
 *   the outcome of the command execution.
 */
export const handleSlashCommand = async (
  rawQuery: string,
  abortController: AbortController,
  config: Config,
  settings: LoadedSettings,
  allowedBuiltinCommandNames?: string[],
): Promise<PartListUnion | undefined> => {
  allowedBuiltinCommandNames: string[] = [
    ...ALLOWED_BUILTIN_COMMANDS_NON_INTERACTIVE,
  ],
): Promise<NonInteractiveSlashCommandResult> => {
  const trimmed = rawQuery.trim();
  if (!trimmed.startsWith('/')) {
    return;
    return { type: 'no_command' };
  }

  const isAcpMode = config.getExperimentalZedIntegration();
  const isInteractive = config.isInteractive();

  const executionMode = isAcpMode
    ? 'acp'
    : isInteractive
      ? 'interactive'
      : 'non_interactive';

  const allowedBuiltinSet = new Set(allowedBuiltinCommandNames ?? []);

  // Only load BuiltinCommandLoader if there are allowed built-in commands
  const loaders =
    allowedBuiltinSet.size > 0
      ? [new BuiltinCommandLoader(config), new FileCommandLoader(config)]
      : [new FileCommandLoader(config)];
  // Load all commands to check if the command exists but is not allowed
  const allLoaders = [
    new BuiltinCommandLoader(config),
    new FileCommandLoader(config),
  ];

  const commandService = await CommandService.create(
    loaders,
    allLoaders,
    abortController.signal,
  );
  const commands = commandService.getCommands();
  const allCommands = commandService.getCommands();
  const filteredCommands = filterCommandsForNonInteractive(
    commands,
    allCommands,
    allowedBuiltinSet,
  );

  // First, try to parse with filtered commands
  const { commandToExecute, args } = parseSlashCommand(
    rawQuery,
    filteredCommands,
  );

  if (commandToExecute) {
    if (commandToExecute.action) {
      // Not used by custom commands but may be in the future.
      const sessionStats: SessionStatsState = {
        sessionId: config?.getSessionId(),
        sessionStartTime: new Date(),
        metrics: uiTelemetryService.getMetrics(),
        lastPromptTokenCount: 0,
        promptCount: 1,
  if (!commandToExecute) {
    // Check if this is a known command that's just not allowed
    const { commandToExecute: knownCommand } = parseSlashCommand(
      rawQuery,
      allCommands,
    );

    if (knownCommand) {
      // Command exists but is not allowed in non-interactive mode
      return {
        type: 'unsupported',
        reason: t(
          'The command "/{{command}}" is not supported in non-interactive mode.',
          { command: knownCommand.name },
        ),
        originalType: 'filtered_command',
      };

      const logger = new Logger(config?.getSessionId() || '', config?.storage);

      const context: CommandContext = {
        services: {
          config,
          settings,
          git: undefined,
          logger,
        },
        ui: createNonInteractiveUI(),
        session: {
          stats: sessionStats,
          sessionShellAllowlist: new Set(),
        },
        invocation: {
          raw: trimmed,
          name: commandToExecute.name,
          args,
        },
      };

      const result = await commandToExecute.action(context, args);

      if (result) {
        switch (result.type) {
          case 'submit_prompt':
            return result.content;
          case 'confirm_shell_commands':
            // This result indicates a command attempted to confirm shell commands.
            // However note that currently, ShellTool is excluded in non-interactive
            // mode unless 'YOLO mode' is active, so confirmation actually won't
            // occur because of YOLO mode.
            // This ensures that if a command *does* request confirmation (e.g.
            // in the future with more granular permissions), it's handled appropriately.
            throw new FatalInputError(
              'Exiting due to a confirmation prompt requested by the command.',
            );
          default:
            throw new FatalInputError(
              'Exiting due to command result that is not supported in non-interactive mode.',
            );
        }
      }
    }

    return { type: 'no_command' };
  }

  return;
  if (!commandToExecute.action) {
    return { type: 'no_command' };
  }

  // Not used by custom commands but may be in the future.
  const sessionStats: SessionStatsState = {
    sessionId: config?.getSessionId(),
    sessionStartTime: new Date(),
    metrics: uiTelemetryService.getMetrics(),
    lastPromptTokenCount: 0,
    promptCount: 1,
  };

  const logger = new Logger(config?.getSessionId() || '', config?.storage);

  const context: CommandContext = {
    executionMode,
    services: {
      config,
      settings,
      git: undefined,
      logger,
    },
    ui: createNonInteractiveUI(),
    session: {
      stats: sessionStats,
      sessionShellAllowlist: new Set(),
    },
    invocation: {
      raw: trimmed,
      name: commandToExecute.name,
      args,
    },
  };

  const result = await commandToExecute.action(context, args);

  if (!result) {
    // Command executed but returned no result (e.g., void return)
    return {
      type: 'message',
      messageType: 'info',
      content: 'Command executed successfully.',
    };
  }

  // Handle different result types
  return handleCommandResult(result);
};

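// Illustrative sketch (not part of the diff): how a non-interactive entry point
// might drive handleSlashCommand; `config` and `settings` are assumed to be the
// already-loaded Config and LoadedSettings for the session.
async function runSlashInputExample(
  input: string,
  config: Config,
  settings: LoadedSettings,
): Promise<void> {
  const controller = new AbortController();
  // Uses the default allowlist (init, summary, compress) plus file commands.
  const result = await handleSlashCommand(input, controller, config, settings);
  if (result.type === 'message') {
    const target =
      result.messageType === 'error' ? process.stderr : process.stdout;
    target.write(`${result.content}\n`);
  } else if (result.type === 'unsupported') {
    process.stderr.write(`${result.reason}\n`);
  }
  // 'submit_prompt' content would instead be handed to the model loop.
}
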
/**
 * Retrieves all available slash commands for the current configuration.
 *
 * @param config The configuration object
 * @param settings The loaded settings
 * @param abortSignal Signal to cancel the loading process
 * @param allowedBuiltinCommandNames Optional array of built-in command names that are
 *   allowed. If not provided or empty, only file commands are available.
 *   allowed. Defaults to ALLOWED_BUILTIN_COMMANDS_NON_INTERACTIVE (init, summary, compress).
 *   Pass an empty array to only include file commands.
 * @returns A Promise that resolves to an array of SlashCommand objects
 */
export const getAvailableCommands = async (
  config: Config,
  settings: LoadedSettings,
  abortSignal: AbortSignal,
  allowedBuiltinCommandNames?: string[],
  allowedBuiltinCommandNames: string[] = [
    ...ALLOWED_BUILTIN_COMMANDS_NON_INTERACTIVE,
  ],
): Promise<SlashCommand[]> => {
  try {
    const allowedBuiltinSet = new Set(allowedBuiltinCommandNames ?? []);

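// Illustrative sketch (not part of the diff): with the default allowlist, the
// command names offered to non-interactive callers can be listed roughly as:
// const names = (await getAvailableCommands(config, settings, signal))
//   .map((cmd) => cmd.name)
//   .sort();
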
packages/cli/src/services/FileCommandLoader-extension.test.ts (new file, +327)
@@ -0,0 +1,327 @@
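// Illustrative sketch (not part of the diff): the tests below exercise the
// optional `commands` field of qwen-extension.json (names here are hypothetical).
// A manifest may point at a single directory or at several:
//   { "name": "my-ext", "version": "1.0.0", "commands": "custom-cmds" }
//   { "name": "my-ext", "version": "1.0.0", "commands": ["commands1", "commands2"] }
// When the field is omitted, the loader falls back to the default "commands" directory.
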
/**
|
||||
* @license
|
||||
* Copyright 2025 Google LLC
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
import { describe, it, expect, beforeEach, afterEach } from 'vitest';
|
||||
import * as fs from 'node:fs';
|
||||
import * as path from 'node:path';
|
||||
import * as os from 'node:os';
|
||||
import { FileCommandLoader } from './FileCommandLoader.js';
|
||||
import type { Config } from '@qwen-code/qwen-code-core';
|
||||
import { Storage } from '@qwen-code/qwen-code-core';
|
||||
|
||||
describe('FileCommandLoader - Extension Commands Support', () => {
|
||||
let tempDir: string;
|
||||
let mockConfig: Partial<Config>;
|
||||
|
||||
beforeEach(async () => {
|
||||
tempDir = await fs.promises.mkdtemp(
|
||||
path.join(os.tmpdir(), 'file-command-loader-ext-test-'),
|
||||
);
|
||||
|
||||
mockConfig = {
|
||||
getFolderTrustFeature: () => false,
|
||||
getFolderTrust: () => true,
|
||||
getProjectRoot: () => tempDir,
|
||||
storage: new Storage(tempDir),
|
||||
getExtensions: () => [],
|
||||
};
|
||||
});
|
||||
|
||||
afterEach(async () => {
|
||||
await fs.promises.rm(tempDir, { recursive: true, force: true });
|
||||
});
|
||||
|
||||
it('should load commands from extension with config.commands path', async () => {
|
||||
// Setup extension structure
|
||||
const extensionDir = path.join(tempDir, '.qwen', 'extensions', 'test-ext');
|
||||
const customCommandsDir = path.join(extensionDir, 'custom-cmds');
|
||||
await fs.promises.mkdir(customCommandsDir, { recursive: true });
|
||||
|
||||
// Create extension config with custom commands path
|
||||
const extensionConfig = {
|
||||
name: 'test-ext',
|
||||
version: '1.0.0',
|
||||
commands: 'custom-cmds',
|
||||
};
|
||||
await fs.promises.writeFile(
|
||||
path.join(extensionDir, 'qwen-extension.json'),
|
||||
JSON.stringify(extensionConfig),
|
||||
);
|
||||
|
||||
// Create a test command in custom directory
|
||||
const commandContent =
|
||||
'---\ndescription: Test command from extension\n---\nDo something';
|
||||
await fs.promises.writeFile(
|
||||
path.join(customCommandsDir, 'test.md'),
|
||||
commandContent,
|
||||
);
|
||||
|
||||
// Mock config to return the extension
|
||||
mockConfig.getExtensions = () => [
|
||||
{
|
||||
id: 'test-ext',
|
||||
config: extensionConfig,
|
||||
name: 'test-ext',
|
||||
version: '1.0.0',
|
||||
isActive: true,
|
||||
path: extensionDir,
|
||||
contextFiles: [],
|
||||
},
|
||||
];
|
||||
|
||||
const loader = new FileCommandLoader(mockConfig as Config);
|
||||
const commands = await loader.loadCommands(new AbortController().signal);
|
||||
|
||||
expect(commands).toHaveLength(1);
|
||||
expect(commands[0].name).toBe('test-ext:test');
|
||||
expect(commands[0].description).toBe(
|
||||
'[test-ext] Test command from extension',
|
||||
);
|
||||
});
|
||||
|
||||
it('should load commands from extension with multiple commands paths', async () => {
|
||||
// Setup extension structure
|
||||
const extensionDir = path.join(tempDir, '.qwen', 'extensions', 'multi-ext');
|
||||
const cmdsDir1 = path.join(extensionDir, 'commands1');
|
||||
const cmdsDir2 = path.join(extensionDir, 'commands2');
|
||||
await fs.promises.mkdir(cmdsDir1, { recursive: true });
|
||||
await fs.promises.mkdir(cmdsDir2, { recursive: true });
|
||||
|
||||
// Create extension config with multiple commands paths
|
||||
const extensionConfig = {
|
||||
name: 'multi-ext',
|
||||
version: '1.0.0',
|
||||
commands: ['commands1', 'commands2'],
|
||||
};
|
||||
await fs.promises.writeFile(
|
||||
path.join(extensionDir, 'qwen-extension.json'),
|
||||
JSON.stringify(extensionConfig),
|
||||
);
|
||||
|
||||
// Create test commands in both directories
|
||||
await fs.promises.writeFile(
|
||||
path.join(cmdsDir1, 'cmd1.md'),
|
||||
'---\n---\nCommand 1',
|
||||
);
|
||||
await fs.promises.writeFile(
|
||||
path.join(cmdsDir2, 'cmd2.md'),
|
||||
'---\n---\nCommand 2',
|
||||
);
|
||||
|
||||
// Mock config to return the extension
|
||||
mockConfig.getExtensions = () => [
|
||||
{
|
||||
id: 'multi-ext',
|
||||
config: extensionConfig,
|
||||
contextFiles: [],
|
||||
name: 'multi-ext',
|
||||
version: '1.0.0',
|
||||
isActive: true,
|
||||
path: extensionDir,
|
||||
},
|
||||
];
|
||||
|
||||
const loader = new FileCommandLoader(mockConfig as Config);
|
||||
const commands = await loader.loadCommands(new AbortController().signal);
|
||||
|
||||
expect(commands).toHaveLength(2);
|
||||
const commandNames = commands.map((c) => c.name).sort();
|
||||
expect(commandNames).toEqual(['multi-ext:cmd1', 'multi-ext:cmd2']);
|
||||
});
|
||||
|
||||
it('should fallback to default "commands" directory when config.commands not specified', async () => {
|
||||
// Setup extension structure with default commands directory
|
||||
const extensionDir = path.join(
|
||||
tempDir,
|
||||
'.qwen',
|
||||
'extensions',
|
||||
'default-ext',
|
||||
);
|
||||
const defaultCommandsDir = path.join(extensionDir, 'commands');
|
||||
await fs.promises.mkdir(defaultCommandsDir, { recursive: true });
|
||||
|
||||
// Create extension config without commands field
|
||||
const extensionConfig = {
|
||||
name: 'default-ext',
|
||||
version: '1.0.0',
|
||||
};
|
||||
await fs.promises.writeFile(
|
||||
path.join(extensionDir, 'qwen-extension.json'),
|
||||
JSON.stringify(extensionConfig),
|
||||
);
|
||||
|
||||
// Create a test command in default directory
|
||||
await fs.promises.writeFile(
|
||||
path.join(defaultCommandsDir, 'default.md'),
|
||||
'---\n---\nDefault command',
|
||||
);
|
||||
|
||||
// Mock config to return the extension
|
||||
mockConfig.getExtensions = () => [
|
||||
{
|
||||
id: 'default-ext',
|
||||
config: extensionConfig,
|
||||
contextFiles: [],
|
||||
name: 'default-ext',
|
||||
version: '1.0.0',
|
||||
isActive: true,
|
||||
path: extensionDir,
|
||||
},
|
||||
];
|
||||
|
||||
const loader = new FileCommandLoader(mockConfig as Config);
|
||||
const commands = await loader.loadCommands(new AbortController().signal);
|
||||
|
||||
expect(commands).toHaveLength(1);
|
||||
expect(commands[0].name).toBe('default-ext:default');
|
||||
});
|
||||
|
||||
it('should handle extension without commands directory gracefully', async () => {
|
||||
// Setup extension structure without commands directory
|
||||
const extensionDir = path.join(
|
||||
tempDir,
|
||||
'.qwen',
|
||||
'extensions',
|
||||
'no-cmds-ext',
|
||||
);
|
||||
await fs.promises.mkdir(extensionDir, { recursive: true });
|
||||
|
||||
// Create extension config
|
||||
const extensionConfig = {
|
||||
name: 'no-cmds-ext',
|
||||
version: '1.0.0',
|
||||
};
|
||||
await fs.promises.writeFile(
|
||||
path.join(extensionDir, 'qwen-extension.json'),
|
||||
JSON.stringify(extensionConfig),
|
||||
);
|
||||
|
||||
// Mock config to return the extension
|
||||
mockConfig.getExtensions = () => [
|
||||
{
|
||||
id: 'no-cmds-ext',
|
||||
config: extensionConfig,
|
||||
contextFiles: [],
|
||||
name: 'no-cmds-ext',
|
||||
version: '1.0.0',
|
||||
isActive: true,
|
||||
path: extensionDir,
|
||||
},
|
||||
];
|
||||
|
||||
const loader = new FileCommandLoader(mockConfig as Config);
|
||||
const commands = await loader.loadCommands(new AbortController().signal);
|
||||
|
||||
// Should not throw and return empty array
|
||||
expect(commands).toHaveLength(0);
|
||||
});
|
||||
|
||||
it('should prefix extension commands with extension name', async () => {
|
||||
// Setup extension
|
||||
const extensionDir = path.join(
|
||||
tempDir,
|
||||
'.qwen',
|
||||
'extensions',
|
||||
'prefix-ext',
|
||||
);
|
||||
const commandsDir = path.join(extensionDir, 'commands');
|
||||
await fs.promises.mkdir(commandsDir, { recursive: true });
|
||||
|
||||
const extensionConfig = {
|
||||
name: 'prefix-ext',
|
||||
version: '1.0.0',
|
||||
};
|
||||
await fs.promises.writeFile(
|
||||
path.join(extensionDir, 'qwen-extension.json'),
|
||||
JSON.stringify(extensionConfig),
|
||||
);
|
||||
|
||||
await fs.promises.writeFile(
|
||||
path.join(commandsDir, 'mycommand.md'),
|
||||
'---\n---\nMy command',
|
||||
);
|
||||
|
||||
mockConfig.getExtensions = () => [
|
||||
{
|
||||
id: 'prefix-ext',
|
||||
config: extensionConfig,
|
||||
contextFiles: [],
|
||||
name: 'prefix-ext',
|
||||
version: '1.0.0',
|
||||
isActive: true,
|
||||
path: extensionDir,
|
||||
},
|
||||
];
|
||||
|
||||
const loader = new FileCommandLoader(mockConfig as Config);
|
||||
const commands = await loader.loadCommands(new AbortController().signal);
|
||||
|
||||
expect(commands).toHaveLength(1);
|
||||
expect(commands[0].name).toBe('prefix-ext:mycommand');
|
||||
});
|
||||
|
||||
it('should load commands from multiple extensions in alphabetical order', async () => {
|
||||
// Setup two extensions
|
||||
const ext1Dir = path.join(tempDir, '.qwen', 'extensions', 'ext-b');
|
||||
const ext2Dir = path.join(tempDir, '.qwen', 'extensions', 'ext-a');
|
||||
|
||||
await fs.promises.mkdir(path.join(ext1Dir, 'commands'), {
|
||||
recursive: true,
|
||||
});
|
||||
await fs.promises.mkdir(path.join(ext2Dir, 'commands'), {
|
||||
recursive: true,
|
||||
});
|
||||
|
||||
// Extension B
|
||||
await fs.promises.writeFile(
|
||||
path.join(ext1Dir, 'qwen-extension.json'),
|
||||
JSON.stringify({ name: 'ext-b', version: '1.0.0' }),
|
||||
);
|
||||
await fs.promises.writeFile(
|
||||
path.join(ext1Dir, 'commands', 'cmd.md'),
|
||||
'---\n---\nCommand B',
|
||||
);
|
||||
|
||||
// Extension A
|
||||
await fs.promises.writeFile(
|
||||
path.join(ext2Dir, 'qwen-extension.json'),
|
||||
JSON.stringify({ name: 'ext-a', version: '1.0.0' }),
|
||||
);
|
||||
await fs.promises.writeFile(
|
||||
path.join(ext2Dir, 'commands', 'cmd.md'),
|
||||
'---\n---\nCommand A',
|
||||
);
|
||||
|
||||
mockConfig.getExtensions = () => [
|
||||
{
|
||||
id: 'ext-b',
|
||||
config: { name: 'ext-b', version: '1.0.0' },
|
||||
contextFiles: [],
|
||||
name: 'ext-b',
|
||||
version: '1.0.0',
|
||||
isActive: true,
|
||||
path: ext1Dir,
|
||||
},
|
||||
{
|
||||
id: 'ext-a',
|
||||
config: { name: 'ext-a', version: '1.0.0' },
|
||||
contextFiles: [],
|
||||
name: 'ext-a',
|
||||
version: '1.0.0',
|
||||
isActive: true,
|
||||
path: ext2Dir,
|
||||
},
|
||||
];
|
||||
|
||||
const loader = new FileCommandLoader(mockConfig as Config);
|
||||
const commands = await loader.loadCommands(new AbortController().signal);
|
||||
|
||||
expect(commands).toHaveLength(2);
|
||||
// Extensions are sorted alphabetically, so ext-a comes before ext-b
|
||||
expect(commands[0].name).toBe('ext-a:cmd');
|
||||
expect(commands[1].name).toBe('ext-b:cmd');
|
||||
});
|
||||
});
|
||||
packages/cli/src/services/FileCommandLoader-markdown.test.ts (new file, +117)
@@ -0,0 +1,117 @@
/**
|
||||
* @license
|
||||
* Copyright 2025 Google LLC
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
import { describe, it, expect, beforeAll, afterAll } from 'vitest';
|
||||
import { promises as fs } from 'node:fs';
|
||||
import path from 'node:path';
|
||||
import os from 'node:os';
|
||||
import { FileCommandLoader } from './FileCommandLoader.js';
|
||||
|
||||
describe('FileCommandLoader - Markdown support', () => {
|
||||
let tempDir: string;
|
||||
|
||||
beforeAll(async () => {
|
||||
// Create a temporary directory for test commands
|
||||
tempDir = await fs.mkdtemp(path.join(os.tmpdir(), 'qwen-md-test-'));
|
||||
});
|
||||
|
||||
afterAll(async () => {
|
||||
// Clean up
|
||||
await fs.rm(tempDir, { recursive: true, force: true });
|
||||
});
|
||||
|
||||
it('should load markdown commands with frontmatter', async () => {
|
||||
// Create a test markdown command file
|
||||
const mdContent = `---
|
||||
description: Test markdown command
|
||||
---
|
||||
|
||||
This is a test prompt from markdown.`;
|
||||
|
||||
const commandPath = path.join(tempDir, 'test-command.md');
|
||||
await fs.writeFile(commandPath, mdContent, 'utf-8');
|
||||
|
||||
// Create loader with temp dir as command source
|
||||
const loader = new FileCommandLoader(null);
|
||||
|
||||
// Mock the getCommandDirectories to return our temp dir
|
||||
const originalMethod = loader['getCommandDirectories'];
|
||||
loader['getCommandDirectories'] = () => [{ path: tempDir }];
|
||||
|
||||
try {
|
||||
const commands = await loader.loadCommands(new AbortController().signal);
|
||||
|
||||
expect(commands).toHaveLength(1);
|
||||
expect(commands[0].name).toBe('test-command');
|
||||
expect(commands[0].description).toBe('Test markdown command');
|
||||
} finally {
|
||||
// Restore original method
|
||||
loader['getCommandDirectories'] = originalMethod;
|
||||
}
|
||||
});
|
||||
|
||||
it('should load markdown commands without frontmatter', async () => {
|
||||
// Create a test markdown command file without frontmatter
|
||||
const mdContent = 'This is a simple prompt without frontmatter.';
|
||||
|
||||
const commandPath = path.join(tempDir, 'simple-command.md');
|
||||
await fs.writeFile(commandPath, mdContent, 'utf-8');
|
||||
|
||||
const loader = new FileCommandLoader(null);
|
||||
const originalMethod = loader['getCommandDirectories'];
|
||||
loader['getCommandDirectories'] = () => [{ path: tempDir }];
|
||||
|
||||
try {
|
||||
const commands = await loader.loadCommands(new AbortController().signal);
|
||||
|
||||
const simpleCommand = commands.find(
|
||||
(cmd) => cmd.name === 'simple-command',
|
||||
);
|
||||
expect(simpleCommand).toBeDefined();
|
||||
expect(simpleCommand?.description).toContain('Custom command from');
|
||||
} finally {
|
||||
loader['getCommandDirectories'] = originalMethod;
|
||||
}
|
||||
});
|
||||
|
||||
it('should load both toml and markdown commands', async () => {
|
||||
// Create both TOML and Markdown files
|
||||
const tomlContent = `prompt = "TOML prompt"
|
||||
description = "TOML command"`;
|
||||
|
||||
const mdContent = `---
|
||||
description: Markdown command
|
||||
---
|
||||
|
||||
Markdown prompt`;
|
||||
|
||||
await fs.writeFile(
|
||||
path.join(tempDir, 'toml-cmd.toml'),
|
||||
tomlContent,
|
||||
'utf-8',
|
||||
);
|
||||
await fs.writeFile(path.join(tempDir, 'md-cmd.md'), mdContent, 'utf-8');
|
||||
|
||||
const loader = new FileCommandLoader(null);
|
||||
const originalMethod = loader['getCommandDirectories'];
|
||||
loader['getCommandDirectories'] = () => [{ path: tempDir }];
|
||||
|
||||
try {
|
||||
const commands = await loader.loadCommands(new AbortController().signal);
|
||||
|
||||
const tomlCommand = commands.find((cmd) => cmd.name === 'toml-cmd');
|
||||
const mdCommand = commands.find((cmd) => cmd.name === 'md-cmd');
|
||||
|
||||
expect(tomlCommand).toBeDefined();
|
||||
expect(tomlCommand?.description).toBe('TOML command');
|
||||
|
||||
expect(mdCommand).toBeDefined();
|
||||
expect(mdCommand?.description).toBe('Markdown command');
|
||||
} finally {
|
||||
loader['getCommandDirectories'] = originalMethod;
|
||||
}
|
||||
});
|
||||
});
|
||||
@@ -568,9 +568,9 @@ describe('FileCommandLoader', () => {
|
||||
|
||||
expect(commands).toHaveLength(3);
|
||||
const commandNames = commands.map((cmd) => cmd.name);
|
||||
expect(commandNames).toEqual(['user', 'project', 'ext']);
|
||||
expect(commandNames).toEqual(['user', 'project', 'test-ext:ext']);
|
||||
|
||||
const extCommand = commands.find((cmd) => cmd.name === 'ext');
|
||||
const extCommand = commands.find((cmd) => cmd.name === 'test-ext:ext');
|
||||
expect(extCommand?.extensionName).toBe('test-ext');
|
||||
expect(extCommand?.description).toMatch(/^\[test-ext\]/);
|
||||
});
|
||||
@@ -656,14 +656,14 @@ describe('FileCommandLoader', () => {
|
||||
expect(result1.content).toEqual([{ text: 'Project deploy command' }]);
|
||||
}
|
||||
|
||||
expect(commands[2].name).toBe('deploy');
|
||||
expect(commands[2].name).toBe('test-ext:deploy');
|
||||
expect(commands[2].extensionName).toBe('test-ext');
|
||||
expect(commands[2].description).toMatch(/^\[test-ext\]/);
|
||||
const result2 = await commands[2].action?.(
|
||||
createMockCommandContext({
|
||||
invocation: {
|
||||
raw: '/deploy',
|
||||
name: 'deploy',
|
||||
raw: '/test-ext:deploy',
|
||||
name: 'test-ext:deploy',
|
||||
args: '',
|
||||
},
|
||||
}),
|
||||
@@ -729,7 +729,7 @@ describe('FileCommandLoader', () => {
|
||||
const commands = await loader.loadCommands(signal);
|
||||
|
||||
expect(commands).toHaveLength(1);
|
||||
expect(commands[0].name).toBe('active');
|
||||
expect(commands[0].name).toBe('active-ext:active');
|
||||
expect(commands[0].extensionName).toBe('active-ext');
|
||||
expect(commands[0].description).toMatch(/^\[active-ext\]/);
|
||||
});
|
||||
@@ -803,17 +803,17 @@ describe('FileCommandLoader', () => {
|
||||
expect(commands).toHaveLength(3);
|
||||
|
||||
const commandNames = commands.map((cmd) => cmd.name).sort();
|
||||
expect(commandNames).toEqual(['b:c', 'b:d:e', 'simple']);
|
||||
expect(commandNames).toEqual(['a:b:c', 'a:b:d:e', 'a:simple']);
|
||||
|
||||
const nestedCmd = commands.find((cmd) => cmd.name === 'b:c');
|
||||
const nestedCmd = commands.find((cmd) => cmd.name === 'a:b:c');
|
||||
expect(nestedCmd?.extensionName).toBe('a');
|
||||
expect(nestedCmd?.description).toMatch(/^\[a\]/);
|
||||
expect(nestedCmd).toBeDefined();
|
||||
const result = await nestedCmd!.action?.(
|
||||
createMockCommandContext({
|
||||
invocation: {
|
||||
raw: '/b:c',
|
||||
name: 'b:c',
|
||||
raw: '/a:b:c',
|
||||
name: 'a:b:c',
|
||||
args: '',
|
||||
},
|
||||
}),
|
||||
|
||||
@@ -5,34 +5,23 @@
|
||||
*/
|
||||
|
||||
import { promises as fs } from 'node:fs';
|
||||
import * as fsSync from 'node:fs';
|
||||
import path from 'node:path';
|
||||
import toml from '@iarna/toml';
|
||||
import { glob } from 'glob';
|
||||
import { z } from 'zod';
|
||||
import type { Config } from '@qwen-code/qwen-code-core';
|
||||
import { Storage } from '@qwen-code/qwen-code-core';
|
||||
import { EXTENSIONS_CONFIG_FILENAME, Storage } from '@qwen-code/qwen-code-core';
|
||||
import type { ICommandLoader } from './types.js';
|
||||
import type {
|
||||
CommandContext,
|
||||
SlashCommand,
|
||||
SlashCommandActionReturn,
|
||||
} from '../ui/commands/types.js';
|
||||
import { CommandKind } from '../ui/commands/types.js';
|
||||
import { DefaultArgumentProcessor } from './prompt-processors/argumentProcessor.js';
|
||||
import type {
|
||||
IPromptProcessor,
|
||||
PromptPipelineContent,
|
||||
} from './prompt-processors/types.js';
|
||||
import {
|
||||
SHORTHAND_ARGS_PLACEHOLDER,
|
||||
SHELL_INJECTION_TRIGGER,
|
||||
AT_FILE_INJECTION_TRIGGER,
|
||||
} from './prompt-processors/types.js';
|
||||
parseMarkdownCommand,
|
||||
MarkdownCommandDefSchema,
|
||||
} from './markdown-command-parser.js';
|
||||
import {
|
||||
ConfirmationRequiredError,
|
||||
ShellProcessor,
|
||||
} from './prompt-processors/shellProcessor.js';
|
||||
import { AtFileProcessor } from './prompt-processors/atFileProcessor.js';
|
||||
createSlashCommandFromDefinition,
|
||||
type CommandDefinition,
|
||||
} from './command-factory.js';
|
||||
import type { SlashCommand } from '../ui/commands/types.js';
|
||||
|
||||
interface CommandDirectory {
|
||||
path: string;
|
||||
@@ -96,7 +85,12 @@ export class FileCommandLoader implements ICommandLoader {
|
||||
const commandDirs = this.getCommandDirectories();
|
||||
for (const dirInfo of commandDirs) {
|
||||
try {
|
||||
const files = await glob('**/*.toml', {
|
||||
// Scan both .toml and .md files
|
||||
const tomlFiles = await glob('**/*.toml', {
|
||||
...globOptions,
|
||||
cwd: dirInfo.path,
|
||||
});
|
||||
const mdFiles = await glob('**/*.md', {
|
||||
...globOptions,
|
||||
cwd: dirInfo.path,
|
||||
});
|
||||
@@ -105,18 +99,28 @@ export class FileCommandLoader implements ICommandLoader {
|
||||
return [];
|
||||
}
|
||||
|
||||
const commandPromises = files.map((file) =>
|
||||
this.parseAndAdaptFile(
|
||||
// Process TOML files
|
||||
const tomlCommandPromises = tomlFiles.map((file) =>
|
||||
this.parseAndAdaptTomlFile(
|
||||
path.join(dirInfo.path, file),
|
||||
dirInfo.path,
|
||||
dirInfo.extensionName,
|
||||
),
|
||||
);
|
||||
|
||||
const commands = (await Promise.all(commandPromises)).filter(
|
||||
(cmd): cmd is SlashCommand => cmd !== null,
|
||||
// Process Markdown files
|
||||
const mdCommandPromises = mdFiles.map((file) =>
|
||||
this.parseAndAdaptMarkdownFile(
|
||||
path.join(dirInfo.path, file),
|
||||
dirInfo.path,
|
||||
dirInfo.extensionName,
|
||||
),
|
||||
);
|
||||
|
||||
const commands = (
|
||||
await Promise.all([...tomlCommandPromises, ...mdCommandPromises])
|
||||
).filter((cmd): cmd is SlashCommand => cmd !== null);
|
||||
|
||||
// Add all commands without deduplication
|
||||
allCommands.push(...commands);
|
||||
} catch (error) {
|
||||
@@ -159,17 +163,73 @@ export class FileCommandLoader implements ICommandLoader {
|
||||
.filter((ext) => ext.isActive)
|
||||
.sort((a, b) => a.name.localeCompare(b.name)); // Sort alphabetically for deterministic loading
|
||||
|
||||
const extensionCommandDirs = activeExtensions.map((ext) => ({
|
||||
path: path.join(ext.path, 'commands'),
|
||||
extensionName: ext.name,
|
||||
}));
|
||||
// Collect command directories from each extension
|
||||
for (const ext of activeExtensions) {
|
||||
// Get commands paths from extension config
|
||||
const commandsPaths = this.getExtensionCommandsPaths(ext);
|
||||
|
||||
dirs.push(...extensionCommandDirs);
|
||||
for (const cmdPath of commandsPaths) {
|
||||
dirs.push({
|
||||
path: cmdPath,
|
||||
extensionName: ext.name,
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return dirs;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get command directory paths for an extension.
|
||||
* Returns paths from config.commands if specified, otherwise defaults to 'commands' directory.
|
||||
*/
|
||||
private getExtensionCommandsPaths(ext: {
|
||||
path: string;
|
||||
name: string;
|
||||
}): string[] {
|
||||
// Try to get extension config
|
||||
try {
|
||||
const configPath = path.join(ext.path, EXTENSIONS_CONFIG_FILENAME);
|
||||
if (fsSync.existsSync(configPath)) {
|
||||
const configContent = fsSync.readFileSync(configPath, 'utf-8');
|
||||
const config = JSON.parse(configContent);
|
||||
|
||||
if (config.commands) {
|
||||
const commandsArray = Array.isArray(config.commands)
|
||||
? config.commands
|
||||
: [config.commands];
|
||||
|
||||
return commandsArray
|
||||
.map((cmdPath: string) =>
|
||||
path.isAbsolute(cmdPath) ? cmdPath : path.join(ext.path, cmdPath),
|
||||
)
|
||||
.filter((cmdPath: string) => {
|
||||
try {
|
||||
return fsSync.existsSync(cmdPath);
|
||||
} catch {
|
||||
return false;
|
||||
}
|
||||
});
|
||||
}
|
||||
}
|
||||
} catch (error) {
|
||||
console.warn(`Failed to read extension config for ${ext.name}:`, error);
|
||||
}
|
||||
|
||||
// Default fallback: use 'commands' directory
|
||||
const defaultPath = path.join(ext.path, 'commands');
|
||||
try {
|
||||
if (fsSync.existsSync(defaultPath)) {
|
||||
return [defaultPath];
|
||||
}
|
||||
} catch {
|
||||
// Ignore
|
||||
}
|
||||
|
||||
return [];
|
||||
}
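// For illustration (extension name and paths below are hypothetical): a qwen-extension.json
// such as
//
//   { "name": "my-ext", "version": "1.0.0", "commands": ["commands", "extra/commands"] }
//
// yields one command directory per entry, resolved against the extension root and kept only
// if it exists on disk; without a "commands" field the loader falls back to <extension>/commands.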
|
||||
|
||||
/**
|
||||
* Parses a single .toml file and transforms it into a SlashCommand object.
|
||||
* @param filePath The absolute path to the .toml file.
|
||||
@@ -177,7 +237,7 @@ export class FileCommandLoader implements ICommandLoader {
|
||||
* @param extensionName Optional extension name to prefix commands with.
|
||||
* @returns A promise resolving to a SlashCommand, or null if the file is invalid.
|
||||
*/
|
||||
private async parseAndAdaptFile(
|
||||
private async parseAndAdaptTomlFile(
|
||||
filePath: string,
|
||||
baseDir: string,
|
||||
extensionName?: string,
|
||||
@@ -216,104 +276,79 @@ export class FileCommandLoader implements ICommandLoader {
|
||||
|
||||
const validDef = validationResult.data;
|
||||
|
||||
const relativePathWithExt = path.relative(baseDir, filePath);
|
||||
const relativePath = relativePathWithExt.substring(
|
||||
0,
|
||||
relativePathWithExt.length - 5, // length of '.toml'
|
||||
);
|
||||
const baseCommandName = relativePath
|
||||
.split(path.sep)
|
||||
// Sanitize each path segment to prevent ambiguity. Since ':' is our
|
||||
// namespace separator, we replace any literal colons in filenames
|
||||
// with underscores to avoid naming conflicts.
|
||||
.map((segment) => segment.replaceAll(':', '_'))
|
||||
.join(':');
|
||||
|
||||
// Add extension name tag for extension commands
|
||||
const defaultDescription = `Custom command from ${path.basename(filePath)}`;
|
||||
let description = validDef.description || defaultDescription;
|
||||
if (extensionName) {
|
||||
description = `[${extensionName}] ${description}`;
|
||||
}
|
||||
|
||||
const processors: IPromptProcessor[] = [];
|
||||
const usesArgs = validDef.prompt.includes(SHORTHAND_ARGS_PLACEHOLDER);
|
||||
const usesShellInjection = validDef.prompt.includes(
|
||||
SHELL_INJECTION_TRIGGER,
|
||||
);
|
||||
const usesAtFileInjection = validDef.prompt.includes(
|
||||
AT_FILE_INJECTION_TRIGGER,
|
||||
);
|
||||
|
||||
// 1. @-File Injection (Security First).
|
||||
// This runs first to ensure we're not executing shell commands that
|
||||
// could dynamically generate malicious @-paths.
|
||||
if (usesAtFileInjection) {
|
||||
processors.push(new AtFileProcessor(baseCommandName));
|
||||
}
|
||||
|
||||
// 2. Argument and Shell Injection.
|
||||
// This runs after file content has been safely injected.
|
||||
if (usesShellInjection || usesArgs) {
|
||||
processors.push(new ShellProcessor(baseCommandName));
|
||||
}
|
||||
|
||||
// 3. Default Argument Handling.
|
||||
// Appends the raw invocation if no explicit {{args}} are used.
|
||||
if (!usesArgs) {
|
||||
processors.push(new DefaultArgumentProcessor());
|
||||
}
|
||||
|
||||
return {
|
||||
name: baseCommandName,
|
||||
description,
|
||||
kind: CommandKind.FILE,
|
||||
// Use factory to create command
|
||||
return createSlashCommandFromDefinition(
|
||||
filePath,
|
||||
baseDir,
|
||||
validDef,
|
||||
extensionName,
|
||||
action: async (
|
||||
context: CommandContext,
|
||||
_args: string,
|
||||
): Promise<SlashCommandActionReturn> => {
|
||||
if (!context.invocation) {
|
||||
console.error(
|
||||
`[FileCommandLoader] Critical error: Command '${baseCommandName}' was executed without invocation context.`,
|
||||
);
|
||||
return {
|
||||
type: 'submit_prompt',
|
||||
content: [{ text: validDef.prompt }], // Fallback to unprocessed prompt
|
||||
};
|
||||
}
|
||||
'.toml',
|
||||
);
|
||||
}
|
||||
|
||||
try {
|
||||
let processedContent: PromptPipelineContent = [
|
||||
{ text: validDef.prompt },
|
||||
];
|
||||
for (const processor of processors) {
|
||||
processedContent = await processor.process(
|
||||
processedContent,
|
||||
context,
|
||||
);
|
||||
}
|
||||
/**
|
||||
* Parses a single .md file and transforms it into a SlashCommand object.
|
||||
* @param filePath The absolute path to the .md file.
|
||||
* @param baseDir The root command directory for name calculation.
|
||||
* @param extensionName Optional extension name to prefix commands with.
|
||||
* @returns A promise resolving to a SlashCommand, or null if the file is invalid.
|
||||
*/
|
||||
private async parseAndAdaptMarkdownFile(
|
||||
filePath: string,
|
||||
baseDir: string,
|
||||
extensionName?: string,
|
||||
): Promise<SlashCommand | null> {
|
||||
let fileContent: string;
|
||||
try {
|
||||
fileContent = await fs.readFile(filePath, 'utf-8');
|
||||
} catch (error: unknown) {
|
||||
console.error(
|
||||
`[FileCommandLoader] Failed to read file ${filePath}:`,
|
||||
error instanceof Error ? error.message : String(error),
|
||||
);
|
||||
return null;
|
||||
}
|
||||
|
||||
return {
|
||||
type: 'submit_prompt',
|
||||
content: processedContent,
|
||||
};
|
||||
} catch (e) {
|
||||
// Check if it's our specific error type
|
||||
if (e instanceof ConfirmationRequiredError) {
|
||||
// Halt and request confirmation from the UI layer.
|
||||
return {
|
||||
type: 'confirm_shell_commands',
|
||||
commandsToConfirm: e.commandsToConfirm,
|
||||
originalInvocation: {
|
||||
raw: context.invocation.raw,
|
||||
},
|
||||
};
|
||||
}
|
||||
// Re-throw other errors to be handled by the global error handler.
|
||||
throw e;
|
||||
}
|
||||
},
|
||||
let parsed: ReturnType<typeof parseMarkdownCommand>;
|
||||
try {
|
||||
parsed = parseMarkdownCommand(fileContent);
|
||||
} catch (error: unknown) {
|
||||
console.error(
|
||||
`[FileCommandLoader] Failed to parse Markdown file ${filePath}:`,
|
||||
error instanceof Error ? error.message : String(error),
|
||||
);
|
||||
return null;
|
||||
}
|
||||
|
||||
const validationResult = MarkdownCommandDefSchema.safeParse(parsed);
|
||||
|
||||
if (!validationResult.success) {
|
||||
console.error(
|
||||
`[FileCommandLoader] Skipping invalid command file: ${filePath}. Validation errors:`,
|
||||
validationResult.error.flatten(),
|
||||
);
|
||||
return null;
|
||||
}
|
||||
|
||||
const validDef = validationResult.data;
|
||||
|
||||
// Convert to CommandDefinition format
|
||||
const definition: CommandDefinition = {
|
||||
prompt: validDef.prompt,
|
||||
description:
|
||||
validDef.frontmatter?.description &&
|
||||
typeof validDef.frontmatter.description === 'string'
|
||||
? validDef.frontmatter.description
|
||||
: undefined,
|
||||
};
|
||||
|
||||
// Use factory to create command
|
||||
return createSlashCommandFromDefinition(
|
||||
filePath,
|
||||
baseDir,
|
||||
definition,
|
||||
extensionName,
|
||||
'.md',
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -28,7 +28,7 @@ const mockPrompt = {
|
||||
{ name: 'trail', required: false, description: "The animal's trail." },
|
||||
],
|
||||
invoke: vi.fn().mockResolvedValue({
|
||||
messages: [{ content: { text: 'Hello, world!' } }],
|
||||
messages: [{ content: { type: 'text', text: 'Hello, world!' } }],
|
||||
}),
|
||||
};
|
||||
|
||||
|
||||
@@ -123,7 +123,10 @@ export class McpPromptLoader implements ICommandLoader {
|
||||
};
|
||||
}
|
||||
|
||||
if (!result.messages?.[0]?.content?.['text']) {
|
||||
const firstMessage = result.messages?.[0];
|
||||
const content = firstMessage?.content;
|
||||
|
||||
if (content?.type !== 'text') {
|
||||
return {
|
||||
type: 'message',
|
||||
messageType: 'error',
|
||||
@@ -134,7 +137,7 @@ export class McpPromptLoader implements ICommandLoader {
|
||||
|
||||
return {
|
||||
type: 'submit_prompt',
|
||||
content: JSON.stringify(result.messages[0].content.text),
|
||||
content: JSON.stringify(content.text),
|
||||
};
|
||||
} catch (error) {
|
||||
return {
|
||||
|
||||
159 packages/cli/src/services/command-factory.ts (Normal file)
@@ -0,0 +1,159 @@
|
||||
/**
|
||||
* @license
|
||||
* Copyright 2025 Google LLC
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
/**
|
||||
* This file contains helper functions for FileCommandLoader to create SlashCommand
|
||||
* objects from parsed command definitions (TOML or Markdown).
|
||||
*/
|
||||
|
||||
import path from 'node:path';
|
||||
import type {
|
||||
CommandContext,
|
||||
SlashCommand,
|
||||
SlashCommandActionReturn,
|
||||
} from '../ui/commands/types.js';
|
||||
import { CommandKind } from '../ui/commands/types.js';
|
||||
import { DefaultArgumentProcessor } from './prompt-processors/argumentProcessor.js';
|
||||
import type {
|
||||
IPromptProcessor,
|
||||
PromptPipelineContent,
|
||||
} from './prompt-processors/types.js';
|
||||
import {
|
||||
SHORTHAND_ARGS_PLACEHOLDER,
|
||||
SHELL_INJECTION_TRIGGER,
|
||||
AT_FILE_INJECTION_TRIGGER,
|
||||
} from './prompt-processors/types.js';
|
||||
import {
|
||||
ConfirmationRequiredError,
|
||||
ShellProcessor,
|
||||
} from './prompt-processors/shellProcessor.js';
|
||||
import { AtFileProcessor } from './prompt-processors/atFileProcessor.js';
|
||||
|
||||
export interface CommandDefinition {
|
||||
prompt: string;
|
||||
description?: string;
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates a SlashCommand from a parsed command definition.
|
||||
* This function is used by both TOML and Markdown command loaders.
|
||||
*
|
||||
* @param filePath The absolute path to the command file
|
||||
* @param baseDir The root command directory for name calculation
|
||||
* @param definition The parsed command definition (prompt and optional description)
|
||||
* @param extensionName Optional extension name to prefix commands with
|
||||
* @param fileExtension The file extension (e.g., '.toml' or '.md')
|
||||
* @returns A SlashCommand object
|
||||
*/
|
||||
export function createSlashCommandFromDefinition(
|
||||
filePath: string,
|
||||
baseDir: string,
|
||||
definition: CommandDefinition,
|
||||
extensionName: string | undefined,
|
||||
fileExtension: string,
|
||||
): SlashCommand {
|
||||
const relativePathWithExt = path.relative(baseDir, filePath);
|
||||
const relativePath = relativePathWithExt.substring(
|
||||
0,
|
||||
relativePathWithExt.length - fileExtension.length,
|
||||
);
|
||||
const baseCommandName = relativePath
|
||||
.split(path.sep)
|
||||
// Sanitize each path segment to prevent ambiguity. Since ':' is our
|
||||
// namespace separator, we replace any literal colons in filenames
|
||||
// with underscores to avoid naming conflicts.
|
||||
.map((segment) => segment.replaceAll(':', '_'))
|
||||
.join(':');
|
||||
|
||||
// Prefix command name with extension name if provided
|
||||
const commandName = extensionName
|
||||
? `${extensionName}:${baseCommandName}`
|
||||
: baseCommandName;
|
||||
|
||||
// Add extension name tag for extension commands
|
||||
const defaultDescription = `Custom command from ${path.basename(filePath)}`;
|
||||
let description = definition.description || defaultDescription;
|
||||
if (extensionName) {
|
||||
description = `[${extensionName}] ${description}`;
|
||||
}
|
||||
|
||||
const processors: IPromptProcessor[] = [];
|
||||
const usesArgs = definition.prompt.includes(SHORTHAND_ARGS_PLACEHOLDER);
|
||||
const usesShellInjection = definition.prompt.includes(
|
||||
SHELL_INJECTION_TRIGGER,
|
||||
);
|
||||
const usesAtFileInjection = definition.prompt.includes(
|
||||
AT_FILE_INJECTION_TRIGGER,
|
||||
);
|
||||
|
||||
// 1. @-File Injection (Security First).
|
||||
// This runs first to ensure we're not executing shell commands that
|
||||
// could dynamically generate malicious @-paths.
|
||||
if (usesAtFileInjection) {
|
||||
processors.push(new AtFileProcessor(baseCommandName));
|
||||
}
|
||||
|
||||
// 2. Argument and Shell Injection.
|
||||
// This runs after file content has been safely injected.
|
||||
if (usesShellInjection || usesArgs) {
|
||||
processors.push(new ShellProcessor(baseCommandName));
|
||||
}
|
||||
|
||||
// 3. Default Argument Handling.
|
||||
// Appends the raw invocation if no explicit {{args}} are used.
|
||||
if (!usesArgs) {
|
||||
processors.push(new DefaultArgumentProcessor());
|
||||
}
|
||||
|
||||
return {
|
||||
name: commandName,
|
||||
description,
|
||||
kind: CommandKind.FILE,
|
||||
extensionName,
|
||||
action: async (
|
||||
context: CommandContext,
|
||||
_args: string,
|
||||
): Promise<SlashCommandActionReturn> => {
|
||||
if (!context.invocation) {
|
||||
console.error(
|
||||
`[FileCommandLoader] Critical error: Command '${commandName}' was executed without invocation context.`,
|
||||
);
|
||||
return {
|
||||
type: 'submit_prompt',
|
||||
content: [{ text: definition.prompt }], // Fallback to unprocessed prompt
|
||||
};
|
||||
}
|
||||
|
||||
try {
|
||||
let processedContent: PromptPipelineContent = [
|
||||
{ text: definition.prompt },
|
||||
];
|
||||
for (const processor of processors) {
|
||||
processedContent = await processor.process(processedContent, context);
|
||||
}
|
||||
|
||||
return {
|
||||
type: 'submit_prompt',
|
||||
content: processedContent,
|
||||
};
|
||||
} catch (e) {
|
||||
// Check if it's our specific error type
|
||||
if (e instanceof ConfirmationRequiredError) {
|
||||
// Halt and request confirmation from the UI layer.
|
||||
return {
|
||||
type: 'confirm_shell_commands',
|
||||
commandsToConfirm: e.commandsToConfirm,
|
||||
originalInvocation: {
|
||||
raw: context.invocation.raw,
|
||||
},
|
||||
};
|
||||
}
|
||||
// Re-throw other errors to be handled by the global error handler.
|
||||
throw e;
|
||||
}
|
||||
},
|
||||
};
|
||||
}
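As a quick illustration of the naming rules above (paths are hypothetical), a file at
<baseDir>/git/sync.md loaded for extension "my-ext" resolves to:

    createSlashCommandFromDefinition(filePath, baseDir, def, 'my-ext', '.md').name
    // => 'my-ext:git:sync'  (path segments joined with ':', prefixed with the extension name)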
|
||||
253 packages/cli/src/services/command-migration-tool.test.ts (Normal file)
@@ -0,0 +1,253 @@
|
||||
/**
|
||||
* @license
|
||||
* Copyright 2025 Google LLC
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
import { describe, it, expect, beforeEach, afterEach } from 'vitest';
|
||||
import { promises as fs } from 'node:fs';
|
||||
import path from 'node:path';
|
||||
import os from 'node:os';
|
||||
import {
|
||||
detectTomlCommands,
|
||||
migrateTomlCommands,
|
||||
generateMigrationPrompt,
|
||||
} from './command-migration-tool.js';
|
||||
|
||||
describe('command-migration-tool', () => {
|
||||
let tempDir: string;
|
||||
|
||||
beforeEach(async () => {
|
||||
tempDir = await fs.mkdtemp(path.join(os.tmpdir(), 'qwen-migration-test-'));
|
||||
});
|
||||
|
||||
afterEach(async () => {
|
||||
await fs.rm(tempDir, { recursive: true, force: true });
|
||||
});
|
||||
|
||||
describe('detectTomlCommands', () => {
|
||||
it('should detect TOML files in directory', async () => {
|
||||
// Create some TOML files
|
||||
await fs.writeFile(
|
||||
path.join(tempDir, 'cmd1.toml'),
|
||||
'prompt = "test"',
|
||||
'utf-8',
|
||||
);
|
||||
await fs.writeFile(
|
||||
path.join(tempDir, 'cmd2.toml'),
|
||||
'prompt = "test"',
|
||||
'utf-8',
|
||||
);
|
||||
|
||||
const tomlFiles = await detectTomlCommands(tempDir);
|
||||
|
||||
expect(tomlFiles).toHaveLength(2);
|
||||
expect(tomlFiles).toContain('cmd1.toml');
|
||||
expect(tomlFiles).toContain('cmd2.toml');
|
||||
});
|
||||
|
||||
it('should detect TOML files in subdirectories', async () => {
|
||||
const subdir = path.join(tempDir, 'subdir');
|
||||
await fs.mkdir(subdir);
|
||||
await fs.writeFile(
|
||||
path.join(subdir, 'nested.toml'),
|
||||
'prompt = "test"',
|
||||
'utf-8',
|
||||
);
|
||||
|
||||
const tomlFiles = await detectTomlCommands(tempDir);
|
||||
|
||||
expect(tomlFiles).toContain('subdir/nested.toml');
|
||||
});
|
||||
|
||||
it('should return empty array for non-existent directory', async () => {
|
||||
const nonExistent = path.join(tempDir, 'does-not-exist');
|
||||
|
||||
const tomlFiles = await detectTomlCommands(nonExistent);
|
||||
|
||||
expect(tomlFiles).toEqual([]);
|
||||
});
|
||||
|
||||
it('should not detect non-TOML files', async () => {
|
||||
await fs.writeFile(path.join(tempDir, 'file.txt'), 'text', 'utf-8');
|
||||
await fs.writeFile(path.join(tempDir, 'file.md'), 'markdown', 'utf-8');
|
||||
|
||||
const tomlFiles = await detectTomlCommands(tempDir);
|
||||
|
||||
expect(tomlFiles).toHaveLength(0);
|
||||
});
|
||||
});
|
||||
|
||||
describe('migrateTomlCommands', () => {
|
||||
it('should migrate TOML file to Markdown', async () => {
|
||||
const tomlContent = `prompt = "Test prompt"
|
||||
description = "Test description"`;
|
||||
|
||||
await fs.writeFile(path.join(tempDir, 'test.toml'), tomlContent, 'utf-8');
|
||||
|
||||
const result = await migrateTomlCommands({
|
||||
commandDir: tempDir,
|
||||
createBackup: true,
|
||||
deleteOriginal: false,
|
||||
});
|
||||
|
||||
expect(result.success).toBe(true);
|
||||
expect(result.convertedFiles).toContain('test.toml');
|
||||
expect(result.failedFiles).toHaveLength(0);
|
||||
|
||||
// Check Markdown file was created
|
||||
const mdPath = path.join(tempDir, 'test.md');
|
||||
const mdContent = await fs.readFile(mdPath, 'utf-8');
|
||||
expect(mdContent).toContain('description: Test description');
|
||||
expect(mdContent).toContain('Test prompt');
|
||||
|
||||
// Check backup was created (original renamed to .toml.backup)
|
||||
const backupPath = path.join(tempDir, 'test.toml.backup');
|
||||
const backupExists = await fs
|
||||
.access(backupPath)
|
||||
.then(() => true)
|
||||
.catch(() => false);
|
||||
expect(backupExists).toBe(true);
|
||||
|
||||
// Original .toml file should not exist (renamed to .backup)
|
||||
const tomlExists = await fs
|
||||
.access(path.join(tempDir, 'test.toml'))
|
||||
.then(() => true)
|
||||
.catch(() => false);
|
||||
expect(tomlExists).toBe(false);
|
||||
});
|
||||
|
||||
it('should delete original TOML when deleteOriginal is true', async () => {
|
||||
await fs.writeFile(
|
||||
path.join(tempDir, 'delete-me.toml'),
|
||||
'prompt = "Test"',
|
||||
'utf-8',
|
||||
);
|
||||
|
||||
await migrateTomlCommands({
|
||||
commandDir: tempDir,
|
||||
createBackup: false,
|
||||
deleteOriginal: true,
|
||||
});
|
||||
|
||||
// Original should be deleted
|
||||
const tomlExists = await fs
|
||||
.access(path.join(tempDir, 'delete-me.toml'))
|
||||
.then(() => true)
|
||||
.catch(() => false);
|
||||
expect(tomlExists).toBe(false);
|
||||
|
||||
// Markdown should exist
|
||||
const mdExists = await fs
|
||||
.access(path.join(tempDir, 'delete-me.md'))
|
||||
.then(() => true)
|
||||
.catch(() => false);
|
||||
expect(mdExists).toBe(true);
|
||||
|
||||
// Backup should not exist (createBackup was false)
|
||||
const backupExists = await fs
|
||||
.access(path.join(tempDir, 'delete-me.toml.backup'))
|
||||
.then(() => true)
|
||||
.catch(() => false);
|
||||
expect(backupExists).toBe(false);
|
||||
});
|
||||
|
||||
it('should fail if Markdown file already exists', async () => {
|
||||
await fs.writeFile(
|
||||
path.join(tempDir, 'existing.toml'),
|
||||
'prompt = "Test"',
|
||||
'utf-8',
|
||||
);
|
||||
await fs.writeFile(
|
||||
path.join(tempDir, 'existing.md'),
|
||||
'Already exists',
|
||||
'utf-8',
|
||||
);
|
||||
|
||||
const result = await migrateTomlCommands({
|
||||
commandDir: tempDir,
|
||||
createBackup: false,
|
||||
});
|
||||
|
||||
expect(result.success).toBe(false);
|
||||
expect(result.failedFiles).toHaveLength(1);
|
||||
expect(result.failedFiles[0].file).toBe('existing.toml');
|
||||
expect(result.failedFiles[0].error).toContain('already exists');
|
||||
});
|
||||
|
||||
it('should handle migration without backup', async () => {
|
||||
await fs.writeFile(
|
||||
path.join(tempDir, 'no-backup.toml'),
|
||||
'prompt = "Test"',
|
||||
'utf-8',
|
||||
);
|
||||
|
||||
const result = await migrateTomlCommands({
|
||||
commandDir: tempDir,
|
||||
createBackup: false,
|
||||
deleteOriginal: false,
|
||||
});
|
||||
|
||||
expect(result.success).toBe(true);
|
||||
|
||||
// Original TOML file should still exist (no backup, no delete)
|
||||
const tomlExists = await fs
|
||||
.access(path.join(tempDir, 'no-backup.toml'))
|
||||
.then(() => true)
|
||||
.catch(() => false);
|
||||
expect(tomlExists).toBe(true);
|
||||
|
||||
// Backup should not exist
|
||||
const backupExists = await fs
|
||||
.access(path.join(tempDir, 'no-backup.toml.backup'))
|
||||
.then(() => true)
|
||||
.catch(() => false);
|
||||
expect(backupExists).toBe(false);
|
||||
});
|
||||
|
||||
it('should return success with empty results for no TOML files', async () => {
|
||||
const result = await migrateTomlCommands({
|
||||
commandDir: tempDir,
|
||||
});
|
||||
|
||||
expect(result.success).toBe(true);
|
||||
expect(result.convertedFiles).toHaveLength(0);
|
||||
expect(result.failedFiles).toHaveLength(0);
|
||||
});
|
||||
});
|
||||
|
||||
describe('generateMigrationPrompt', () => {
|
||||
it('should generate prompt for few files', () => {
|
||||
const files = ['cmd1.toml', 'cmd2.toml'];
|
||||
|
||||
const prompt = generateMigrationPrompt(files);
|
||||
|
||||
expect(prompt).toContain('Found 2 command files');
|
||||
expect(prompt).toContain('cmd1.toml');
|
||||
expect(prompt).toContain('cmd2.toml');
|
||||
expect(prompt).toContain('qwen-code migrate-commands');
|
||||
});
|
||||
|
||||
it('should truncate file list for many files', () => {
|
||||
const files = Array.from({ length: 10 }, (_, i) => `cmd${i}.toml`);
|
||||
|
||||
const prompt = generateMigrationPrompt(files);
|
||||
|
||||
expect(prompt).toContain('Found 10 command files');
|
||||
expect(prompt).toContain('... and 7 more');
|
||||
});
|
||||
|
||||
it('should return empty string for no files', () => {
|
||||
const prompt = generateMigrationPrompt([]);
|
||||
|
||||
expect(prompt).toBe('');
|
||||
});
|
||||
|
||||
it('should use singular form for single file', () => {
|
||||
const prompt = generateMigrationPrompt(['single.toml']);
|
||||
|
||||
expect(prompt).toContain('Found 1 command file');
|
||||
// Don't check for plural since "files" appears in other parts of the message
|
||||
});
|
||||
});
|
||||
});
|
||||
169 packages/cli/src/services/command-migration-tool.ts (Normal file)
@@ -0,0 +1,169 @@
|
||||
/**
|
||||
* @license
|
||||
* Copyright 2025 Google LLC
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
/**
|
||||
* Tool for migrating TOML commands to Markdown format.
|
||||
*/
|
||||
|
||||
import { promises as fs } from 'node:fs';
|
||||
import path from 'node:path';
|
||||
import { glob } from 'glob';
|
||||
import { convertTomlToMarkdown } from '@qwen-code/qwen-code-core';
|
||||
|
||||
export interface MigrationResult {
|
||||
success: boolean;
|
||||
convertedFiles: string[];
|
||||
failedFiles: Array<{ file: string; error: string }>;
|
||||
}
|
||||
|
||||
export interface MigrationOptions {
|
||||
/** Directory containing command files */
|
||||
commandDir: string;
|
||||
/** Whether to create backups (default: true) */
|
||||
createBackup?: boolean;
|
||||
/** Whether to delete original TOML files after migration (default: false) */
|
||||
deleteOriginal?: boolean;
|
||||
}
|
||||
|
||||
/**
|
||||
* Scans a directory for TOML command files.
|
||||
* @param commandDir Directory to scan
|
||||
* @returns Array of TOML file paths (relative to commandDir)
|
||||
*/
|
||||
export async function detectTomlCommands(
|
||||
commandDir: string,
|
||||
): Promise<string[]> {
|
||||
try {
|
||||
await fs.access(commandDir);
|
||||
} catch {
|
||||
// Directory doesn't exist
|
||||
return [];
|
||||
}
|
||||
|
||||
const tomlFiles = await glob('**/*.toml', {
|
||||
cwd: commandDir,
|
||||
nodir: true,
|
||||
dot: false,
|
||||
});
|
||||
|
||||
return tomlFiles;
|
||||
}
|
||||
|
||||
/**
|
||||
* Migrates TOML command files to Markdown format.
|
||||
* @param options Migration options
|
||||
* @returns Migration result with details
|
||||
*/
|
||||
export async function migrateTomlCommands(
|
||||
options: MigrationOptions,
|
||||
): Promise<MigrationResult> {
|
||||
const { commandDir, createBackup = true, deleteOriginal = false } = options;
|
||||
|
||||
const result: MigrationResult = {
|
||||
success: true,
|
||||
convertedFiles: [],
|
||||
failedFiles: [],
|
||||
};
|
||||
|
||||
// Detect TOML files
|
||||
const tomlFiles = await detectTomlCommands(commandDir);
|
||||
|
||||
if (tomlFiles.length === 0) {
|
||||
return result;
|
||||
}
|
||||
|
||||
// Process each TOML file
|
||||
for (const relativeFile of tomlFiles) {
|
||||
const tomlPath = path.join(commandDir, relativeFile);
|
||||
|
||||
try {
|
||||
// Read TOML file
|
||||
const tomlContent = await fs.readFile(tomlPath, 'utf-8');
|
||||
|
||||
// Convert to Markdown
|
||||
const markdownContent = convertTomlToMarkdown(tomlContent);
|
||||
|
||||
// Generate Markdown file path (same location, .md extension)
|
||||
const markdownPath = tomlPath.replace(/\.toml$/, '.md');
|
||||
|
||||
// Check if Markdown file already exists
|
||||
try {
|
||||
await fs.access(markdownPath);
|
||||
throw new Error(
|
||||
`Markdown file already exists: ${path.basename(markdownPath)}`,
|
||||
);
|
||||
} catch (error) {
|
||||
if ((error as NodeJS.ErrnoException).code !== 'ENOENT') {
|
||||
throw error;
|
||||
}
|
||||
// File doesn't exist, continue
|
||||
}
|
||||
|
||||
// Write Markdown file
|
||||
await fs.writeFile(markdownPath, markdownContent, 'utf-8');
|
||||
|
||||
// Backup original if requested (rename to .toml.backup)
|
||||
if (createBackup) {
|
||||
const backupPath = `${tomlPath}.backup`;
|
||||
await fs.rename(tomlPath, backupPath);
|
||||
} else if (deleteOriginal) {
|
||||
// Delete original if requested and no backup
|
||||
await fs.unlink(tomlPath);
|
||||
}
|
||||
|
||||
result.convertedFiles.push(relativeFile);
|
||||
} catch (error) {
|
||||
result.success = false;
|
||||
result.failedFiles.push({
|
||||
file: relativeFile,
|
||||
error: error instanceof Error ? error.message : String(error),
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
/**
|
||||
* Generates a migration report message.
|
||||
* @param tomlFiles List of TOML files found
|
||||
* @returns Human-readable migration prompt message
|
||||
*/
|
||||
export function generateMigrationPrompt(tomlFiles: string[]): string {
|
||||
if (tomlFiles.length === 0) {
|
||||
return '';
|
||||
}
|
||||
|
||||
const count = tomlFiles.length;
|
||||
const fileList =
|
||||
tomlFiles.length <= 5
|
||||
? tomlFiles.map((f) => ` - ${f}`).join('\n')
|
||||
: ` - ${tomlFiles.slice(0, 3).join('\n - ')}\n - ... and ${tomlFiles.length - 3} more`;
|
||||
|
||||
return `
|
||||
⚠️ TOML Command Format Deprecation Notice
|
||||
|
||||
Found ${count} command file${count > 1 ? 's' : ''} in TOML format:
|
||||
${fileList}
|
||||
|
||||
The TOML format for commands is being deprecated in favor of Markdown format.
|
||||
Markdown format is more readable and easier to edit.
|
||||
|
||||
You can migrate these files automatically using:
|
||||
qwen-code migrate-commands
|
||||
|
||||
Or manually convert each file:
|
||||
- TOML: prompt = "..." / description = "..."
|
||||
- Markdown: YAML frontmatter + content
|
||||
|
||||
The migration tool will:
|
||||
✓ Convert TOML files to Markdown
|
||||
✓ Create backups of original files
|
||||
✓ Preserve all command functionality
|
||||
|
||||
TOML format will continue to work for now, but migration is recommended.
|
||||
`.trim();
|
||||
}
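As a minimal sketch of the conversion this module performs (mirroring the fixtures in the
test file above), a TOML command of the form

    prompt = "Test prompt"
    description = "Test description"

is rewritten to a sibling .md file along the lines of

    ---
    description: Test description
    ---

    Test prompt

The original is then renamed to <name>.toml.backup when createBackup is set, or deleted when
deleteOriginal is set without a backup.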
|
||||
144 packages/cli/src/services/markdown-command-parser.test.ts (Normal file)
@@ -0,0 +1,144 @@
|
||||
/**
|
||||
* @license
|
||||
* Copyright 2025 Google LLC
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
import { describe, it, expect } from 'vitest';
|
||||
import {
|
||||
parseMarkdownCommand,
|
||||
MarkdownCommandDefSchema,
|
||||
} from './markdown-command-parser.js';
|
||||
|
||||
describe('parseMarkdownCommand', () => {
|
||||
it('should parse markdown with YAML frontmatter', () => {
|
||||
const content = `---
|
||||
description: Test command
|
||||
---
|
||||
|
||||
This is the prompt content.`;
|
||||
|
||||
const result = parseMarkdownCommand(content);
|
||||
|
||||
expect(result).toEqual({
|
||||
frontmatter: {
|
||||
description: 'Test command',
|
||||
},
|
||||
prompt: 'This is the prompt content.',
|
||||
});
|
||||
});
|
||||
|
||||
it('should parse markdown without frontmatter', () => {
|
||||
const content = 'This is just a prompt without frontmatter.';
|
||||
|
||||
const result = parseMarkdownCommand(content);
|
||||
|
||||
expect(result).toEqual({
|
||||
prompt: 'This is just a prompt without frontmatter.',
|
||||
});
|
||||
});
|
||||
|
||||
it('should handle multi-line prompts', () => {
|
||||
const content = `---
|
||||
description: Multi-line test
|
||||
---
|
||||
|
||||
First line of prompt.
|
||||
Second line of prompt.
|
||||
Third line of prompt.`;
|
||||
|
||||
const result = parseMarkdownCommand(content);
|
||||
|
||||
expect(result.prompt).toBe(
|
||||
'First line of prompt.\nSecond line of prompt.\nThird line of prompt.',
|
||||
);
|
||||
});
|
||||
|
||||
it('should trim whitespace from prompt', () => {
|
||||
const content = `---
|
||||
description: Whitespace test
|
||||
---
|
||||
|
||||
Prompt with leading and trailing spaces
|
||||
`;
|
||||
|
||||
const result = parseMarkdownCommand(content);
|
||||
|
||||
expect(result.prompt).toBe('Prompt with leading and trailing spaces');
|
||||
});
|
||||
|
||||
it('should handle empty frontmatter', () => {
|
||||
const content = `---
|
||||
---
|
||||
|
||||
Prompt content after empty frontmatter.`;
|
||||
|
||||
const result = parseMarkdownCommand(content);
|
||||
|
||||
// Empty YAML frontmatter returns undefined, not {}
|
||||
expect(result.frontmatter).toBeUndefined();
|
||||
expect(result.prompt).toBe('Prompt content after empty frontmatter.');
|
||||
});
|
||||
|
||||
it('should handle invalid YAML frontmatter gracefully', () => {
|
||||
// The YAML parser we use is quite tolerant, so most "invalid" YAML
|
||||
// actually parses successfully. This test verifies that behavior.
|
||||
const content = `---
|
||||
description: test
|
||||
---
|
||||
|
||||
Prompt content.`;
|
||||
|
||||
const result = parseMarkdownCommand(content);
|
||||
|
||||
expect(result.frontmatter).toBeDefined();
|
||||
expect(result.prompt).toBe('Prompt content.');
|
||||
});
|
||||
});
|
||||
|
||||
describe('MarkdownCommandDefSchema', () => {
|
||||
it('should validate valid markdown command def', () => {
|
||||
const validDef = {
|
||||
frontmatter: {
|
||||
description: 'Test description',
|
||||
},
|
||||
prompt: 'Test prompt',
|
||||
};
|
||||
|
||||
const result = MarkdownCommandDefSchema.safeParse(validDef);
|
||||
|
||||
expect(result.success).toBe(true);
|
||||
});
|
||||
|
||||
it('should validate markdown command def without frontmatter', () => {
|
||||
const validDef = {
|
||||
prompt: 'Test prompt',
|
||||
};
|
||||
|
||||
const result = MarkdownCommandDefSchema.safeParse(validDef);
|
||||
|
||||
expect(result.success).toBe(true);
|
||||
});
|
||||
|
||||
it('should reject command def without prompt', () => {
|
||||
const invalidDef = {
|
||||
frontmatter: {
|
||||
description: 'Test description',
|
||||
},
|
||||
};
|
||||
|
||||
const result = MarkdownCommandDefSchema.safeParse(invalidDef);
|
||||
|
||||
expect(result.success).toBe(false);
|
||||
});
|
||||
|
||||
it('should reject command def with non-string prompt', () => {
|
||||
const invalidDef = {
|
||||
prompt: 123,
|
||||
};
|
||||
|
||||
const result = MarkdownCommandDefSchema.safeParse(invalidDef);
|
||||
|
||||
expect(result.success).toBe(false);
|
||||
});
|
||||
});
|
||||
64 packages/cli/src/services/markdown-command-parser.ts (Normal file)
@@ -0,0 +1,64 @@
|
||||
/**
|
||||
* @license
|
||||
* Copyright 2025 Google LLC
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
import { z } from 'zod';
|
||||
import { parse as parseYaml } from '@qwen-code/qwen-code-core';
|
||||
|
||||
/**
|
||||
* Defines the Zod schema for a Markdown command definition file.
|
||||
* The frontmatter contains optional metadata, and the body is the prompt.
|
||||
*/
|
||||
export const MarkdownCommandDefSchema = z.object({
|
||||
frontmatter: z
|
||||
.object({
|
||||
description: z.string().optional(),
|
||||
})
|
||||
.optional(),
|
||||
prompt: z.string({
|
||||
required_error: 'The prompt content is required.',
|
||||
invalid_type_error: 'The prompt content must be a string.',
|
||||
}),
|
||||
});
|
||||
|
||||
export type MarkdownCommandDef = z.infer<typeof MarkdownCommandDefSchema>;
|
||||
|
||||
/**
|
||||
* Parses a Markdown command file with optional YAML frontmatter.
|
||||
* @param content The file content
|
||||
* @returns Parsed command definition with frontmatter and prompt
|
||||
*/
|
||||
export function parseMarkdownCommand(content: string): MarkdownCommandDef {
|
||||
// Match YAML frontmatter pattern: ---\n...\n---\n
|
||||
// Empty frontmatter (---\n---\n) is also allowed: the lazy ([\s\S]*?) group can match an empty body.
|
||||
const frontmatterRegex = /^---\n([\s\S]*?)---\n([\s\S]*)$/;
|
||||
const match = content.match(frontmatterRegex);
|
||||
|
||||
if (!match) {
|
||||
// No frontmatter, entire content is the prompt
|
||||
return {
|
||||
prompt: content.trim(),
|
||||
};
|
||||
}
|
||||
|
||||
const [, frontmatterYaml, body] = match;
|
||||
|
||||
// Parse YAML frontmatter if not empty
|
||||
let frontmatter: Record<string, unknown> | undefined;
|
||||
if (frontmatterYaml.trim()) {
|
||||
try {
|
||||
frontmatter = parseYaml(frontmatterYaml) as Record<string, unknown>;
|
||||
} catch (error) {
|
||||
throw new Error(
|
||||
`Failed to parse YAML frontmatter: ${error instanceof Error ? error.message : String(error)}`,
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
return {
|
||||
frontmatter,
|
||||
prompt: body.trim(),
|
||||
};
|
||||
}
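For reference, running the example.md fixture below through this parser yields, per the code
above:

    parseMarkdownCommand(content)
    // => {
    //      frontmatter: { description: 'Example markdown command' },
    //      prompt: 'This is an example prompt from a markdown file.',
    //    }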
|
||||
5 packages/cli/src/services/test-commands/example.md (Normal file)
@@ -0,0 +1,5 @@
---
description: Example markdown command
---

This is an example prompt from a markdown file.
@@ -1,49 +0,0 @@
|
||||
/**
|
||||
* @license
|
||||
* Copyright 2025 Google LLC
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
import * as fs from 'node:fs';
|
||||
import * as path from 'node:path';
|
||||
import {
|
||||
EXTENSIONS_CONFIG_FILENAME,
|
||||
INSTALL_METADATA_FILENAME,
|
||||
} from '../config/extension.js';
|
||||
import {
|
||||
type MCPServerConfig,
|
||||
type ExtensionInstallMetadata,
|
||||
} from '@qwen-code/qwen-code-core';
|
||||
|
||||
export function createExtension({
|
||||
extensionsDir = 'extensions-dir',
|
||||
name = 'my-extension',
|
||||
version = '1.0.0',
|
||||
addContextFile = false,
|
||||
contextFileName = undefined as string | undefined,
|
||||
mcpServers = {} as Record<string, MCPServerConfig>,
|
||||
installMetadata = undefined as ExtensionInstallMetadata | undefined,
|
||||
} = {}): string {
|
||||
const extDir = path.join(extensionsDir, name);
|
||||
fs.mkdirSync(extDir, { recursive: true });
|
||||
fs.writeFileSync(
|
||||
path.join(extDir, EXTENSIONS_CONFIG_FILENAME),
|
||||
JSON.stringify({ name, version, contextFileName, mcpServers }),
|
||||
);
|
||||
|
||||
if (addContextFile) {
|
||||
fs.writeFileSync(path.join(extDir, 'QWEN.md'), 'context');
|
||||
}
|
||||
|
||||
if (contextFileName) {
|
||||
fs.writeFileSync(path.join(extDir, contextFileName), 'context');
|
||||
}
|
||||
|
||||
if (installMetadata) {
|
||||
fs.writeFileSync(
|
||||
path.join(extDir, INSTALL_METADATA_FILENAME),
|
||||
JSON.stringify(installMetadata),
|
||||
);
|
||||
}
|
||||
return extDir;
|
||||
}
|
||||
@@ -23,7 +23,6 @@ import {
|
||||
} from '@qwen-code/qwen-code-core';
|
||||
import type { LoadedSettings } from '../config/settings.js';
|
||||
import type { InitializationResult } from '../core/initializer.js';
|
||||
import { useQuotaAndFallback } from './hooks/useQuotaAndFallback.js';
|
||||
import { UIStateContext, type UIState } from './contexts/UIStateContext.js';
|
||||
import {
|
||||
UIActionsContext,
|
||||
@@ -56,7 +55,6 @@ vi.mock('./App.js', () => ({
|
||||
App: TestContextConsumer,
|
||||
}));
|
||||
|
||||
vi.mock('./hooks/useQuotaAndFallback.js');
|
||||
vi.mock('./hooks/useHistoryManager.js');
|
||||
vi.mock('./hooks/useThemeCommand.js');
|
||||
vi.mock('./auth/useAuth.js');
|
||||
@@ -78,7 +76,6 @@ vi.mock('./hooks/useFolderTrust.js');
|
||||
vi.mock('./hooks/useIdeTrustListener.js');
|
||||
vi.mock('./hooks/useMessageQueue.js');
|
||||
vi.mock('./hooks/useAutoAcceptIndicator.js');
|
||||
vi.mock('./hooks/useWorkspaceMigration.js');
|
||||
vi.mock('./hooks/useGitBranchName.js');
|
||||
vi.mock('./contexts/VimModeContext.js');
|
||||
vi.mock('./contexts/SessionContext.js');
|
||||
@@ -105,7 +102,6 @@ import { useFolderTrust } from './hooks/useFolderTrust.js';
|
||||
import { useIdeTrustListener } from './hooks/useIdeTrustListener.js';
|
||||
import { useMessageQueue } from './hooks/useMessageQueue.js';
|
||||
import { useAutoAcceptIndicator } from './hooks/useAutoAcceptIndicator.js';
|
||||
import { useWorkspaceMigration } from './hooks/useWorkspaceMigration.js';
|
||||
import { useGitBranchName } from './hooks/useGitBranchName.js';
|
||||
import { useVimMode } from './contexts/VimModeContext.js';
|
||||
import { useSessionStats } from './contexts/SessionContext.js';
|
||||
@@ -122,7 +118,6 @@ describe('AppContainer State Management', () => {
|
||||
let mockInitResult: InitializationResult;
|
||||
|
||||
// Create typed mocks for all hooks
|
||||
const mockedUseQuotaAndFallback = useQuotaAndFallback as Mock;
|
||||
const mockedUseHistory = useHistory as Mock;
|
||||
const mockedUseThemeCommand = useThemeCommand as Mock;
|
||||
const mockedUseAuthCommand = useAuthCommand as Mock;
|
||||
@@ -137,7 +132,6 @@ describe('AppContainer State Management', () => {
|
||||
const mockedUseIdeTrustListener = useIdeTrustListener as Mock;
|
||||
const mockedUseMessageQueue = useMessageQueue as Mock;
|
||||
const mockedUseAutoAcceptIndicator = useAutoAcceptIndicator as Mock;
|
||||
const mockedUseWorkspaceMigration = useWorkspaceMigration as Mock;
|
||||
const mockedUseGitBranchName = useGitBranchName as Mock;
|
||||
const mockedUseVimMode = useVimMode as Mock;
|
||||
const mockedUseSessionStats = useSessionStats as Mock;
|
||||
@@ -164,10 +158,6 @@ describe('AppContainer State Management', () => {
|
||||
capturedUIActions = null!;
|
||||
|
||||
// **Provide a default return value for EVERY mocked hook.**
|
||||
mockedUseQuotaAndFallback.mockReturnValue({
|
||||
proQuotaRequest: null,
|
||||
handleProQuotaChoice: vi.fn(),
|
||||
});
|
||||
mockedUseHistory.mockReturnValue({
|
||||
history: [],
|
||||
addItem: vi.fn(),
|
||||
@@ -246,12 +236,6 @@ describe('AppContainer State Management', () => {
|
||||
getQueuedMessagesText: vi.fn().mockReturnValue(''),
|
||||
});
|
||||
mockedUseAutoAcceptIndicator.mockReturnValue(false);
|
||||
mockedUseWorkspaceMigration.mockReturnValue({
|
||||
showWorkspaceMigrationDialog: false,
|
||||
workspaceExtensions: [],
|
||||
onWorkspaceMigrationDialogOpen: vi.fn(),
|
||||
onWorkspaceMigrationDialogClose: vi.fn(),
|
||||
});
|
||||
mockedUseGitBranchName.mockReturnValue('main');
|
||||
mockedUseVimMode.mockReturnValue({
|
||||
isVimEnabled: false,
|
||||
@@ -567,75 +551,6 @@ describe('AppContainer State Management', () => {
|
||||
});
|
||||
});
|
||||
|
||||
describe('Quota and Fallback Integration', () => {
|
||||
it('passes a null proQuotaRequest to UIStateContext by default', () => {
|
||||
// The default mock from beforeEach already sets proQuotaRequest to null
|
||||
render(
|
||||
<AppContainer
|
||||
config={mockConfig}
|
||||
settings={mockSettings}
|
||||
version="1.0.0"
|
||||
initializationResult={mockInitResult}
|
||||
/>,
|
||||
);
|
||||
|
||||
// Assert that the context value is as expected
|
||||
expect(capturedUIState.proQuotaRequest).toBeNull();
|
||||
});
|
||||
|
||||
it('passes a valid proQuotaRequest to UIStateContext when provided by the hook', () => {
|
||||
// Arrange: Create a mock request object that a UI dialog would receive
|
||||
const mockRequest = {
|
||||
failedModel: 'gemini-pro',
|
||||
fallbackModel: 'gemini-flash',
|
||||
resolve: vi.fn(),
|
||||
};
|
||||
mockedUseQuotaAndFallback.mockReturnValue({
|
||||
proQuotaRequest: mockRequest,
|
||||
handleProQuotaChoice: vi.fn(),
|
||||
});
|
||||
|
||||
// Act: Render the container
|
||||
render(
|
||||
<AppContainer
|
||||
config={mockConfig}
|
||||
settings={mockSettings}
|
||||
version="1.0.0"
|
||||
initializationResult={mockInitResult}
|
||||
/>,
|
||||
);
|
||||
|
||||
// Assert: The mock request is correctly passed through the context
|
||||
expect(capturedUIState.proQuotaRequest).toEqual(mockRequest);
|
||||
});
|
||||
|
||||
it('passes the handleProQuotaChoice function to UIActionsContext', () => {
|
||||
// Arrange: Create a mock handler function
|
||||
const mockHandler = vi.fn();
|
||||
mockedUseQuotaAndFallback.mockReturnValue({
|
||||
proQuotaRequest: null,
|
||||
handleProQuotaChoice: mockHandler,
|
||||
});
|
||||
|
||||
// Act: Render the container
|
||||
render(
|
||||
<AppContainer
|
||||
config={mockConfig}
|
||||
settings={mockSettings}
|
||||
version="1.0.0"
|
||||
initializationResult={mockInitResult}
|
||||
/>,
|
||||
);
|
||||
|
||||
// Assert: The action in the context is the mock handler we provided
|
||||
expect(capturedUIActions.handleProQuotaChoice).toBe(mockHandler);
|
||||
|
||||
// You can even verify that the plumbed function is callable
|
||||
capturedUIActions.handleProQuotaChoice('auth');
|
||||
expect(mockHandler).toHaveBeenCalledWith('auth');
|
||||
});
|
||||
});
|
||||
|
||||
describe('Terminal Title Update Feature', () => {
|
||||
beforeEach(() => {
|
||||
// Reset mock stdout for each test
|
||||
|
||||
@@ -32,13 +32,13 @@ import {
|
||||
type Config,
|
||||
type IdeInfo,
|
||||
type IdeContext,
|
||||
type UserTierId,
|
||||
DEFAULT_GEMINI_FLASH_MODEL,
|
||||
IdeClient,
|
||||
ideContextStore,
|
||||
getErrorMessage,
|
||||
getAllGeminiMdFilenames,
|
||||
ShellExecutionService,
|
||||
Storage,
|
||||
} from '@qwen-code/qwen-code-core';
|
||||
import { buildResumedHistoryItems } from './utils/resumeHistoryUtils.js';
|
||||
import { validateAuthMethod } from '../config/auth.js';
|
||||
@@ -48,7 +48,6 @@ import { useHistory } from './hooks/useHistoryManager.js';
|
import { useMemoryMonitor } from './hooks/useMemoryMonitor.js';
import { useThemeCommand } from './hooks/useThemeCommand.js';
import { useAuthCommand } from './auth/useAuth.js';
import { useQuotaAndFallback } from './hooks/useQuotaAndFallback.js';
import { useEditorSettings } from './hooks/useEditorSettings.js';
import { useSettingsCommand } from './hooks/useSettingsCommand.js';
import { useModelCommand } from './hooks/useModelCommand.js';
@@ -78,6 +77,9 @@ import { useLoadingIndicator } from './hooks/useLoadingIndicator.js';
import { useFolderTrust } from './hooks/useFolderTrust.js';
import { useIdeTrustListener } from './hooks/useIdeTrustListener.js';
import { type IdeIntegrationNudgeResult } from './IdeIntegrationNudge.js';
import { type CommandMigrationNudgeResult } from './CommandFormatMigrationNudge.js';
import { useCommandMigration } from './hooks/useCommandMigration.js';
import { migrateTomlCommands } from '../services/command-migration-tool.js';
import { appEvents, AppEvent } from '../utils/events.js';
import { type UpdateObject } from './utils/updateCheck.js';
import { setUpdateHandler } from '../utils/handleAutoUpdate.js';
@@ -85,10 +87,12 @@ import { ConsolePatcher } from './utils/ConsolePatcher.js';
import { registerCleanup, runExitCleanup } from '../utils/cleanup.js';
import { useMessageQueue } from './hooks/useMessageQueue.js';
import { useAutoAcceptIndicator } from './hooks/useAutoAcceptIndicator.js';
import { useWorkspaceMigration } from './hooks/useWorkspaceMigration.js';
import { useSessionStats } from './contexts/SessionContext.js';
import { useGitBranchName } from './hooks/useGitBranchName.js';
import { useExtensionUpdates } from './hooks/useExtensionUpdates.js';
import {
  useExtensionUpdates,
  useConfirmUpdateRequests,
} from './hooks/useExtensionUpdates.js';
import { ShellFocusContext } from './contexts/ShellFocusContext.js';
import { t } from '../i18n/index.js';
import { useWelcomeBack } from './hooks/useWelcomeBack.js';
@@ -99,6 +103,7 @@ import { processVisionSwitchOutcome } from './hooks/useVisionAutoSwitch.js';
import { useSubagentCreateDialog } from './hooks/useSubagentCreateDialog.js';
import { useAgentsManagerDialog } from './hooks/useAgentsManagerDialog.js';
import { useAttentionNotifications } from './hooks/useAttentionNotifications.js';
import { requestConsentInteractive } from '../commands/extensions/consent.js';

const CTRL_EXIT_PROMPT_DURATION_MS = 1000;

@@ -159,15 +164,21 @@ export const AppContainer = (props: AppContainerProps) => {
    config.isTrustedFolder(),
  );

  const extensions = config.getExtensions();
  const extensionManager = config.getExtensionManager();

  extensionManager.setRequestConsent((description) =>
    requestConsentInteractive(description, addConfirmUpdateExtensionRequest),
  );

  const { addConfirmUpdateExtensionRequest, confirmUpdateExtensionRequests } =
    useConfirmUpdateRequests();

  const {
    extensionsUpdateState,
    extensionsUpdateStateInternal,
    dispatchExtensionStateUpdate,
    confirmUpdateExtensionRequests,
    addConfirmUpdateExtensionRequest,
  } = useExtensionUpdates(
    extensions,
    extensionManager,
    historyManager.addItem,
    config.getWorkingDir(),
  );
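For orientation, a minimal framework-free sketch of the consent flow this hunk wires up: the extension manager asks for consent through a callback, and the UI side queues each request until the user answers it. The names ConsentRequest, ExtensionManagerLike, and requestConsentSketch are illustrative assumptions, not the project's actual hook or manager API.

// Sketch only: approximates the consent-request pattern shown above.
type ConsentRequest = {
  description: string;
  resolve: (approved: boolean) => void;
};

interface ExtensionManagerLike {
  setRequestConsent(ask: (description: string) => Promise<boolean>): void;
}

// In the real code this queue lives inside the useConfirmUpdateRequests hook
// and is drained by a confirmation dialog in the UI.
const pendingConsentRequests: ConsentRequest[] = [];

function addConsentRequest(request: ConsentRequest): void {
  pendingConsentRequests.push(request);
}

function requestConsentSketch(
  description: string,
  enqueue: (request: ConsentRequest) => void,
): Promise<boolean> {
  // Resolves once the UI answers the queued request.
  return new Promise<boolean>((resolve) => {
    enqueue({ description, resolve });
  });
}

function wireConsent(extensionManager: ExtensionManagerLike): void {
  extensionManager.setRequestConsent((description) =>
    requestConsentSketch(description, addConsentRequest),
  );
}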
@@ -192,8 +203,6 @@ export const AppContainer = (props: AppContainerProps) => {

  const [currentModel, setCurrentModel] = useState(getEffectiveModel());

  const [userTier] = useState<UserTierId | undefined>(undefined);

  const [isConfigInitialized, setConfigInitialized] = useState(false);

  const [userMessages, setUserMessages] = useState<string[]>([]);
@@ -367,14 +376,6 @@ export const AppContainer = (props: AppContainerProps) => {
|
||||
cancelAuthentication,
|
||||
} = useAuthCommand(settings, config, historyManager.addItem);
|
||||
|
||||
const { proQuotaRequest, handleProQuotaChoice } = useQuotaAndFallback({
|
||||
config,
|
||||
historyManager,
|
||||
userTier,
|
||||
setAuthState,
|
||||
setModelSwitchedFromQuotaError,
|
||||
});
|
||||
|
||||
useInitializationAuthError(initializationResult.authError, onAuthError);
|
||||
|
||||
// Sync user tier from config when authentication changes
|
||||
@@ -448,13 +449,6 @@ export const AppContainer = (props: AppContainerProps) => {
|
||||
remount: refreshStatic,
|
||||
});
|
||||
|
||||
const {
|
||||
showWorkspaceMigrationDialog,
|
||||
workspaceExtensions,
|
||||
onWorkspaceMigrationDialogOpen,
|
||||
onWorkspaceMigrationDialogClose,
|
||||
} = useWorkspaceMigration(settings);
|
||||
|
||||
const { toggleVimEnabled } = useVimMode();
|
||||
|
||||
const {
|
||||
@@ -590,11 +584,11 @@ export const AppContainer = (props: AppContainerProps) => {
|
||||
: [],
|
||||
config.getDebugMode(),
|
||||
config.getFileService(),
|
||||
settings.merged,
|
||||
config.getExtensionContextFilePaths(),
|
||||
config.isTrustedFolder(),
|
||||
settings.merged.context?.importFormat || 'tree', // Use setting or default to 'tree'
|
||||
config.getFileFilteringOptions(),
|
||||
config.getDiscoveryMaxDirs(),
|
||||
);
|
||||
|
||||
config.setUserMemory(memoryContent);
|
||||
@@ -752,8 +746,7 @@ export const AppContainer = (props: AppContainerProps) => {
|
||||
!initError &&
|
||||
!isProcessing &&
|
||||
(streamingState === StreamingState.Idle ||
|
||||
streamingState === StreamingState.Responding) &&
|
||||
!proQuotaRequest;
|
||||
streamingState === StreamingState.Responding);
|
||||
|
||||
const [controlsHeight, setControlsHeight] = useState(0);
|
||||
|
||||
@@ -858,6 +851,13 @@ export const AppContainer = (props: AppContainerProps) => {
|
||||
!idePromptAnswered,
|
||||
);
|
||||
|
||||
// Command migration nudge
|
||||
const {
|
||||
showMigrationNudge: shouldShowCommandMigrationNudge,
|
||||
tomlFiles: commandMigrationTomlFiles,
|
||||
setShowMigrationNudge: setShowCommandMigrationNudge,
|
||||
} = useCommandMigration(settings, config.storage);
|
||||
|
||||
const [showErrorDetails, setShowErrorDetails] = useState<boolean>(false);
|
||||
const [showToolDescriptions, setShowToolDescriptions] =
|
||||
useState<boolean>(false);
|
||||
@@ -948,6 +948,92 @@ export const AppContainer = (props: AppContainerProps) => {
|
||||
[handleSlashCommand, settings],
|
||||
);
|
||||
|
||||
const handleCommandMigrationComplete = useCallback(
|
||||
async (result: CommandMigrationNudgeResult) => {
|
||||
setShowCommandMigrationNudge(false);
|
||||
|
||||
if (result.userSelection === 'yes') {
|
||||
// Perform migration for both workspace and user levels
|
||||
try {
|
||||
const results = [];
|
||||
|
||||
// Migrate workspace commands
|
||||
const workspaceCommandsDir = config.storage.getProjectCommandsDir();
|
||||
const workspaceResult = await migrateTomlCommands({
|
||||
commandDir: workspaceCommandsDir,
|
||||
createBackup: true,
|
||||
deleteOriginal: false,
|
||||
});
|
||||
if (
|
||||
workspaceResult.convertedFiles.length > 0 ||
|
||||
workspaceResult.failedFiles.length > 0
|
||||
) {
|
||||
results.push({ level: 'workspace', result: workspaceResult });
|
||||
}
|
||||
|
||||
// Migrate user commands
|
||||
const userCommandsDir = Storage.getUserCommandsDir();
|
||||
const userResult = await migrateTomlCommands({
|
||||
commandDir: userCommandsDir,
|
||||
createBackup: true,
|
||||
deleteOriginal: false,
|
||||
});
|
||||
if (
|
||||
userResult.convertedFiles.length > 0 ||
|
||||
userResult.failedFiles.length > 0
|
||||
) {
|
||||
results.push({ level: 'user', result: userResult });
|
||||
}
|
||||
|
||||
// Report results
|
||||
for (const { level, result: migrationResult } of results) {
|
||||
if (
|
||||
migrationResult.success &&
|
||||
migrationResult.convertedFiles.length > 0
|
||||
) {
|
||||
historyManager.addItem(
|
||||
{
|
||||
type: MessageType.INFO,
|
||||
text: `[${level}] Successfully migrated ${migrationResult.convertedFiles.length} command file${migrationResult.convertedFiles.length > 1 ? 's' : ''} to Markdown format. Original files backed up as .toml.backup`,
|
||||
},
|
||||
Date.now(),
|
||||
);
|
||||
}
|
||||
|
||||
if (migrationResult.failedFiles.length > 0) {
|
||||
historyManager.addItem(
|
||||
{
|
||||
type: MessageType.ERROR,
|
||||
text: `[${level}] Failed to migrate ${migrationResult.failedFiles.length} file${migrationResult.failedFiles.length > 1 ? 's' : ''}:\n${migrationResult.failedFiles.map((f) => ` • ${f.file}: ${f.error}`).join('\n')}`,
|
||||
},
|
||||
Date.now(),
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
if (results.length === 0) {
|
||||
historyManager.addItem(
|
||||
{
|
||||
type: MessageType.INFO,
|
||||
text: 'No TOML files found to migrate.',
|
||||
},
|
||||
Date.now(),
|
||||
);
|
||||
}
|
||||
} catch (error) {
|
||||
historyManager.addItem(
|
||||
{
|
||||
type: MessageType.ERROR,
|
||||
text: `❌ Migration failed: ${getErrorMessage(error)}`,
|
||||
},
|
||||
Date.now(),
|
||||
);
|
||||
}
|
||||
}
|
||||
},
|
||||
[historyManager, setShowCommandMigrationNudge, config.storage],
|
||||
);
|
||||
|
||||
const { elapsedTime, currentLoadingPhrase } = useLoadingIndicator(
|
||||
streamingState,
|
||||
settings.merged.ui?.customWittyPhrases,
|
||||
@@ -1190,8 +1276,8 @@ export const AppContainer = (props: AppContainerProps) => {
|
||||
|
||||
const dialogsVisible =
|
||||
showWelcomeBackDialog ||
|
||||
showWorkspaceMigrationDialog ||
|
||||
shouldShowIdePrompt ||
|
||||
shouldShowCommandMigrationNudge ||
|
||||
isFolderTrustDialogOpen ||
|
||||
!!shellConfirmationRequest ||
|
||||
!!confirmationRequest ||
|
||||
@@ -1206,7 +1292,6 @@ export const AppContainer = (props: AppContainerProps) => {
|
||||
isAuthenticating ||
|
||||
isEditorDialogOpen ||
|
||||
showIdeRestartPrompt ||
|
||||
!!proQuotaRequest ||
|
||||
isSubagentCreateDialogOpen ||
|
||||
isAgentsManagerDialogOpen ||
|
||||
isApprovalModeDialogOpen ||
|
||||
@@ -1258,6 +1343,8 @@ export const AppContainer = (props: AppContainerProps) => {
|
||||
suggestionsWidth,
|
||||
isInputActive,
|
||||
shouldShowIdePrompt,
|
||||
shouldShowCommandMigrationNudge,
|
||||
commandMigrationTomlFiles,
|
||||
isFolderTrustDialogOpen: isFolderTrustDialogOpen ?? false,
|
||||
isTrustedFolder,
|
||||
constrainHeight,
|
||||
@@ -1274,11 +1361,7 @@ export const AppContainer = (props: AppContainerProps) => {
|
||||
historyRemountKey,
|
||||
messageQueue,
|
||||
showAutoAcceptIndicator,
|
||||
showWorkspaceMigrationDialog,
|
||||
workspaceExtensions,
|
||||
currentModel,
|
||||
userTier,
|
||||
proQuotaRequest,
|
||||
contextFileNames,
|
||||
errorCount,
|
||||
availableTerminalHeight,
|
||||
@@ -1349,6 +1432,8 @@ export const AppContainer = (props: AppContainerProps) => {
|
||||
suggestionsWidth,
|
||||
isInputActive,
|
||||
shouldShowIdePrompt,
|
||||
shouldShowCommandMigrationNudge,
|
||||
commandMigrationTomlFiles,
|
||||
isFolderTrustDialogOpen,
|
||||
isTrustedFolder,
|
||||
constrainHeight,
|
||||
@@ -1365,10 +1450,6 @@ export const AppContainer = (props: AppContainerProps) => {
|
||||
historyRemountKey,
|
||||
messageQueue,
|
||||
showAutoAcceptIndicator,
|
||||
showWorkspaceMigrationDialog,
|
||||
workspaceExtensions,
|
||||
userTier,
|
||||
proQuotaRequest,
|
||||
contextFileNames,
|
||||
errorCount,
|
||||
availableTerminalHeight,
|
||||
@@ -1422,15 +1503,13 @@ export const AppContainer = (props: AppContainerProps) => {
|
||||
setShellModeActive,
|
||||
vimHandleInput,
|
||||
handleIdePromptComplete,
|
||||
handleCommandMigrationComplete,
|
||||
handleFolderTrustSelect,
|
||||
setConstrainHeight,
|
||||
onEscapePromptChange: handleEscapePromptChange,
|
||||
refreshStatic,
|
||||
handleFinalSubmit,
|
||||
handleClearScreen,
|
||||
onWorkspaceMigrationDialogOpen,
|
||||
onWorkspaceMigrationDialogClose,
|
||||
handleProQuotaChoice,
|
||||
// Vision switch dialog
|
||||
handleVisionSwitchSelect,
|
||||
// Welcome back dialog
|
||||
@@ -1460,15 +1539,13 @@ export const AppContainer = (props: AppContainerProps) => {
|
||||
setShellModeActive,
|
||||
vimHandleInput,
|
||||
handleIdePromptComplete,
|
||||
handleCommandMigrationComplete,
|
||||
handleFolderTrustSelect,
|
||||
setConstrainHeight,
|
||||
handleEscapePromptChange,
|
||||
refreshStatic,
|
||||
handleFinalSubmit,
|
||||
handleClearScreen,
|
||||
onWorkspaceMigrationDialogOpen,
|
||||
onWorkspaceMigrationDialogClose,
|
||||
handleProQuotaChoice,
|
||||
handleVisionSwitchSelect,
|
||||
handleWelcomeBackSelection,
|
||||
handleWelcomeBackClose,
|
||||
|
||||
packages/cli/src/ui/CommandFormatMigrationNudge.tsx (new file, 90 lines)
@@ -0,0 +1,90 @@
/**
 * @license
 * Copyright 2025 Google LLC
 * SPDX-License-Identifier: Apache-2.0
 */

import { Box, Text } from 'ink';
import type { RadioSelectItem } from './components/shared/RadioButtonSelect.js';
import { RadioButtonSelect } from './components/shared/RadioButtonSelect.js';
import { useKeypress } from './hooks/useKeypress.js';
import { theme } from './semantic-colors.js';

export type CommandMigrationNudgeResult = {
  userSelection: 'yes' | 'no';
};

interface CommandFormatMigrationNudgeProps {
  tomlFiles: string[];
  onComplete: (result: CommandMigrationNudgeResult) => void;
}

export function CommandFormatMigrationNudge({
  tomlFiles,
  onComplete,
}: CommandFormatMigrationNudgeProps) {
  useKeypress(
    (key) => {
      if (key.name === 'escape') {
        onComplete({
          userSelection: 'no',
        });
      }
    },
    { isActive: true },
  );

  const OPTIONS: Array<RadioSelectItem<CommandMigrationNudgeResult>> = [
    {
      label: 'Yes',
      value: {
        userSelection: 'yes',
      },
      key: 'Yes',
    },
    {
      label: 'No (esc)',
      value: {
        userSelection: 'no',
      },
      key: 'No (esc)',
    },
  ];

  const count = tomlFiles.length;
  const fileList =
    count <= 3
      ? tomlFiles.map((f) => ` • ${f}`).join('\n')
      : ` • ${tomlFiles.slice(0, 2).join('\n • ')}\n • ... and ${count - 2} more`;

  return (
    <Box
      flexDirection="column"
      borderStyle="round"
      borderColor={theme.status.warning}
      padding={1}
      width="100%"
      marginLeft={1}
    >
      <Box marginBottom={1} flexDirection="column">
        <Text>
          <Text color={theme.status.warning}>{'⚠️ '}</Text>
          <Text bold>Command Format Migration</Text>
        </Text>
        <Text color={theme.text.secondary}>
          {`Found ${count} TOML command file${count > 1 ? 's' : ''}:`}
        </Text>
        <Text color={theme.text.secondary}>{fileList}</Text>
        <Text>{''}</Text>
        <Text color={theme.text.secondary}>
          The TOML format is deprecated. Would you like to migrate them to
          Markdown format?
        </Text>
        <Text color={theme.text.secondary}>
          (Backups will be created and original files will be preserved)
        </Text>
      </Box>
      <RadioButtonSelect items={OPTIONS} onSelect={onComplete} />
    </Box>
  );
}
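A hypothetical standalone usage of the component above, assuming an Ink environment and the automatic JSX runtime; in the real application AppContainer renders the nudge from useCommandMigration state, and the sample file paths below are invented for illustration only.

// Illustrative sketch, not how AppContainer actually mounts the nudge.
import { render } from 'ink';
import {
  CommandFormatMigrationNudge,
  type CommandMigrationNudgeResult,
} from './CommandFormatMigrationNudge.js';

function handleComplete(result: CommandMigrationNudgeResult): void {
  // 'yes' is expected to trigger migrateTomlCommands for both command dirs.
  console.log(result.userSelection === 'yes' ? 'migrate' : 'skip');
}

render(
  <CommandFormatMigrationNudge
    tomlFiles={['commands/deploy.toml', 'commands/test.toml']}
    onComplete={handleComplete}
  />,
);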
@@ -168,7 +168,7 @@ describe('AuthDialog', () => {

  it('should not show the GEMINI_API_KEY message if QWEN_DEFAULT_AUTH_TYPE is set to something else', () => {
    process.env['GEMINI_API_KEY'] = 'foobar';
    process.env['QWEN_DEFAULT_AUTH_TYPE'] = AuthType.LOGIN_WITH_GOOGLE;
    process.env['QWEN_DEFAULT_AUTH_TYPE'] = AuthType.USE_OPENAI;

    const settings: LoadedSettings = new LoadedSettings(
      {
@@ -212,7 +212,7 @@ describe('AuthDialog', () => {

  it('should show the GEMINI_API_KEY message if QWEN_DEFAULT_AUTH_TYPE is set to use api key', () => {
    process.env['GEMINI_API_KEY'] = 'foobar';
    process.env['QWEN_DEFAULT_AUTH_TYPE'] = AuthType.USE_GEMINI;
    process.env['QWEN_DEFAULT_AUTH_TYPE'] = AuthType.USE_OPENAI;

    const settings: LoadedSettings = new LoadedSettings(
      {
@@ -504,12 +504,12 @@ describe('AuthDialog', () => {
        },
        {
          settings: {
            security: { auth: { selectedType: AuthType.LOGIN_WITH_GOOGLE } },
            security: { auth: { selectedType: AuthType.USE_OPENAI } },
            ui: { customThemes: {} },
            mcpServers: {},
          },
          originalSettings: {
            security: { auth: { selectedType: AuthType.LOGIN_WITH_GOOGLE } },
            security: { auth: { selectedType: AuthType.USE_OPENAI } },
            ui: { customThemes: {} },
            mcpServers: {},
          },

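These cases assign process.env values directly, so they depend on the environment being restored between tests. A minimal save-and-restore sketch of that hygiene (the suite's actual setup and teardown hooks are outside this excerpt, so the exact wiring is an assumption):

// Sketch only: snapshot and restore process.env around each test.
import { afterEach, beforeEach } from 'vitest';

let savedEnv: NodeJS.ProcessEnv;

beforeEach(() => {
  savedEnv = { ...process.env };
});

afterEach(() => {
  process.env = savedEnv;
});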
@@ -225,16 +225,26 @@ export const useAuthCommand = (
    const defaultAuthType = process.env['QWEN_DEFAULT_AUTH_TYPE'];
    if (
      defaultAuthType &&
      ![AuthType.QWEN_OAUTH, AuthType.USE_OPENAI].includes(
        defaultAuthType as AuthType,
      )
      ![
        AuthType.QWEN_OAUTH,
        AuthType.USE_OPENAI,
        AuthType.USE_ANTHROPIC,
        AuthType.USE_GEMINI,
        AuthType.USE_VERTEX_AI,
      ].includes(defaultAuthType as AuthType)
    ) {
      onAuthError(
        t(
          'Invalid QWEN_DEFAULT_AUTH_TYPE value: "{{value}}". Valid values are: {{validValues}}',
          {
            value: defaultAuthType,
            validValues: [AuthType.QWEN_OAUTH, AuthType.USE_OPENAI].join(', '),
            validValues: [
              AuthType.QWEN_OAUTH,
              AuthType.USE_OPENAI,
              AuthType.USE_ANTHROPIC,
              AuthType.USE_GEMINI,
              AuthType.USE_VERTEX_AI,
            ].join(', '),
          },
        ),
      );
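A condensed, standalone sketch of the same allow-list validation added in the hunk above. The enum string values below are assumptions made for illustration, since the real AuthType enum lives in the core package and is not shown in this diff; only the list of accepted members is taken from the change.

// Sketch: validate QWEN_DEFAULT_AUTH_TYPE against the expanded allow-list.
enum AuthType {
  QWEN_OAUTH = 'qwen-oauth',
  USE_OPENAI = 'openai',
  USE_ANTHROPIC = 'anthropic',
  USE_GEMINI = 'gemini-api-key',
  USE_VERTEX_AI = 'vertex-ai',
}

const VALID_DEFAULT_AUTH_TYPES: readonly AuthType[] = [
  AuthType.QWEN_OAUTH,
  AuthType.USE_OPENAI,
  AuthType.USE_ANTHROPIC,
  AuthType.USE_GEMINI,
  AuthType.USE_VERTEX_AI,
];

function validateDefaultAuthType(value: string | undefined): string | null {
  if (!value || VALID_DEFAULT_AUTH_TYPES.includes(value as AuthType)) {
    return null; // unset or valid
  }
  return `Invalid QWEN_DEFAULT_AUTH_TYPE value: "${value}". Valid values are: ${VALID_DEFAULT_AUTH_TYPES.join(', ')}`;
}

// Example: validateDefaultAuthType(process.env['QWEN_DEFAULT_AUTH_TYPE'])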
@@ -19,7 +19,9 @@ export const compressCommand: SlashCommand = {
|
||||
kind: CommandKind.BUILT_IN,
|
||||
action: async (context) => {
|
||||
const { ui } = context;
|
||||
if (ui.pendingItem) {
|
||||
const executionMode = context.executionMode ?? 'interactive';
|
||||
|
||||
if (executionMode === 'interactive' && ui.pendingItem) {
|
||||
ui.addItem(
|
||||
{
|
||||
type: MessageType.ERROR,
|
||||
@@ -40,13 +42,80 @@ export const compressCommand: SlashCommand = {
|
||||
},
|
||||
};
|
||||
|
||||
try {
|
||||
ui.setPendingItem(pendingMessage);
|
||||
const config = context.services.config;
|
||||
const geminiClient = config?.getGeminiClient();
|
||||
if (!config || !geminiClient) {
|
||||
return {
|
||||
type: 'message',
|
||||
messageType: 'error',
|
||||
content: t('Config not loaded.'),
|
||||
};
|
||||
}
|
||||
|
||||
const doCompress = async () => {
|
||||
const promptId = `compress-${Date.now()}`;
|
||||
const compressed = await context.services.config
|
||||
?.getGeminiClient()
|
||||
?.tryCompressChat(promptId, true);
|
||||
if (compressed) {
|
||||
return await geminiClient.tryCompressChat(promptId, true);
|
||||
};
|
||||
|
||||
if (executionMode === 'acp') {
|
||||
const messages = async function* () {
|
||||
try {
|
||||
yield {
|
||||
messageType: 'info' as const,
|
||||
content: 'Compressing context...',
|
||||
};
|
||||
const compressed = await doCompress();
|
||||
if (!compressed) {
|
||||
yield {
|
||||
messageType: 'error' as const,
|
||||
content: t('Failed to compress chat history.'),
|
||||
};
|
||||
return;
|
||||
}
|
||||
yield {
|
||||
messageType: 'info' as const,
|
||||
content: `Context compressed (${compressed.originalTokenCount} -> ${compressed.newTokenCount}).`,
|
||||
};
|
||||
} catch (e) {
|
||||
yield {
|
||||
messageType: 'error' as const,
|
||||
content: t('Failed to compress chat history: {{error}}', {
|
||||
error: e instanceof Error ? e.message : String(e),
|
||||
}),
|
||||
};
|
||||
}
|
||||
};
|
||||
|
||||
return { type: 'stream_messages', messages: messages() };
|
||||
}
|
||||
|
||||
try {
|
||||
if (executionMode === 'interactive') {
|
||||
ui.setPendingItem(pendingMessage);
|
||||
}
|
||||
|
||||
const compressed = await doCompress();
|
||||
|
||||
if (!compressed) {
|
||||
if (executionMode === 'interactive') {
|
||||
ui.addItem(
|
||||
{
|
||||
type: MessageType.ERROR,
|
||||
text: t('Failed to compress chat history.'),
|
||||
},
|
||||
Date.now(),
|
||||
);
|
||||
return;
|
||||
}
|
||||
|
||||
return {
|
||||
type: 'message',
|
||||
messageType: 'error',
|
||||
content: t('Failed to compress chat history.'),
|
||||
};
|
||||
}
|
||||
|
||||
if (executionMode === 'interactive') {
|
||||
ui.addItem(
|
||||
{
|
||||
type: MessageType.COMPRESSION,
|
||||
@@ -59,27 +128,39 @@ export const compressCommand: SlashCommand = {
|
||||
} as HistoryItemCompression,
|
||||
Date.now(),
|
||||
);
|
||||
} else {
|
||||
return;
|
||||
}
|
||||
|
||||
return {
|
||||
type: 'message',
|
||||
messageType: 'info',
|
||||
content: `Context compressed (${compressed.originalTokenCount} -> ${compressed.newTokenCount}).`,
|
||||
};
|
||||
} catch (e) {
|
||||
if (executionMode === 'interactive') {
|
||||
ui.addItem(
|
||||
{
|
||||
type: MessageType.ERROR,
|
||||
text: t('Failed to compress chat history.'),
|
||||
text: t('Failed to compress chat history: {{error}}', {
|
||||
error: e instanceof Error ? e.message : String(e),
|
||||
}),
|
||||
},
|
||||
Date.now(),
|
||||
);
|
||||
return;
|
||||
}
|
||||
} catch (e) {
|
||||
ui.addItem(
|
||||
{
|
||||
type: MessageType.ERROR,
|
||||
text: t('Failed to compress chat history: {{error}}', {
|
||||
error: e instanceof Error ? e.message : String(e),
|
||||
}),
|
||||
},
|
||||
Date.now(),
|
||||
);
|
||||
|
||||
return {
|
||||
type: 'message',
|
||||
messageType: 'error',
|
||||
content: t('Failed to compress chat history: {{error}}', {
|
||||
error: e instanceof Error ? e.message : String(e),
|
||||
}),
|
||||
};
|
||||
} finally {
|
||||
ui.setPendingItem(null);
|
||||
if (executionMode === 'interactive') {
|
||||
ui.setPendingItem(null);
|
||||
}
|
||||
}
|
||||
},
|
||||
};
|
||||
|
||||
@@ -4,13 +4,6 @@
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
import { requestConsentInteractive } from '../../config/extension.js';
|
||||
import {
|
||||
updateAllUpdatableExtensions,
|
||||
type ExtensionUpdateInfo,
|
||||
updateExtension,
|
||||
checkForAllExtensionUpdates,
|
||||
} from '../../config/extensions/update.js';
|
||||
import { getErrorMessage } from '../../utils/errors.js';
|
||||
import { ExtensionUpdateState } from '../state/extensions.js';
|
||||
import { MessageType } from '../types.js';
|
||||
@@ -20,8 +13,34 @@ import {
|
||||
CommandKind,
|
||||
} from './types.js';
|
||||
import { t } from '../../i18n/index.js';
|
||||
import type { ExtensionUpdateInfo } from '@qwen-code/qwen-code-core';
|
||||
|
||||
function showMessageIfNoExtensions(
|
||||
context: CommandContext,
|
||||
extensions: unknown[],
|
||||
): boolean {
|
||||
if (extensions.length === 0) {
|
||||
context.ui.addItem(
|
||||
{
|
||||
type: MessageType.INFO,
|
||||
text: 'No extensions installed. Run `/extensions explore` to check out the gallery.',
|
||||
},
|
||||
Date.now(),
|
||||
);
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
async function listAction(context: CommandContext) {
|
||||
const extensions = context.services.config
|
||||
? context.services.config.getExtensions()
|
||||
: [];
|
||||
|
||||
if (showMessageIfNoExtensions(context, extensions)) {
|
||||
return;
|
||||
}
|
||||
|
||||
context.ui.addItem(
|
||||
{
|
||||
type: MessageType.EXTENSIONS_LIST,
|
||||
@@ -34,7 +53,6 @@ async function updateAction(context: CommandContext, args: string) {
|
||||
const updateArgs = args.split(' ').filter((value) => value.length > 0);
|
||||
const all = updateArgs.length === 1 && updateArgs[0] === '--all';
|
||||
const names = all ? undefined : updateArgs;
|
||||
let updateInfos: ExtensionUpdateInfo[] = [];
|
||||
|
||||
if (!all && names?.length === 0) {
|
||||
context.ui.addItem(
|
||||
@@ -47,29 +65,40 @@ async function updateAction(context: CommandContext, args: string) {
|
||||
return;
|
||||
}
|
||||
|
||||
let updateInfos: ExtensionUpdateInfo[] = [];
|
||||
|
||||
const extensionManager = context.services.config!.getExtensionManager();
|
||||
const extensions = context.services.config
|
||||
? context.services.config.getExtensions()
|
||||
: [];
|
||||
|
||||
if (showMessageIfNoExtensions(context, extensions)) {
|
||||
return Promise.resolve();
|
||||
}
|
||||
|
||||
try {
|
||||
await checkForAllExtensionUpdates(
|
||||
context.services.config!.getExtensions(),
|
||||
context.ui.dispatchExtensionStateUpdate,
|
||||
context.ui.dispatchExtensionStateUpdate({ type: 'BATCH_CHECK_START' });
|
||||
await extensionManager.checkForAllExtensionUpdates((extensionName, state) =>
|
||||
context.ui.dispatchExtensionStateUpdate({
|
||||
type: 'SET_STATE',
|
||||
payload: { name: extensionName, state },
|
||||
}),
|
||||
);
|
||||
context.ui.dispatchExtensionStateUpdate({ type: 'BATCH_CHECK_END' });
|
||||
|
||||
context.ui.setPendingItem({
|
||||
type: MessageType.EXTENSIONS_LIST,
|
||||
});
|
||||
if (all) {
|
||||
updateInfos = await updateAllUpdatableExtensions(
|
||||
context.services.config!.getWorkingDir(),
|
||||
// We don't have the ability to prompt for consent yet in this flow.
|
||||
(description) =>
|
||||
requestConsentInteractive(
|
||||
description,
|
||||
context.ui.addConfirmUpdateExtensionRequest,
|
||||
),
|
||||
context.services.config!.getExtensions(),
|
||||
updateInfos = await extensionManager.updateAllUpdatableExtensions(
|
||||
context.ui.extensionsUpdateState,
|
||||
context.ui.dispatchExtensionStateUpdate,
|
||||
(extensionName, state) =>
|
||||
context.ui.dispatchExtensionStateUpdate({
|
||||
type: 'SET_STATE',
|
||||
payload: { name: extensionName, state },
|
||||
}),
|
||||
);
|
||||
} else if (names?.length) {
|
||||
const workingDir = context.services.config!.getWorkingDir();
|
||||
const extensions = context.services.config!.getExtensions();
|
||||
for (const name of names) {
|
||||
const extension = extensions.find(
|
||||
@@ -85,17 +114,15 @@ async function updateAction(context: CommandContext, args: string) {
|
||||
);
|
||||
continue;
|
||||
}
|
||||
const updateInfo = await updateExtension(
|
||||
const updateInfo = await extensionManager.updateExtension(
|
||||
extension,
|
||||
workingDir,
|
||||
(description) =>
|
||||
requestConsentInteractive(
|
||||
description,
|
||||
context.ui.addConfirmUpdateExtensionRequest,
|
||||
),
|
||||
context.ui.extensionsUpdateState.get(extension.name)?.status ??
|
||||
ExtensionUpdateState.UNKNOWN,
|
||||
context.ui.dispatchExtensionStateUpdate,
|
||||
(extensionName, state) =>
|
||||
context.ui.dispatchExtensionStateUpdate({
|
||||
type: 'SET_STATE',
|
||||
payload: { name: extensionName, state },
|
||||
}),
|
||||
);
|
||||
if (updateInfo) updateInfos.push(updateInfo);
|
||||
}
|
||||
|
||||
@@ -15,7 +15,6 @@ vi.mock('@qwen-code/qwen-code-core', async (importOriginal) => {
|
||||
const original = await importOriginal<typeof core>();
|
||||
return {
|
||||
...original,
|
||||
getOauthClient: vi.fn(original.getOauthClient),
|
||||
getIdeInstaller: vi.fn(original.getIdeInstaller),
|
||||
IdeClient: {
|
||||
getInstance: vi.fn(),
|
||||
|
||||
@@ -13,6 +13,16 @@ import { createMockCommandContext } from '../../test-utils/mockCommandContext.js
|
||||
vi.mock('../../i18n/index.js', () => ({
|
||||
setLanguageAsync: vi.fn().mockResolvedValue(undefined),
|
||||
getCurrentLanguage: vi.fn().mockReturnValue('en'),
|
||||
detectSystemLanguage: vi.fn().mockReturnValue('en'),
|
||||
getLanguageNameFromLocale: vi.fn((locale: string) => {
|
||||
const map: Record<string, string> = {
|
||||
zh: 'Chinese',
|
||||
en: 'English',
|
||||
ru: 'Russian',
|
||||
de: 'German',
|
||||
};
|
||||
return map[locale] || 'English';
|
||||
}),
|
||||
t: vi.fn((key: string) => key),
|
||||
}));
|
||||
|
||||
@@ -61,7 +71,10 @@ vi.mock('@qwen-code/qwen-code-core', async (importOriginal) => {
|
||||
|
||||
// Import modules after mocking
|
||||
import * as i18n from '../../i18n/index.js';
|
||||
import { languageCommand } from './languageCommand.js';
|
||||
import {
|
||||
languageCommand,
|
||||
initializeLlmOutputLanguage,
|
||||
} from './languageCommand.js';
|
||||
|
||||
describe('languageCommand', () => {
|
||||
let mockContext: CommandContext;
|
||||
@@ -186,6 +199,39 @@ describe('languageCommand', () => {
|
||||
content: expect.stringContaining('Chinese'),
|
||||
});
|
||||
});
|
||||
|
||||
it('should parse Unicode LLM output language from marker', async () => {
|
||||
vi.mocked(fs.existsSync).mockReturnValue(true);
|
||||
vi.mocked(fs.readFileSync).mockReturnValue(
|
||||
[
|
||||
'# ⚠️ CRITICAL: 中文 Output Language Rule - HIGHEST PRIORITY ⚠️',
|
||||
'<!-- qwen-code:llm-output-language: 中文 -->',
|
||||
'',
|
||||
'Some other content...',
|
||||
].join('\n'),
|
||||
);
|
||||
|
||||
vi.mocked(i18n.t).mockImplementation(
|
||||
(key: string, params?: Record<string, string>) => {
|
||||
if (params && key.includes('{{lang}}')) {
|
||||
return key.replace('{{lang}}', params['lang'] || '');
|
||||
}
|
||||
return key;
|
||||
},
|
||||
);
|
||||
|
||||
if (!languageCommand.action) {
|
||||
throw new Error('The language command must have an action.');
|
||||
}
|
||||
|
||||
const result = await languageCommand.action(mockContext, '');
|
||||
|
||||
expect(result).toEqual({
|
||||
type: 'message',
|
||||
messageType: 'info',
|
||||
content: expect.stringContaining('中文'),
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
describe('main command action - config not available', () => {
|
||||
@@ -400,6 +446,34 @@ describe('languageCommand', () => {
|
||||
});
|
||||
});
|
||||
|
||||
it('should normalize locale code "ru" to "Russian"', async () => {
|
||||
if (!languageCommand.action) {
|
||||
throw new Error('The language command must have an action.');
|
||||
}
|
||||
|
||||
await languageCommand.action(mockContext, 'output ru');
|
||||
|
||||
expect(fs.writeFileSync).toHaveBeenCalledWith(
|
||||
expect.stringContaining('output-language.md'),
|
||||
expect.stringContaining('Russian'),
|
||||
'utf-8',
|
||||
);
|
||||
});
|
||||
|
||||
it('should normalize locale code "de" to "German"', async () => {
|
||||
if (!languageCommand.action) {
|
||||
throw new Error('The language command must have an action.');
|
||||
}
|
||||
|
||||
await languageCommand.action(mockContext, 'output de');
|
||||
|
||||
expect(fs.writeFileSync).toHaveBeenCalledWith(
|
||||
expect.stringContaining('output-language.md'),
|
||||
expect.stringContaining('German'),
|
||||
'utf-8',
|
||||
);
|
||||
});
|
||||
|
||||
it('should handle file write errors gracefully', async () => {
|
||||
vi.mocked(fs.writeFileSync).mockImplementation(() => {
|
||||
throw new Error('Permission denied');
|
||||
@@ -481,6 +555,8 @@ describe('languageCommand', () => {
|
||||
const nestedNames = uiSubcommand?.subCommands?.map((c) => c.name);
|
||||
expect(nestedNames).toContain('zh-CN');
|
||||
expect(nestedNames).toContain('en-US');
|
||||
expect(nestedNames).toContain('ru-RU');
|
||||
expect(nestedNames).toContain('de-DE');
|
||||
});
|
||||
|
||||
it('should have action that sets language', async () => {
|
||||
@@ -542,16 +618,9 @@ describe('languageCommand', () => {
|
||||
const enUSSubcommand = uiSubcommand?.subCommands?.find(
|
||||
(c) => c.name === 'en-US',
|
||||
);
|
||||
|
||||
it('zh-CN should have aliases', () => {
|
||||
expect(zhCNSubcommand?.altNames).toContain('zh');
|
||||
expect(zhCNSubcommand?.altNames).toContain('chinese');
|
||||
});
|
||||
|
||||
it('en-US should have aliases', () => {
|
||||
expect(enUSSubcommand?.altNames).toContain('en');
|
||||
expect(enUSSubcommand?.altNames).toContain('english');
|
||||
});
|
||||
const deDESubcommand = uiSubcommand?.subCommands?.find(
|
||||
(c) => c.name === 'de-DE',
|
||||
);
|
||||
|
||||
it('zh-CN action should set Chinese', async () => {
|
||||
if (!zhCNSubcommand?.action) {
|
||||
@@ -583,6 +652,21 @@ describe('languageCommand', () => {
|
||||
});
|
||||
});
|
||||
|
||||
it('de-DE action should set German', async () => {
|
||||
if (!deDESubcommand?.action) {
|
||||
throw new Error('de-DE subcommand must have an action.');
|
||||
}
|
||||
|
||||
const result = await deDESubcommand.action(mockContext, '');
|
||||
|
||||
expect(i18n.setLanguageAsync).toHaveBeenCalledWith('de');
|
||||
expect(result).toEqual({
|
||||
type: 'message',
|
||||
messageType: 'info',
|
||||
content: expect.stringContaining('UI language changed'),
|
||||
});
|
||||
});
|
||||
|
||||
it('should reject extra arguments', async () => {
|
||||
if (!zhCNSubcommand?.action) {
|
||||
throw new Error('zh-CN subcommand must have an action.');
|
||||
@@ -597,4 +681,74 @@ describe('languageCommand', () => {
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
describe('initializeLlmOutputLanguage', () => {
|
||||
beforeEach(() => {
|
||||
vi.clearAllMocks();
|
||||
vi.mocked(fs.existsSync).mockReturnValue(false);
|
||||
vi.mocked(fs.mkdirSync).mockImplementation(() => undefined);
|
||||
vi.mocked(fs.writeFileSync).mockImplementation(() => undefined);
|
||||
});
|
||||
|
||||
it('should create file when it does not exist', () => {
|
||||
vi.mocked(fs.existsSync).mockReturnValue(false);
|
||||
vi.mocked(i18n.detectSystemLanguage).mockReturnValue('en');
|
||||
|
||||
initializeLlmOutputLanguage();
|
||||
|
||||
expect(fs.mkdirSync).toHaveBeenCalled();
|
||||
expect(fs.writeFileSync).toHaveBeenCalledWith(
|
||||
expect.stringContaining('output-language.md'),
|
||||
expect.stringContaining('English'),
|
||||
'utf-8',
|
||||
);
|
||||
});
|
||||
|
||||
it('should NOT overwrite existing file', () => {
|
||||
vi.mocked(fs.existsSync).mockReturnValue(true);
|
||||
|
||||
initializeLlmOutputLanguage();
|
||||
|
||||
expect(fs.writeFileSync).not.toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it('should detect Chinese locale and create Chinese rule file', () => {
|
||||
vi.mocked(fs.existsSync).mockReturnValue(false);
|
||||
vi.mocked(i18n.detectSystemLanguage).mockReturnValue('zh');
|
||||
|
||||
initializeLlmOutputLanguage();
|
||||
|
||||
expect(fs.writeFileSync).toHaveBeenCalledWith(
|
||||
expect.stringContaining('output-language.md'),
|
||||
expect.stringContaining('Chinese'),
|
||||
'utf-8',
|
||||
);
|
||||
});
|
||||
|
||||
it('should detect Russian locale and create Russian rule file', () => {
|
||||
vi.mocked(fs.existsSync).mockReturnValue(false);
|
||||
vi.mocked(i18n.detectSystemLanguage).mockReturnValue('ru');
|
||||
|
||||
initializeLlmOutputLanguage();
|
||||
|
||||
expect(fs.writeFileSync).toHaveBeenCalledWith(
|
||||
expect.stringContaining('output-language.md'),
|
||||
expect.stringContaining('Russian'),
|
||||
'utf-8',
|
||||
);
|
||||
});
|
||||
|
||||
it('should detect German locale and create German rule file', () => {
|
||||
vi.mocked(fs.existsSync).mockReturnValue(false);
|
||||
vi.mocked(i18n.detectSystemLanguage).mockReturnValue('de');
|
||||
|
||||
initializeLlmOutputLanguage();
|
||||
|
||||
expect(fs.writeFileSync).toHaveBeenCalledWith(
|
||||
expect.stringContaining('output-language.md'),
|
||||
expect.stringContaining('German'),
|
||||
'utf-8',
|
||||
);
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
/**
|
||||
* @license
|
||||
* Copyright 2025 Google LLC
|
||||
* Copyright 2025 Qwen team
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
@@ -15,51 +15,72 @@ import { SettingScope } from '../../config/settings.js';
|
||||
import {
|
||||
setLanguageAsync,
|
||||
getCurrentLanguage,
|
||||
detectSystemLanguage,
|
||||
getLanguageNameFromLocale,
|
||||
type SupportedLanguage,
|
||||
t,
|
||||
} from '../../i18n/index.js';
|
||||
import {
|
||||
SUPPORTED_LANGUAGES,
|
||||
type LanguageDefinition,
|
||||
} from '../../i18n/languages.js';
|
||||
import * as fs from 'node:fs';
|
||||
import * as path from 'node:path';
|
||||
import { Storage } from '@qwen-code/qwen-code-core';
|
||||
|
||||
const LLM_OUTPUT_LANGUAGE_RULE_FILENAME = 'output-language.md';
|
||||
const LLM_OUTPUT_LANGUAGE_MARKER_PREFIX = 'qwen-code:llm-output-language:';
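A condensed sketch of the round-trip this marker constant enables: the rule file embeds a machine-readable HTML comment, and the parser later reads the language back out with the same regular expression pattern used below. Helper names here are illustrative; the real code also falls back to parsing the legacy heading format.

// Sketch of writing and reading the llm-output-language marker.
const MARKER_PREFIX = 'qwen-code:llm-output-language:';

function writeMarker(language: string): string {
  return `<!-- ${MARKER_PREFIX} ${language} -->`;
}

function readMarker(content: string): string | null {
  const match = content.match(
    new RegExp(String.raw`<!--\s*${MARKER_PREFIX}\s*(.*?)\s*-->`, 'i'),
  );
  return match?.[1]?.trim() || null;
}

// readMarker(writeMarker('中文')) === '中文'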
|
||||
|
||||
function parseUiLanguageArg(input: string): SupportedLanguage | null {
|
||||
const lowered = input.trim().toLowerCase();
|
||||
if (!lowered) return null;
|
||||
for (const lang of SUPPORTED_LANGUAGES) {
|
||||
if (
|
||||
lowered === lang.code ||
|
||||
lowered === lang.id.toLowerCase() ||
|
||||
lowered === lang.fullName.toLowerCase()
|
||||
) {
|
||||
return lang.code;
|
||||
}
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
function formatUiLanguageDisplay(lang: SupportedLanguage): string {
|
||||
const option = SUPPORTED_LANGUAGES.find((o) => o.code === lang);
|
||||
return option ? `${option.fullName}(${option.id})` : lang;
|
||||
}
|
||||
|
||||
function sanitizeLanguageForMarker(language: string): string {
|
||||
// HTML comments cannot contain "--" or end markers like "-->" or "--!>" safely.
|
||||
// Also avoid newlines to keep the marker single-line and robust to parsing.
|
||||
return language
|
||||
.replace(/[\r\n]/g, ' ')
|
||||
.replace(/--!?>/g, '')
|
||||
.replace(/--/g, '');
|
||||
}
|
||||
|
||||
/**
|
||||
* Generates the LLM output language rule template based on the language name.
|
||||
*/
|
||||
function generateLlmOutputLanguageRule(language: string): string {
|
||||
return `# ⚠️ CRITICAL: ${language} Output Language Rule - HIGHEST PRIORITY ⚠️
|
||||
const markerLanguage = sanitizeLanguageForMarker(language);
|
||||
return `# Output language preference: ${language}
|
||||
<!-- ${LLM_OUTPUT_LANGUAGE_MARKER_PREFIX} ${markerLanguage} -->
|
||||
|
||||
## 🚨 MANDATORY RULE - NO EXCEPTIONS 🚨
|
||||
## Goal
|
||||
Prefer responding in **${language}** for normal assistant messages and explanations.
|
||||
|
||||
**YOU MUST RESPOND IN ${language.toUpperCase()} FOR EVERY SINGLE OUTPUT, REGARDLESS OF THE USER'S INPUT LANGUAGE.**
|
||||
## Keep technical artifacts unchanged
|
||||
Do **not** translate or rewrite:
|
||||
- Code blocks, CLI commands, file paths, stack traces, logs, JSON keys, identifiers
|
||||
- Exact quoted text from the user (keep quotes verbatim)
|
||||
|
||||
This is a **NON-NEGOTIABLE** requirement. Even if the user writes in English, says "hi", asks a simple question, or explicitly requests another language, **YOU MUST ALWAYS RESPOND IN ${language.toUpperCase()}.**
|
||||
## When a conflict exists
|
||||
If higher-priority instructions (system/developer) require a different behavior, follow them.
|
||||
|
||||
## What Must Be in ${language}
|
||||
|
||||
**EVERYTHING** you output: conversation replies, tool call descriptions, success/error messages, generated file content (comments, documentation), and all explanatory text.
|
||||
|
||||
**Tool outputs**: All descriptive text from \`read_file\`, \`write_file\`, \`codebase_search\`, \`run_terminal_cmd\`, \`todo_write\`, \`web_search\`, etc. MUST be in ${language}.
|
||||
|
||||
## Examples
|
||||
|
||||
### ✅ CORRECT:
|
||||
- User says "hi" → Respond in ${language} (e.g., "Bonjour" if ${language} is French)
|
||||
- Tool result → "已成功读取文件 config.json" (if ${language} is Chinese)
|
||||
- Error → "无法找到指定的文件" (if ${language} is Chinese)
|
||||
|
||||
### ❌ WRONG:
|
||||
- User says "hi" → "Hello" in English
|
||||
- Tool result → "Successfully read file" in English
|
||||
- Error → "File not found" in English
|
||||
|
||||
## Notes
|
||||
|
||||
- Code elements (variable/function names, syntax) can remain in English
|
||||
- Comments, documentation, and all other text MUST be in ${language}
|
||||
|
||||
**THIS RULE IS ACTIVE NOW. ALL OUTPUTS MUST BE IN ${language.toUpperCase()}. NO EXCEPTIONS.**
|
||||
## Tool / system outputs
|
||||
Raw tool/system outputs may contain fixed-format English. Preserve them verbatim, and if needed, add a short **${language}** explanation below.
|
||||
`;
|
||||
}
|
||||
|
||||
@@ -73,6 +94,80 @@ function getLlmOutputLanguageRulePath(): string {
|
||||
);
|
||||
}
|
||||
|
||||
/**
|
||||
* Normalizes a language input to its full English name.
|
||||
* If the input is a known locale code (e.g., "ru", "zh"), converts it to the full name.
|
||||
* Otherwise, returns the input as-is (e.g., "Japanese" stays "Japanese").
|
||||
*/
|
||||
function normalizeLanguageName(language: string): string {
|
||||
const lowered = language.toLowerCase();
|
||||
// Check if it's a known locale code and convert to full name
|
||||
const fullName = getLanguageNameFromLocale(lowered);
|
||||
// If getLanguageNameFromLocale returned a different value, use it
|
||||
// Otherwise, use the original input (preserves case for unknown languages)
|
||||
if (fullName !== 'English' || lowered === 'en') {
|
||||
return fullName;
|
||||
}
|
||||
return language;
|
||||
}
|
||||
|
||||
function extractLlmOutputLanguageFromRuleFileContent(
|
||||
content: string,
|
||||
): string | null {
|
||||
// Preferred: machine-readable marker that supports Unicode and spaces.
|
||||
// Example: <!-- qwen-code:llm-output-language: 中文 -->
|
||||
const markerMatch = content.match(
|
||||
new RegExp(
|
||||
String.raw`<!--\s*${LLM_OUTPUT_LANGUAGE_MARKER_PREFIX}\s*(.*?)\s*-->`,
|
||||
'i',
|
||||
),
|
||||
);
|
||||
if (markerMatch?.[1]) {
|
||||
const lang = markerMatch[1].trim();
|
||||
if (lang) return lang;
|
||||
}
|
||||
|
||||
// Backward compatibility: parse the heading line.
|
||||
// Example: "# CRITICAL: Chinese Output Language Rule - HIGHEST PRIORITY"
|
||||
// Example: "# ⚠️ CRITICAL: 日本語 Output Language Rule - HIGHEST PRIORITY ⚠️"
|
||||
const headingMatch = content.match(
|
||||
/^#.*?CRITICAL:\s*(.*?)\s+Output Language Rule\b/im,
|
||||
);
|
||||
if (headingMatch?.[1]) {
|
||||
const lang = headingMatch[1].trim();
|
||||
if (lang) return lang;
|
||||
}
|
||||
|
||||
return null;
|
||||
}
|
||||
|
||||
/**
|
||||
* Initializes the LLM output language rule file on first startup.
|
||||
* If the file already exists, it is not overwritten (respects user preference).
|
||||
*/
|
||||
export function initializeLlmOutputLanguage(): void {
|
||||
const filePath = getLlmOutputLanguageRulePath();
|
||||
|
||||
// Skip if file already exists (user preference)
|
||||
if (fs.existsSync(filePath)) {
|
||||
return;
|
||||
}
|
||||
|
||||
// Detect system language and map to language name
|
||||
const detectedLocale = detectSystemLanguage();
|
||||
const languageName = getLanguageNameFromLocale(detectedLocale);
|
||||
|
||||
// Generate the rule file
|
||||
const content = generateLlmOutputLanguageRule(languageName);
|
||||
|
||||
// Ensure directory exists
|
||||
const dir = path.dirname(filePath);
|
||||
fs.mkdirSync(dir, { recursive: true });
|
||||
|
||||
// Write file
|
||||
fs.writeFileSync(filePath, content, 'utf-8');
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets the current LLM output language from the rule file if it exists.
|
||||
*/
|
||||
@@ -81,12 +176,7 @@ function getCurrentLlmOutputLanguage(): string | null {
|
||||
if (fs.existsSync(filePath)) {
|
||||
try {
|
||||
const content = fs.readFileSync(filePath, 'utf-8');
|
||||
// Extract language name from the first line
|
||||
// Template format: "# CRITICAL: Chinese Output Language Rule - HIGHEST PRIORITY"
|
||||
const match = content.match(/^#.*?(\w+)\s+Output Language Rule/i);
|
||||
if (match) {
|
||||
return match[1];
|
||||
}
|
||||
return extractLlmOutputLanguageFromRuleFileContent(content);
|
||||
} catch {
|
||||
// Ignore errors
|
||||
}
|
||||
@@ -127,18 +217,11 @@ async function setUiLanguage(
|
||||
// Reload commands to update their descriptions with the new language
|
||||
context.ui.reloadCommands();
|
||||
|
||||
// Map language codes to friendly display names
|
||||
const langDisplayNames: Partial<Record<SupportedLanguage, string>> = {
|
||||
zh: '中文(zh-CN)',
|
||||
en: 'English(en-US)',
|
||||
ru: 'Русский (ru-RU)',
|
||||
};
|
||||
|
||||
return {
|
||||
type: 'message',
|
||||
messageType: 'info',
|
||||
content: t('UI language changed to {{lang}}', {
|
||||
lang: langDisplayNames[lang] || lang,
|
||||
lang: formatUiLanguageDisplay(lang),
|
||||
}),
|
||||
};
|
||||
}
|
||||
@@ -151,7 +234,9 @@ function generateLlmOutputLanguageRuleFile(
|
||||
): Promise<MessageActionReturn> {
|
||||
try {
|
||||
const filePath = getLlmOutputLanguageRulePath();
|
||||
const content = generateLlmOutputLanguageRule(language);
|
||||
// Normalize locale codes (e.g., "ru" -> "Russian") to full language names
|
||||
const normalizedLanguage = normalizeLanguageName(language);
|
||||
const content = generateLlmOutputLanguageRule(normalizedLanguage);
|
||||
|
||||
// Ensure directory exists
|
||||
const dir = path.dirname(filePath);
|
||||
@@ -196,7 +281,6 @@ export const languageCommand: SlashCommand = {
|
||||
args: string,
|
||||
): Promise<SlashCommandActionReturn> => {
|
||||
const { services } = context;
|
||||
|
||||
if (!services.config) {
|
||||
return {
|
||||
type: 'message',
|
||||
@@ -207,18 +291,37 @@ export const languageCommand: SlashCommand = {
|
||||
|
||||
const trimmedArgs = args.trim();
|
||||
|
||||
// Handle subcommands if called directly via action (for tests/backward compatibility)
|
||||
const parts = trimmedArgs.split(/\s+/);
|
||||
const firstArg = parts[0].toLowerCase();
|
||||
const subArgs = parts.slice(1).join(' ');
|
||||
|
||||
if (firstArg === 'ui' || firstArg === 'output') {
|
||||
const subCommand = languageCommand.subCommands?.find(
|
||||
(s) => s.name === firstArg,
|
||||
);
|
||||
if (subCommand?.action) {
|
||||
return subCommand.action(
|
||||
context,
|
||||
subArgs,
|
||||
) as Promise<SlashCommandActionReturn>;
|
||||
}
|
||||
}
|
||||
|
||||
// If no arguments, show current language settings and usage
|
||||
if (!trimmedArgs) {
|
||||
const currentUiLang = getCurrentLanguage();
|
||||
const currentLlmLang = getCurrentLlmOutputLanguage();
|
||||
const message = [
|
||||
t('Current UI language: {{lang}}', { lang: currentUiLang }),
|
||||
t('Current UI language: {{lang}}', {
|
||||
lang: formatUiLanguageDisplay(currentUiLang as SupportedLanguage),
|
||||
}),
|
||||
currentLlmLang
|
||||
? t('Current LLM output language: {{lang}}', { lang: currentLlmLang })
|
||||
: t('LLM output language not set'),
|
||||
'',
|
||||
t('Available subcommands:'),
|
||||
` /language ui [zh-CN|en-US|ru-RU] - ${t('Set UI language')}`,
|
||||
` /language ui [${SUPPORTED_LANGUAGES.map((o) => o.id).join('|')}] - ${t('Set UI language')}`,
|
||||
` /language output <language> - ${t('Set LLM output language')}`,
|
||||
].join('\n');
|
||||
|
||||
@@ -229,115 +332,21 @@ export const languageCommand: SlashCommand = {
|
||||
};
|
||||
}
|
||||
|
||||
// Parse subcommand
|
||||
const parts = trimmedArgs.split(/\s+/);
|
||||
const subcommand = parts[0].toLowerCase();
|
||||
|
||||
if (subcommand === 'ui') {
|
||||
// Handle /language ui [zh-CN|en-US|ru-RU]
|
||||
if (parts.length === 1) {
|
||||
// Show UI language subcommand help
|
||||
return {
|
||||
type: 'message',
|
||||
messageType: 'info',
|
||||
content: [
|
||||
t('Set UI language'),
|
||||
'',
|
||||
t('Usage: /language ui [zh-CN|en-US|ru-RU]'),
|
||||
'',
|
||||
t('Available options:'),
|
||||
t(' - zh-CN: Simplified Chinese'),
|
||||
t(' - en-US: English'),
|
||||
t(' - ru-RU: Russian'),
|
||||
'',
|
||||
t(
|
||||
'To request additional UI language packs, please open an issue on GitHub.',
|
||||
),
|
||||
].join('\n'),
|
||||
};
|
||||
}
|
||||
|
||||
const langArg = parts[1].toLowerCase();
|
||||
let targetLang: SupportedLanguage | null = null;
|
||||
|
||||
if (langArg === 'en' || langArg === 'english' || langArg === 'en-us') {
|
||||
targetLang = 'en';
|
||||
} else if (
|
||||
langArg === 'zh' ||
|
||||
langArg === 'chinese' ||
|
||||
langArg === '中文' ||
|
||||
langArg === 'zh-cn'
|
||||
) {
|
||||
targetLang = 'zh';
|
||||
} else if (
|
||||
langArg === 'ru' ||
|
||||
langArg === 'ru-RU' ||
|
||||
langArg === 'russian' ||
|
||||
langArg === 'русский'
|
||||
) {
|
||||
targetLang = 'ru';
|
||||
} else {
|
||||
return {
|
||||
type: 'message',
|
||||
messageType: 'error',
|
||||
content: t('Invalid language. Available: en-US, zh-CN, ru-RU'),
|
||||
};
|
||||
}
|
||||
|
||||
return setUiLanguage(context, targetLang);
|
||||
} else if (subcommand === 'output') {
|
||||
// Handle /language output <language>
|
||||
if (parts.length === 1) {
|
||||
return {
|
||||
type: 'message',
|
||||
messageType: 'info',
|
||||
content: [
|
||||
t('Set LLM output language'),
|
||||
'',
|
||||
t('Usage: /language output <language>'),
|
||||
` ${t('Example: /language output 中文')}`,
|
||||
].join('\n'),
|
||||
};
|
||||
}
|
||||
|
||||
// Join all parts after "output" as the language name
|
||||
const language = parts.slice(1).join(' ');
|
||||
return generateLlmOutputLanguageRuleFile(language);
|
||||
} else {
|
||||
// Backward compatibility: treat as UI language
|
||||
const langArg = trimmedArgs.toLowerCase();
|
||||
let targetLang: SupportedLanguage | null = null;
|
||||
|
||||
if (langArg === 'en' || langArg === 'english' || langArg === 'en-us') {
|
||||
targetLang = 'en';
|
||||
} else if (
|
||||
langArg === 'zh' ||
|
||||
langArg === 'chinese' ||
|
||||
langArg === '中文' ||
|
||||
langArg === 'zh-cn'
|
||||
) {
|
||||
targetLang = 'zh';
|
||||
} else if (
|
||||
langArg === 'ru' ||
|
||||
langArg === 'ru-RU' ||
|
||||
langArg === 'russian' ||
|
||||
langArg === 'русский'
|
||||
) {
|
||||
targetLang = 'ru';
|
||||
} else {
|
||||
return {
|
||||
type: 'message',
|
||||
messageType: 'error',
|
||||
content: [
|
||||
t('Invalid command. Available subcommands:'),
|
||||
' - /language ui [zh-CN|en-US|ru-RU] - ' + t('Set UI language'),
|
||||
' - /language output <language> - ' + t('Set LLM output language'),
|
||||
].join('\n'),
|
||||
};
|
||||
}
|
||||
|
||||
// Handle backward compatibility for /language [lang]
|
||||
const targetLang = parseUiLanguageArg(trimmedArgs);
|
||||
if (targetLang) {
|
||||
return setUiLanguage(context, targetLang);
|
||||
}
|
||||
|
||||
return {
|
||||
type: 'message',
|
||||
messageType: 'error',
|
||||
content: [
|
||||
t('Invalid command. Available subcommands:'),
|
||||
` - /language ui [${SUPPORTED_LANGUAGES.map((o) => o.id).join('|')}] - ${t('Set UI language')}`,
|
||||
' - /language output <language> - ' + t('Set LLM output language'),
|
||||
].join('\n'),
|
||||
};
|
||||
},
|
||||
subCommands: [
|
||||
{
|
||||
@@ -358,11 +367,14 @@ export const languageCommand: SlashCommand = {
|
||||
content: [
|
||||
t('Set UI language'),
|
||||
'',
|
||||
t('Usage: /language ui [zh-CN|en-US]'),
|
||||
t('Usage: /language ui [{{options}}]', {
|
||||
options: SUPPORTED_LANGUAGES.map((o) => o.id).join('|'),
|
||||
}),
|
||||
'',
|
||||
t('Available options:'),
|
||||
t(' - zh-CN: Simplified Chinese'),
|
||||
t(' - en-US: English'),
|
||||
...SUPPORTED_LANGUAGES.map(
|
||||
(o) => ` - ${o.id}: ${t(o.fullName)}`,
|
||||
),
|
||||
'',
|
||||
t(
|
||||
'To request additional UI language packs, please open an issue on GitHub.',
|
||||
@@ -371,99 +383,20 @@ export const languageCommand: SlashCommand = {
|
||||
};
|
||||
}
|
||||
|
||||
const langArg = trimmedArgs.toLowerCase();
|
||||
let targetLang: SupportedLanguage | null = null;
|
||||
|
||||
if (langArg === 'en' || langArg === 'english' || langArg === 'en-us') {
|
||||
targetLang = 'en';
|
||||
} else if (
|
||||
langArg === 'zh' ||
|
||||
langArg === 'chinese' ||
|
||||
langArg === '中文' ||
|
||||
langArg === 'zh-cn'
|
||||
) {
|
||||
targetLang = 'zh';
|
||||
} else {
|
||||
const targetLang = parseUiLanguageArg(trimmedArgs);
|
||||
if (!targetLang) {
|
||||
return {
|
||||
type: 'message',
|
||||
messageType: 'error',
|
||||
content: t('Invalid language. Available: en-US, zh-CN'),
|
||||
content: t('Invalid language. Available: {{options}}', {
|
||||
options: SUPPORTED_LANGUAGES.map((o) => o.id).join(','),
|
||||
}),
|
||||
};
|
||||
}
|
||||
|
||||
return setUiLanguage(context, targetLang);
|
||||
},
|
||||
subCommands: [
|
||||
{
|
||||
name: 'zh-CN',
|
||||
altNames: ['zh', 'chinese', '中文'],
|
||||
get description() {
|
||||
return t('Set UI language to Simplified Chinese (zh-CN)');
|
||||
},
|
||||
kind: CommandKind.BUILT_IN,
|
||||
action: async (
|
||||
context: CommandContext,
|
||||
args: string,
|
||||
): Promise<MessageActionReturn> => {
|
||||
if (args.trim().length > 0) {
|
||||
return {
|
||||
type: 'message',
|
||||
messageType: 'error',
|
||||
content: t(
|
||||
'Language subcommands do not accept additional arguments.',
|
||||
),
|
||||
};
|
||||
}
|
||||
return setUiLanguage(context, 'zh');
|
||||
},
|
||||
},
|
||||
{
|
||||
name: 'en-US',
|
||||
altNames: ['en', 'english'],
|
||||
get description() {
|
||||
return t('Set UI language to English (en-US)');
|
||||
},
|
||||
kind: CommandKind.BUILT_IN,
|
||||
action: async (
|
||||
context: CommandContext,
|
||||
args: string,
|
||||
): Promise<MessageActionReturn> => {
|
||||
if (args.trim().length > 0) {
|
||||
return {
|
||||
type: 'message',
|
||||
messageType: 'error',
|
||||
content: t(
|
||||
'Language subcommands do not accept additional arguments.',
|
||||
),
|
||||
};
|
||||
}
|
||||
return setUiLanguage(context, 'en');
|
||||
},
|
||||
},
|
||||
{
|
||||
name: 'ru-RU',
|
||||
altNames: ['ru', 'russian', 'русский'],
|
||||
get description() {
|
||||
return t('Set UI language to Russian (ru-RU)');
|
||||
},
|
||||
kind: CommandKind.BUILT_IN,
|
||||
action: async (
|
||||
context: CommandContext,
|
||||
args: string,
|
||||
): Promise<MessageActionReturn> => {
|
||||
if (args.trim().length > 0) {
|
||||
return {
|
||||
type: 'message',
|
||||
messageType: 'error',
|
||||
content: t(
|
||||
'Language subcommands do not accept additional arguments.',
|
||||
),
|
||||
};
|
||||
}
|
||||
return setUiLanguage(context, 'ru');
|
||||
},
|
||||
},
|
||||
],
|
||||
subCommands: SUPPORTED_LANGUAGES.map(createUiLanguageSubCommand),
|
||||
},
|
||||
{
|
||||
name: 'output',
|
||||
@@ -496,3 +429,28 @@ export const languageCommand: SlashCommand = {
|
||||
},
|
||||
],
|
||||
};
|
||||
|
||||
/**
|
||||
* Helper to create a UI language subcommand.
|
||||
*/
|
||||
function createUiLanguageSubCommand(option: LanguageDefinition): SlashCommand {
|
||||
return {
|
||||
name: option.id,
|
||||
get description() {
|
||||
return t('Set UI language to {{name}}', { name: option.fullName });
|
||||
},
|
||||
kind: CommandKind.BUILT_IN,
|
||||
action: async (context, args) => {
|
||||
if (args.trim().length > 0) {
|
||||
return {
|
||||
type: 'message',
|
||||
messageType: 'error',
|
||||
content: t(
|
||||
'Language subcommands do not accept additional arguments.',
|
||||
),
|
||||
};
|
||||
}
|
||||
return setUiLanguage(context, option.code);
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
@@ -26,6 +26,8 @@ export const summaryCommand: SlashCommand = {
|
||||
action: async (context): Promise<SlashCommandActionReturn> => {
|
||||
const { config } = context.services;
|
||||
const { ui } = context;
|
||||
const executionMode = context.executionMode ?? 'interactive';
|
||||
|
||||
if (!config) {
|
||||
return {
|
||||
type: 'message',
|
||||
@@ -43,8 +45,8 @@ export const summaryCommand: SlashCommand = {
|
||||
};
|
||||
}
|
||||
|
||||
// Check if already generating summary
|
||||
if (ui.pendingItem) {
|
||||
// Check if already generating summary (interactive UI only)
|
||||
if (executionMode === 'interactive' && ui.pendingItem) {
|
||||
ui.addItem(
|
||||
{
|
||||
type: 'error' as const,
|
||||
@@ -63,29 +65,22 @@ export const summaryCommand: SlashCommand = {
|
||||
};
|
||||
}
|
||||
|
||||
try {
|
||||
// Get the current chat history
|
||||
const getChatHistory = () => {
|
||||
const chat = geminiClient.getChat();
|
||||
const history = chat.getHistory();
|
||||
return chat.getHistory();
|
||||
};
|
||||
|
||||
const validateChatHistory = (
|
||||
history: ReturnType<typeof getChatHistory>,
|
||||
) => {
|
||||
if (history.length <= 2) {
|
||||
return {
|
||||
type: 'message',
|
||||
messageType: 'info',
|
||||
content: t('No conversation found to summarize.'),
|
||||
};
|
||||
throw new Error(t('No conversation found to summarize.'));
|
||||
}
|
||||
};
|
||||
|
||||
// Show loading state
|
||||
const pendingMessage: HistoryItemSummary = {
|
||||
type: 'summary',
|
||||
summary: {
|
||||
isPending: true,
|
||||
stage: 'generating',
|
||||
},
|
||||
};
|
||||
ui.setPendingItem(pendingMessage);
|
||||
|
||||
const generateSummaryMarkdown = async (
|
||||
history: ReturnType<typeof getChatHistory>,
|
||||
): Promise<string> => {
|
||||
// Build the conversation context for summary generation
|
||||
const conversationContext = history.map((message) => ({
|
||||
role: message.role,
|
||||
@@ -121,19 +116,21 @@ export const summaryCommand: SlashCommand = {
|
||||
|
||||
if (!markdownSummary) {
|
||||
throw new Error(
|
||||
'Failed to generate summary - no text content received from LLM response',
|
||||
t(
|
||||
'Failed to generate summary - no text content received from LLM response',
|
||||
),
|
||||
);
|
||||
}
|
||||
|
||||
// Update loading message to show saving progress
|
||||
ui.setPendingItem({
|
||||
type: 'summary',
|
||||
summary: {
|
||||
isPending: true,
|
||||
stage: 'saving',
|
||||
},
|
||||
});
|
||||
return markdownSummary;
|
||||
};
|
||||
|
||||
const saveSummaryToDisk = async (
|
||||
markdownSummary: string,
|
||||
): Promise<{
|
||||
filePathForDisplay: string;
|
||||
fullPath: string;
|
||||
}> => {
|
||||
// Ensure .qwen directory exists
|
||||
const projectRoot = config.getProjectRoot();
|
||||
const qwenDir = path.join(projectRoot, '.qwen');
|
||||
@@ -155,45 +152,163 @@ export const summaryCommand: SlashCommand = {
|
||||
|
||||
await fsPromises.writeFile(summaryPath, summaryContent, 'utf8');
|
||||
|
||||
// Clear pending item and show success message
|
||||
return {
|
||||
filePathForDisplay: '.qwen/PROJECT_SUMMARY.md',
|
||||
fullPath: summaryPath,
|
||||
};
|
||||
};
|
||||
|
||||
const emitInteractivePending = (stage: 'generating' | 'saving') => {
|
||||
if (executionMode !== 'interactive') {
|
||||
return;
|
||||
}
|
||||
const pendingMessage: HistoryItemSummary = {
|
||||
type: 'summary',
|
||||
summary: {
|
||||
isPending: true,
|
||||
stage,
|
||||
},
|
||||
};
|
||||
ui.setPendingItem(pendingMessage);
|
||||
};
|
||||
|
||||
const completeInteractive = (filePathForDisplay: string) => {
|
||||
if (executionMode !== 'interactive') {
|
||||
return;
|
||||
}
|
||||
ui.setPendingItem(null);
|
||||
const completedSummaryItem: HistoryItemSummary = {
|
||||
type: 'summary',
|
||||
summary: {
|
||||
isPending: false,
|
||||
stage: 'completed',
|
||||
filePath: '.qwen/PROJECT_SUMMARY.md',
|
||||
filePath: filePathForDisplay,
|
||||
},
|
||||
};
|
||||
ui.addItem(completedSummaryItem, Date.now());
|
||||
};
|
||||
|
||||
return {
|
||||
type: 'message',
|
||||
messageType: 'info',
|
||||
content: '', // Empty content since we show the message in UI component
|
||||
};
|
||||
} catch (error) {
|
||||
// Clear pending item on error
|
||||
const formatErrorMessage = (error: unknown): string =>
|
||||
t('Failed to generate project context summary: {{error}}', {
|
||||
error: error instanceof Error ? error.message : String(error),
|
||||
});
|
||||
|
||||
const failInteractive = (error: unknown) => {
|
||||
if (executionMode !== 'interactive') {
|
||||
return;
|
||||
}
|
||||
ui.setPendingItem(null);
|
||||
ui.addItem(
|
||||
{
|
||||
type: 'error' as const,
|
||||
text: `❌ ${t(
|
||||
'Failed to generate project context summary: {{error}}',
|
||||
{
|
||||
error: error instanceof Error ? error.message : String(error),
|
||||
},
|
||||
)}`,
|
||||
text: `❌ ${formatErrorMessage(error)}`,
|
||||
},
|
||||
Date.now(),
|
||||
);
|
||||
};
|
||||
|
||||
const formatSuccessMessage = (filePathForDisplay: string): string =>
|
||||
t('Saved project summary to {{filePathForDisplay}}.', {
|
||||
filePathForDisplay,
|
||||
});
|
||||
|
||||
const returnNoConversationMessage = (): SlashCommandActionReturn => {
|
||||
const msg = t('No conversation found to summarize.');
|
||||
if (executionMode === 'acp') {
|
||||
const messages = async function* () {
|
||||
yield {
|
||||
messageType: 'info' as const,
|
||||
content: msg,
|
||||
};
|
||||
};
|
||||
return {
|
||||
type: 'stream_messages',
|
||||
messages: messages(),
|
||||
};
|
||||
}
|
||||
return {
|
||||
type: 'message',
|
||||
messageType: 'info',
|
||||
content: msg,
|
||||
};
|
||||
};
|
||||
|
||||
const executeSummaryGeneration = async (
|
||||
history: ReturnType<typeof getChatHistory>,
|
||||
): Promise<{
|
||||
markdownSummary: string;
|
||||
filePathForDisplay: string;
|
||||
}> => {
|
||||
emitInteractivePending('generating');
|
||||
const markdownSummary = await generateSummaryMarkdown(history);
|
||||
emitInteractivePending('saving');
|
||||
const { filePathForDisplay } = await saveSummaryToDisk(markdownSummary);
|
||||
completeInteractive(filePathForDisplay);
|
||||
return { markdownSummary, filePathForDisplay };
|
||||
};
|
||||
|
||||
// Validate chat history once at the beginning
|
||||
const history = getChatHistory();
|
||||
try {
|
||||
validateChatHistory(history);
|
||||
} catch (_error) {
|
||||
return returnNoConversationMessage();
|
||||
}
|
||||
|
||||
if (executionMode === 'acp') {
|
||||
const messages = async function* () {
|
||||
try {
|
||||
yield {
|
||||
messageType: 'info' as const,
|
||||
content: t('Generating project summary...'),
|
||||
};
|
||||
|
||||
const { filePathForDisplay } =
|
||||
await executeSummaryGeneration(history);
|
||||
|
||||
yield {
|
||||
messageType: 'info' as const,
|
||||
content: formatSuccessMessage(filePathForDisplay),
|
||||
};
|
||||
} catch (error) {
|
||||
failInteractive(error);
|
||||
yield {
|
||||
messageType: 'error' as const,
|
||||
content: formatErrorMessage(error),
|
||||
};
|
||||
}
|
||||
};
|
||||
|
||||
return {
|
||||
type: 'stream_messages',
|
||||
messages: messages(),
|
||||
};
|
||||
}
|
||||
|
||||
try {
|
||||
const { filePathForDisplay } = await executeSummaryGeneration(history);
|
||||
|
||||
if (executionMode === 'non_interactive') {
|
||||
return {
|
||||
type: 'message',
|
||||
messageType: 'info',
|
||||
content: formatSuccessMessage(filePathForDisplay),
|
||||
};
|
||||
}
|
||||
|
||||
// Interactive mode: UI components already display progress and completion.
|
||||
return {
|
||||
type: 'message',
|
||||
messageType: 'info',
|
||||
content: '',
|
||||
};
|
||||
} catch (error) {
|
||||
failInteractive(error);
|
||||
|
||||
return {
|
||||
type: 'message',
|
||||
messageType: 'error',
|
||||
content: t('Failed to generate project context summary: {{error}}', {
|
||||
error: error instanceof Error ? error.message : String(error),
|
||||
}),
|
||||
content: formatErrorMessage(error),
|
||||
};
|
||||
}
|
||||
},
|
||||
|
||||
@@ -22,6 +22,14 @@ import type {
|
||||
|
||||
// Grouped dependencies for clarity and easier mocking
|
||||
export interface CommandContext {
|
||||
/**
|
||||
* Execution mode for the current invocation.
|
||||
*
|
||||
* - interactive: React/Ink UI mode
|
||||
* - non_interactive: non-interactive CLI mode (text/json)
|
||||
* - acp: ACP/Zed integration mode
|
||||
*/
|
||||
executionMode?: 'interactive' | 'non_interactive' | 'acp';
|
||||
// Invocation properties for when commands are called.
|
||||
invocation?: {
|
||||
/** The raw, untrimmed input string from the user. */
|
||||
@@ -108,6 +116,19 @@ export interface MessageActionReturn {
|
||||
content: string;
|
||||
}
|
||||
|
||||
/**
* The return type for a command action that streams multiple messages.
* Used for long-running operations that need to send progress updates.
*/
export interface StreamMessagesActionReturn {
type: 'stream_messages';
messages: AsyncGenerator<
{ messageType: 'info' | 'error'; content: string },
void,
unknown
>;
}
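
For orientation only (not part of the diff): a minimal sketch of a command action producing this shape, mirroring the summaryCommand ACP branch above; the action name and message text are illustrative.

// Hypothetical example, not from the repository.
const action = async (): Promise<StreamMessagesActionReturn> => {
  const messages = async function* () {
    yield { messageType: 'info' as const, content: 'Working...' };
    // long-running work happens between yields
    yield { messageType: 'info' as const, content: 'Done.' };
  };
  return { type: 'stream_messages', messages: messages() };
};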
|
||||
|
||||
/**
|
||||
* The return type for a command action that needs to open a dialog.
|
||||
*/
|
||||
@@ -174,6 +195,7 @@ export interface ConfirmActionReturn {
|
||||
export type SlashCommandActionReturn =
|
||||
| ToolActionReturn
|
||||
| MessageActionReturn
|
||||
| StreamMessagesActionReturn
|
||||
| QuitActionReturn
|
||||
| OpenDialogActionReturn
|
||||
| LoadHistoryActionReturn
|
||||
|
||||
@@ -6,6 +6,7 @@
|
||||
|
||||
import { Box, Text } from 'ink';
|
||||
import { IdeIntegrationNudge } from '../IdeIntegrationNudge.js';
|
||||
import { CommandFormatMigrationNudge } from '../CommandFormatMigrationNudge.js';
|
||||
import { LoopDetectionConfirmation } from './LoopDetectionConfirmation.js';
|
||||
import { FolderTrustDialog } from './FolderTrustDialog.js';
|
||||
import { ShellConfirmationDialog } from './ShellConfirmationDialog.js';
|
||||
@@ -16,8 +17,6 @@ import { QwenOAuthProgress } from './QwenOAuthProgress.js';
|
||||
import { AuthDialog } from '../auth/AuthDialog.js';
|
||||
import { OpenAIKeyPrompt } from './OpenAIKeyPrompt.js';
|
||||
import { EditorSettingsDialog } from './EditorSettingsDialog.js';
|
||||
import { WorkspaceMigrationDialog } from './WorkspaceMigrationDialog.js';
|
||||
import { ProQuotaDialog } from './ProQuotaDialog.js';
|
||||
import { PermissionsModifyTrustDialog } from './PermissionsModifyTrustDialog.js';
|
||||
import { ModelDialog } from './ModelDialog.js';
|
||||
import { ApprovalModeDialog } from './ApprovalModeDialog.js';
|
||||
@@ -78,24 +77,6 @@ export const DialogManager = ({
|
||||
if (uiState.showIdeRestartPrompt) {
|
||||
return <IdeTrustChangeDialog reason={uiState.ideTrustRestartReason} />;
|
||||
}
|
||||
if (uiState.showWorkspaceMigrationDialog) {
|
||||
return (
|
||||
<WorkspaceMigrationDialog
|
||||
workspaceExtensions={uiState.workspaceExtensions}
|
||||
onOpen={uiActions.onWorkspaceMigrationDialogOpen}
|
||||
onClose={uiActions.onWorkspaceMigrationDialogClose}
|
||||
/>
|
||||
);
|
||||
}
|
||||
if (uiState.proQuotaRequest) {
|
||||
return (
|
||||
<ProQuotaDialog
|
||||
failedModel={uiState.proQuotaRequest.failedModel}
|
||||
fallbackModel={uiState.proQuotaRequest.fallbackModel}
|
||||
onChoice={uiActions.handleProQuotaChoice}
|
||||
/>
|
||||
);
|
||||
}
|
||||
if (uiState.shouldShowIdePrompt) {
|
||||
return (
|
||||
<IdeIntegrationNudge
|
||||
@@ -104,6 +85,14 @@ export const DialogManager = ({
|
||||
/>
|
||||
);
|
||||
}
|
||||
if (uiState.shouldShowCommandMigrationNudge) {
|
||||
return (
|
||||
<CommandFormatMigrationNudge
|
||||
tomlFiles={uiState.commandMigrationTomlFiles}
|
||||
onComplete={uiActions.handleCommandMigrationComplete}
|
||||
/>
|
||||
);
|
||||
}
|
||||
if (uiState.isFolderTrustDialogOpen) {
|
||||
return (
|
||||
<FolderTrustDialog
|
||||
|
||||
@@ -1,91 +0,0 @@
|
||||
/**
|
||||
* @license
|
||||
* Copyright 2025 Google LLC
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
import { render } from 'ink-testing-library';
|
||||
import { describe, it, expect, vi, beforeEach, type Mock } from 'vitest';
|
||||
import { ProQuotaDialog } from './ProQuotaDialog.js';
|
||||
import { RadioButtonSelect } from './shared/RadioButtonSelect.js';
|
||||
|
||||
// Mock the child component to make it easier to test the parent
|
||||
vi.mock('./shared/RadioButtonSelect.js', () => ({
|
||||
RadioButtonSelect: vi.fn(),
|
||||
}));
|
||||
|
||||
describe('ProQuotaDialog', () => {
|
||||
beforeEach(() => {
|
||||
vi.clearAllMocks();
|
||||
});
|
||||
|
||||
it('should render with correct title and options', () => {
|
||||
const { lastFrame } = render(
|
||||
<ProQuotaDialog
|
||||
failedModel="gemini-2.5-pro"
|
||||
fallbackModel="gemini-2.5-flash"
|
||||
onChoice={() => {}}
|
||||
/>,
|
||||
);
|
||||
|
||||
const output = lastFrame();
|
||||
expect(output).toContain('Pro quota limit reached for gemini-2.5-pro.');
|
||||
|
||||
// Check that RadioButtonSelect was called with the correct items
|
||||
expect(RadioButtonSelect).toHaveBeenCalledWith(
|
||||
expect.objectContaining({
|
||||
items: [
|
||||
{
|
||||
label: 'Change auth (executes the /auth command)',
|
||||
value: 'auth',
|
||||
key: 'auth',
|
||||
},
|
||||
{
|
||||
label: `Continue with gemini-2.5-flash`,
|
||||
value: 'continue',
|
||||
key: 'continue',
|
||||
},
|
||||
],
|
||||
}),
|
||||
undefined,
|
||||
);
|
||||
});
|
||||
|
||||
it('should call onChoice with "auth" when "Change auth" is selected', () => {
|
||||
const mockOnChoice = vi.fn();
|
||||
render(
|
||||
<ProQuotaDialog
|
||||
failedModel="gemini-2.5-pro"
|
||||
fallbackModel="gemini-2.5-flash"
|
||||
onChoice={mockOnChoice}
|
||||
/>,
|
||||
);
|
||||
|
||||
// Get the onSelect function passed to RadioButtonSelect
|
||||
const onSelect = (RadioButtonSelect as Mock).mock.calls[0][0].onSelect;
|
||||
|
||||
// Simulate the selection
|
||||
onSelect('auth');
|
||||
|
||||
expect(mockOnChoice).toHaveBeenCalledWith('auth');
|
||||
});
|
||||
|
||||
it('should call onChoice with "continue" when "Continue with flash" is selected', () => {
|
||||
const mockOnChoice = vi.fn();
|
||||
render(
|
||||
<ProQuotaDialog
|
||||
failedModel="gemini-2.5-pro"
|
||||
fallbackModel="gemini-2.5-flash"
|
||||
onChoice={mockOnChoice}
|
||||
/>,
|
||||
);
|
||||
|
||||
// Get the onSelect function passed to RadioButtonSelect
|
||||
const onSelect = (RadioButtonSelect as Mock).mock.calls[0][0].onSelect;
|
||||
|
||||
// Simulate the selection
|
||||
onSelect('continue');
|
||||
|
||||
expect(mockOnChoice).toHaveBeenCalledWith('continue');
|
||||
});
|
||||
});
|
||||
@@ -1,55 +0,0 @@
|
||||
/**
|
||||
* @license
|
||||
* Copyright 2025 Google LLC
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
import type React from 'react';
|
||||
import { Box, Text } from 'ink';
|
||||
import { RadioButtonSelect } from './shared/RadioButtonSelect.js';
|
||||
import { theme } from '../semantic-colors.js';
|
||||
import { t } from '../../i18n/index.js';
|
||||
|
||||
interface ProQuotaDialogProps {
|
||||
failedModel: string;
|
||||
fallbackModel: string;
|
||||
onChoice: (choice: 'auth' | 'continue') => void;
|
||||
}
|
||||
|
||||
export function ProQuotaDialog({
|
||||
failedModel,
|
||||
fallbackModel,
|
||||
onChoice,
|
||||
}: ProQuotaDialogProps): React.JSX.Element {
|
||||
const items = [
|
||||
{
|
||||
label: t('Change auth (executes the /auth command)'),
|
||||
value: 'auth' as const,
|
||||
key: 'auth',
|
||||
},
|
||||
{
|
||||
label: t('Continue with {{model}}', { model: fallbackModel }),
|
||||
value: 'continue' as const,
|
||||
key: 'continue',
|
||||
},
|
||||
];
|
||||
|
||||
const handleSelect = (choice: 'auth' | 'continue') => {
|
||||
onChoice(choice);
|
||||
};
|
||||
|
||||
return (
|
||||
<Box borderStyle="round" flexDirection="column" paddingX={1}>
|
||||
<Text bold color={theme.status.warning}>
|
||||
{t('Pro quota limit reached for {{model}}.', { model: failedModel })}
|
||||
</Text>
|
||||
<Box marginTop={1}>
|
||||
<RadioButtonSelect
|
||||
items={items}
|
||||
initialIndex={1}
|
||||
onSelect={handleSelect}
|
||||
/>
|
||||
</Box>
|
||||
</Box>
|
||||
);
|
||||
}
|
||||
@@ -1,119 +0,0 @@
|
||||
/**
|
||||
* @license
|
||||
* Copyright 2025 Google LLC
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
import { Box, Text } from 'ink';
|
||||
import {
|
||||
type Extension,
|
||||
performWorkspaceExtensionMigration,
|
||||
} from '../../config/extension.js';
|
||||
import { RadioButtonSelect } from './shared/RadioButtonSelect.js';
|
||||
import { theme } from '../semantic-colors.js';
|
||||
import { useState } from 'react';
|
||||
import { useKeypress } from '../hooks/useKeypress.js';
|
||||
|
||||
export function WorkspaceMigrationDialog(props: {
|
||||
workspaceExtensions: Extension[];
|
||||
onOpen: () => void;
|
||||
onClose: () => void;
|
||||
}) {
|
||||
const { workspaceExtensions, onOpen, onClose } = props;
|
||||
const [migrationComplete, setMigrationComplete] = useState(false);
|
||||
const [failedExtensions, setFailedExtensions] = useState<string[]>([]);
|
||||
onOpen();
|
||||
const onMigrate = async () => {
|
||||
const failed = await performWorkspaceExtensionMigration(
|
||||
workspaceExtensions,
|
||||
// We aren't updating extensions, just moving them around, don't need to ask for consent.
|
||||
async (_) => true,
|
||||
);
|
||||
setFailedExtensions(failed);
|
||||
setMigrationComplete(true);
|
||||
};
|
||||
|
||||
useKeypress(
|
||||
(key) => {
|
||||
if (migrationComplete && key.sequence === 'q') {
|
||||
process.exit(0);
|
||||
}
|
||||
},
|
||||
{ isActive: true },
|
||||
);
|
||||
|
||||
if (migrationComplete) {
|
||||
return (
|
||||
<Box
|
||||
flexDirection="column"
|
||||
borderStyle="round"
|
||||
borderColor={theme.border.default}
|
||||
padding={1}
|
||||
>
|
||||
{failedExtensions.length > 0 ? (
|
||||
<>
|
||||
<Text color={theme.text.primary}>
|
||||
The following extensions failed to migrate. Please try installing
|
||||
them manually. To see other changes, Qwen Code must be restarted.
|
||||
Press 'q' to quit.
|
||||
</Text>
|
||||
<Box flexDirection="column" marginTop={1} marginLeft={2}>
|
||||
{failedExtensions.map((failed) => (
|
||||
<Text key={failed}>- {failed}</Text>
|
||||
))}
|
||||
</Box>
|
||||
</>
|
||||
) : (
|
||||
<Text color={theme.text.primary}>
|
||||
Migration complete. To see changes, Qwen Code must be restarted.
|
||||
Press 'q' to quit.
|
||||
</Text>
|
||||
)}
|
||||
</Box>
|
||||
);
|
||||
}
|
||||
|
||||
return (
|
||||
<Box
|
||||
flexDirection="column"
|
||||
borderStyle="round"
|
||||
borderColor={theme.border.default}
|
||||
padding={1}
|
||||
>
|
||||
<Text bold color={theme.text.primary}>
|
||||
Workspace-level extensions are deprecated{'\n'}
|
||||
</Text>
|
||||
<Text color={theme.text.primary}>
|
||||
Would you like to install them at the user level?
|
||||
</Text>
|
||||
<Text color={theme.text.primary}>
|
||||
The extension definition will remain in your workspace directory.
|
||||
</Text>
|
||||
<Text color={theme.text.primary}>
|
||||
If you opt to skip, you can install them manually using the extensions
|
||||
install command.
|
||||
</Text>
|
||||
|
||||
<Box flexDirection="column" marginTop={1} marginLeft={2}>
|
||||
{workspaceExtensions.map((extension) => (
|
||||
<Text key={extension.config.name}>- {extension.config.name}</Text>
|
||||
))}
|
||||
</Box>
|
||||
<Box marginTop={1}>
|
||||
<RadioButtonSelect
|
||||
items={[
|
||||
{ label: 'Install all', value: 'migrate', key: 'migrate' },
|
||||
{ label: 'Skip', value: 'skip', key: 'skip' },
|
||||
]}
|
||||
onSelect={(value: string) => {
|
||||
if (value === 'migrate') {
|
||||
onMigrate();
|
||||
} else {
|
||||
onClose();
|
||||
}
|
||||
}}
|
||||
/>
|
||||
</Box>
|
||||
</Box>
|
||||
);
|
||||
}
|
||||
@@ -218,7 +218,7 @@ export const AgentSelectionStep = ({
|
||||
const renderAgentItem = (
|
||||
agent: {
|
||||
name: string;
|
||||
level: 'project' | 'user' | 'builtin' | 'session';
|
||||
level: 'project' | 'user' | 'builtin' | 'session' | 'extension';
|
||||
isBuiltin?: boolean;
|
||||
},
|
||||
index: number,
|
||||
|
||||
@@ -18,7 +18,6 @@ const mockUseUIState = vi.mocked(useUIState);
|
||||
const mockExtensions = [
|
||||
{ name: 'ext-one', version: '1.0.0', isActive: true },
|
||||
{ name: 'ext-two', version: '2.1.0', isActive: true },
|
||||
{ name: 'ext-disabled', version: '3.0.0', isActive: false },
|
||||
];
|
||||
|
||||
describe('<ExtensionsList />', () => {
|
||||
@@ -29,7 +28,6 @@ describe('<ExtensionsList />', () => {
|
||||
const mockUIState = (
|
||||
extensions: unknown[],
|
||||
extensionsUpdateState: Map<string, ExtensionUpdateState>,
|
||||
disabledExtensions: string[] = [],
|
||||
) => {
|
||||
mockUseUIState.mockReturnValue({
|
||||
commandContext: createMockCommandContext({
|
||||
@@ -37,13 +35,6 @@ describe('<ExtensionsList />', () => {
|
||||
config: {
|
||||
getExtensions: () => extensions,
|
||||
},
|
||||
settings: {
|
||||
merged: {
|
||||
extensions: {
|
||||
disabled: disabledExtensions,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}),
|
||||
extensionsUpdateState,
|
||||
@@ -58,12 +49,11 @@ describe('<ExtensionsList />', () => {
|
||||
});
|
||||
|
||||
it('should render a list of extensions with their version and status', () => {
|
||||
mockUIState(mockExtensions, new Map(), ['ext-disabled']);
|
||||
mockUIState(mockExtensions, new Map());
|
||||
const { lastFrame } = render(<ExtensionsList />);
|
||||
const output = lastFrame();
|
||||
expect(output).toContain('ext-one (v1.0.0) - active');
|
||||
expect(output).toContain('ext-two (v2.1.0) - active');
|
||||
expect(output).toContain('ext-disabled (v3.0.0) - disabled');
|
||||
});
|
||||
|
||||
it('should display "unknown state" if an extension has no update state', () => {
|
||||
|
||||
@@ -9,12 +9,10 @@ import { useUIState } from '../../contexts/UIStateContext.js';
|
||||
import { ExtensionUpdateState } from '../../state/extensions.js';
|
||||
|
||||
export const ExtensionsList = () => {
|
||||
const { commandContext, extensionsUpdateState } = useUIState();
|
||||
const allExtensions = commandContext.services.config!.getExtensions();
|
||||
const settings = commandContext.services.settings;
|
||||
const disabledExtensions = settings.merged.extensions?.disabled ?? [];
|
||||
const { extensionsUpdateState, commandContext } = useUIState();
|
||||
const extensions = commandContext.services.config?.getExtensions() || [];
|
||||
|
||||
if (allExtensions.length === 0) {
|
||||
if (extensions.length === 0) {
|
||||
return <Text>No extensions installed.</Text>;
|
||||
}
|
||||
|
||||
@@ -22,10 +20,11 @@ export const ExtensionsList = () => {
|
||||
<Box flexDirection="column" marginTop={1} marginBottom={1}>
|
||||
<Text>Installed extensions:</Text>
|
||||
<Box flexDirection="column" paddingLeft={2}>
|
||||
{allExtensions.map((ext) => {
|
||||
{extensions.map((ext) => {
|
||||
const state = extensionsUpdateState.get(ext.name);
|
||||
const isActive = !disabledExtensions.includes(ext.name);
|
||||
const isActive = ext.isActive;
|
||||
const activeString = isActive ? 'active' : 'disabled';
|
||||
const activeColor = isActive ? 'green' : 'grey';
|
||||
|
||||
let stateColor = 'gray';
|
||||
const stateText = state || 'unknown state';
|
||||
@@ -44,6 +43,7 @@ export const ExtensionsList = () => {
|
||||
break;
|
||||
case ExtensionUpdateState.UP_TO_DATE:
|
||||
case ExtensionUpdateState.NOT_UPDATABLE:
|
||||
case ExtensionUpdateState.UPDATED:
|
||||
stateColor = 'green';
|
||||
break;
|
||||
default:
|
||||
@@ -52,12 +52,22 @@ export const ExtensionsList = () => {
|
||||
}
|
||||
|
||||
return (
|
||||
<Box key={ext.name}>
|
||||
<Box key={ext.name} flexDirection="column" marginBottom={1}>
|
||||
<Text>
|
||||
<Text color="cyan">{`${ext.name} (v${ext.version})`}</Text>
|
||||
{` - ${activeString}`}
|
||||
<Text color={activeColor}>{` - ${activeString}`}</Text>
|
||||
{<Text color={stateColor}>{` (${stateText})`}</Text>}
|
||||
</Text>
|
||||
{ext.resolvedSettings && ext.resolvedSettings.length > 0 && (
|
||||
<Box flexDirection="column" paddingLeft={2}>
|
||||
<Text>settings:</Text>
|
||||
{ext.resolvedSettings.map((setting) => (
|
||||
<Text key={setting.name}>
|
||||
- {setting.name}: {setting.value}
|
||||
</Text>
|
||||
))}
|
||||
</Box>
|
||||
)}
|
||||
</Box>
|
||||
);
|
||||
})}
|
||||
|
||||
@@ -7,6 +7,7 @@
|
||||
import { createContext, useContext } from 'react';
|
||||
import { type Key } from '../hooks/useKeypress.js';
|
||||
import { type IdeIntegrationNudgeResult } from '../IdeIntegrationNudge.js';
|
||||
import { type CommandMigrationNudgeResult } from '../CommandFormatMigrationNudge.js';
|
||||
import { type FolderTrustChoice } from '../components/FolderTrustDialog.js';
|
||||
import {
|
||||
type AuthType,
|
||||
@@ -47,15 +48,13 @@ export interface UIActions {
|
||||
setShellModeActive: (value: boolean) => void;
|
||||
vimHandleInput: (key: Key) => boolean;
|
||||
handleIdePromptComplete: (result: IdeIntegrationNudgeResult) => void;
|
||||
handleCommandMigrationComplete: (result: CommandMigrationNudgeResult) => void;
|
||||
handleFolderTrustSelect: (choice: FolderTrustChoice) => void;
|
||||
setConstrainHeight: (value: boolean) => void;
|
||||
onEscapePromptChange: (show: boolean) => void;
|
||||
refreshStatic: () => void;
|
||||
handleFinalSubmit: (value: string) => void;
|
||||
handleClearScreen: () => void;
|
||||
onWorkspaceMigrationDialogOpen: () => void;
|
||||
onWorkspaceMigrationDialogClose: () => void;
|
||||
handleProQuotaChoice: (choice: 'auth' | 'continue') => void;
|
||||
// Vision switch dialog
|
||||
handleVisionSwitchSelect: (outcome: VisionSwitchOutcome) => void;
|
||||
// Welcome back dialog
|
||||
|
||||
@@ -22,21 +22,13 @@ import type {
|
||||
AuthType,
|
||||
IdeContext,
|
||||
ApprovalMode,
|
||||
UserTierId,
|
||||
IdeInfo,
|
||||
FallbackIntent,
|
||||
} from '@qwen-code/qwen-code-core';
|
||||
import type { DOMElement } from 'ink';
|
||||
import type { SessionStatsState } from '../contexts/SessionContext.js';
|
||||
import type { ExtensionUpdateState } from '../state/extensions.js';
|
||||
import type { UpdateObject } from '../utils/updateCheck.js';
|
||||
|
||||
export interface ProQuotaDialogRequest {
|
||||
failedModel: string;
|
||||
fallbackModel: string;
|
||||
resolve: (intent: FallbackIntent) => void;
|
||||
}
|
||||
|
||||
import { type UseHistoryManagerReturn } from '../hooks/useHistoryManager.js';
|
||||
import { type RestartReason } from '../hooks/useIdeTrustListener.js';
|
||||
|
||||
@@ -80,6 +72,8 @@ export interface UIState {
|
||||
suggestionsWidth: number;
|
||||
isInputActive: boolean;
|
||||
shouldShowIdePrompt: boolean;
|
||||
shouldShowCommandMigrationNudge: boolean;
|
||||
commandMigrationTomlFiles: string[];
|
||||
isFolderTrustDialogOpen: boolean;
|
||||
isTrustedFolder: boolean | undefined;
|
||||
constrainHeight: boolean;
|
||||
@@ -95,12 +89,7 @@ export interface UIState {
|
||||
historyRemountKey: number;
|
||||
messageQueue: string[];
|
||||
showAutoAcceptIndicator: ApprovalMode;
|
||||
showWorkspaceMigrationDialog: boolean;
|
||||
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
||||
workspaceExtensions: any[]; // Extension[]
|
||||
// Quota-related state
|
||||
userTier: UserTierId | undefined;
|
||||
proQuotaRequest: ProQuotaDialogRequest | null;
|
||||
currentModel: string;
|
||||
contextFileNames: string[];
|
||||
errorCount: number;
|
||||
|
||||
@@ -520,6 +520,13 @@ export const useSlashCommandProcessor = (
|
||||
true,
|
||||
);
|
||||
}
|
||||
case 'stream_messages': {
|
||||
// stream_messages is only used in ACP/Zed integration mode
|
||||
// and should not be returned in interactive UI mode
|
||||
throw new Error(
|
||||
'stream_messages result type is not supported in interactive mode',
|
||||
);
|
||||
}
|
||||
default: {
|
||||
const unhandled: never = result;
|
||||
throw new Error(
|
||||
|
||||
packages/cli/src/ui/hooks/useCommandMigration.ts (new file, 51 lines)
@@ -0,0 +1,51 @@
|
||||
/**
|
||||
* @license
|
||||
* Copyright 2025 Google LLC
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
import { useEffect, useState } from 'react';
|
||||
import { Storage } from '@qwen-code/qwen-code-core';
|
||||
import { detectTomlCommands } from '../../services/command-migration-tool.js';
|
||||
import type { LoadedSettings } from '../../config/settings.js';
|
||||
|
||||
/**
|
||||
* Hook to detect TOML command files and manage migration nudge visibility.
|
||||
* Checks all command directories: workspace, user, and global levels.
|
||||
*/
|
||||
export function useCommandMigration(
|
||||
settings: LoadedSettings,
|
||||
storage: Storage,
|
||||
) {
|
||||
const [showMigrationNudge, setShowMigrationNudge] = useState(false);
|
||||
const [tomlFiles, setTomlFiles] = useState<string[]>([]);
|
||||
|
||||
useEffect(() => {
|
||||
const checkTomlCommands = async () => {
|
||||
const allFiles: string[] = [];
|
||||
|
||||
// Check workspace commands directory (.qwen/commands)
|
||||
const workspaceCommandsDir = storage.getProjectCommandsDir();
|
||||
const workspaceFiles = await detectTomlCommands(workspaceCommandsDir);
|
||||
allFiles.push(...workspaceFiles.map((f) => `workspace: ${f}`));
|
||||
|
||||
// Check user commands directory (~/.qwen/commands)
|
||||
const userCommandsDir = Storage.getUserCommandsDir();
|
||||
const userFiles = await detectTomlCommands(userCommandsDir);
|
||||
allFiles.push(...userFiles.map((f) => `user: ${f}`));
|
||||
|
||||
if (allFiles.length > 0) {
|
||||
setTomlFiles(allFiles);
|
||||
setShowMigrationNudge(true);
|
||||
}
|
||||
};
|
||||
|
||||
checkTomlCommands();
|
||||
}, [storage]);
|
||||
|
||||
return {
showMigrationNudge,
tomlFiles,
setShowMigrationNudge,
};
}
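
For orientation only (not part of the diff): a sketch of how this hook could feed the CommandFormatMigrationNudge shown earlier in this diff; the wrapper component name is hypothetical and the imports are assumed to match those used elsewhere in the changed files.

// Hypothetical example, not from the repository.
const CommandMigrationGate = ({
  settings,
  storage,
}: {
  settings: LoadedSettings;
  storage: Storage;
}) => {
  const { showMigrationNudge, tomlFiles, setShowMigrationNudge } =
    useCommandMigration(settings, storage);
  if (!showMigrationNudge) {
    return null;
  }
  return (
    <CommandFormatMigrationNudge
      tomlFiles={tomlFiles}
      onComplete={() => setShowMigrationNudge(false)}
    />
  );
};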
|
||||
@@ -4,26 +4,21 @@
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
import { vi } from 'vitest';
|
||||
import { vi, describe, it, expect, beforeEach, afterEach } from 'vitest';
|
||||
import * as fs from 'node:fs';
|
||||
import * as os from 'node:os';
|
||||
import * as path from 'node:path';
|
||||
import {
|
||||
ExtensionStorage,
|
||||
annotateActiveExtensions,
|
||||
loadExtension,
|
||||
} from '../../config/extension.js';
|
||||
import { createExtension } from '../../test-utils/createExtension.js';
|
||||
|
||||
import { useExtensionUpdates } from './useExtensionUpdates.js';
|
||||
import { QWEN_DIR, type GeminiCLIExtension } from '@qwen-code/qwen-code-core';
|
||||
import {
|
||||
QWEN_DIR,
|
||||
type ExtensionManager,
|
||||
type Extension,
|
||||
type ExtensionUpdateInfo,
|
||||
ExtensionUpdateState,
|
||||
} from '@qwen-code/qwen-code-core';
|
||||
import { renderHook, waitFor } from '@testing-library/react';
|
||||
import { MessageType } from '../types.js';
|
||||
import { ExtensionEnablementManager } from '../../config/extensions/extensionEnablement.js';
|
||||
import {
|
||||
checkForAllExtensionUpdates,
|
||||
updateExtension,
|
||||
} from '../../config/extensions/update.js';
|
||||
import { ExtensionUpdateState } from '../state/extensions.js';
|
||||
|
||||
vi.mock('os', async (importOriginal) => {
|
||||
const mockedOs = await importOriginal<typeof os>();
|
||||
@@ -33,63 +28,85 @@ vi.mock('os', async (importOriginal) => {
|
||||
};
|
||||
});
|
||||
|
||||
vi.mock('../../config/extensions/update.js', () => ({
|
||||
checkForAllExtensionUpdates: vi.fn(),
|
||||
updateExtension: vi.fn(),
|
||||
}));
|
||||
function createMockExtension(overrides: Partial<Extension> = {}): Extension {
|
||||
return {
|
||||
id: 'test-extension-id',
|
||||
name: 'test-extension',
|
||||
version: '1.0.0',
|
||||
path: '/some/path',
|
||||
isActive: true,
|
||||
config: {
|
||||
name: 'test-extension',
|
||||
version: '1.0.0',
|
||||
},
|
||||
contextFiles: [],
|
||||
installMetadata: {
|
||||
type: 'git',
|
||||
source: 'https://some/repo',
|
||||
autoUpdate: false,
|
||||
},
|
||||
...overrides,
|
||||
};
|
||||
}
|
||||
|
||||
function createMockExtensionManager(
|
||||
extensions: Extension[],
|
||||
checkCallback?: (
|
||||
callback: (extensionName: string, state: ExtensionUpdateState) => void,
|
||||
) => Promise<void>,
|
||||
updateResult?: ExtensionUpdateInfo | undefined,
|
||||
): ExtensionManager {
|
||||
return {
|
||||
getLoadedExtensions: vi.fn(() => extensions),
|
||||
checkForAllExtensionUpdates: vi.fn(
|
||||
async (
|
||||
callback: (extensionName: string, state: ExtensionUpdateState) => void,
|
||||
) => {
|
||||
if (checkCallback) {
|
||||
await checkCallback(callback);
|
||||
}
|
||||
},
|
||||
),
|
||||
updateExtension: vi.fn(async () => updateResult),
|
||||
} as unknown as ExtensionManager;
|
||||
}
|
||||
|
||||
describe('useExtensionUpdates', () => {
|
||||
let tempHomeDir: string;
|
||||
let userExtensionsDir: string;
|
||||
|
||||
beforeEach(() => {
|
||||
tempHomeDir = fs.mkdtempSync(
|
||||
path.join(os.tmpdir(), 'gemini-cli-test-home-'),
|
||||
);
|
||||
tempHomeDir = fs.mkdtempSync(path.join(os.tmpdir(), 'qwen-cli-test-home-'));
|
||||
vi.mocked(os.homedir).mockReturnValue(tempHomeDir);
|
||||
userExtensionsDir = path.join(tempHomeDir, QWEN_DIR, 'extensions');
|
||||
fs.mkdirSync(userExtensionsDir, { recursive: true });
|
||||
vi.mocked(checkForAllExtensionUpdates).mockReset();
|
||||
vi.mocked(updateExtension).mockReset();
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
fs.rmSync(tempHomeDir, { recursive: true, force: true });
|
||||
vi.clearAllMocks();
|
||||
});
|
||||
|
||||
it('should check for updates and log a message if an update is available', async () => {
|
||||
const extensions = [
|
||||
{
|
||||
name: 'test-extension',
|
||||
const extension = createMockExtension({
|
||||
name: 'test-extension',
|
||||
installMetadata: {
|
||||
type: 'git',
|
||||
version: '1.0.0',
|
||||
path: '/some/path',
|
||||
isActive: true,
|
||||
installMetadata: {
|
||||
type: 'git',
|
||||
source: 'https://some/repo',
|
||||
autoUpdate: false,
|
||||
},
|
||||
source: 'https://some/repo',
|
||||
autoUpdate: false,
|
||||
},
|
||||
];
|
||||
});
|
||||
const addItem = vi.fn();
|
||||
const cwd = '/test/cwd';
|
||||
|
||||
vi.mocked(checkForAllExtensionUpdates).mockImplementation(
|
||||
async (extensions, dispatch) => {
|
||||
dispatch({
|
||||
type: 'SET_STATE',
|
||||
payload: {
|
||||
name: 'test-extension',
|
||||
state: ExtensionUpdateState.UPDATE_AVAILABLE,
|
||||
},
|
||||
});
|
||||
const extensionManager = createMockExtensionManager(
|
||||
[extension],
|
||||
async (callback) => {
|
||||
callback('test-extension', ExtensionUpdateState.UPDATE_AVAILABLE);
|
||||
},
|
||||
);
|
||||
|
||||
renderHook(() =>
|
||||
useExtensionUpdates(extensions as GeminiCLIExtension[], addItem, cwd),
|
||||
);
|
||||
renderHook(() => useExtensionUpdates(extensionManager, addItem, cwd));
|
||||
|
||||
await waitFor(() => {
|
||||
expect(addItem).toHaveBeenCalledWith(
|
||||
@@ -103,43 +120,32 @@ describe('useExtensionUpdates', () => {
|
||||
});
|
||||
|
||||
it('should check for updates and automatically update if autoUpdate is true', async () => {
|
||||
const extensionDir = createExtension({
|
||||
extensionsDir: userExtensionsDir,
|
||||
const extension = createMockExtension({
|
||||
name: 'test-extension',
|
||||
version: '1.0.0',
|
||||
installMetadata: {
|
||||
source: 'https://some.git/repo',
|
||||
type: 'git',
|
||||
source: 'https://some.git/repo',
|
||||
autoUpdate: true,
|
||||
},
|
||||
});
|
||||
const extension = annotateActiveExtensions(
|
||||
[loadExtension({ extensionDir, workspaceDir: tempHomeDir })!],
|
||||
tempHomeDir,
|
||||
new ExtensionEnablementManager(ExtensionStorage.getUserExtensionsDir()),
|
||||
)[0];
|
||||
|
||||
const addItem = vi.fn();
|
||||
|
||||
vi.mocked(checkForAllExtensionUpdates).mockImplementation(
|
||||
async (extensions, dispatch) => {
|
||||
dispatch({
|
||||
type: 'SET_STATE',
|
||||
payload: {
|
||||
name: 'test-extension',
|
||||
state: ExtensionUpdateState.UPDATE_AVAILABLE,
|
||||
},
|
||||
});
|
||||
const extensionManager = createMockExtensionManager(
|
||||
[extension],
|
||||
async (callback) => {
|
||||
callback('test-extension', ExtensionUpdateState.UPDATE_AVAILABLE);
|
||||
},
|
||||
{
|
||||
originalVersion: '1.0.0',
|
||||
updatedVersion: '1.1.0',
|
||||
name: 'test-extension',
|
||||
},
|
||||
);
|
||||
|
||||
vi.mocked(updateExtension).mockResolvedValue({
|
||||
originalVersion: '1.0.0',
|
||||
updatedVersion: '1.1.0',
|
||||
name: '',
|
||||
});
|
||||
|
||||
renderHook(() => useExtensionUpdates([extension], addItem, tempHomeDir));
|
||||
renderHook(() =>
|
||||
useExtensionUpdates(extensionManager, addItem, tempHomeDir),
|
||||
);
|
||||
|
||||
await waitFor(
|
||||
() => {
|
||||
@@ -156,77 +162,64 @@ describe('useExtensionUpdates', () => {
|
||||
});
|
||||
|
||||
it('should batch update notifications for multiple extensions', async () => {
|
||||
const extensionDir1 = createExtension({
|
||||
extensionsDir: userExtensionsDir,
|
||||
const extension1 = createMockExtension({
|
||||
id: 'test-extension-1-id',
|
||||
name: 'test-extension-1',
|
||||
version: '1.0.0',
|
||||
installMetadata: {
|
||||
source: 'https://some.git/repo1',
|
||||
type: 'git',
|
||||
source: 'https://some.git/repo1',
|
||||
autoUpdate: true,
|
||||
},
|
||||
});
|
||||
const extensionDir2 = createExtension({
|
||||
extensionsDir: userExtensionsDir,
|
||||
const extension2 = createMockExtension({
|
||||
id: 'test-extension-2-id',
|
||||
name: 'test-extension-2',
|
||||
version: '2.0.0',
|
||||
installMetadata: {
|
||||
source: 'https://some.git/repo2',
|
||||
type: 'git',
|
||||
source: 'https://some.git/repo2',
|
||||
autoUpdate: true,
|
||||
},
|
||||
});
|
||||
|
||||
const extensions = annotateActiveExtensions(
|
||||
[
|
||||
loadExtension({
|
||||
extensionDir: extensionDir1,
|
||||
workspaceDir: tempHomeDir,
|
||||
})!,
|
||||
loadExtension({
|
||||
extensionDir: extensionDir2,
|
||||
workspaceDir: tempHomeDir,
|
||||
})!,
|
||||
],
|
||||
tempHomeDir,
|
||||
new ExtensionEnablementManager(ExtensionStorage.getUserExtensionsDir()),
|
||||
);
|
||||
|
||||
const addItem = vi.fn();
|
||||
let updateCallCount = 0;
|
||||
|
||||
vi.mocked(checkForAllExtensionUpdates).mockImplementation(
|
||||
async (extensions, dispatch) => {
|
||||
dispatch({
|
||||
type: 'SET_STATE',
|
||||
payload: {
|
||||
const extensionManager = {
|
||||
getLoadedExtensions: vi.fn(() => [extension1, extension2]),
|
||||
checkForAllExtensionUpdates: vi.fn(
|
||||
async (
|
||||
callback: (
|
||||
extensionName: string,
|
||||
state: ExtensionUpdateState,
|
||||
) => void,
|
||||
) => {
|
||||
callback('test-extension-1', ExtensionUpdateState.UPDATE_AVAILABLE);
|
||||
callback('test-extension-2', ExtensionUpdateState.UPDATE_AVAILABLE);
|
||||
},
|
||||
),
|
||||
updateExtension: vi.fn(async () => {
|
||||
updateCallCount++;
|
||||
if (updateCallCount === 1) {
|
||||
return {
|
||||
originalVersion: '1.0.0',
|
||||
updatedVersion: '1.1.0',
|
||||
name: 'test-extension-1',
|
||||
state: ExtensionUpdateState.UPDATE_AVAILABLE,
|
||||
},
|
||||
});
|
||||
dispatch({
|
||||
type: 'SET_STATE',
|
||||
payload: {
|
||||
name: 'test-extension-2',
|
||||
state: ExtensionUpdateState.UPDATE_AVAILABLE,
|
||||
},
|
||||
});
|
||||
},
|
||||
};
|
||||
}
|
||||
return {
|
||||
originalVersion: '2.0.0',
|
||||
updatedVersion: '2.1.0',
|
||||
name: 'test-extension-2',
|
||||
};
|
||||
}),
|
||||
} as unknown as ExtensionManager;
|
||||
|
||||
renderHook(() =>
|
||||
useExtensionUpdates(extensionManager, addItem, tempHomeDir),
|
||||
);
|
||||
|
||||
vi.mocked(updateExtension)
|
||||
.mockResolvedValueOnce({
|
||||
originalVersion: '1.0.0',
|
||||
updatedVersion: '1.1.0',
|
||||
name: '',
|
||||
})
|
||||
.mockResolvedValueOnce({
|
||||
originalVersion: '2.0.0',
|
||||
updatedVersion: '2.1.0',
|
||||
name: '',
|
||||
});
|
||||
|
||||
renderHook(() => useExtensionUpdates(extensions, addItem, tempHomeDir));
|
||||
|
||||
await waitFor(
|
||||
() => {
|
||||
expect(addItem).toHaveBeenCalledTimes(2);
|
||||
@@ -250,60 +243,40 @@ describe('useExtensionUpdates', () => {
|
||||
});
|
||||
|
||||
it('should batch update notifications for multiple extensions with autoUpdate: false', async () => {
|
||||
const extensions = [
|
||||
{
|
||||
name: 'test-extension-1',
|
||||
const extension1 = createMockExtension({
|
||||
id: 'test-extension-1-id',
|
||||
name: 'test-extension-1',
|
||||
version: '1.0.0',
|
||||
installMetadata: {
|
||||
type: 'git',
|
||||
version: '1.0.0',
|
||||
path: '/some/path1',
|
||||
isActive: true,
|
||||
installMetadata: {
|
||||
type: 'git',
|
||||
source: 'https://some/repo1',
|
||||
autoUpdate: false,
|
||||
},
|
||||
source: 'https://some/repo1',
|
||||
autoUpdate: false,
|
||||
},
|
||||
{
|
||||
name: 'test-extension-2',
|
||||
});
|
||||
const extension2 = createMockExtension({
|
||||
id: 'test-extension-2-id',
|
||||
name: 'test-extension-2',
|
||||
version: '2.0.0',
|
||||
installMetadata: {
|
||||
type: 'git',
|
||||
version: '2.0.0',
|
||||
path: '/some/path2',
|
||||
isActive: true,
|
||||
installMetadata: {
|
||||
type: 'git',
|
||||
source: 'https://some/repo2',
|
||||
autoUpdate: false,
|
||||
},
|
||||
source: 'https://some/repo2',
|
||||
autoUpdate: false,
|
||||
},
|
||||
];
|
||||
});
|
||||
|
||||
const addItem = vi.fn();
|
||||
const cwd = '/test/cwd';
|
||||
|
||||
vi.mocked(checkForAllExtensionUpdates).mockImplementation(
|
||||
async (extensions, dispatch) => {
|
||||
dispatch({ type: 'BATCH_CHECK_START' });
|
||||
dispatch({
|
||||
type: 'SET_STATE',
|
||||
payload: {
|
||||
name: 'test-extension-1',
|
||||
state: ExtensionUpdateState.UPDATE_AVAILABLE,
|
||||
},
|
||||
});
|
||||
const extensionManager = createMockExtensionManager(
|
||||
[extension1, extension2],
|
||||
async (callback) => {
|
||||
callback('test-extension-1', ExtensionUpdateState.UPDATE_AVAILABLE);
|
||||
await new Promise((r) => setTimeout(r, 50));
|
||||
dispatch({
|
||||
type: 'SET_STATE',
|
||||
payload: {
|
||||
name: 'test-extension-2',
|
||||
state: ExtensionUpdateState.UPDATE_AVAILABLE,
|
||||
},
|
||||
});
|
||||
dispatch({ type: 'BATCH_CHECK_END' });
|
||||
callback('test-extension-2', ExtensionUpdateState.UPDATE_AVAILABLE);
|
||||
},
|
||||
);
|
||||
|
||||
renderHook(() =>
|
||||
useExtensionUpdates(extensions as GeminiCLIExtension[], addItem, cwd),
|
||||
);
|
||||
renderHook(() => useExtensionUpdates(extensionManager, addItem, cwd));
|
||||
|
||||
await waitFor(() => {
|
||||
expect(addItem).toHaveBeenCalledTimes(1);
|
||||
|
||||
@@ -4,7 +4,7 @@
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
import type { GeminiCLIExtension } from '@qwen-code/qwen-code-core';
|
||||
import type { ExtensionManager } from '@qwen-code/qwen-code-core';
|
||||
import { getErrorMessage } from '../../utils/errors.js';
|
||||
import {
|
||||
ExtensionUpdateState,
|
||||
@@ -14,11 +14,6 @@ import {
|
||||
import { useCallback, useEffect, useMemo, useReducer } from 'react';
|
||||
import type { UseHistoryManagerReturn } from './useHistoryManager.js';
|
||||
import { MessageType, type ConfirmationRequest } from '../types.js';
|
||||
import {
|
||||
checkForAllExtensionUpdates,
|
||||
updateExtension,
|
||||
} from '../../config/extensions/update.js';
|
||||
import { requestConsentInteractive } from '../../config/extension.js';
|
||||
import { checkExhaustive } from '../../utils/checks.js';
|
||||
|
||||
type ConfirmationRequestWrapper = {
|
||||
@@ -45,15 +40,7 @@ function confirmationRequestsReducer(
|
||||
}
|
||||
}
|
||||
|
||||
export const useExtensionUpdates = (
|
||||
extensions: GeminiCLIExtension[],
|
||||
addItem: UseHistoryManagerReturn['addItem'],
|
||||
cwd: string,
|
||||
) => {
|
||||
const [extensionsUpdateState, dispatchExtensionStateUpdate] = useReducer(
|
||||
extensionUpdatesReducer,
|
||||
initialExtensionUpdatesState,
|
||||
);
|
||||
export const useConfirmUpdateRequests = () => {
|
||||
const [
|
||||
confirmUpdateExtensionRequests,
|
||||
dispatchConfirmUpdateExtensionRequests,
|
||||
@@ -78,15 +65,52 @@ export const useExtensionUpdates = (
|
||||
},
|
||||
[dispatchConfirmUpdateExtensionRequests],
|
||||
);
|
||||
return {
|
||||
addConfirmUpdateExtensionRequest,
|
||||
confirmUpdateExtensionRequests,
|
||||
dispatchConfirmUpdateExtensionRequests,
|
||||
};
|
||||
};
|
||||
|
||||
export const useExtensionUpdates = (
|
||||
extensionManager: ExtensionManager,
|
||||
addItem: UseHistoryManagerReturn['addItem'],
|
||||
cwd: string,
|
||||
) => {
|
||||
const [extensionsUpdateState, dispatchExtensionStateUpdate] = useReducer(
|
||||
extensionUpdatesReducer,
|
||||
initialExtensionUpdatesState,
|
||||
);
|
||||
const extensions = extensionManager.getLoadedExtensions();
|
||||
|
||||
useEffect(() => {
|
||||
(async () => {
|
||||
await checkForAllExtensionUpdates(
|
||||
extensions,
|
||||
dispatchExtensionStateUpdate,
|
||||
const extensionsToCheck = extensions.filter((extension) => {
|
||||
const currentStatus = extensionsUpdateState.extensionStatuses.get(
|
||||
extension.name,
|
||||
);
|
||||
if (!currentStatus) return true;
|
||||
const currentState = currentStatus.status;
|
||||
return !currentState || currentState === ExtensionUpdateState.UNKNOWN;
|
||||
});
|
||||
if (extensionsToCheck.length === 0) return;
|
||||
dispatchExtensionStateUpdate({ type: 'BATCH_CHECK_START' });
|
||||
await extensionManager.checkForAllExtensionUpdates(
|
||||
(extensionName: string, state: ExtensionUpdateState) => {
|
||||
dispatchExtensionStateUpdate({
|
||||
type: 'SET_STATE',
|
||||
payload: { name: extensionName, state },
|
||||
});
|
||||
},
|
||||
);
|
||||
dispatchExtensionStateUpdate({ type: 'BATCH_CHECK_END' });
|
||||
})();
|
||||
}, [extensions, extensions.length, dispatchExtensionStateUpdate]);
|
||||
}, [
|
||||
extensions,
|
||||
extensionManager,
|
||||
extensionsUpdateState.extensionStatuses,
|
||||
dispatchExtensionStateUpdate,
|
||||
]);
|
||||
|
||||
useEffect(() => {
|
||||
if (extensionsUpdateState.batchChecksInProgress > 0) {
|
||||
@@ -113,17 +137,17 @@ export const useExtensionUpdates = (
|
||||
});
|
||||
|
||||
if (extension.installMetadata?.autoUpdate) {
|
||||
updateExtension(
|
||||
extension,
|
||||
cwd,
|
||||
(description) =>
|
||||
requestConsentInteractive(
|
||||
description,
|
||||
addConfirmUpdateExtensionRequest,
|
||||
),
|
||||
currentState.status,
|
||||
dispatchExtensionStateUpdate,
|
||||
)
|
||||
extensionManager
|
||||
.updateExtension(
|
||||
extension,
|
||||
currentState.status,
|
||||
(extensionName, state) => {
|
||||
dispatchExtensionStateUpdate({
|
||||
type: 'SET_STATE',
|
||||
payload: { name: extensionName, state },
|
||||
});
|
||||
},
|
||||
)
|
||||
.then((result) => {
|
||||
if (!result) return;
|
||||
addItem(
|
||||
@@ -157,13 +181,7 @@ export const useExtensionUpdates = (
|
||||
Date.now(),
|
||||
);
|
||||
}
|
||||
}, [
|
||||
extensions,
|
||||
extensionsUpdateState,
|
||||
addConfirmUpdateExtensionRequest,
|
||||
addItem,
|
||||
cwd,
|
||||
]);
|
||||
}, [extensions, extensionManager, extensionsUpdateState, addItem, cwd]);
|
||||
|
||||
const extensionsUpdateStateComputed = useMemo(() => {
|
||||
const result = new Map<string, ExtensionUpdateState>();
|
||||
@@ -180,7 +198,5 @@ export const useExtensionUpdates = (
|
||||
extensionsUpdateState: extensionsUpdateStateComputed,
|
||||
extensionsUpdateStateInternal: extensionsUpdateState.extensionStatuses,
|
||||
dispatchExtensionStateUpdate,
|
||||
confirmUpdateExtensionRequests,
|
||||
addConfirmUpdateExtensionRequest,
|
||||
};
|
||||
};
|
||||
|
||||
@@ -1323,7 +1323,7 @@ describe('useGeminiStream', () => {
|
||||
it('should call parseAndFormatApiError with the correct authType on stream initialization failure', async () => {
|
||||
// 1. Setup
|
||||
const mockError = new Error('Rate limit exceeded');
|
||||
const mockAuthType = AuthType.LOGIN_WITH_GOOGLE;
|
||||
const mockAuthType = AuthType.USE_VERTEX_AI;
|
||||
mockParseAndFormatApiError.mockClear();
|
||||
mockSendMessageStream.mockReturnValue(
|
||||
(async function* () {
|
||||
@@ -1374,9 +1374,6 @@ describe('useGeminiStream', () => {
|
||||
expect(mockParseAndFormatApiError).toHaveBeenCalledWith(
|
||||
'Rate limit exceeded',
|
||||
mockAuthType,
|
||||
undefined,
|
||||
'gemini-2.5-pro',
|
||||
'gemini-2.5-flash',
|
||||
);
|
||||
});
|
||||
});
|
||||
@@ -2493,9 +2490,6 @@ describe('useGeminiStream', () => {
|
||||
expect(mockParseAndFormatApiError).toHaveBeenCalledWith(
|
||||
{ message: 'Test error' },
|
||||
expect.any(String),
|
||||
undefined,
|
||||
'gemini-2.5-pro',
|
||||
'gemini-2.5-flash',
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
@@ -26,7 +26,6 @@ import {
|
||||
GitService,
|
||||
UnauthorizedError,
|
||||
UserPromptEvent,
|
||||
DEFAULT_GEMINI_FLASH_MODEL,
|
||||
logConversationFinishedEvent,
|
||||
ConversationFinishedEvent,
|
||||
ApprovalMode,
|
||||
@@ -527,10 +526,15 @@ export const useGeminiStream = (
|
||||
return currentThoughtBuffer;
|
||||
}
|
||||
|
||||
const newThoughtBuffer = currentThoughtBuffer + thoughtText;
|
||||
let newThoughtBuffer = currentThoughtBuffer + thoughtText;
|
||||
|
||||
const pendingType = pendingHistoryItemRef.current?.type;
|
||||
const isPendingThought =
|
||||
pendingType === 'gemini_thought' ||
|
||||
pendingType === 'gemini_thought_content';
|
||||
|
||||
// If we're not already showing a thought, start a new one
|
||||
if (pendingHistoryItemRef.current?.type !== 'gemini_thought') {
|
||||
if (!isPendingThought) {
|
||||
// If there's a pending non-thought item, finalize it first
|
||||
if (pendingHistoryItemRef.current) {
|
||||
addItem(pendingHistoryItemRef.current, userMessageTimestamp);
|
||||
@@ -538,11 +542,37 @@ export const useGeminiStream = (
|
||||
setPendingHistoryItem({ type: 'gemini_thought', text: '' });
|
||||
}
|
||||
|
||||
// Update the existing thought message with accumulated content
|
||||
setPendingHistoryItem({
|
||||
type: 'gemini_thought',
|
||||
text: newThoughtBuffer,
|
||||
});
|
||||
// Split large thought messages for better rendering performance (same rationale
|
||||
// as regular content streaming). This helps avoid terminal flicker caused by
|
||||
// constantly re-rendering an ever-growing "pending" block.
|
||||
const splitPoint = findLastSafeSplitPoint(newThoughtBuffer);
|
||||
const nextPendingType: 'gemini_thought' | 'gemini_thought_content' =
|
||||
isPendingThought && pendingType === 'gemini_thought_content'
|
||||
? 'gemini_thought_content'
|
||||
: 'gemini_thought';
|
||||
|
||||
if (splitPoint === newThoughtBuffer.length) {
|
||||
// Update the existing thought message with accumulated content
|
||||
setPendingHistoryItem({
|
||||
type: nextPendingType,
|
||||
text: newThoughtBuffer,
|
||||
});
|
||||
} else {
|
||||
const beforeText = newThoughtBuffer.substring(0, splitPoint);
|
||||
const afterText = newThoughtBuffer.substring(splitPoint);
|
||||
addItem(
|
||||
{
|
||||
type: nextPendingType,
|
||||
text: beforeText,
|
||||
},
|
||||
userMessageTimestamp,
|
||||
);
|
||||
setPendingHistoryItem({
|
||||
type: 'gemini_thought_content',
|
||||
text: afterText,
|
||||
});
|
||||
newThoughtBuffer = afterText;
|
||||
}
|
||||
|
||||
// Also update the thought state for the loading indicator
|
||||
mergeThought(eventValue);
|
||||
@@ -600,9 +630,6 @@ export const useGeminiStream = (
|
||||
text: parseAndFormatApiError(
|
||||
eventValue.error,
|
||||
config.getContentGeneratorConfig()?.authType,
|
||||
undefined,
|
||||
config.getModel(),
|
||||
DEFAULT_GEMINI_FLASH_MODEL,
|
||||
),
|
||||
},
|
||||
userMessageTimestamp,
|
||||
@@ -654,6 +681,9 @@ export const useGeminiStream = (
|
||||
'Response stopped due to image safety violations.',
|
||||
[FinishReason.UNEXPECTED_TOOL_CALL]:
|
||||
'Response stopped due to unexpected tool call.',
|
||||
[FinishReason.IMAGE_PROHIBITED_CONTENT]:
|
||||
'Response stopped due to image prohibited content.',
|
||||
[FinishReason.NO_IMAGE]: 'Response stopped due to no image.',
|
||||
};
|
||||
|
||||
const message = finishReasonMessages[finishReason];
|
||||
@@ -770,11 +800,17 @@ export const useGeminiStream = (
|
||||
for await (const event of stream) {
|
||||
switch (event.type) {
|
||||
case ServerGeminiEventType.Thought:
|
||||
thoughtBuffer = handleThoughtEvent(
|
||||
event.value,
|
||||
thoughtBuffer,
|
||||
userMessageTimestamp,
|
||||
);
|
||||
// If the thought has a subject, it's a discrete status update rather than
|
||||
// a streamed textual thought, so we update the thought state directly.
|
||||
if (event.value.subject) {
|
||||
setThought(event.value);
|
||||
} else {
|
||||
thoughtBuffer = handleThoughtEvent(
|
||||
event.value,
|
||||
thoughtBuffer,
|
||||
userMessageTimestamp,
|
||||
);
|
||||
}
|
||||
break;
|
||||
case ServerGeminiEventType.Content:
|
||||
geminiMessageBuffer = handleContentEvent(
|
||||
@@ -845,6 +881,7 @@ export const useGeminiStream = (
|
||||
handleMaxSessionTurnsEvent,
|
||||
handleSessionTokenLimitExceededEvent,
|
||||
handleCitationEvent,
|
||||
setThought,
|
||||
],
|
||||
);
|
||||
|
||||
@@ -987,9 +1024,6 @@ export const useGeminiStream = (
|
||||
text: parseAndFormatApiError(
|
||||
getErrorMessage(error) || 'Unknown error',
|
||||
config.getContentGeneratorConfig()?.authType,
|
||||
undefined,
|
||||
config.getModel(),
|
||||
DEFAULT_GEMINI_FLASH_MODEL,
|
||||
),
|
||||
},
|
||||
userMessageTimestamp,
|
||||
|
||||
@@ -8,19 +8,22 @@ import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
|
||||
import { renderHook, act } from '@testing-library/react';
|
||||
import { useLoadingIndicator } from './useLoadingIndicator.js';
|
||||
import { StreamingState } from '../types.js';
|
||||
import {
|
||||
WITTY_LOADING_PHRASES,
|
||||
PHRASE_CHANGE_INTERVAL_MS,
|
||||
} from './usePhraseCycler.js';
|
||||
import { PHRASE_CHANGE_INTERVAL_MS } from './usePhraseCycler.js';
|
||||
import * as i18n from '../../i18n/index.js';
|
||||
|
||||
const MOCK_WITTY_PHRASES = ['Phrase 1', 'Phrase 2', 'Phrase 3'];
|
||||
|
||||
describe('useLoadingIndicator', () => {
|
||||
beforeEach(() => {
|
||||
vi.useFakeTimers();
|
||||
vi.spyOn(i18n, 'ta').mockReturnValue(MOCK_WITTY_PHRASES);
|
||||
vi.spyOn(i18n, 't').mockImplementation((key) => key);
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
vi.useRealTimers(); // Restore real timers after each test
|
||||
act(() => vi.runOnlyPendingTimers);
|
||||
vi.restoreAllMocks();
|
||||
});
|
||||
|
||||
it('should initialize with default values when Idle', () => {
|
||||
@@ -28,9 +31,7 @@ describe('useLoadingIndicator', () => {
|
||||
useLoadingIndicator(StreamingState.Idle),
|
||||
);
|
||||
expect(result.current.elapsedTime).toBe(0);
|
||||
expect(WITTY_LOADING_PHRASES).toContain(
|
||||
result.current.currentLoadingPhrase,
|
||||
);
|
||||
expect(MOCK_WITTY_PHRASES).toContain(result.current.currentLoadingPhrase);
|
||||
});
|
||||
|
||||
it('should reflect values when Responding', async () => {
|
||||
@@ -40,18 +41,14 @@ describe('useLoadingIndicator', () => {
|
||||
|
||||
// Initial state before timers advance
|
||||
expect(result.current.elapsedTime).toBe(0);
|
||||
expect(WITTY_LOADING_PHRASES).toContain(
|
||||
result.current.currentLoadingPhrase,
|
||||
);
|
||||
expect(MOCK_WITTY_PHRASES).toContain(result.current.currentLoadingPhrase);
|
||||
|
||||
await act(async () => {
|
||||
await vi.advanceTimersByTimeAsync(PHRASE_CHANGE_INTERVAL_MS + 1);
|
||||
});
|
||||
|
||||
// Phrase should cycle if PHRASE_CHANGE_INTERVAL_MS has passed
|
||||
expect(WITTY_LOADING_PHRASES).toContain(
|
||||
result.current.currentLoadingPhrase,
|
||||
);
|
||||
expect(MOCK_WITTY_PHRASES).toContain(result.current.currentLoadingPhrase);
|
||||
});
|
||||
|
||||
it('should show waiting phrase and retain elapsedTime when WaitingForConfirmation', async () => {
|
||||
@@ -104,9 +101,7 @@ describe('useLoadingIndicator', () => {
|
||||
rerender({ streamingState: StreamingState.Responding });
|
||||
});
|
||||
expect(result.current.elapsedTime).toBe(0); // Should reset
|
||||
expect(WITTY_LOADING_PHRASES).toContain(
|
||||
result.current.currentLoadingPhrase,
|
||||
);
|
||||
expect(MOCK_WITTY_PHRASES).toContain(result.current.currentLoadingPhrase);
|
||||
|
||||
await act(async () => {
|
||||
await vi.advanceTimersByTimeAsync(1000);
|
||||
@@ -130,9 +125,7 @@ describe('useLoadingIndicator', () => {
|
||||
});
|
||||
|
||||
expect(result.current.elapsedTime).toBe(0);
|
||||
expect(WITTY_LOADING_PHRASES).toContain(
|
||||
result.current.currentLoadingPhrase,
|
||||
);
|
||||
expect(MOCK_WITTY_PHRASES).toContain(result.current.currentLoadingPhrase);
|
||||
|
||||
// Timer should not advance
|
||||
await act(async () => {
|
||||
|
||||
@@ -8,13 +8,17 @@ import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
|
||||
import { renderHook, act } from '@testing-library/react';
|
||||
import {
|
||||
usePhraseCycler,
|
||||
WITTY_LOADING_PHRASES,
|
||||
PHRASE_CHANGE_INTERVAL_MS,
|
||||
} from './usePhraseCycler.js';
|
||||
import * as i18n from '../../i18n/index.js';
|
||||
|
||||
const MOCK_WITTY_PHRASES = ['Phrase 1', 'Phrase 2', 'Phrase 3'];
|
||||
|
||||
describe('usePhraseCycler', () => {
|
||||
beforeEach(() => {
|
||||
vi.useFakeTimers();
|
||||
vi.spyOn(i18n, 'ta').mockReturnValue(MOCK_WITTY_PHRASES);
|
||||
vi.spyOn(i18n, 't').mockImplementation((key) => key);
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
@@ -23,7 +27,7 @@ describe('usePhraseCycler', () => {
|
||||
|
||||
it('should initialize with a witty phrase when not active and not waiting', () => {
|
||||
const { result } = renderHook(() => usePhraseCycler(false, false));
|
||||
expect(WITTY_LOADING_PHRASES).toContain(result.current);
|
||||
expect(MOCK_WITTY_PHRASES).toContain(result.current);
|
||||
});
|
||||
|
||||
it('should show "Waiting for user confirmation..." when isWaiting is true', () => {
|
||||
@@ -47,35 +51,30 @@ describe('usePhraseCycler', () => {
|
||||
it('should cycle through witty phrases when isActive is true and not waiting', () => {
|
||||
const { result } = renderHook(() => usePhraseCycler(true, false));
|
||||
// Initial phrase should be one of the witty phrases
|
||||
expect(WITTY_LOADING_PHRASES).toContain(result.current);
|
||||
expect(MOCK_WITTY_PHRASES).toContain(result.current);
|
||||
const _initialPhrase = result.current;
|
||||
|
||||
act(() => {
|
||||
vi.advanceTimersByTime(PHRASE_CHANGE_INTERVAL_MS);
|
||||
});
|
||||
// Phrase should change and be one of the witty phrases
|
||||
expect(WITTY_LOADING_PHRASES).toContain(result.current);
|
||||
expect(MOCK_WITTY_PHRASES).toContain(result.current);
|
||||
|
||||
const _secondPhrase = result.current;
|
||||
act(() => {
|
||||
vi.advanceTimersByTime(PHRASE_CHANGE_INTERVAL_MS);
|
||||
});
|
||||
expect(WITTY_LOADING_PHRASES).toContain(result.current);
|
||||
expect(MOCK_WITTY_PHRASES).toContain(result.current);
|
||||
});
|
||||
|
||||
it('should reset to a witty phrase when isActive becomes true after being false (and not waiting)', () => {
|
||||
// Ensure there are at least two phrases for this test to be meaningful.
|
||||
if (WITTY_LOADING_PHRASES.length < 2) {
|
||||
return;
|
||||
}
|
||||
|
||||
// Mock Math.random to make the test deterministic.
|
||||
let callCount = 0;
|
||||
vi.spyOn(Math, 'random').mockImplementation(() => {
|
||||
// Cycle through 0, 1, 0, 1, ...
|
||||
const val = callCount % 2;
|
||||
callCount++;
|
||||
return val / WITTY_LOADING_PHRASES.length;
|
||||
return val / MOCK_WITTY_PHRASES.length;
|
||||
});
|
||||
|
||||
const { result, rerender } = renderHook(
|
||||
@@ -86,9 +85,9 @@ describe('usePhraseCycler', () => {
|
||||
// Activate
|
||||
rerender({ isActive: true, isWaiting: false });
|
||||
const firstActivePhrase = result.current;
|
||||
expect(WITTY_LOADING_PHRASES).toContain(firstActivePhrase);
|
||||
expect(MOCK_WITTY_PHRASES).toContain(firstActivePhrase);
|
||||
// With our mock, this should be the first phrase.
|
||||
expect(firstActivePhrase).toBe(WITTY_LOADING_PHRASES[0]);
|
||||
expect(firstActivePhrase).toBe(MOCK_WITTY_PHRASES[0]);
|
||||
|
||||
act(() => {
|
||||
vi.advanceTimersByTime(PHRASE_CHANGE_INTERVAL_MS);
|
||||
@@ -96,18 +95,18 @@ describe('usePhraseCycler', () => {
|
||||
|
||||
// Phrase should change to the second phrase.
|
||||
expect(result.current).not.toBe(firstActivePhrase);
|
||||
expect(result.current).toBe(WITTY_LOADING_PHRASES[1]);
|
||||
expect(result.current).toBe(MOCK_WITTY_PHRASES[1]);
|
||||
|
||||
// Set to inactive - should reset to the default initial phrase
|
||||
rerender({ isActive: false, isWaiting: false });
|
||||
expect(WITTY_LOADING_PHRASES).toContain(result.current);
|
||||
expect(MOCK_WITTY_PHRASES).toContain(result.current);
|
||||
|
||||
// Set back to active - should pick a random witty phrase (which our mock controls)
|
||||
act(() => {
|
||||
rerender({ isActive: true, isWaiting: false });
|
||||
});
|
||||
// The random mock will now return 0, so it should be the first phrase again.
|
||||
expect(result.current).toBe(WITTY_LOADING_PHRASES[0]);
|
||||
expect(result.current).toBe(MOCK_WITTY_PHRASES[0]);
|
||||
});
|
||||
|
||||
it('should clear phrase interval on unmount when active', () => {
|
||||
@@ -148,7 +147,7 @@ describe('usePhraseCycler', () => {
|
||||
|
||||
rerender({ isActive: true, isWaiting: false, customPhrases: undefined });
|
||||
|
||||
expect(WITTY_LOADING_PHRASES).toContain(result.current);
|
||||
expect(MOCK_WITTY_PHRASES).toContain(result.current);
|
||||
});
|
||||
|
||||
it('should fall back to witty phrases if custom phrases are an empty array', () => {
|
||||
@@ -164,7 +163,7 @@ describe('usePhraseCycler', () => {
|
||||
},
|
||||
);
|
||||
|
||||
expect(WITTY_LOADING_PHRASES).toContain(result.current);
|
||||
expect(MOCK_WITTY_PHRASES).toContain(result.current);
|
||||
});
|
||||
|
||||
it('should reset to a witty phrase when transitioning from waiting to active', () => {
|
||||
@@ -174,16 +173,13 @@ describe('usePhraseCycler', () => {
|
||||
);
|
||||
|
||||
const _initialPhrase = result.current;
|
||||
expect(WITTY_LOADING_PHRASES).toContain(_initialPhrase);
|
||||
expect(MOCK_WITTY_PHRASES).toContain(_initialPhrase);
|
||||
|
||||
// Cycle to a different phrase (potentially)
|
||||
act(() => {
|
||||
vi.advanceTimersByTime(PHRASE_CHANGE_INTERVAL_MS);
|
||||
});
|
||||
if (WITTY_LOADING_PHRASES.length > 1) {
|
||||
// This check is probabilistic with random selection
|
||||
}
|
||||
expect(WITTY_LOADING_PHRASES).toContain(result.current);
|
||||
expect(MOCK_WITTY_PHRASES).toContain(result.current);
|
||||
|
||||
// Go to waiting state
|
||||
rerender({ isActive: false, isWaiting: true });
|
||||
@@ -191,6 +187,6 @@ describe('usePhraseCycler', () => {
|
||||
|
||||
// Go back to active cycling - should pick a random witty phrase
|
||||
rerender({ isActive: true, isWaiting: false });
|
||||
expect(WITTY_LOADING_PHRASES).toContain(result.current);
|
||||
expect(MOCK_WITTY_PHRASES).toContain(result.current);
|
||||
});
|
||||
});
|
||||
|
||||
Some files were not shown because too many files have changed in this diff.