Compare commits

1 commit

Author: 奕桁
Commit 5a328e4586: fix release workflow (2025-08-01 17:08:30 +08:00)

346 changed files with 21924 additions and 36651 deletions


@@ -5,19 +5,19 @@ steps:
entrypoint: 'npm'
args: ['install']
# Step 2: Authenticate for Docker (so we can push images to the artifact registry)
# Step 4: Authenticate for Docker (so we can push images to the artifact registry)
- name: 'us-west1-docker.pkg.dev/gemini-code-dev/gemini-code-containers/gemini-code-builder'
id: 'Authenticate docker'
entrypoint: 'npm'
args: ['run', 'auth']
# Step 3: Build workspace packages
# Step 5: Build workspace packages
- name: 'us-west1-docker.pkg.dev/gemini-code-dev/gemini-code-containers/gemini-code-builder'
id: 'Build packages'
entrypoint: 'npm'
args: ['run', 'build:packages']
# Step 4: Determine Docker Image Tag
# Step 6: Determine Docker Image Tag
- name: 'us-west1-docker.pkg.dev/gemini-code-dev/gemini-code-containers/gemini-code-builder'
id: 'Determine Docker Image Tag'
entrypoint: 'bash'
@@ -39,7 +39,7 @@ steps:
echo "Determined image tag: $$FINAL_TAG"
echo "$$FINAL_TAG" > /workspace/image_tag.txt
# Step 5: Build sandbox container image
# Step 7: Build sandbox container image
- name: 'us-west1-docker.pkg.dev/gemini-code-dev/gemini-code-containers/gemini-code-builder'
id: 'Build sandbox Docker image'
entrypoint: 'bash'
@@ -48,7 +48,7 @@ steps:
- |
export GEMINI_SANDBOX_IMAGE_TAG=$$(cat /workspace/image_tag.txt)
echo "Using Docker image tag for build: $$GEMINI_SANDBOX_IMAGE_TAG"
npm run build:sandbox -- --output-file /workspace/final_image_uri.txt
npm run build:sandbox
env:
- 'GEMINI_SANDBOX=$_CONTAINER_TOOL'


@@ -1,5 +1,5 @@
name: Bug Report
description: Report a bug to help us improve Qwen Code
description: Report a bug to help us improve Gemini CLI
labels: ['kind/bug', 'status/need-triage']
body:
- type: markdown
@@ -8,7 +8,7 @@ body:
> [!IMPORTANT]
> Thanks for taking the time to fill out this bug report!
>
> Please search **[existing issues](https://github.com/QwenLM/qwen-code/issues)** to see if an issue already exists for the bug you encountered.
> Please search **[existing issues](https://github.com/google-gemini/gemini-cli/issues)** to see if an issue already exists for the bug you encountered.
- type: textarea
id: problem
@@ -29,12 +29,12 @@ body:
id: info
attributes:
label: Client information
description: Please paste the full text from the `/about` command run from Qwen Code. Also include which platform (macOS, Windows, Linux).
description: Please paste the full text from the `/about` command run from Gemini CLI. Also include which platform (MacOS, Windows, Linux).
value: |
<details>
```console
$ qwen /about
$ gemini /about
# paste output here
```
@@ -46,7 +46,7 @@ body:
id: login-info
attributes:
label: Login information
description: Describe how you are logging in (e.g., API Config).
description: Describe how you are logging in (e.g., Google Account, API key).
- type: textarea
id: additional-context


@@ -8,7 +8,7 @@ body:
> [!IMPORTANT]
> Thanks for taking the time to suggest an enhancement!
>
> Please search **[existing issues](https://github.com/QwenLM/qwen-code/issues)** to see if a similar feature has already been requested.
> Please search **[existing issues](https://github.com/google-gemini/gemini-cli/issues)** to see if a similar feature has already been requested.
- type: textarea
id: feature


@@ -17,9 +17,6 @@ inputs:
node_version:
description: 'Node.js version for context in messages'
required: true
os:
description: 'The os for context in messages'
required: true
github_token:
description: 'GitHub token for posting comments'
required: true
@@ -94,7 +91,7 @@ runs:
echo "</details>" >> "$comment_file"
echo "" >> "$comment_file"
echo "_For detailed HTML reports, please see the 'coverage-reports-${{ inputs.node_version }}-${{ inputs.os }}' artifact from the main CI run._" >> "$comment_file"
echo "_For detailed HTML reports, please see the 'coverage-reports-${{ inputs.node_version }}' artifact from the main CI run._" >> "$comment_file"
- name: Post Coverage Comment
uses: thollander/actions-comment-pull-request@v3


@@ -4,7 +4,7 @@
## Dive Deeper
<!-- more thoughts and in-depth discussion here -->
<!-- more thoughts and in depth discussion here -->
## Reviewer Test Plan


@@ -24,7 +24,7 @@ process_pr() {
ISSUE_NUMBER=$(echo "$PR_BODY" | grep -oE '#[0-9]+' | head -1 | sed 's/#//' 2>/dev/null || echo "")
fi
# Pattern 2: Closes/Fixes/Resolves patterns (case-insensitive)
# Pattern 2: Closes/Fixes/Resolves patterns (case insensitive)
if [ -z "$ISSUE_NUMBER" ]; then
ISSUE_NUMBER=$(echo "$PR_BODY" | grep -iE '(closes?|fixes?|resolves?) #[0-9]+' | grep -oE '#[0-9]+' | head -1 | sed 's/#//' 2>/dev/null || echo "")
fi
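For illustration, the second extraction pattern above behaves like this on a hypothetical PR body (the body text is invented for the example):

```bash
# Hypothetical PR body, used only to exercise the extraction pattern above.
PR_BODY='Refactor the release workflow.

Fixes #1234 and also mentions #999.'

# Pattern 2: Closes/Fixes/Resolves patterns (case-insensitive)
ISSUE_NUMBER=$(echo "$PR_BODY" \
  | grep -iE '(closes?|fixes?|resolves?) #[0-9]+' \
  | grep -oE '#[0-9]+' | head -1 | sed 's/#//' 2>/dev/null || echo "")

echo "$ISSUE_NUMBER"   # prints 1234
```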


@@ -1,6 +1,6 @@
# .github/workflows/ci.yml
name: Qwen Code CI
name: Gemini CLI CI
on:
push:
@@ -10,19 +10,22 @@ on:
merge_group:
jobs:
lint:
name: Lint
build:
name: Build and Lint
runs-on: ubuntu-latest
permissions:
contents: read # For checkout
strategy:
matrix:
node-version: [20.x, 22.x, 24.x]
steps:
- name: Checkout repository
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
- name: Set up Node.js
- name: Set up Node.js ${{ matrix.node-version }}
uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # v4
with:
node-version-file: '.nvmrc'
node-version: ${{ matrix.node-version }}
cache: 'npm'
- name: Install dependencies
@@ -42,18 +45,24 @@ jobs:
- name: Run type check
run: npm run typecheck
- name: Upload build artifacts
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4
with:
name: build-artifacts-${{ matrix.node-version }}
path: |
packages/*/dist
package-lock.json # Only upload dist and lockfile
test:
name: Test
runs-on: ${{ matrix.os }}
needs: lint
runs-on: ubuntu-latest
needs: build # This job depends on the 'build' job
permissions:
contents: read
checks: write
pull-requests: write
strategy:
matrix:
os: [ubuntu-latest, windows-latest, macos-latest]
node-version: [20.x, 22.x, 24.x]
node-version: [20.x, 22.x, 24.x] # Should match the build job's matrix
steps:
- name: Checkout repository
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
@@ -64,20 +73,26 @@ jobs:
node-version: ${{ matrix.node-version }}
cache: 'npm'
- name: Build project
run: npm run build
- name: Download build artifacts
uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4
with:
name: build-artifacts-${{ matrix.node-version }}
path: . # Download to the root, this will include package-lock.json and packages/*/dist
# Restore/create package structure for dist folders if necessary.
# The download-artifact action with path: . should place them correctly if the
# upload paths were relative to the workspace root.
# Example: if uploaded `packages/cli/dist`, it will be at `./packages/cli/dist`.
- name: Install dependencies for testing
run: npm ci # Install fresh dependencies using the downloaded package-lock.json
- name: Run tests and generate reports
run: npm run test:ci
env:
NO_COLOR: true
run: NO_COLOR=true npm run test:ci
- name: Publish Test Report (for non-forks)
if: always() && (github.event.pull_request.head.repo.full_name == github.repository)
uses: dorny/test-reporter@dc3a92680fcc15842eef52e8c4606ea7ce6bd3f3 # v2
uses: dorny/test-reporter@890a17cecf52a379fc869ab770a71657660be727 # v2
with:
name: Test Results (Node ${{ matrix.node-version }})
path: packages/*/junit.xml
@@ -88,14 +103,14 @@ jobs:
if: always() && (github.event_name == 'pull_request' && github.event.pull_request.head.repo.full_name != github.repository)
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4
with:
name: test-results-fork-${{ matrix.node-version }}-${{ matrix.os }}
name: test-results-fork-${{ matrix.node-version }}
path: packages/*/junit.xml
- name: Upload coverage reports
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4
if: always()
with:
name: coverage-reports-${{ matrix.node-version }}-${{ matrix.os }}
name: coverage-reports-${{ matrix.node-version }}
path: packages/*/coverage
post_coverage_comment:
@@ -109,9 +124,7 @@ jobs:
pull-requests: write # For commenting
strategy:
matrix:
# Reduce noise by only posting the comment once
os: [ubuntu-latest]
node-version: [22.x]
node-version: [22.x] # Reduce noise by only posting the comment once
steps:
- name: Checkout repository
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
@@ -119,7 +132,7 @@ jobs:
- name: Download coverage reports artifact
uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4
with:
name: coverage-reports-${{ matrix.node-version }}-${{ matrix.os }}
name: coverage-reports-${{ matrix.node-version }}
path: coverage_artifact # Download to a specific directory
- name: Post Coverage Comment using Composite Action
@@ -130,24 +143,4 @@ jobs:
cli_full_text_summary_file: coverage_artifact/cli/coverage/full-text-summary.txt
core_full_text_summary_file: coverage_artifact/core/coverage/full-text-summary.txt
node_version: ${{ matrix.node-version }}
os: ${{ matrix.os }}
github_token: ${{ secrets.GITHUB_TOKEN }}
codeql:
name: CodeQL
runs-on: ubuntu-latest
permissions:
actions: read
contents: read
security-events: write
steps:
- name: Checkout
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
- name: Initialize CodeQL
uses: github/codeql-action/init@181d5eefc20863364f96762470ba6f862bdef56b # v3
with:
languages: javascript
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@181d5eefc20863364f96762470ba6f862bdef56b # v3


@@ -8,21 +8,20 @@ on:
merge_group:
jobs:
e2e-test-linux:
name: E2E Test (Linux) - ${{ matrix.sandbox }}
e2e-test:
name: E2E Test - ${{ matrix.sandbox }}
runs-on: ubuntu-latest
strategy:
matrix:
sandbox: [sandbox:none, sandbox:docker]
node-version: [20.x, 22.x, 24.x]
steps:
- name: Checkout repository
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
- name: Set up Node.js ${{ matrix.node-version }}
- name: Set up Node.js
uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # v4
with:
node-version: ${{ matrix.node-version }}
node-version: 20.x
cache: 'npm'
- name: Install dependencies
@@ -49,29 +48,3 @@ jobs:
OPENAI_BASE_URL: ${{ secrets.OPENAI_BASE_URL }}
OPENAI_MODEL: ${{ secrets.OPENAI_MODEL }}
run: npm run test:integration:${{ matrix.sandbox }} -- --verbose --keep-output
e2e-test-macos:
name: E2E Test - macOS
runs-on: macos-latest
steps:
- name: Checkout repository
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
- name: Set up Node.js
uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # v4
with:
node-version: 20.x
cache: 'npm'
- name: Install dependencies
run: npm ci
- name: Build project
run: npm run build
- name: Run E2E tests
env:
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
OPENAI_BASE_URL: ${{ secrets.OPENAI_BASE_URL }}
OPENAI_MODEL: ${{ secrets.OPENAI_MODEL }}
run: npm run test:e2e
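For reference, the sandbox matrix entries above expand to commands along these lines when run by hand; the secret-backed values are placeholders here:

```bash
# Hypothetical local equivalent of the two sandbox matrix entries in the job above.
export OPENAI_API_KEY="<your-key>"
export OPENAI_BASE_URL="<your-base-url>"
export OPENAI_MODEL="<your-model>"

npm run test:integration:sandbox:none -- --verbose --keep-output
npm run test:integration:sandbox:docker -- --verbose --keep-output
```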


@@ -1,4 +1,4 @@
name: Qwen Automated Issue Triage
name: Gemini Automated Issue Triage
on:
issues:
@@ -7,7 +7,7 @@ on:
jobs:
triage-issue:
timeout-minutes: 5
if: ${{ github.repository == 'QwenLM/qwen-code' }}
if: ${{ github.repository == 'google-gemini/gemini-cli' }}
permissions:
issues: write
contents: read
@@ -17,13 +17,22 @@ jobs:
cancel-in-progress: true
runs-on: ubuntu-latest
steps:
- name: Run Qwen Issue Triage
uses: QwenLM/qwen-code-action@5fd6818d04d64e87d255ee4d5f77995e32fbf4c2
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Generate GitHub App Token
id: generate_token
uses: actions/create-github-app-token@df432ceedc7162793a195dd1713ff69aefc7379e # v2
with:
version: 0.0.4
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
app-id: ${{ secrets.APP_ID }}
private-key: ${{ secrets.PRIVATE_KEY }}
- name: Run Gemini Issue Triage
uses: google-gemini/gemini-cli-action@df3f890f003d28c60a2a09d2c29e0126e4d1e2ff
env:
GITHUB_TOKEN: ${{ steps.generate_token.outputs.token }}
with:
version: 0.1.8-rc.0
GEMINI_API_KEY: ${{ secrets.GEMINI_API_KEY }}
OTLP_GCP_WIF_PROVIDER: ${{ secrets.OTLP_GCP_WIF_PROVIDER }}
OTLP_GOOGLE_CLOUD_PROJECT: ${{ secrets.OTLP_GOOGLE_CLOUD_PROJECT }}
settings_json: |
{
"coreTools": [
@@ -31,6 +40,10 @@ jobs:
"run_shell_command(gh issue edit)",
"run_shell_command(gh issue list)"
],
"telemetry": {
"enabled": true,
"target": "gcp"
},
"sandbox": false
}
prompt: |


@@ -1,4 +1,4 @@
name: Qwen Scheduled Issue Triage
name: Gemini Scheduled Issue Triage
on:
schedule:
@@ -8,17 +8,24 @@ on:
jobs:
triage-issues:
timeout-minutes: 10
if: ${{ github.repository == 'QwenLM/qwen-code' }}
if: ${{ github.repository == 'google-gemini/gemini-cli' }}
runs-on: ubuntu-latest
permissions:
contents: read
id-token: write
issues: write
steps:
- name: Generate GitHub App Token
id: generate_token
uses: actions/create-github-app-token@df432ceedc7162793a195dd1713ff69aefc7379e # v2
with:
app-id: ${{ secrets.APP_ID }}
private-key: ${{ secrets.PRIVATE_KEY }}
- name: Find untriaged issues
id: find_issues
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
GITHUB_TOKEN: ${{ steps.generate_token.outputs.token }}
run: |
echo "🔍 Finding issues without labels..."
NO_LABEL_ISSUES=$(gh issue list --repo ${{ github.repository }} --search "is:open is:issue no:label" --json number,title,body)
@@ -34,18 +41,18 @@ jobs:
echo "✅ Found $(echo "$ISSUES" | jq 'length') issues to triage! 🎯"
- name: Run Qwen Issue Triage
- name: Run Gemini Issue Triage
if: steps.find_issues.outputs.issues_to_triage != '[]'
uses: QwenLM/qwen-code-action@5fd6818d04d64e87d255ee4d5f77995e32fbf4c2
uses: google-gemini/gemini-cli-action@df3f890f003d28c60a2a09d2c29e0126e4d1e2ff
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
GITHUB_TOKEN: ${{ steps.generate_token.outputs.token }}
ISSUES_TO_TRIAGE: ${{ steps.find_issues.outputs.issues_to_triage }}
REPOSITORY: ${{ github.repository }}
with:
version: 0.0.4
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
OPENAI_BASE_URL: ${{ secrets.OPENAI_BASE_URL }}
OPENAI_MODEL: ${{ secrets.OPENAI_MODEL }}
version: 0.1.8-rc.0
GEMINI_API_KEY: ${{ secrets.GEMINI_API_KEY }}
OTLP_GCP_WIF_PROVIDER: ${{ secrets.OTLP_GCP_WIF_PROVIDER }}
OTLP_GOOGLE_CLOUD_PROJECT: ${{ secrets.OTLP_GOOGLE_CLOUD_PROJECT }}
settings_json: |
{
"coreTools": [
@@ -54,6 +61,10 @@ jobs:
"run_shell_command(gh issue edit)",
"run_shell_command(gh issue list)"
],
"telemetry": {
"enabled": true,
"target": "gcp"
},
"sandbox": false
}
prompt: |


@@ -1,4 +1,4 @@
name: Qwen Scheduled PR Triage 🚀
name: Gemini Scheduled PR Triage 🚀
on:
schedule:
@@ -8,7 +8,7 @@ on:
jobs:
audit-prs:
timeout-minutes: 15
if: ${{ github.repository == 'QwenLM/qwen-code' }}
if: ${{ github.repository == 'google-gemini/gemini-cli' }}
permissions:
contents: read
id-token: write
@@ -21,9 +21,16 @@ jobs:
- name: Checkout repository
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
- name: Generate GitHub App Token
id: generate_token
uses: actions/create-github-app-token@df432ceedc7162793a195dd1713ff69aefc7379e # v2
with:
app-id: ${{ secrets.APP_ID }}
private-key: ${{ secrets.PRIVATE_KEY }}
- name: Run PR Triage Script
id: run_triage
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
GITHUB_TOKEN: ${{ steps.generate_token.outputs.token }}
GITHUB_REPOSITORY: ${{ github.repository }}
run: ./.github/scripts/pr-triage.sh


@@ -1,191 +0,0 @@
name: 🧐 Qwen Pull Request Review
on:
pull_request:
types: [opened]
pull_request_review_comment:
types: [created]
pull_request_review:
types: [submitted]
workflow_dispatch:
inputs:
pr_number:
description: 'PR number to review'
required: true
type: number
jobs:
review-pr:
if: >
github.event_name == 'workflow_dispatch' ||
(github.event_name == 'pull_request' && github.event.action == 'opened') ||
(github.event_name == 'issue_comment' &&
github.event.issue.pull_request &&
contains(github.event.comment.body, '@qwen /review') &&
(github.event.comment.author_association == 'OWNER' ||
github.event.comment.author_association == 'MEMBER' ||
github.event.comment.author_association == 'COLLABORATOR')) ||
(github.event_name == 'pull_request_review_comment' &&
contains(github.event.comment.body, '@qwen /review') &&
(github.event.comment.author_association == 'OWNER' ||
github.event.comment.author_association == 'MEMBER' ||
github.event.comment.author_association == 'COLLABORATOR')) ||
(github.event_name == 'pull_request_review' &&
contains(github.event.review.body, '@qwen /review') &&
(github.event.review.author_association == 'OWNER' ||
github.event.review.author_association == 'MEMBER' ||
github.event.review.author_association == 'COLLABORATOR'))
timeout-minutes: 15
runs-on: ubuntu-latest
permissions:
contents: read
id-token: write
pull-requests: write
issues: write
steps:
- name: Checkout PR code
uses: actions/checkout@v4
with:
token: ${{ secrets.GITHUB_TOKEN }}
fetch-depth: 0
- name: Get PR details (pull_request & workflow_dispatch)
id: get_pr
if: github.event_name == 'pull_request' || github.event_name == 'workflow_dispatch'
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
if [ "${{ github.event_name }}" = "workflow_dispatch" ]; then
PR_NUMBER=${{ github.event.inputs.pr_number }}
else
PR_NUMBER=${{ github.event.pull_request.number }}
fi
echo "pr_number=$PR_NUMBER" >> "$GITHUB_OUTPUT"
# Get PR details
PR_DATA=$(gh pr view $PR_NUMBER --json title,body,additions,deletions,changedFiles,baseRefName,headRefName)
echo "pr_data=$PR_DATA" >> "$GITHUB_OUTPUT"
# Get file changes
CHANGED_FILES=$(gh pr diff $PR_NUMBER --name-only)
echo "changed_files<<EOF" >> "$GITHUB_OUTPUT"
echo "$CHANGED_FILES" >> "$GITHUB_OUTPUT"
echo "EOF" >> "$GITHUB_OUTPUT"
- name: Get PR details (issue_comment)
id: get_pr_comment
if: github.event_name == 'issue_comment'
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
COMMENT_BODY: ${{ github.event.comment.body }}
run: |
PR_NUMBER=${{ github.event.issue.number }}
echo "pr_number=$PR_NUMBER" >> "$GITHUB_OUTPUT"
# Extract additional instructions from comment
ADDITIONAL_INSTRUCTIONS=$(echo "$COMMENT_BODY" | sed 's/.*@qwen \/review//' | xargs)
echo "additional_instructions=$ADDITIONAL_INSTRUCTIONS" >> "$GITHUB_OUTPUT"
# Get PR details
PR_DATA=$(gh pr view $PR_NUMBER --json title,body,additions,deletions,changedFiles,baseRefName,headRefName)
echo "pr_data=$PR_DATA" >> "$GITHUB_OUTPUT"
# Get file changes
CHANGED_FILES=$(gh pr diff $PR_NUMBER --name-only)
echo "changed_files<<EOF" >> "$GITHUB_OUTPUT"
echo "$CHANGED_FILES" >> "$GITHUB_OUTPUT"
echo "EOF" >> "$GITHUB_OUTPUT"
- name: Run Qwen PR Review
uses: QwenLM/qwen-code-action@5fd6818d04d64e87d255ee4d5f77995e32fbf4c2
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
PR_NUMBER: ${{ steps.get_pr.outputs.pr_number || steps.get_pr_comment.outputs.pr_number }}
PR_DATA: ${{ steps.get_pr.outputs.pr_data || steps.get_pr_comment.outputs.pr_data }}
CHANGED_FILES: ${{ steps.get_pr.outputs.changed_files || steps.get_pr_comment.outputs.changed_files }}
ADDITIONAL_INSTRUCTIONS: ${{ steps.get_pr.outputs.additional_instructions || steps.get_pr_comment.outputs.additional_instructions }}
REPOSITORY: ${{ github.repository }}
with:
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
OPENAI_BASE_URL: ${{ secrets.OPENAI_BASE_URL }}
OPENAI_MODEL: ${{ secrets.OPENAI_MODEL }}
settings_json: |
{
"coreTools": [
"run_shell_command(echo)",
"run_shell_command(gh pr view)",
"run_shell_command(gh pr diff)",
"run_shell_command(gh pr comment)",
"run_shell_command(cat)",
"run_shell_command(head)",
"run_shell_command(tail)",
"run_shell_command(grep)",
"write_file"
],
"sandbox": false
}
prompt: |
You are an expert code reviewer. You have access to shell commands to gather PR information and perform the review.
IMPORTANT: Use the available shell commands to gather information. Do not ask for information to be provided.
Start by running these commands to gather the required data:
1. Run: echo "$PR_DATA" to get PR details (JSON format)
2. Run: echo "$CHANGED_FILES" to get the list of changed files
3. Run: echo "$PR_NUMBER" to get the PR number
4. Run: echo "$ADDITIONAL_INSTRUCTIONS" to see any specific review instructions from the user
5. Run: gh pr diff $PR_NUMBER to see the full diff
6. For any specific files, use: cat filename, head -50 filename, or tail -50 filename
Additional Review Instructions:
If ADDITIONAL_INSTRUCTIONS contains text, prioritize those specific areas or focus points in your review.
Common instruction examples: "focus on security", "check performance", "review error handling", "check for breaking changes"
Once you have the information, provide a comprehensive code review by:
1. Writing your review to a file: write_file("review.md", "<your detailed review feedback here>")
2. Posting the review: gh pr comment $PR_NUMBER --body-file review.md --repo $REPOSITORY
Review Areas:
- **Security**: Authentication, authorization, input validation, data sanitization
- **Performance**: Algorithms, database queries, caching, resource usage
- **Reliability**: Error handling, logging, testing coverage, edge cases
- **Maintainability**: Code structure, documentation, naming conventions
- **Functionality**: Logic correctness, requirements fulfillment
Output Format:
Structure your review using this exact format with markdown:
## 📋 Review Summary
Provide a brief 2-3 sentence overview of the PR and overall assessment.
## 🔍 General Feedback
- List general observations about code quality
- Mention overall patterns or architectural decisions
- Highlight positive aspects of the implementation
- Note any recurring themes across files
## 🎯 Specific Feedback
Only include sections below that have actual issues. If there are no issues in a priority category, omit that entire section.
### 🔴 Critical
(Only include this section if there are critical issues)
Issues that must be addressed before merging (security vulnerabilities, breaking changes, major bugs):
- **File: `filename:line`** - Description of critical issue with specific recommendation
### 🟡 High
(Only include this section if there are high priority issues)
Important issues that should be addressed (performance problems, design flaws, significant bugs):
- **File: `filename:line`** - Description of high priority issue with suggested fix
### 🟢 Medium
(Only include this section if there are medium priority issues)
Improvements that would enhance code quality (style issues, minor optimizations, better practices):
- **File: `filename:line`** - Description of medium priority improvement
### 🔵 Low
(Only include this section if there are suggestions)
Nice-to-have improvements and suggestions (documentation, naming, minor refactoring):
- **File: `filename:line`** - Description of suggestion or enhancement
**Note**: If no specific issues are found in any category, simply state "No specific issues identified in this review."
## ✅ Highlights
(Only include this section if there are positive aspects to highlight)
- Mention specific good practices or implementations
- Acknowledge well-written code sections
- Note improvements from previous versions


@@ -139,7 +139,7 @@ jobs:
scope: '@qwen-code'
- name: Publish @qwen-code/qwen-code-core
run: npm publish --workspace=@qwen-code/qwen-code-core --access public --tag=${{ steps.version.outputs.NPM_TAG }} ${{ steps.vars.outputs.is_dry_run == 'true' && '--dry-run' || '' }}
run: npm publish --workspace=@qwen-code/qwen-code-core --tag=${{ steps.version.outputs.NPM_TAG }} ${{ steps.vars.outputs.is_dry_run == 'true' && '--dry-run' || '' }}
env:
NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }}
@@ -148,7 +148,7 @@ jobs:
run: npm install @qwen-code/qwen-code-core@${{ steps.version.outputs.RELEASE_VERSION }} --workspace=@qwen-code/qwen-code --save-exact
- name: Publish @qwen-code/qwen-code
run: npm publish --workspace=@qwen-code/qwen-code --access public --tag=${{ steps.version.outputs.NPM_TAG }} ${{ steps.vars.outputs.is_dry_run == 'true' && '--dry-run' || '' }}
run: npm publish --workspace=@qwen-code/qwen-code --tag=${{ steps.version.outputs.NPM_TAG }} ${{ steps.vars.outputs.is_dry_run == 'true' && '--dry-run' || '' }}
env:
NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }}

.gitignore

@@ -38,8 +38,11 @@ packages/*/coverage/
# Generated files
packages/cli/src/generated/
.integration-tests/
packages/vscode-ide-companion/*.vsix
# Logs
logs/
# Qwen Code Configs
.qwen/
logs/
.qwen/

.npmrc

@@ -1 +1,2 @@
@google:registry=https://wombat-dressing-room.appspot.com
@google:registry=https://wombat-dressing-room.appspot.com
@ali:registry=https://registry.anpm.alibaba-inc.com

.nvmrc

@@ -1 +0,0 @@
20

.vscode/launch.json

@@ -30,18 +30,6 @@
"GEMINI_SANDBOX": "false"
}
},
{
"name": "Launch Companion VS Code Extension",
"type": "extensionHost",
"request": "launch",
"args": [
"--extensionDevelopmentPath=${workspaceFolder}/packages/vscode-ide-companion"
],
"outFiles": [
"${workspaceFolder}/packages/vscode-ide-companion/dist/**/*.js"
],
"preLaunchTask": "npm: build: vscode-ide-companion"
},
{
"name": "Attach",
"port": 9229,

.vscode/tasks.json

@@ -11,15 +11,6 @@
"problemMatcher": [],
"label": "npm: build",
"detail": "scripts/build.sh"
},
{
"type": "npm",
"script": "build",
"path": "packages/vscode-ide-companion",
"group": "build",
"problemMatcher": [],
"label": "npm: build: vscode-ide-companion",
"detail": "npm run build -w packages/vscode-ide-companion"
}
]
}


@@ -210,7 +210,7 @@ npm run lint
- Please adhere to the coding style, patterns, and conventions used throughout the existing codebase.
- Consult [GEMINI.md](https://github.com/google-gemini/gemini-cli/blob/main/GEMINI.md) (typically found in the project root) for specific instructions related to AI-assisted development, including conventions for React, comments, and Git usage.
- **Imports:** Pay special attention to import paths. The project uses ESLint to enforce restrictions on relative imports between packages.
- **Imports:** Pay special attention to import paths. The project uses `eslint-rules/no-relative-cross-package-imports.js` to enforce restrictions on relative imports between packages.
### Project Structure
@@ -272,19 +272,19 @@ To debug the CLI's React-based UI, you can use React DevTools. Ink, the library
## Sandboxing
### macOS Seatbelt
### MacOS Seatbelt
On macOS, `qwen` uses Seatbelt (`sandbox-exec`) under a `permissive-open` profile (see `packages/cli/src/utils/sandbox-macos-permissive-open.sb`) that restricts writes to the project folder but otherwise allows all other operations and outbound network traffic ("open") by default. You can switch to a `restrictive-closed` profile (see `packages/cli/src/utils/sandbox-macos-restrictive-closed.sb`) that declines all operations and outbound network traffic ("closed") by default by setting `SEATBELT_PROFILE=restrictive-closed` in your environment or `.env` file. Available built-in profiles are `{permissive,restrictive}-{open,closed,proxied}` (see below for proxied networking). You can also switch to a custom profile `SEATBELT_PROFILE=<profile>` if you also create a file `.qwen/sandbox-macos-<profile>.sb` under your project settings directory `.qwen`.
On MacOS, `gemini` uses Seatbelt (`sandbox-exec`) under a `permissive-open` profile (see `packages/cli/src/utils/sandbox-macos-permissive-open.sb`) that restricts writes to the project folder but otherwise allows all other operations and outbound network traffic ("open") by default. You can switch to a `restrictive-closed` profile (see `packages/cli/src/utils/sandbox-macos-restrictive-closed.sb`) that declines all operations and outbound network traffic ("closed") by default by setting `SEATBELT_PROFILE=restrictive-closed` in your environment or `.env` file. Available built-in profiles are `{permissive,restrictive}-{open,closed,proxied}` (see below for proxied networking). You can also switch to a custom profile `SEATBELT_PROFILE=<profile>` if you also create a file `.qwen/sandbox-macos-<profile>.sb` under your project settings directory `.gemini`.
### Container-based Sandboxing (All Platforms)
For stronger container-based sandboxing on macOS or other platforms, you can set `GEMINI_SANDBOX=true|docker|podman|<command>` in your environment or `.env` file. The specified command (or if `true` then either `docker` or `podman`) must be installed on the host machine. Once enabled, `npm run build:all` will build a minimal container ("sandbox") image and `npm start` will launch inside a fresh instance of that container. The first build can take 20-30s (mostly due to downloading of the base image) but after that both build and start overhead should be minimal. Default builds (`npm run build`) will not rebuild the sandbox.
For stronger container-based sandboxing on MacOS or other platforms, you can set `GEMINI_SANDBOX=true|docker|podman|<command>` in your environment or `.env` file. The specified command (or if `true` then either `docker` or `podman`) must be installed on the host machine. Once enabled, `npm run build:all` will build a minimal container ("sandbox") image and `npm start` will launch inside a fresh instance of that container. The first build can take 20-30s (mostly due to downloading of the base image) but after that both build and start overhead should be minimal. Default builds (`npm run build`) will not rebuild the sandbox.
Container-based sandboxing mounts the project directory (and system temp directory) with read-write access and is started/stopped/removed automatically as you start/stop Gemini CLI. Files created within the sandbox should be automatically mapped to your user/group on host machine. You can easily specify additional mounts, ports, or environment variables by setting `SANDBOX_{MOUNTS,PORTS,ENV}` as needed. You can also fully customize the sandbox for your projects by creating the files `.qwen/sandbox.Dockerfile` and/or `.qwen/sandbox.bashrc` under your project settings directory (`.qwen`) and running `qwen` with `BUILD_SANDBOX=1` to trigger building of your custom sandbox.
Container-based sandboxing mounts the project directory (and system temp directory) with read-write access and is started/stopped/removed automatically as you start/stop Gemini CLI. Files created within the sandbox should be automatically mapped to your user/group on host machine. You can easily specify additional mounts, ports, or environment variables by setting `SANDBOX_{MOUNTS,PORTS,ENV}` as needed. You can also fully customize the sandbox for your projects by creating the files `.qwen/sandbox.Dockerfile` and/or `.qwen/sandbox.bashrc` under your project settings directory (`.gemini`) and running `gemini` with `BUILD_SANDBOX=1` to trigger building of your custom sandbox.
#### Proxied Networking
All sandboxing methods, including macOS Seatbelt using `*-proxied` profiles, support restricting outbound network traffic through a custom proxy server that can be specified as `GEMINI_SANDBOX_PROXY_COMMAND=<command>`, where `<command>` must start a proxy server that listens on `:::8877` for relevant requests. See `docs/examples/proxy-script.md` for a minimal proxy that only allows `HTTPS` connections to `example.com:443` (e.g. `curl https://example.com`) and declines all other requests. The proxy is started and stopped automatically alongside the sandbox.
All sandboxing methods, including MacOS Seatbelt using `*-proxied` profiles, support restricting outbound network traffic through a custom proxy server that can be specified as `GEMINI_SANDBOX_PROXY_COMMAND=<command>`, where `<command>` must start a proxy server that listens on `:::8877` for relevant requests. See `docs/examples/proxy-script.md` for a minimal proxy that only allows `HTTPS` connections to `example.com:443` (e.g. `curl https://example.com`) and declines all other requests. The proxy is started and stopped automatically alongside the sandbox.
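Taken together, the sandboxing knobs described in this section are plain environment variables; a rough sketch of setting them in a shell or `.env` file follows (the values and the proxy script are illustrative, not from the repo):

```bash
# Illustrative sandbox configuration; adjust to your setup.
export GEMINI_SANDBOX=docker                  # or: true | podman | <command>
export SEATBELT_PROFILE=restrictive-closed    # macOS Seatbelt profile, or a custom <profile>
export GEMINI_SANDBOX_PROXY_COMMAND="./my-proxy.sh"   # hypothetical script; must listen on :::8877

# Rebuild a customized sandbox image on the next launch (binary name depends on the build).
BUILD_SANDBOX=1 qwen
```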
## Manual Publish


@@ -71,8 +71,7 @@ JavaScript classes, by their nature, are designed to encapsulate internal state
- Reduced Boilerplate and Increased Conciseness: Classes often promote the use of constructors, this binding, getters, setters, and other boilerplate that can unnecessarily bloat code. TypeScript interface and type declarations provide powerful static type checking without the runtime overhead or verbosity of class definitions. This allows for more succinct and readable code, aligning with JavaScript's strengths in functional programming.
- Enhanced Readability and Predictability: Plain objects, especially when their structure is clearly defined by TypeScript interfaces, are often easier to read and understand. Their properties are directly accessible, and there's no hidden internal state or complex inheritance chains to navigate. This predictability leads to fewer bugs and a more maintainable codebase.
- Simplified Immutability: While not strictly enforced, plain objects encourage an immutable approach to data. When you need to modify an object, you typically create a new one with the desired changes, rather than mutating the original. This pattern aligns perfectly with React's reconciliation process and helps prevent subtle bugs related to shared mutable state.
Simplified Immutability: While not strictly enforced, plain objects encourage an immutable approach to data. When you need to modify an object, you typically create a new one with the desired changes, rather than mutating the original. This pattern aligns perfectly with React's reconciliation process and helps prevent subtle bugs related to shared mutable state.
- Better Serialization and Deserialization: Plain JavaScript objects are naturally easy to serialize to JSON and deserialize back, which is a common requirement in web development (e.g., for API communication or local storage). Classes, with their methods and prototypes, can complicate this process.


@@ -1,4 +1,4 @@
# Qwen CLI Roadmap
# Gemini CLI Roadmap
The [Official Gemini CLI Roadmap](https://github.com/orgs/google-gemini/projects/11/)

Binary file not shown (before: 259 KiB).


@@ -6,7 +6,7 @@ The Gemini CLI includes a Checkpointing feature that automatically saves a snaps
When you approve a tool that modifies the file system (like `write_file` or `replace`), the CLI automatically creates a "checkpoint." This checkpoint includes:
1. **A Git Snapshot:** A commit is made in a special, shadow Git repository located in your home directory (`~/.gemini/history/<project_hash>`). This snapshot captures the complete state of your project files at that moment. It does **not** interfere with your own project's Git repository.
1. **A Git Snapshot:** A commit is made in a special, shadow Git repository located in your home directory (`~/.qwen/history/<project_hash>`). This snapshot captures the complete state of your project files at that moment. It does **not** interfere with your own project's Git repository.
2. **Conversation History:** The entire conversation you've had with the agent up to that point is saved.
3. **The Tool Call:** The specific tool call that was about to be executed is also stored.
@@ -16,7 +16,7 @@ If you want to undo the change or simply go back, you can use the `/restore` com
- Restore the conversation history in the CLI.
- Re-propose the original tool call, allowing you to run it again, modify it, or simply ignore it.
All checkpoint data, including the Git snapshot and conversation history, is stored locally on your machine. The Git snapshot is stored in the shadow repository while the conversation history and tool calls are saved in a JSON file in your project's temporary directory, typically located at `~/.gemini/tmp/<project_hash>/checkpoints`.
All checkpoint data, including the Git snapshot and conversation history, is stored locally on your machine. The Git snapshot is stored in the shadow repository while the conversation history and tool calls are saved in a JSON file in your project's temporary directory, typically located at `~/.qwen/tmp/<project_hash>/checkpoints`.
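As a sketch, the checkpoint data described above can be inspected directly; the project hash is a placeholder, and the base directory is `~/.gemini` or `~/.qwen` depending on which side of this diff your build follows:

```bash
# Inspect the shadow repository and checkpoint files; PROJECT_HASH stands in for
# the hash the CLI derives for your project.
ls ~/.qwen/history/                                      # one shadow repo per project
PROJECT_HASH="<hash from the listing above>"
git -C ~/.qwen/history/"$PROJECT_HASH" log --oneline     # snapshot commits, separate from your own repo
ls ~/.qwen/tmp/"$PROJECT_HASH"/checkpoints               # conversation history and tool-call JSON
```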
## Enabling the Feature


@@ -1,6 +1,6 @@
# Authentication Setup
The Gemini CLI requires you to authenticate with Google's AI services. On initial startup you'll need to configure **one** of the following authentication methods:
The Qwen Code CLI supports multiple authentication methods. On initial startup you'll need to configure **one** of the following authentication methods:
1. **Login with Google (Gemini Code Assist):**
- Use this option to log in with your google account.
@@ -8,18 +8,17 @@ The Gemini CLI requires you to authenticate with Google's AI services. On initia
- Note that the web login must be done in a browser that can communicate with the machine Gemini CLI is being run from. (Specifically, the browser will be redirected to a localhost url that Gemini CLI will be listening on).
- <a id="workspace-gca">Users may have to specify a GOOGLE_CLOUD_PROJECT if:</a>
1. You have a Google Workspace account. Google Workspace is a paid service for businesses and organizations that provides a suite of productivity tools, including a custom email domain (e.g. your-name@your-company.com), enhanced security features, and administrative controls. These accounts are often managed by an employer or school.
1. You have received a Gemini Code Assist license through the [Google Developer Program](https://developers.google.com/program/plans-and-pricing) (including qualified Google Developer Experts)
1. You have received a free Code Assist license through the [Google Developer Program](https://developers.google.com/program/plans-and-pricing) (including qualified Google Developer Experts)
1. You have been assigned a license to a current Gemini Code Assist standard or enterprise subscription.
1. You are using the product outside the [supported regions](https://developers.google.com/gemini-code-assist/resources/available-locations) for free individual usage.
1. You are a Google account holder under the age of 18
- If you fall into one of these categories, you must first configure a Google Cloud Project ID to use, [enable the Gemini for Cloud API](https://cloud.google.com/gemini/docs/discover/set-up-gemini#enable-api) and [configure access permissions](https://cloud.google.com/gemini/docs/discover/set-up-gemini#grant-iam).
- If you fall into one of these categories, you must first configure a Google Cloud Project Id to use, [enable the Gemini for Cloud API](https://cloud.google.com/gemini/docs/discover/set-up-gemini#enable-api) and [configure access permissions](https://cloud.google.com/gemini/docs/discover/set-up-gemini#grant-iam).
You can temporarily set the environment variable in your current shell session using the following command:
```bash
export GOOGLE_CLOUD_PROJECT="YOUR_PROJECT_ID"
```
- For repeated use, you can add the environment variable to your [.env file](#persisting-environment-variables-with-env-files) or your shell's configuration file (like `~/.bashrc`, `~/.zshrc`, or `~/.profile`). For example, the following command adds the environment variable to a `~/.bashrc` file:
```bash
@@ -34,17 +33,12 @@ The Gemini CLI requires you to authenticate with Google's AI services. On initia
```bash
export GEMINI_API_KEY="YOUR_GEMINI_API_KEY"
```
- For repeated use, you can add the environment variable to your [.env file](#persisting-environment-variables-with-env-files).
- Alternatively you can export the API key from your shell's configuration file (like `~/.bashrc`, `~/.zshrc`, or `~/.profile`). For example, the following command adds the environment variable to a `~/.bashrc` file:
- For repeated use, you can add the environment variable to your [.env file](#persisting-environment-variables-with-env-files) or your shell's configuration file (like `~/.bashrc`, `~/.zshrc`, or `~/.profile`). For example, the following command adds the environment variable to a `~/.bashrc` file:
```bash
echo 'export GEMINI_API_KEY="YOUR_GEMINI_API_KEY"' >> ~/.bashrc
source ~/.bashrc
```
:warning: Be advised that when you export your API key inside your shell configuration file, any other process executed from the shell can read it.
3. **Vertex AI:**
- Obtain your Google Cloud API key: [Get an API Key](https://cloud.google.com/vertex-ai/generative-ai/docs/start/api-keys?usertype=newuser)
- Set the `GOOGLE_API_KEY` environment variable. In the following methods, replace `YOUR_GOOGLE_API_KEY` with your Vertex AI API key:
@@ -69,36 +63,28 @@ The Gemini CLI requires you to authenticate with Google's AI services. On initia
export GOOGLE_CLOUD_PROJECT="YOUR_PROJECT_ID"
export GOOGLE_CLOUD_LOCATION="YOUR_PROJECT_LOCATION" # e.g., us-central1
```
- For repeated use, you can add the environment variables to your [.env file](#persisting-environment-variables-with-env-files)
- Alternatively you can export the environment variables from your shell's configuration file (like `~/.bashrc`, `~/.zshrc`, or `~/.profile`). For example, the following commands add the environment variables to a `~/.bashrc` file:
- For repeated use, you can add the environment variables to your [.env file](#persisting-environment-variables-with-env-files) or your shell's configuration file (like `~/.bashrc`, `~/.zshrc`, or `~/.profile`). For example, the following commands add the environment variables to a `~/.bashrc` file:
```bash
echo 'export GOOGLE_CLOUD_PROJECT="YOUR_PROJECT_ID"' >> ~/.bashrc
echo 'export GOOGLE_CLOUD_LOCATION="YOUR_PROJECT_LOCATION"' >> ~/.bashrc
source ~/.bashrc
```
:warning: Be advised that when you export your API key inside your shell configuration file, any other process executed from the shell can read it.
4. **Cloud Shell:**
- This option is only available when running in a Google Cloud Shell environment.
- It automatically uses the credentials of the logged-in user in the Cloud Shell environment.
- This is the default authentication method when running in Cloud Shell and no other method is configured.
:warning: Be advised that when you export your API key inside your shell configuration file, any other process executed from the shell can read it.
### Persisting Environment Variables with `.env` Files
You can create a **`.gemini/.env`** file in your project directory or in your home directory. Creating a plain **`.env`** file also works, but `.gemini/.env` is recommended to keep Gemini variables isolated from other tools.
You can create a **`.qwen/.env`** file in your project directory or in your home directory. Creating a plain **`.env`** file also works, but `.qwen/.env` is recommended to keep Gemini variables isolated from other tools.
Gemini CLI automatically loads environment variables from the **first** `.env` file it finds, using the following search order:
1. Starting in the **current directory** and moving upward toward `/`, for each directory it checks:
1. `.gemini/.env`
1. `.qwen/.env`
2. `.env`
2. If no file is found, it falls back to your **home directory**:
- `~/.gemini/.env`
- `~/.qwen/.env`
- `~/.env`
> **Important:** The search stops at the **first** file encountered—variables are **not merged** across multiple files.
@@ -109,36 +95,20 @@ Gemini CLI automatically loads environment variables from the **first** `.env` f
```bash
mkdir -p .gemini
echo 'GOOGLE_CLOUD_PROJECT="your-project-id"' >> .gemini/.env
echo 'GOOGLE_CLOUD_PROJECT="your-project-id"' >> .qwen/.env
```
**User-wide settings** (available in every directory):
```bash
mkdir -p ~/.gemini
cat >> ~/.gemini/.env <<'EOF'
cat >> ~/.qwen/.env <<'EOF'
GOOGLE_CLOUD_PROJECT="your-project-id"
GEMINI_API_KEY="your-gemini-api-key"
EOF
```
## Non-Interactive Mode / Headless Environments
When running the Gemini CLI in a non-interactive environment, you cannot use the interactive login flow.
Instead, you must configure authentication using environment variables.
The CLI will automatically detect if it is running in a non-interactive terminal and will use one of the
following authentication methods if available:
1. **Gemini API Key:**
- Set the `GEMINI_API_KEY` environment variable.
- The CLI will use this key to authenticate with the Gemini API.
2. **Vertex AI:**
- Set the `GOOGLE_GENAI_USE_VERTEXAI=true` environment variable.
- **Using an API Key:** Set the `GOOGLE_API_KEY` environment variable.
- **Using Application Default Credentials (ADC):**
- Run `gcloud auth application-default login` in your environment to configure ADC.
- Ensure the `GOOGLE_CLOUD_PROJECT` and `GOOGLE_CLOUD_LOCATION` environment variables are set.
If none of these environment variables are set in a non-interactive session, the CLI will exit with an error.
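As a sketch, the two non-interactive options above reduce to environment setup like the following (all values are placeholders):

```bash
# Option 1: Gemini API key
export GEMINI_API_KEY="YOUR_GEMINI_API_KEY"

# Option 2: Vertex AI, either with an API key...
export GOOGLE_GENAI_USE_VERTEXAI=true
export GOOGLE_API_KEY="YOUR_GOOGLE_API_KEY"
# ...or with Application Default Credentials:
gcloud auth application-default login
export GOOGLE_CLOUD_PROJECT="YOUR_PROJECT_ID"
export GOOGLE_CLOUD_LOCATION="us-central1"
```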
5. **OpenAI Authentication:**
- Use OpenAI models instead of Google's Gemini models
- For detailed setup instructions, see [OpenAI Authentication](./openai-auth.md)
- Supports interactive setup, command line arguments, and environment variables


@@ -6,8 +6,6 @@ Qwen Code supports several built-in commands to help you manage your session, cu
Slash commands provide meta-level control over the CLI itself.
### Built-in Commands
- **`/bug`**
- **Description:** File an issue about Qwen Code. By default, the issue is filed within the GitHub repository for Qwen Code. The string you enter after `/bug` will become the headline for the bug being filed. The default `/bug` behavior can be modified using the `bugCommand` setting in your `.qwen/settings.json` files.
@@ -30,9 +28,6 @@ Slash commands provide meta-level control over the CLI itself.
- **`/compress`**
- **Description:** Replace the entire chat context with a summary. This saves on tokens used for future tasks while retaining a high level summary of what has happened.
- **`/copy`**
- **Description:** Copies the last output produced by Qwen Code to your clipboard, for easy sharing or reuse.
- **`/editor`**
- **Description:** Open a dialog for selecting supported editors.
@@ -95,199 +90,6 @@ Slash commands provide meta-level control over the CLI itself.
- **`/quit`** (or **`/exit`**)
- **Description:** Exit Qwen Code.
- **`/vim`**
- **Description:** Toggle vim mode on or off. When vim mode is enabled, the input area supports vim-style navigation and editing commands in both NORMAL and INSERT modes.
- **Features:**
- **NORMAL mode:** Navigate with `h`, `j`, `k`, `l`; jump by words with `w`, `b`, `e`; go to line start/end with `0`, `$`, `^`; go to specific lines with `G` (or `gg` for first line)
- **INSERT mode:** Standard text input with escape to return to NORMAL mode
- **Editing commands:** Delete with `x`, change with `c`, insert with `i`, `a`, `o`, `O`; complex operations like `dd`, `cc`, `dw`, `cw`
- **Count support:** Prefix commands with numbers (e.g., `3h`, `5w`, `10G`)
- **Repeat last command:** Use `.` to repeat the last editing operation
- **Persistent setting:** Vim mode preference is saved to `~/.gemini/settings.json` and restored between sessions
- **Status indicator:** When enabled, shows `[NORMAL]` or `[INSERT]` in the footer
### Custom Commands
For a quick start, see the [example](#example-a-pure-function-refactoring-command) below.
Custom commands allow you to save and reuse your favorite or most frequently used prompts as personal shortcuts within Gemini CLI. You can create commands that are specific to a single project or commands that are available globally across all your projects, streamlining your workflow and ensuring consistency.
#### File Locations & Precedence
Gemini CLI discovers commands from two locations, loaded in a specific order:
1. **User Commands (Global):** Located in `~/.gemini/commands/`. These commands are available in any project you are working on.
2. **Project Commands (Local):** Located in `<your-project-root>/.gemini/commands/`. These commands are specific to the current project and can be checked into version control to be shared with your team.
If a command in the project directory has the same name as a command in the user directory, the **project command will always be used.** This allows projects to override global commands with project-specific versions.
#### Naming and Namespacing
The name of a command is determined by its file path relative to its `commands` directory. Subdirectories are used to create namespaced commands, with the path separator (`/` or `\`) being converted to a colon (`:`).
- A file at `~/.gemini/commands/test.toml` becomes the command `/test`.
- A file at `<project>/.gemini/commands/git/commit.toml` becomes the namespaced command `/git:commit`.
#### TOML File Format (v1)
Your command definition files must be written in the TOML format and use the `.toml` file extension.
##### Required Fields
- `prompt` (String): The prompt that will be sent to the Gemini model when the command is executed. This can be a single-line or multi-line string.
##### Optional Fields
- `description` (String): A brief, one-line description of what the command does. This text will be displayed next to your command in the `/help` menu. **If you omit this field, a generic description will be generated from the filename.**
#### Handling Arguments
Custom commands support two powerful, low-friction methods for handling arguments. The CLI automatically chooses the correct method based on the content of your command's `prompt`.
##### 1. Shorthand Injection with `{{args}}`
If your `prompt` contains the special placeholder `{{args}}`, the CLI will replace that exact placeholder with all the text the user typed after the command name. This is perfect for simple, deterministic commands where you need to inject user input into a specific place in a larger prompt template.
**Example (`git/fix.toml`):**
```toml
# In: ~/.gemini/commands/git/fix.toml
# Invoked via: /git:fix "Button is misaligned on mobile"
description = "Generates a fix for a given GitHub issue."
prompt = "Please analyze the staged git changes and provide a code fix for the issue described here: {{args}}."
```
The model will receive the final prompt: `Please analyze the staged git changes and provide a code fix for the issue described here: "Button is misaligned on mobile".`
##### 2. Default Argument Handling
If your `prompt` does **not** contain the special placeholder `{{args}}`, the CLI uses a default behavior for handling arguments.
If you provide arguments to the command (e.g., `/mycommand arg1`), the CLI will append the full command you typed to the end of the prompt, separated by two newlines. This allows the model to see both the original instructions and the specific arguments you just provided.
If you do **not** provide any arguments (e.g., `/mycommand`), the prompt is sent to the model exactly as it is, with nothing appended.
**Example (`changelog.toml`):**
This example shows how to create a robust command by defining a role for the model, explaining where to find the user's input, and specifying the expected format and behavior.
```toml
# In: <project>/.gemini/commands/changelog.toml
# Invoked via: /changelog 1.2.0 added "Support for default argument parsing."
description = "Adds a new entry to the project's CHANGELOG.md file."
prompt = """
# Task: Update Changelog
You are an expert maintainer of this software project. A user has invoked a command to add a new entry to the changelog.
**The user's raw command is appended below your instructions.**
Your task is to parse the `<version>`, `<change_type>`, and `<message>` from their input and use the `write_file` tool to correctly update the `CHANGELOG.md` file.
## Expected Format
The command follows this format: `/changelog <version> <type> <message>`
- `<type>` must be one of: "added", "changed", "fixed", "removed".
## Behavior
1. Read the `CHANGELOG.md` file.
2. Find the section for the specified `<version>`.
3. Add the `<message>` under the correct `<type>` heading.
4. If the version or type section doesn't exist, create it.
5. Adhere strictly to the "Keep a Changelog" format.
"""
```
When you run `/changelog 1.2.0 added "New feature"`, the final text sent to the model will be the original prompt followed by two newlines and the command you typed.
##### 3. Executing Shell Commands with `!{...}`
You can make your commands dynamic by executing shell commands directly within your `prompt` and injecting their output. This is ideal for gathering context from your local environment, like reading file content or checking the status of Git.
When a custom command attempts to execute a shell command, Gemini CLI will now prompt you for confirmation before proceeding. This is a security measure to ensure that only intended commands can be run.
**How It Works:**
1. **Inject Commands:** Use the `!{...}` syntax in your `prompt` to specify where the command should be run and its output injected.
2. **Confirm Execution:** When you run the command, a dialog will appear listing the shell commands the prompt wants to execute.
3. **Grant Permission:** You can choose to:
- **Allow once:** The command(s) will run this one time.
- **Allow always for this session:** The command(s) will be added to a temporary allowlist for the current CLI session and will not require confirmation again.
- **No:** Cancel the execution of the shell command(s).
The CLI still respects the global `excludeTools` and `coreTools` settings. A command will be blocked without a confirmation prompt if it is explicitly disallowed in your configuration.
**Example (`git/commit.toml`):**
This command gets the staged git diff and uses it to ask the model to write a commit message.
````toml
# In: <project>/.gemini/commands/git/commit.toml
# Invoked via: /git:commit
description = "Generates a Git commit message based on staged changes."
# The prompt uses !{...} to execute the command and inject its output.
prompt = """
Please generate a Conventional Commit message based on the following git diff:
```diff
!{git diff --staged}
````
"""
````
When you run `/git:commit`, the CLI first executes `git diff --staged`, then replaces `!{git diff --staged}` with the output of that command before sending the final, complete prompt to the model.
---
#### Example: A "Pure Function" Refactoring Command
Let's create a global command that asks the model to refactor a piece of code.
**1. Create the file and directories:**
First, ensure the user commands directory exists, then create a `refactor` subdirectory for organization and the final TOML file.
```bash
mkdir -p ~/.gemini/commands/refactor
touch ~/.gemini/commands/refactor/pure.toml
````
**2. Add the content to the file:**
Open `~/.gemini/commands/refactor/pure.toml` in your editor and add the following content. We are including the optional `description` for best practice.
```toml
# In: ~/.gemini/commands/refactor/pure.toml
# This command will be invoked via: /refactor:pure
description = "Asks the model to refactor the current context into a pure function."
prompt = """
Please analyze the code I've provided in the current context.
Refactor it into a pure function.
Your response should include:
1. The refactored, pure function code block.
2. A brief explanation of the key changes you made and why they contribute to purity.
"""
```
**3. Run the Command:**
That's it! You can now run your command in the CLI. First, you might add a file to the context, and then invoke your command:
```
> @my-messy-function.js
> /refactor:pure
```
Gemini CLI will then execute the multi-line prompt defined in your TOML file.
## At commands (`@`)
At commands are used to include the content of files or directories as part of your prompt to Gemini. These commands include git-aware filtering.
@@ -317,13 +119,13 @@ At commands are used to include the content of files or directories as part of y
## Shell mode & passthrough commands (`!`)
The `!` prefix lets you interact with your system's shell directly from within Gemini CLI.
The `!` prefix lets you interact with your system's shell directly from within Qwen Code.
- **`!<shell_command>`**
- **Description:** Execute the given `<shell_command>` using `bash` on Linux/macOS or `cmd.exe` on Windows. Any output or errors from the command are displayed in the terminal.
- **Description:** Execute the given `<shell_command>` in your system's default shell. Any output or errors from the command are displayed in the terminal.
- **Examples:**
- `!ls -la` (executes `ls -la` and returns to Gemini CLI)
- `!git status` (executes `git status` and returns to Gemini CLI)
- `!ls -la` (executes `ls -la` and returns to Qwen Code)
- `!git status` (executes `git status` and returns to Qwen Code)
- **`!` (Toggle shell mode)**
- **Description:** Typing `!` on its own toggles shell mode.
@@ -331,8 +133,6 @@ The `!` prefix lets you interact with your system's shell directly from within G
- When active, shell mode uses a different coloring and a "Shell Mode Indicator".
- While in shell mode, text you type is interpreted directly as a shell command.
- **Exiting shell mode:**
- When exited, the UI reverts to its standard appearance and normal Gemini CLI behavior resumes.
- When exited, the UI reverts to its standard appearance and normal Qwen Code behavior resumes.
- **Caution for all `!` usage:** Commands you execute in shell mode have the same permissions and impact as if you ran them directly in your terminal.
- **Environment Variable:** When a command is executed via `!` or in shell mode, the `GEMINI_CLI=1` environment variable is set in the subprocess's environment. This allows scripts or tools to detect if they are being run from within the Gemini CLI.
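Because `GEMINI_CLI=1` is set for these subprocesses, a script can branch on it. A minimal sketch (only the variable name comes from the text above; everything else is illustrative):

```typescript
// Sketch: a script that behaves differently when launched from the CLI's shell mode.
const insideCli = process.env['GEMINI_CLI'] === '1';

console.log(
  insideCli
    ? 'Detected GEMINI_CLI=1: running inside the CLI.'
    : 'Running in a plain terminal.',
);
```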

View File

@@ -24,7 +24,7 @@ Gemini CLI uses `settings.json` files for persistent configuration. There are th
- **Location:** `.qwen/settings.json` within your project's root directory.
- **Scope:** Applies only when running Gemini CLI from that specific project. Project settings override user settings.
- **System settings file:**
- **Location:** `/etc/gemini-cli/settings.json` (Linux), `C:\ProgramData\gemini-cli\settings.json` (Windows) or `/Library/Application Support/GeminiCli/settings.json` (macOS). The path can be overridden using the `GEMINI_CLI_SYSTEM_SETTINGS_PATH` environment variable.
- **Location:** `/etc/gemini-cli/settings.json` (Linux), `C:\ProgramData\gemini-cli\settings.json` (Windows) or `/Library/Application Support/GeminiCli/settings.json` (macOS).
- **Scope:** Applies to all Gemini CLI sessions on the system, for all users. System settings override user and project settings. May be useful for system administrators at enterprises to have controls over users' Gemini CLI setups.
**Note on environment variables in settings:** String values within your `settings.json` files can reference environment variables using either `$VAR_NAME` or `${VAR_NAME}` syntax. These variables will be automatically resolved when the settings are loaded. For example, if you have an environment variable `MY_API_TOKEN`, you could use it in `settings.json` like this: `"apiKey": "$MY_API_TOKEN"`.
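As a sketch of what that resolution implies (not the CLI's actual code), both `$VAR_NAME` and `${VAR_NAME}` references could be expanded against `process.env` before the settings are used:

```typescript
// Hypothetical resolver for $VAR_NAME / ${VAR_NAME} references in settings strings.
function resolveEnvRefs(value: string, env: NodeJS.ProcessEnv = process.env): string {
  return value.replace(/\$(?:\{(\w+)\}|(\w+))/g, (match, braced, bare) => {
    const name = braced ?? bare;
    return env[name] ?? match; // leave unknown references untouched
  });
}

// Example: with MY_API_TOKEN=abc123 in the environment,
// both "$MY_API_TOKEN" and "${MY_API_TOKEN}" resolve to "abc123".
console.log(resolveEnvRefs('$MY_API_TOKEN'), resolveEnvRefs('${MY_API_TOKEN}'));
```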
@@ -81,18 +81,6 @@ In addition to a project settings file, a project's `.gemini` directory can cont
`excludeTools` for `run_shell_command` are based on simple string matching and can be easily bypassed. This feature is **not a security mechanism** and should not be relied upon to safely execute untrusted code. It is recommended to use `coreTools` to explicitly select commands
that can be executed.
- **`allowMCPServers`** (array of strings):
- **Description:** Allows you to specify a list of MCP server names that should be made available to the model. This can be used to restrict the set of MCP servers to connect to. Note that this will be ignored if `--allowed-mcp-server-names` is set.
- **Default:** All MCP servers are available for use by the Gemini model.
- **Example:** `"allowMCPServers": ["myPythonServer"]`.
- **Security Note:** This uses simple string matching on MCP server names, which can be modified. If you're a system administrator looking to prevent users from bypassing this, consider configuring the `mcpServers` at the system settings level such that the user will not be able to configure any MCP servers of their own. This should not be used as an airtight security mechanism.
- **`excludeMCPServers`** (array of strings):
- **Description:** Allows you to specify a list of MCP server names that should be excluded from the model. A server listed in both `excludeMCPServers` and `allowMCPServers` is excluded. Note that this will be ignored if `--allowed-mcp-server-names` is set.
- **Default**: No MCP servers excluded.
- **Example:** `"excludeMCPServers": ["myNodeServer"]`.
- **Security Note:** This uses simple string matching on MCP server names, which can be modified. If you're a system administrator looking to prevent users from bypassing this, consider configuring the `mcpServers` at the system settings level such that the user will not be able to configure any MCP servers of their own. This should not be used as an airtight security mechanism.
- **`autoAccept`** (boolean):
- **Description:** Controls whether the CLI automatically accepts and executes tool calls that are considered safe (e.g., read-only operations) without explicit user confirmation. If set to `true`, the CLI will bypass the confirmation prompt for tools deemed safe.
- **Default:** `false`
@@ -103,11 +91,6 @@ In addition to a project settings file, a project's `.gemini` directory can cont
- **Default:** `"Default"`
- **Example:** `"theme": "GitHub"`
- **`vimMode`** (boolean):
- **Description:** Enables or disables vim mode for input editing. When enabled, the input area supports vim-style navigation and editing commands with NORMAL and INSERT modes. The vim mode status is displayed in the footer and persists between sessions.
- **Default:** `false`
- **Example:** `"vimMode": true`
- **`sandbox`** (boolean or string):
- **Description:** Controls whether and how to use sandboxing for tool execution. If set to `true`, Gemini CLI uses a pre-built `gemini-cli-sandbox` Docker image. For more information, see [Sandboxing](#sandboxing).
- **Default:** `false`
@@ -137,8 +120,6 @@ In addition to a project settings file, a project's `.gemini` directory can cont
- `cwd` (string, optional): The working directory in which to start the server.
- `timeout` (number, optional): Timeout in milliseconds for requests to this MCP server.
- `trust` (boolean, optional): Trust this server and bypass all tool call confirmations.
- `includeTools` (array of strings, optional): List of tool names to include from this MCP server. When specified, only the tools listed here will be available from this server (whitelist behavior). If not specified, all tools from the server are enabled by default.
- `excludeTools` (array of strings, optional): List of tool names to exclude from this MCP server. Tools listed here will not be available to the model, even if they are exposed by the server. **Note:** `excludeTools` takes precedence over `includeTools` - if a tool is in both lists, it will be excluded.
- **Example:**
```json
"mcpServers": {
@@ -146,14 +127,12 @@ In addition to a project settings file, a project's `.gemini` directory can cont
"command": "python",
"args": ["mcp_server.py", "--port", "8080"],
"cwd": "./mcp_tools/python",
"timeout": 5000,
"includeTools": ["safe_tool", "file_reader"],
"timeout": 5000
},
"myNodeServer": {
"command": "node",
"args": ["mcp_server.js"],
"cwd": "./mcp_tools/node",
"excludeTools": ["dangerous_tool", "file_deleter"]
"cwd": "./mcp_tools/node"
},
"myDockerServer": {
"command": "docker",
@@ -227,17 +206,45 @@ In addition to a project settings file, a project's `.gemini` directory can cont
"maxSessionTurns": 10
```
- **`summarizeToolOutput`** (object):
- **Description:** Enables or disables the summarization of tool output. You can specify the token budget for the summarization using the `tokenBudget` setting.
- Note: Currently only the `run_shell_command` tool is supported.
- **Default:** `{}` (Disabled by default)
- **`enableOpenAILogging`** (boolean):
- **Description:** Enables or disables logging of OpenAI API calls for debugging and analysis. When enabled, all requests and responses to the OpenAI API are logged to files in the `~/.qwen/logs/` directory.
- **Default:** `false`
- **Example:**
```json
"summarizeToolOutput": {
"run_shell_command": {
"tokenBudget": 2000
"enableOpenAILogging": true
```
- **`systemPromptMappings`** (array):
- **Description:** Configures custom system prompt templates for specific model names and base URLs. This allows you to use different system prompts for different AI models or API endpoints.
- **Default:** `undefined` (uses default system prompt)
- **Properties:**
- **`baseUrls`** (array of strings, optional): Array of base URLs to match exactly against the `OPENAI_BASE_URL` environment variable. If not specified, matches any base URL.
- **`modelNames`** (array of strings, optional): Array of model names to match exactly against the `OPENAI_MODEL` environment variable. If not specified, matches any model.
- **`template`** (string): The system prompt template to use when both baseUrl and modelNames match. Supports placeholders:
- `{RUNTIME_VARS_IS_GIT_REPO}`: Replaced with `true` or `false` based on whether the current directory is a git repository
- `{RUNTIME_VARS_SANDBOX}`: Replaced with the sandbox type (e.g., `"sandbox-exec"`, `"docker"`, or empty string)
- **Example:**
```json
"systemPromptMappings": [
{
"baseUrls": [
"https://dashscope.aliyuncs.com/compatible-mode/v1",
"https://dashscope-intl.aliyuncs.com/compatible-mode/v1"
],
"modelNames": ["qwen3-coder-plus"],
"template": "SYSTEM_TEMPLATE:{\"name\":\"qwen3_coder\",\"params\":{\"is_git_repository\":{RUNTIME_VARS_IS_GIT_REPO},\"sandbox\":\"{RUNTIME_VARS_SANDBOX}\"}}"
},
{
"modelNames": ["gpt-4"],
"template": "You are a helpful AI assistant specialized in coding tasks. Current sandbox: {RUNTIME_VARS_SANDBOX}"
},
{
"baseUrls": ["api.openai.com"],
"template": "You are an AI coding assistant. Working in git repository: {RUNTIME_VARS_IS_GIT_REPO}"
}
}
]
```
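A rough sketch of how the two documented placeholders might be filled in at runtime (the function and variable names here are illustrative, not the CLI's internals):

```typescript
// Hypothetical substitution of the documented template placeholders.
interface RuntimeVars {
  isGitRepo: boolean;
  sandbox: string; // e.g. "sandbox-exec", "docker", or ""
}

function renderSystemPrompt(template: string, vars: RuntimeVars): string {
  return template
    .replaceAll('{RUNTIME_VARS_IS_GIT_REPO}', String(vars.isGitRepo))
    .replaceAll('{RUNTIME_VARS_SANDBOX}', vars.sandbox);
}

// Example: rendering the "gpt-4" mapping from the example above.
console.log(
  renderSystemPrompt(
    'You are a helpful AI assistant specialized in coding tasks. Current sandbox: {RUNTIME_VARS_SANDBOX}',
    { isGitRepo: true, sandbox: 'docker' },
  ),
);
```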
### Example `settings.json`:
@@ -267,11 +274,22 @@ In addition to a project settings file, a project's `.gemini` directory can cont
"hideTips": false,
"hideBanner": false,
"maxSessionTurns": 10,
"summarizeToolOutput": {
"run_shell_command": {
"tokenBudget": 100
"enableOpenAILogging": true,
"systemPromptMappings": [
{
"baseUrl": "dashscope",
"modelNames": ["qwen3"],
"template": "SYSTEM_TEMPLATE:{\"name\":\"qwen3_coder\",\"params\":{\"VARS_IS_GIT_REPO\":{VARS_IS_GIT_REPO},\"sandbox\":\"{sandbox}\"}}"
},
{
"modelNames": ["gpt-4"],
"template": "You are a helpful AI assistant specialized in coding tasks. Current sandbox: {sandbox}"
},
{
"baseUrl": "api.openai.com",
"template": "You are an AI coding assistant. Working in git repository: {VARS_IS_GIT_REPO}"
}
}
]
}
```
@@ -349,11 +367,6 @@ Arguments passed directly when running the CLI can override other configurations
- Example: `npm start -- --model gemini-1.5-pro-latest`
- **`--prompt <your_prompt>`** (**`-p <your_prompt>`**):
- Used to pass a prompt directly to the command. This invokes Gemini CLI in a non-interactive mode.
- **`--prompt-interactive <your_prompt>`** (**`-i <your_prompt>`**):
- Starts an interactive session with the provided prompt as the initial input.
- The prompt is processed within the interactive session, not before it.
- Cannot be used when piping input from stdin.
- Example: `gemini -i "explain this code"`
- **`--sandbox`** (**`-s`**):
- Enables sandbox mode for this session.
- **`--sandbox-image`**:
@@ -377,16 +390,13 @@ Arguments passed directly when running the CLI can override other configurations
- **`--telemetry-log-prompts`**:
- Enables logging of prompts for telemetry. See [telemetry](../telemetry.md) for more information.
- **`--checkpointing`**:
- Enables [checkpointing](../checkpointing.md).
- Enables [checkpointing](./commands.md#checkpointing-commands).
- **`--extensions <extension_name ...>`** (**`-e <extension_name ...>`**):
- Specifies a list of extensions to use for the session. If not provided, all available extensions are used.
- Use the special term `gemini -e none` to disable all extensions.
- Example: `gemini -e my-extension -e my-other-extension`
- **`--list-extensions`** (**`-l`**):
- Lists all available extensions and exits.
- **`--proxy`**:
- Sets the proxy for the CLI.
- Example: `--proxy http://localhost:7890`.
- **`--version`**:
- Displays the version of the CLI.
- **`--openai-logging`**:
@@ -435,13 +445,13 @@ This example demonstrates how you can provide general project context, specific
- **Hierarchical Loading and Precedence:** The CLI implements a sophisticated hierarchical memory system by loading context files (e.g., `GEMINI.md`) from several locations. Content from files lower in this list (more specific) typically overrides or supplements content from files higher up (more general). The exact concatenation order and final context can be inspected using the `/memory show` command. The typical loading order is:
1. **Global Context File:**
- Location: `~/.gemini/<contextFileName>` (e.g., `~/.gemini/GEMINI.md` in your user home directory).
- Location: `~/.qwen/<contextFileName>` (e.g., `~/.qwen/GEMINI.md` in your user home directory).
- Scope: Provides default instructions for all your projects.
2. **Project Root & Ancestors Context Files:**
- Location: The CLI searches for the configured context file in the current working directory and then in each parent directory up to either the project root (identified by a `.git` folder) or your home directory.
- Scope: Provides context relevant to the entire project or a significant portion of it.
3. **Sub-directory Context Files (Contextual/Local):**
- Location: The CLI also scans for the configured context file in subdirectories _below_ the current working directory (respecting common ignore patterns like `node_modules`, `.git`, etc.). The breadth of this search is limited to 200 directories by default, but can be configured with a `memoryDiscoveryMaxDirs` field in your `settings.json` file.
- Location: The CLI also scans for the configured context file in subdirectories _below_ the current working directory (respecting common ignore patterns like `node_modules`, `.git`, etc.).
- Scope: Allows for highly specific instructions relevant to a particular component, module, or subsection of your project.
- **Concatenation & UI Indication:** The contents of all found context files are concatenated (with separators indicating their origin and path) and provided as part of the system prompt to the Gemini model. The CLI footer displays the count of loaded context files, giving you a quick visual cue about the active instructional context.
- **Commands for Memory Management:**
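Returning to the hierarchical loading described above, here is a simplified sketch of the ancestor walk (illustrative only; it ignores sub-directory scanning, ignore patterns, and the `memoryDiscoveryMaxDirs` limit):

```typescript
import * as fs from 'node:fs';
import * as os from 'node:os';
import * as path from 'node:path';

// Sketch: collect context files from the global location and from the current
// working directory up to the project root (a directory containing .git) or home.
function collectContextFiles(cwd: string, contextFileName = 'GEMINI.md'): string[] {
  const found: string[] = [];
  const globalFile = path.join(os.homedir(), '.gemini', contextFileName);
  if (fs.existsSync(globalFile)) found.push(globalFile);

  const ancestors: string[] = [];
  let dir = cwd;
  while (true) {
    ancestors.push(dir);
    const isProjectRoot = fs.existsSync(path.join(dir, '.git'));
    const parent = path.dirname(dir);
    if (isProjectRoot || dir === os.homedir() || parent === dir) break;
    dir = parent;
  }
  // More general files first, so more specific ones can override them.
  for (const d of ancestors.reverse()) {
    const file = path.join(d, contextFileName);
    if (fs.existsSync(file)) found.push(file);
  }
  return found;
}

console.log(collectContextFiles(process.cwd()));
```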
@@ -463,7 +473,7 @@ Sandboxing is disabled by default, but you can enable it in a few ways:
By default, it uses a pre-built `gemini-cli-sandbox` Docker image.
For project-specific sandboxing needs, you can create a custom Dockerfile at `.gemini/sandbox.Dockerfile` in your project's root directory. This Dockerfile can be based on the base sandbox image:
For project-specific sandboxing needs, you can create a custom Dockerfile at `.qwen/sandbox.Dockerfile` in your project's root directory. This Dockerfile can be based on the base sandbox image:
```dockerfile
FROM gemini-cli-sandbox
@@ -474,7 +484,7 @@ FROM gemini-cli-sandbox
# COPY ./my-config /app/my-config
```
When `.gemini/sandbox.Dockerfile` exists, you can set the `BUILD_SANDBOX` environment variable when running Gemini CLI to automatically build the custom sandbox image:
When `.qwen/sandbox.Dockerfile` exists, you can set the `BUILD_SANDBOX` environment variable when running Gemini CLI to automatically build the custom sandbox image:
```bash
BUILD_SANDBOX=1 gemini -s

View File

@@ -32,84 +32,6 @@ Gemini CLI comes with a selection of pre-defined themes, which you can list usin
Selected themes are saved in Gemini CLI's [configuration](./configuration.md) so your preference is remembered across sessions.
---
## Custom Color Themes
Gemini CLI allows you to create your own custom color themes by specifying them in your `settings.json` file. This gives you full control over the color palette used in the CLI.
### How to Define a Custom Theme
Add a `customThemes` block to your user, project, or system `settings.json` file. Each custom theme is defined as an object with a unique name and a set of color keys. For example:
```json
{
"customThemes": {
"MyCustomTheme": {
"name": "MyCustomTheme",
"type": "custom",
"Background": "#181818",
"Foreground": "#F8F8F2",
"LightBlue": "#82AAFF",
"AccentBlue": "#61AFEF",
"AccentPurple": "#C678DD",
"AccentCyan": "#56B6C2",
"AccentGreen": "#98C379",
"AccentYellow": "#E5C07B",
"AccentRed": "#E06C75",
"Comment": "#5C6370",
"Gray": "#ABB2BF"
}
}
}
```
**Color keys:**
- `Background`
- `Foreground`
- `LightBlue`
- `AccentBlue`
- `AccentPurple`
- `AccentCyan`
- `AccentGreen`
- `AccentYellow`
- `AccentRed`
- `Comment`
- `Gray`
**Required Properties:**
- `name` (must match the key in the `customThemes` object and be a string)
- `type` (must be the string `"custom"`)
- `Background`
- `Foreground`
- `LightBlue`
- `AccentBlue`
- `AccentPurple`
- `AccentCyan`
- `AccentGreen`
- `AccentYellow`
- `AccentRed`
- `Comment`
- `Gray`
You can use either hex codes (e.g., `#FF0000`) **or** standard CSS color names (e.g., `coral`, `teal`, `blue`) for any color value. See [CSS color names](https://developer.mozilla.org/en-US/docs/Web/CSS/color_value#color_keywords) for a full list of supported names.
You can define multiple custom themes by adding more entries to the `customThemes` object.
### Example Custom Theme
<img src="../assets/theme-custom.png" alt="Custom theme example" width="600" />
### Using Your Custom Theme
- Select your custom theme using the `/theme` command in Gemini CLI. Your custom theme will appear in the theme selection dialog.
- Or, set it as the default by adding `"theme": "MyCustomTheme"` to your `settings.json`.
- Custom themes can be set at the user, project, or system level, and follow the same [configuration precedence](./configuration.md) as other settings.
---
## Dark Themes
### ANSI

View File

@@ -8,7 +8,7 @@ The Gemini CLI core (`packages/core`) features a robust system for defining, reg
- `name`: A unique internal name (used in API calls to Gemini).
- `displayName`: A user-friendly name.
- `description`: A clear explanation of what the tool does, which is provided to the Gemini model.
- `parameterSchema`: A JSON schema defining the parameters that the tool accepts. This is crucial for the Gemini model to understand how to call the tool correctly.
- `parameterSchema`: A JSON schema defining the parameters the tool accepts. This is crucial for the Gemini model to understand how to call the tool correctly.
- `validateToolParams()`: A method to validate incoming parameters.
- `getDescription()`: A method to provide a human-readable description of what the tool will do with specific parameters before execution.
- `shouldConfirmExecute()`: A method to determine if user confirmation is required before execution (e.g., for potentially destructive operations).
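Piecing those members together, a tool definition along these lines might look like the following sketch. The concrete class shape and signatures are assumptions for illustration; only the member names listed above come from this document.

```typescript
// Illustrative only: the exact base class and signatures in packages/core may differ.
interface ReadFileParams {
  absolute_path: string;
}

class ReadFileTool {
  name = 'read_file';                 // internal name used in API calls
  displayName = 'Read File';          // user-friendly name
  description = 'Reads a file from the local filesystem and returns its text.';
  parameterSchema = {
    type: 'object',
    properties: { absolute_path: { type: 'string' } },
    required: ['absolute_path'],
  };

  validateToolParams(params: ReadFileParams): string | null {
    return params.absolute_path.startsWith('/') ? null : 'Path must be absolute.';
  }

  getDescription(params: ReadFileParams): string {
    return `Will read ${params.absolute_path}`;
  }

  async shouldConfirmExecute(_params: ReadFileParams): Promise<boolean> {
    return false; // read-only, so no confirmation needed
  }
}
```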

View File

@@ -6,14 +6,14 @@ Gemini CLI supports extensions that can be used to configure and extend its func
On startup, Gemini CLI looks for extensions in two locations:
1. `<workspace>/.gemini/extensions`
2. `<home>/.gemini/extensions`
1. `<workspace>/.qwen/extensions`
2. `<home>/.qwen/extensions`
Gemini CLI loads all extensions from both locations. If an extension with the same name exists in both locations, the extension in the workspace directory takes precedence.
Within each location, individual extensions exist as a directory that contains a `gemini-extension.json` file. For example:
`<workspace>/.gemini/extensions/my-extension/gemini-extension.json`
`<workspace>/.qwen/extensions/my-extension/gemini-extension.json`
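A minimal sketch of that two-location lookup with workspace precedence (shown with the `.gemini` paths; substitute `.qwen` for the renamed layout — everything beyond the directory and manifest names is illustrative):

```typescript
import * as fs from 'node:fs';
import * as os from 'node:os';
import * as path from 'node:path';

// Sketch: list extension directories, letting workspace extensions shadow
// home-directory extensions that share the same name.
function discoverExtensionDirs(workspaceRoot: string): Map<string, string> {
  const extensions = new Map<string, string>();
  const roots = [
    path.join(os.homedir(), '.gemini', 'extensions'),  // loaded first
    path.join(workspaceRoot, '.gemini', 'extensions'), // overrides on a name clash
  ];
  for (const root of roots) {
    if (!fs.existsSync(root)) continue;
    for (const entry of fs.readdirSync(root, { withFileTypes: true })) {
      if (!entry.isDirectory()) continue;
      const manifest = path.join(root, entry.name, 'gemini-extension.json');
      if (fs.existsSync(manifest)) extensions.set(entry.name, manifest);
    }
  }
  return extensions;
}

console.log(discoverExtensionDirs(process.cwd()));
```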
### `gemini-extension.json`

View File

@@ -132,7 +132,7 @@ This structure makes it easy to locate the artifacts for a specific test run, fi
## Continuous integration
To ensure the integration tests are always run, a GitHub Actions workflow is defined in `.github/workflows/e2e.yml`. This workflow automatically runs the integration tests for pull requests against the `main` branch, or when a pull request is added to a merge queue.
To ensure the integration tests are always run, a GitHub Actions workflow is defined in `.github/workflows/e2e.yml`. This workflow automatically runs the integration tests on every pull request and push to the `main` branch.
The workflow runs the tests in different sandboxing environments to ensure Gemini CLI is tested across each:

View File

@@ -8,7 +8,7 @@ Gemini CLI's telemetry system is built on the **[OpenTelemetry] (OTEL)** standar
## Enabling telemetry
You can enable telemetry in multiple ways. Configuration is primarily managed via the [`.gemini/settings.json` file](./cli/configuration.md) and environment variables, but CLI flags can override these settings for a specific session.
You can enable telemetry in multiple ways. Configuration is primarily managed via the [`.qwen/settings.json` file](./cli/configuration.md) and environment variables, but CLI flags can override these settings for a specific session.
### Order of precedence
@@ -19,14 +19,13 @@ The following lists the precedence for applying telemetry settings, with items l
- `--telemetry-target <local|gcp>`: Overrides `telemetry.target`.
- `--telemetry-otlp-endpoint <URL>`: Overrides `telemetry.otlpEndpoint`.
- `--telemetry-log-prompts` / `--no-telemetry-log-prompts`: Overrides `telemetry.logPrompts`.
- `--telemetry-outfile <path>`: Redirects telemetry output to a file. See [Exporting to a file](#exporting-to-a-file).
1. **Environment variables:**
- `OTEL_EXPORTER_OTLP_ENDPOINT`: Overrides `telemetry.otlpEndpoint`.
1. **Workspace settings file (`.gemini/settings.json`):** Values from the `telemetry` object in this project-specific file.
1. **Workspace settings file (`.qwen/settings.json`):** Values from the `telemetry` object in this project-specific file.
1. **User settings file (`~/.gemini/settings.json`):** Values from the `telemetry` object in this global user file.
1. **User settings file (`~/.qwen/settings.json`):** Values from the `telemetry` object in this global user file.
1. **Defaults:** applied if not set by any of the above.
- `telemetry.enabled`: `false`
@@ -39,7 +38,7 @@ The `--target` argument to this script _only_ overrides the `telemetry.target` f
### Example settings
The following code can be added to your workspace (`.gemini/settings.json`) or user (`~/.gemini/settings.json`) settings to enable telemetry and send the output to Google Cloud:
The following code can be added to your workspace (`.qwen/settings.json`) or user (`~/.qwen/settings.json`) settings to enable telemetry and send the output to Google Cloud:
```json
{
@@ -51,16 +50,6 @@ The following code can be added to your workspace (`.gemini/settings.json`) or u
}
```
### Exporting to a file
You can export all telemetry data to a file for local inspection.
To enable file export, use the `--telemetry-outfile` flag with a path to your desired output file. This must be run using `--telemetry-target=local`.
```bash
gemini --telemetry --telemetry-target=local --telemetry-outfile=/path/to/telemetry.log "your prompt"
```
## Running an OTEL Collector
An OTEL Collector is a service that receives, processes, and exports telemetry data.
@@ -72,7 +61,7 @@ Learn more about OTEL exporter standard configuration in [documentation][otel-co
### Local
Use the `npm run telemetry -- --target=local` command to automate the process of setting up a local telemetry pipeline, including configuring the necessary settings in your `.gemini/settings.json` file. The underlying script installs `otelcol-contrib` (the OpenTelemetry Collector) and `jaeger` (The Jaeger UI for viewing traces). To use it:
Use the `npm run telemetry -- --target=local` command to automate the process of setting up a local telemetry pipeline, including configuring the necessary settings in your `.qwen/settings.json` file. The underlying script installs `otelcol-contrib` (the OpenTelemetry Collector) and `jaeger` (The Jaeger UI for viewing traces). To use it:
1. **Run the command**:
Execute the command from the root of the repository:
@@ -92,14 +81,14 @@ Use the `npm run telemetry -- --target=local` command to automate the process of
Open your web browser and navigate to **http://localhost:16686** to access the Jaeger UI. Here you can inspect detailed traces of Gemini CLI operations.
1. **Inspect logs and metrics**:
The script redirects the OTEL collector output (which includes logs and metrics) to `~/.gemini/tmp/<projectHash>/otel/collector.log`. The script will provide links to view and a command to tail your telemetry data (traces, metrics, logs) locally.
The script redirects the OTEL collector output (which includes logs and metrics) to `~/.qwen/tmp/<projectHash>/otel/collector.log`. The script will provide links to view and a command to tail your telemetry data (traces, metrics, logs) locally.
1. **Stop the services**:
Press `Ctrl+C` in the terminal where the script is running to stop the OTEL Collector and Jaeger services.
### Google Cloud
Use the `npm run telemetry -- --target=gcp` command to automate setting up a local OpenTelemetry collector that forwards data to your Google Cloud project, including configuring the necessary settings in your `.gemini/settings.json` file. The underlying script installs `otelcol-contrib`. To use it:
Use the `npm run telemetry -- --target=gcp` command to automate setting up a local OpenTelemetry collector that forwards data to your Google Cloud project, including configuring the necessary settings in your `.qwen/settings.json` file. The underlying script installs `otelcol-contrib`. To use it:
1. **Prerequisites**:
- Have a Google Cloud project ID.
@@ -120,7 +109,7 @@ Use the `npm run telemetry -- --target=gcp` command to automate setting up a loc
The script will:
- Download the `otelcol-contrib` binary if needed.
- Start an OTEL collector configured to receive data from Gemini CLI and export it to your specified Google Cloud project.
- Automatically enable telemetry and disable sandbox mode in your workspace settings (`.gemini/settings.json`).
- Automatically enable telemetry and disable sandbox mode in your workspace settings (`.qwen/settings.json`).
- Provide direct links to view traces, metrics, and logs in your Google Cloud Console.
- On exit (Ctrl+C), it will attempt to restore your original telemetry and sandbox settings.
@@ -131,7 +120,7 @@ Use the `npm run telemetry -- --target=gcp` command to automate setting up a loc
Use the links provided by the script to navigate to the Google Cloud Console and view your traces, metrics, and logs.
1. **Inspect local collector logs**:
The script redirects the local OTEL collector output to `~/.gemini/tmp/<projectHash>/otel/collector-gcp.log`. The script provides links to view and a command to tail your collector logs locally.
The script redirects the local OTEL collector output to `~/.qwen/tmp/<projectHash>/otel/collector-gcp.log`. The script provides links to view and a command to tail your collector logs locally.
1. **Stop the service**:
Press `Ctrl+C` in the terminal where the script is running to stop the OTEL Collector.

View File

@@ -90,7 +90,7 @@ The Gemini CLI provides a comprehensive suite of tools for interacting with the
- `path` (string, optional): The absolute path to the directory to search within. Defaults to the current working directory.
- `include` (string, optional): A glob pattern to filter which files are searched (e.g., `"*.js"`, `"src/**/*.{ts,tsx}"`). If omitted, searches most files (respecting common ignores).
- **Behavior:**
- Uses `git grep` if available in a Git repository for speed; otherwise, falls back to system `grep` or a JavaScript-based search.
- Uses `git grep` if available in a Git repository for speed, otherwise falls back to system `grep` or a JavaScript-based search.
- Returns a list of matching lines, each prefixed with its file path (relative to the search directory) and line number.
- **Output (`llmContent`):** A formatted string of matches, e.g.:
```

View File

@@ -51,7 +51,7 @@ The Gemini CLI uses the `mcpServers` configuration in your `settings.json` file
### Configure the MCP server in settings.json
You can configure MCP servers at the global level in the `~/.gemini/settings.json` file or in your project's root directory, create or open the `.gemini/settings.json` file. Within the file, add the `mcpServers` configuration block.
You can configure MCP servers at the global level in the `~/.qwen/settings.json` file or in your project's root directory, create or open the `.qwen/settings.json` file. Within the file, add the `mcpServers` configuration block.
### Configuration Structure
@@ -92,114 +92,6 @@ Each server configuration supports the following properties:
- **`cwd`** (string): Working directory for Stdio transport
- **`timeout`** (number): Request timeout in milliseconds (default: 600,000ms = 10 minutes)
- **`trust`** (boolean): When `true`, bypasses all tool call confirmations for this server (default: `false`)
- **`includeTools`** (string[]): List of tool names to include from this MCP server. When specified, only the tools listed here will be available from this server (whitelist behavior). If not specified, all tools from the server are enabled by default.
- **`excludeTools`** (string[]): List of tool names to exclude from this MCP server. Tools listed here will not be available to the model, even if they are exposed by the server. **Note:** `excludeTools` takes precedence over `includeTools` - if a tool is in both lists, it will be excluded.
### OAuth Support for Remote MCP Servers
The Gemini CLI supports OAuth 2.0 authentication for remote MCP servers using SSE or HTTP transports. This enables secure access to MCP servers that require authentication.
#### Automatic OAuth Discovery
For servers that support OAuth discovery, you can omit the OAuth configuration and let the CLI discover it automatically:
```json
{
"mcpServers": {
"discoveredServer": {
"url": "https://api.example.com/sse"
}
}
}
```
The CLI will automatically:
- Detect when a server requires OAuth authentication (401 responses)
- Discover OAuth endpoints from server metadata
- Perform dynamic client registration if supported
- Handle the OAuth flow and token management
#### Authentication Flow
When connecting to an OAuth-enabled server:
1. **Initial connection attempt** fails with 401 Unauthorized
2. **OAuth discovery** finds authorization and token endpoints
3. **Browser opens** for user authentication (requires local browser access)
4. **Authorization code** is exchanged for access tokens
5. **Tokens are stored** securely for future use
6. **Connection retry** succeeds with valid tokens
#### Browser Redirect Requirements
**Important:** OAuth authentication requires that your local machine can:
- Open a web browser for authentication
- Receive redirects on `http://localhost:7777/oauth/callback`
This feature will not work in:
- Headless environments without browser access
- Remote SSH sessions without X11 forwarding
- Containerized environments without browser support
#### Managing OAuth Authentication
Use the `/mcp auth` command to manage OAuth authentication:
```bash
# List servers requiring authentication
/mcp auth
# Authenticate with a specific server
/mcp auth serverName
# Re-authenticate if tokens expire
/mcp auth serverName
```
#### OAuth Configuration Properties
- **`enabled`** (boolean): Enable OAuth for this server
- **`clientId`** (string): OAuth client identifier (optional with dynamic registration)
- **`clientSecret`** (string): OAuth client secret (optional for public clients)
- **`authorizationUrl`** (string): OAuth authorization endpoint (auto-discovered if omitted)
- **`tokenUrl`** (string): OAuth token endpoint (auto-discovered if omitted)
- **`scopes`** (string[]): Required OAuth scopes
- **`redirectUri`** (string): Custom redirect URI (defaults to `http://localhost:7777/oauth/callback`)
- **`tokenParamName`** (string): Query parameter name for tokens in SSE URLs
#### Token Management
OAuth tokens are automatically:
- **Stored securely** in `~/.gemini/mcp-oauth-tokens.json`
- **Refreshed** when expired (if refresh tokens are available)
- **Validated** before each connection attempt
- **Cleaned up** when invalid or expired
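As a rough sketch of the refresh-before-use behaviour described here (the token shape and refresh call are assumptions; only the storage location above comes from the text):

```typescript
// Illustrative token cache check; not the CLI's actual storage format.
interface StoredToken {
  accessToken: string;
  refreshToken?: string;
  expiresAt: number; // epoch milliseconds
}

async function getUsableToken(
  stored: StoredToken,
  refresh: (refreshToken: string) => Promise<StoredToken>,
): Promise<StoredToken | null> {
  const stillValid = Date.now() < stored.expiresAt - 60_000; // 1 min safety margin
  if (stillValid) return stored;
  if (stored.refreshToken) return refresh(stored.refreshToken); // refresh when expired
  return null; // expired and not refreshable: re-run /mcp auth
}
```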
#### Authentication Provider Type
You can specify the authentication provider type using the `authProviderType` property:
- **`authProviderType`** (string): Specifies the authentication provider. Can be one of the following:
- **`dynamic_discovery`** (default): The CLI will automatically discover the OAuth configuration from the server.
- **`google_credentials`**: The CLI will use the Google Application Default Credentials (ADC) to authenticate with the server. When using this provider, you must specify the required scopes.
```json
{
"mcpServers": {
"googleCloudServer": {
"httpUrl": "https://my-gcp-service.run.app/mcp",
"authProviderType": "google_credentials",
"oauth": {
"scopes": ["https://www.googleapis.com/auth/userinfo.email"]
}
}
}
}
```
### Example Configurations
@@ -293,22 +185,6 @@ You can specify the authentication provider type using the `authProviderType` pr
}
```
#### MCP Server with Tool Filtering
```json
{
"mcpServers": {
"filteredServer": {
"command": "python",
"args": ["-m", "my_mcp_server"],
"includeTools": ["safe_tool", "file_reader", "data_processor"],
// "excludeTools": ["dangerous_tool", "file_deleter"],
"timeout": 30000
}
}
}
```
## Discovery Process Deep Dive
When the Gemini CLI starts, it performs MCP server discovery through the following detailed process:
@@ -331,8 +207,7 @@ Upon successful connection:
1. **Tool listing:** The client calls the MCP server's tool listing endpoint
2. **Schema validation:** Each tool's function declaration is validated
3. **Tool filtering:** Tools are filtered based on `includeTools` and `excludeTools` configuration
4. **Name sanitization:** Tool names are cleaned to meet Gemini API requirements:
3. **Name sanitization:** Tool names are cleaned to meet Gemini API requirements:
- Invalid characters (non-alphanumeric, underscore, dot, hyphen) are replaced with underscores
- Names longer than 63 characters are truncated with middle replacement (`___`)

View File

@@ -14,7 +14,7 @@ Use `save_memory` to save and recall information across your Gemini CLI sessions
## How to use `save_memory` with the Gemini CLI
The tool appends the provided `fact` to a special `GEMINI.md` file located in the user's home directory (`~/.gemini/GEMINI.md`). This file can be configured to have a different name.
The tool appends the provided `fact` to a special `GEMINI.md` file located in the user's home directory (`~/.qwen/GEMINI.md`). This file can be configured to have a different name.
Once added, the facts are stored under a `## Gemini Added Memories` section. This file is loaded as context in subsequent sessions, allowing the CLI to recall the saved information.
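A simplified sketch of that append behaviour, using the `~/.gemini` path (the renamed layout uses `~/.qwen`); the file path and section heading come from the text above, everything else is illustrative:

```typescript
import * as fs from 'node:fs';
import * as os from 'node:os';
import * as path from 'node:path';

// Sketch: ensure the memories section exists, then append the fact.
// Simplified: the fact is appended at the end of the file.
function saveMemory(fact: string): void {
  const memoryFile = path.join(os.homedir(), '.gemini', 'GEMINI.md');
  const heading = '## Gemini Added Memories';
  const existing = fs.existsSync(memoryFile) ? fs.readFileSync(memoryFile, 'utf8') : '';
  const withHeading = existing.includes(heading) ? existing : `${existing}\n${heading}\n`;
  fs.mkdirSync(path.dirname(memoryFile), { recursive: true });
  fs.writeFileSync(memoryFile, `${withHeading}- ${fact}\n`);
}

saveMemory('My preferred language is TypeScript.');
```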

View File

@@ -60,10 +60,6 @@ run_shell_command(command="npm run dev &", description="Start development server
- **Error handling:** Check the `Stderr`, `Error`, and `Exit Code` fields to determine if a command executed successfully.
- **Background processes:** When a command is run in the background with `&`, the tool will return immediately and the process will continue to run in the background. The `Background PIDs` field will contain the process ID of the background process.
## Environment Variables
When `run_shell_command` executes a command, it sets the `GEMINI_CLI=1` environment variable in the subprocess's environment. This allows scripts or tools to detect if they are being run from within the Gemini CLI.
## Command Restrictions
You can restrict the commands that can be executed by the `run_shell_command` tool by using the `coreTools` and `excludeTools` settings in your configuration file.
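The configuration documentation above notes that these restrictions rely on simple string matching. A rough sketch of what prefix-style allow/deny matching could look like (the settings entry format itself is not shown here; this only illustrates the matching idea):

```typescript
// Illustrative prefix matching for shell command restrictions; the real
// settings syntax and matching rules may differ.
function isCommandAllowed(
  command: string,
  allowedPrefixes: string[], // e.g. derived from coreTools entries
  blockedPrefixes: string[], // e.g. derived from excludeTools entries
): boolean {
  const matches = (prefixes: string[]) =>
    prefixes.some((p) => command === p || command.startsWith(p + ' '));
  if (matches(blockedPrefixes)) return false;    // deny wins
  if (allowedPrefixes.length === 0) return true; // no allow-list: permit everything
  return matches(allowedPrefixes);
}

// Example: only `git ...` commands allowed, `rm ...` always blocked.
console.log(isCommandAllowed('git status', ['git'], ['rm']));    // true
console.log(isCommandAllowed('rm -rf /tmp/x', ['git'], ['rm'])); // false
```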

View File

@@ -19,7 +19,7 @@ This guide provides solutions to common issues and debugging tips.
- A: If installed globally via npm, update Gemini CLI using the command `npm install -g @google/gemini-cli@latest`. If run from source, pull the latest changes from the repository and rebuild using `npm run build`.
- **Q: Where are Gemini CLI configuration files stored?**
- A: The CLI configuration is stored within two `settings.json` files: one in your home directory and one in your project's root directory. In both locations, `settings.json` is found in the `.gemini/` folder. Refer to [CLI Configuration](./cli/configuration.md) for more details.
- A: The CLI configuration is stored within two `settings.json` files: one in your home directory and one in your project's root directory. In both locations, `settings.json` is found in the `.qwen/` folder. Refer to [CLI Configuration](./cli/configuration.md) for more details.
- **Q: Why don't I see cached token counts in my stats output?**
- A: Cached token information is only displayed when cached tokens are being used. This feature is available for API key users (Gemini API key or Vertex AI) but not for OAuth users (Google Personal/Enterprise accounts) at this time, as the Code Assist API does not support cached content creation. You can still view your total token usage with the `/stats` command.
@@ -27,7 +27,7 @@ This guide provides solutions to common issues and debugging tips.
## Common error messages and solutions
- **Error: `EADDRINUSE` (Address already in use) when starting an MCP server.**
- **Cause:** Another process is already using the port that the MCP server is trying to bind to.
- **Cause:** Another process is already using the port the MCP server is trying to bind to.
- **Solution:**
Either stop the other process that is using the port or configure the MCP server to use a different port.

View File

@@ -21,18 +21,11 @@ esbuild
outfile: 'bundle/gemini.js',
platform: 'node',
format: 'esm',
external: [],
alias: {
'is-in-ci': path.resolve(
__dirname,
'packages/cli/src/patches/is-in-ci.ts',
),
},
define: {
'process.env.CLI_VERSION': JSON.stringify(pkg.version),
},
banner: {
js: `import { createRequire } from 'module'; const require = createRequire(import.meta.url); globalThis.__filename = require('url').fileURLToPath(import.meta.url); globalThis.__dirname = require('path').dirname(globalThis.__filename);`,
js: `import { createRequire as _gcliCreateRequire } from 'module'; const require = _gcliCreateRequire(import.meta.url); globalThis.__filename = require('url').fileURLToPath(import.meta.url); globalThis.__dirname = require('path').dirname(globalThis.__filename);`,
},
})
.catch(() => process.exit(1));

View File

@@ -12,6 +12,7 @@ import prettierConfig from 'eslint-config-prettier';
import importPlugin from 'eslint-plugin-import';
import globals from 'globals';
import licenseHeader from 'eslint-plugin-license-header';
import noRelativeCrossPackageImports from './eslint-rules/no-relative-cross-package-imports.js';
import path from 'node:path'; // Use node: prefix for built-ins
import url from 'node:url';
@@ -33,6 +34,7 @@ export default tseslint.config(
'packages/core/dist/**',
'packages/server/dist/**',
'packages/vscode-ide-companion/dist/**',
'eslint-rules/*',
'bundle/**',
],
},
@@ -70,14 +72,6 @@ export default tseslint.config(
{
// General overrides and rules for the project (TS/TSX files)
files: ['packages/*/src/**/*.{ts,tsx}'], // Target only TS/TSX in the cli package
plugins: {
import: importPlugin,
},
settings: {
'import/resolver': {
node: true,
},
},
languageOptions: {
globals: {
...globals.node,
@@ -112,13 +106,6 @@ export default tseslint.config(
caughtErrorsIgnorePattern: '^_',
},
],
'import/no-internal-modules': [
'error',
{
allow: ['react-dom/test-utils', 'memfs/lib/volume.js', 'yargs/**'],
},
],
'import/no-relative-packages': 'error',
'no-cond-assign': 'error',
'no-debugger': 'error',
'no-duplicate-case': 'error',
@@ -226,4 +213,24 @@ export default tseslint.config(
],
},
},
// Custom eslint rules for this repo
{
files: ['packages/**/*.{js,jsx,ts,tsx}'],
plugins: {
custom: {
rules: {
'no-relative-cross-package-imports': noRelativeCrossPackageImports,
},
},
},
rules: {
// Enable and configure your custom rule
'custom/no-relative-cross-package-imports': [
'error',
{
root: path.join(projectRoot, 'packages'),
},
],
},
},
);

View File

@@ -61,7 +61,6 @@ async function main() {
console.log(`\tFound test file: ${testFileName}`);
}
const MAX_RETRIES = 3;
let allTestsPassed = true;
for (const testFile of testFiles) {
@@ -73,97 +72,63 @@ async function main() {
`------------- Running test file: ${testFileName} ------------------------------`,
);
let attempt = 0;
let testFilePassed = false;
let lastStdout = [];
let lastStderr = [];
const nodeArgs = ['--test'];
if (verbose) {
nodeArgs.push('--test-reporter=spec');
}
nodeArgs.push(testFile);
while (attempt < MAX_RETRIES && !testFilePassed) {
attempt++;
if (attempt > 1) {
console.log(
`--- Retrying ${testFileName} (attempt ${attempt} of ${MAX_RETRIES}) ---`,
);
}
const child = spawn('node', nodeArgs, {
stdio: 'pipe',
env: {
...process.env,
GEMINI_CLI_INTEGRATION_TEST: 'true',
INTEGRATION_TEST_FILE_DIR: testFileDir,
KEEP_OUTPUT: keepOutput.toString(),
VERBOSE: verbose.toString(),
TEST_FILE_NAME: testFileName,
},
});
const nodeArgs = ['--test'];
if (verbose) {
nodeArgs.push('--test-reporter=spec');
}
nodeArgs.push(testFile);
const child = spawn('node', nodeArgs, {
stdio: 'pipe',
env: {
...process.env,
GEMINI_CLI_INTEGRATION_TEST: 'true',
INTEGRATION_TEST_FILE_DIR: testFileDir,
KEEP_OUTPUT: keepOutput.toString(),
VERBOSE: verbose.toString(),
TEST_FILE_NAME: testFileName,
},
});
let outputStream;
if (keepOutput) {
const outputFile = join(testFileDir, `output-attempt-${attempt}.log`);
outputStream = createWriteStream(outputFile);
console.log(`Output for ${testFileName} written to: ${outputFile}`);
}
const stdout = [];
const stderr = [];
child.stdout.on('data', (data) => {
if (verbose) {
process.stdout.write(data);
} else {
stdout.push(data);
}
if (outputStream) {
outputStream.write(data);
}
});
child.stderr.on('data', (data) => {
if (verbose) {
process.stderr.write(data);
} else {
stderr.push(data);
}
if (outputStream) {
outputStream.write(data);
}
});
const exitCode = await new Promise((resolve) => {
child.on('close', (code) => {
if (outputStream) {
outputStream.end(() => {
resolve(code);
});
} else {
resolve(code);
}
});
});
if (exitCode === 0) {
testFilePassed = true;
} else {
lastStdout = stdout;
lastStderr = stderr;
}
let outputStream;
if (keepOutput) {
const outputFile = join(testFileDir, 'output.log');
outputStream = createWriteStream(outputFile);
console.log(`Output for ${testFileName} written to: ${outputFile}`);
}
if (!testFilePassed) {
console.error(
`Test file failed after ${MAX_RETRIES} attempts: ${testFileName}`,
);
if (!verbose) {
process.stdout.write(Buffer.concat(lastStdout).toString('utf8'));
process.stderr.write(Buffer.concat(lastStderr).toString('utf8'));
child.stdout.on('data', (data) => {
if (verbose) {
process.stdout.write(data);
}
if (outputStream) {
outputStream.write(data);
}
});
child.stderr.on('data', (data) => {
if (verbose) {
process.stderr.write(data);
}
if (outputStream) {
outputStream.write(data);
}
});
const exitCode = await new Promise((resolve) => {
child.on('close', (code) => {
if (outputStream) {
outputStream.end(() => {
resolve(code);
});
} else {
resolve(code);
}
});
});
if (exitCode !== 0) {
console.error(`Test file failed: ${testFileName}`);
allTestsPassed = false;
}
}

View File

@@ -54,7 +54,7 @@ describe('simple-mcp-server', () => {
console.error(`stderr: ${data}`);
});
// Wait for the server to be ready
return new Promise((resolve) => setTimeout(resolve, 2000));
return new Promise((resolve) => setTimeout(resolve, 500));
});
after(() => {

3027 package-lock.json (generated)

File diff suppressed because it is too large

View File

@@ -1,8 +1,8 @@
{
"name": "@qwen-code/qwen-code",
"version": "0.0.5-nightly.1",
"version": "0.0.1-alpha.12",
"engines": {
"node": ">=20.0.0"
"node": ">=20"
},
"type": "module",
"workspaces": [
@@ -10,10 +10,10 @@
],
"repository": {
"type": "git",
"url": "git+https://github.com/QwenLM/qwen-code.git"
"url": "git+http://gitlab.alibaba-inc.com/Qwen-Coder/qwen-code.git"
},
"config": {
"sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.0.5-nightly.1"
"sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.0.1-alpha.12"
},
"scripts": {
"start": "node scripts/start.js",
@@ -23,8 +23,7 @@
"auth": "npm run auth:npm && npm run auth:docker",
"generate": "node scripts/generate-git-commit-info.js",
"build": "node scripts/build.js",
"build:vscode": "node scripts/build_vscode_companion.js",
"build:all": "npm run build && npm run build:sandbox && npm run build:vscode",
"build:all": "npm run build && npm run build:sandbox",
"build:packages": "npm run build --workspaces",
"build:sandbox": "node scripts/build_sandbox.js --skip-npm-install-build",
"bundle": "npm run generate && node esbuild.config.js && node scripts/copy_bundle_assets.js",
@@ -60,9 +59,8 @@
"@types/micromatch": "^4.0.9",
"@types/mime-types": "^3.0.1",
"@types/minimatch": "^5.1.2",
"@types/mock-fs": "^4.13.4",
"@types/semver": "^7.7.0",
"@types/shell-quote": "^1.7.5",
"@types/uuid": "^10.0.0",
"@vitest/coverage-v8": "^3.1.1",
"concurrently": "^9.2.0",
"cross-env": "^7.0.3",
@@ -78,11 +76,13 @@
"json": "^11.0.0",
"lodash": "^4.17.21",
"memfs": "^4.17.2",
"mock-fs": "^5.5.0",
"prettier": "^3.5.3",
"react-devtools-core": "^4.28.5",
"typescript-eslint": "^8.30.1",
"vitest": "^3.2.4",
"yargs": "^17.7.2"
"yargs": "^18.0.0"
},
"dependencies": {
"@qwen-code/qwen-code": "^0.0.1-alpha.8"
}
}

View File

@@ -1,10 +1,10 @@
{
"name": "@qwen-code/qwen-code",
"version": "0.0.5-nightly.1",
"description": "Qwen Code",
"version": "0.0.1-alpha.12",
"description": "Gemini CLI",
"repository": {
"type": "git",
"url": "git+https://github.com/QwenLM/qwen-code.git"
"url": "git+http://gitlab.alibaba-inc.com/Qwen-Coder/qwen-code.git"
},
"type": "module",
"main": "dist/index.js",
@@ -25,16 +25,15 @@
"dist"
],
"config": {
"sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.0.5-nightly.1"
"sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.0.1-alpha.12"
},
"dependencies": {
"@qwen-code/qwen-code-core": "file:../core",
"@google/genai": "1.9.0",
"@iarna/toml": "^2.2.5",
"@types/update-notifier": "^6.0.8",
"command-exists": "^1.2.9",
"diff": "^7.0.0",
"dotenv": "^17.1.0",
"gaxios": "^7.1.1",
"glob": "^10.4.1",
"highlight.js": "^11.11.1",
"ink": "^6.0.1",
@@ -53,9 +52,7 @@
"strip-ansi": "^7.1.0",
"strip-json-comments": "^3.1.1",
"update-notifier": "^7.3.1",
"yargs": "^17.7.2",
"zod": "^3.23.8",
"tiktoken": "^1.0.21"
"yargs": "^18.0.0"
},
"devDependencies": {
"@babel/runtime": "^7.27.6",

View File

@@ -1,464 +0,0 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
/* ACP defines a schema for a simple (experimental) JSON-RPC protocol that allows GUI applications to interact with agents. */
import { Icon } from '@qwen-code/qwen-code-core';
import { WritableStream, ReadableStream } from 'node:stream/web';
export class ClientConnection implements Client {
#connection: Connection<Agent>;
constructor(
agent: (client: Client) => Agent,
input: WritableStream<Uint8Array>,
output: ReadableStream<Uint8Array>,
) {
this.#connection = new Connection(agent(this), input, output);
}
/**
* Streams part of an assistant response to the client
*/
async streamAssistantMessageChunk(
params: StreamAssistantMessageChunkParams,
): Promise<void> {
await this.#connection.sendRequest('streamAssistantMessageChunk', params);
}
/**
* Request confirmation before running a tool
*
* When allowed, the client returns a [`ToolCallId`] which can be used
* to update the tool call's `status` and `content` as it runs.
*/
requestToolCallConfirmation(
params: RequestToolCallConfirmationParams,
): Promise<RequestToolCallConfirmationResponse> {
return this.#connection.sendRequest('requestToolCallConfirmation', params);
}
/**
* pushToolCall allows the agent to start a tool call
* when it does not need to request permission to do so.
*
* The returned id can be used to update the UI for the tool
* call as needed.
*/
pushToolCall(params: PushToolCallParams): Promise<PushToolCallResponse> {
return this.#connection.sendRequest('pushToolCall', params);
}
/**
* updateToolCall allows the agent to update the content and status of the tool call.
*
* The new content replaces what is currently displayed in the UI.
*
* The [`ToolCallId`] is included in the response of
* `pushToolCall` or `requestToolCallConfirmation` respectively.
*/
async updateToolCall(params: UpdateToolCallParams): Promise<void> {
await this.#connection.sendRequest('updateToolCall', params);
}
}
type AnyMessage = AnyRequest | AnyResponse;
type AnyRequest = {
id: number;
method: string;
params?: unknown;
};
type AnyResponse = { jsonrpc: '2.0'; id: number } & Result<unknown>;
type Result<T> =
| {
result: T;
}
| {
error: ErrorResponse;
};
type ErrorResponse = {
code: number;
message: string;
data?: { details?: string };
};
type PendingResponse = {
resolve: (response: unknown) => void;
reject: (error: ErrorResponse) => void;
};
class Connection<D> {
#pendingResponses: Map<number, PendingResponse> = new Map();
#nextRequestId: number = 0;
#delegate: D;
#peerInput: WritableStream<Uint8Array>;
#writeQueue: Promise<void> = Promise.resolve();
#textEncoder: TextEncoder;
constructor(
delegate: D,
peerInput: WritableStream<Uint8Array>,
peerOutput: ReadableStream<Uint8Array>,
) {
this.#peerInput = peerInput;
this.#textEncoder = new TextEncoder();
this.#delegate = delegate;
this.#receive(peerOutput);
}
async #receive(output: ReadableStream<Uint8Array>) {
let content = '';
const decoder = new TextDecoder();
for await (const chunk of output) {
content += decoder.decode(chunk, { stream: true });
const lines = content.split('\n');
content = lines.pop() || '';
for (const line of lines) {
const trimmedLine = line.trim();
if (trimmedLine) {
const message = JSON.parse(trimmedLine);
this.#processMessage(message);
}
}
}
}
async #processMessage(message: AnyMessage) {
if ('method' in message) {
const response = await this.#tryCallDelegateMethod(
message.method,
message.params,
);
await this.#sendMessage({
jsonrpc: '2.0',
id: message.id,
...response,
});
} else {
this.#handleResponse(message);
}
}
async #tryCallDelegateMethod(
method: string,
params?: unknown,
): Promise<Result<unknown>> {
const methodName = method as keyof D;
if (typeof this.#delegate[methodName] !== 'function') {
return RequestError.methodNotFound(method).toResult();
}
try {
const result = await this.#delegate[methodName](params);
return { result: result ?? null };
} catch (error: unknown) {
if (error instanceof RequestError) {
return error.toResult();
}
let details;
if (error instanceof Error) {
details = error.message;
} else if (
typeof error === 'object' &&
error != null &&
'message' in error &&
typeof error.message === 'string'
) {
details = error.message;
}
return RequestError.internalError(details).toResult();
}
}
#handleResponse(response: AnyResponse) {
const pendingResponse = this.#pendingResponses.get(response.id);
if (pendingResponse) {
if ('result' in response) {
pendingResponse.resolve(response.result);
} else if ('error' in response) {
pendingResponse.reject(response.error);
}
this.#pendingResponses.delete(response.id);
}
}
async sendRequest<Req, Resp>(method: string, params?: Req): Promise<Resp> {
const id = this.#nextRequestId++;
const responsePromise = new Promise((resolve, reject) => {
this.#pendingResponses.set(id, { resolve, reject });
});
await this.#sendMessage({ jsonrpc: '2.0', id, method, params });
return responsePromise as Promise<Resp>;
}
async #sendMessage(json: AnyMessage) {
const content = JSON.stringify(json) + '\n';
this.#writeQueue = this.#writeQueue
.then(async () => {
const writer = this.#peerInput.getWriter();
try {
await writer.write(this.#textEncoder.encode(content));
} finally {
writer.releaseLock();
}
})
.catch((error) => {
// Continue processing writes on error
console.error('ACP write error:', error);
});
return this.#writeQueue;
}
}
export class RequestError extends Error {
data?: { details?: string };
constructor(
public code: number,
message: string,
details?: string,
) {
super(message);
this.name = 'RequestError';
if (details) {
this.data = { details };
}
}
static parseError(details?: string): RequestError {
return new RequestError(-32700, 'Parse error', details);
}
static invalidRequest(details?: string): RequestError {
return new RequestError(-32600, 'Invalid request', details);
}
static methodNotFound(details?: string): RequestError {
return new RequestError(-32601, 'Method not found', details);
}
static invalidParams(details?: string): RequestError {
return new RequestError(-32602, 'Invalid params', details);
}
static internalError(details?: string): RequestError {
return new RequestError(-32603, 'Internal error', details);
}
toResult<T>(): Result<T> {
return {
error: {
code: this.code,
message: this.message,
data: this.data,
},
};
}
}
// Protocol types
export const LATEST_PROTOCOL_VERSION = '0.0.9';
export type AssistantMessageChunk =
| {
text: string;
}
| {
thought: string;
};
export type ToolCallConfirmation =
| {
description?: string | null;
type: 'edit';
}
| {
description?: string | null;
type: 'execute';
command: string;
rootCommand: string;
}
| {
description?: string | null;
type: 'mcp';
serverName: string;
toolDisplayName: string;
toolName: string;
}
| {
description?: string | null;
type: 'fetch';
urls: string[];
}
| {
description: string;
type: 'other';
};
export type ToolCallContent =
| {
type: 'markdown';
markdown: string;
}
| {
type: 'diff';
newText: string;
oldText: string | null;
path: string;
};
export type ToolCallStatus = 'running' | 'finished' | 'error';
export type ToolCallId = number;
export type ToolCallConfirmationOutcome =
| 'allow'
| 'alwaysAllow'
| 'alwaysAllowMcpServer'
| 'alwaysAllowTool'
| 'reject'
| 'cancel';
/**
* A part in a user message
*/
export type UserMessageChunk =
| {
text: string;
}
| {
path: string;
};
export interface StreamAssistantMessageChunkParams {
chunk: AssistantMessageChunk;
}
export interface RequestToolCallConfirmationParams {
confirmation: ToolCallConfirmation;
content?: ToolCallContent | null;
icon: Icon;
label: string;
locations?: ToolCallLocation[];
}
export interface ToolCallLocation {
line?: number | null;
path: string;
}
export interface PushToolCallParams {
content?: ToolCallContent | null;
icon: Icon;
label: string;
locations?: ToolCallLocation[];
}
export interface UpdateToolCallParams {
content: ToolCallContent | null;
status: ToolCallStatus;
toolCallId: ToolCallId;
}
export interface RequestToolCallConfirmationResponse {
id: ToolCallId;
outcome: ToolCallConfirmationOutcome;
}
export interface PushToolCallResponse {
id: ToolCallId;
}
export interface InitializeParams {
/**
* The version of the protocol that the client supports.
* This should be the latest version supported by the client.
*/
protocolVersion: string;
}
export interface SendUserMessageParams {
chunks: UserMessageChunk[];
}
export interface InitializeResponse {
/**
* Indicates whether the agent is authenticated and
* ready to handle requests.
*/
isAuthenticated: boolean;
/**
* The version of the protocol that the agent supports.
* If the agent supports the requested version, it should respond with the same version.
* Otherwise, the agent should respond with the latest version it supports.
*/
protocolVersion: string;
}
export interface Error {
code: number;
data?: unknown;
message: string;
}
export interface Client {
streamAssistantMessageChunk(
params: StreamAssistantMessageChunkParams,
): Promise<void>;
requestToolCallConfirmation(
params: RequestToolCallConfirmationParams,
): Promise<RequestToolCallConfirmationResponse>;
pushToolCall(params: PushToolCallParams): Promise<PushToolCallResponse>;
updateToolCall(params: UpdateToolCallParams): Promise<void>;
}
export interface Agent {
/**
* Initializes the agent's state. It should be called before any other method,
* and no other methods should be called until it has completed.
*
* If the agent is not authenticated, then the client should prompt the user to authenticate,
* and then call the `authenticate` method.
* Otherwise the client can send other messages to the agent.
*/
initialize(params: InitializeParams): Promise<InitializeResponse>;
/**
* Begins the authentication process.
*
* This method should only be called if `initialize` indicates the user isn't already authenticated.
* The Promise MUST not resolve until authentication is complete.
*/
authenticate(): Promise<void>;
/**
* Allows the user to send a message to the agent.
* This method should complete after the agent is finished, during
* which time the agent may update the client by calling
* streamAssistantMessageChunk and other methods.
*/
sendUserMessage(params: SendUserMessageParams): Promise<void>;
/**
* Cancels the current generation.
*/
cancelSendMessage(): Promise<void>;
}

View File

@@ -1,674 +0,0 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { WritableStream, ReadableStream } from 'node:stream/web';
import {
AuthType,
Config,
GeminiChat,
ToolRegistry,
logToolCall,
ToolResult,
convertToFunctionResponse,
ToolCallConfirmationDetails,
ToolConfirmationOutcome,
clearCachedCredentialFile,
isNodeError,
getErrorMessage,
isWithinRoot,
getErrorStatus,
} from '@qwen-code/qwen-code-core';
import * as acp from './acp.js';
import { Agent } from './acp.js';
import { Readable, Writable } from 'node:stream';
import { Content, Part, FunctionCall, PartListUnion } from '@google/genai';
import { LoadedSettings, SettingScope } from '../config/settings.js';
import * as fs from 'fs/promises';
import * as path from 'path';
export async function runAcpPeer(config: Config, settings: LoadedSettings) {
const stdout = Writable.toWeb(process.stdout) as WritableStream;
const stdin = Readable.toWeb(process.stdin) as ReadableStream<Uint8Array>;
// Stdout is used to send messages to the client, so route console.log/console.info
// messages to stderr so that they don't interfere with ACP.
console.log = console.error;
console.info = console.error;
console.debug = console.error;
new acp.ClientConnection(
(client: acp.Client) => new GeminiAgent(config, settings, client),
stdout,
stdin,
);
}
class GeminiAgent implements Agent {
chat?: GeminiChat;
pendingSend?: AbortController;
constructor(
private config: Config,
private settings: LoadedSettings,
private client: acp.Client,
) {}
async initialize(_: acp.InitializeParams): Promise<acp.InitializeResponse> {
let isAuthenticated = false;
if (this.settings.merged.selectedAuthType) {
try {
await this.config.refreshAuth(this.settings.merged.selectedAuthType);
isAuthenticated = true;
} catch (error) {
console.error('Failed to refresh auth:', error);
}
}
return { protocolVersion: acp.LATEST_PROTOCOL_VERSION, isAuthenticated };
}
async authenticate(): Promise<void> {
await clearCachedCredentialFile();
await this.config.refreshAuth(AuthType.LOGIN_WITH_GOOGLE);
this.settings.setValue(
SettingScope.User,
'selectedAuthType',
AuthType.LOGIN_WITH_GOOGLE,
);
}
async cancelSendMessage(): Promise<void> {
if (!this.pendingSend) {
throw new Error('Not currently generating');
}
this.pendingSend.abort();
delete this.pendingSend;
}
async sendUserMessage(params: acp.SendUserMessageParams): Promise<void> {
this.pendingSend?.abort();
const pendingSend = new AbortController();
this.pendingSend = pendingSend;
if (!this.chat) {
const geminiClient = this.config.getGeminiClient();
this.chat = await geminiClient.startChat();
}
const promptId = Math.random().toString(16).slice(2);
const chat = this.chat!;
const toolRegistry: ToolRegistry = await this.config.getToolRegistry();
const parts = await this.#resolveUserMessage(params, pendingSend.signal);
let nextMessage: Content | null = { role: 'user', parts };
while (nextMessage !== null) {
if (pendingSend.signal.aborted) {
chat.addHistory(nextMessage);
return;
}
const functionCalls: FunctionCall[] = [];
try {
const responseStream = await chat.sendMessageStream(
{
message: nextMessage?.parts ?? [],
config: {
abortSignal: pendingSend.signal,
tools: [
{
functionDeclarations: toolRegistry.getFunctionDeclarations(),
},
],
},
},
promptId,
);
nextMessage = null;
for await (const resp of responseStream) {
if (pendingSend.signal.aborted) {
return;
}
if (resp.candidates && resp.candidates.length > 0) {
const candidate = resp.candidates[0];
for (const part of candidate.content?.parts ?? []) {
if (!part.text) {
continue;
}
this.client.streamAssistantMessageChunk({
chunk: part.thought
? { thought: part.text }
: { text: part.text },
});
}
}
if (resp.functionCalls) {
functionCalls.push(...resp.functionCalls);
}
}
} catch (error) {
if (getErrorStatus(error) === 429) {
throw new acp.RequestError(
429,
'Rate limit exceeded. Try again later.',
);
}
throw error;
}
if (functionCalls.length > 0) {
const toolResponseParts: Part[] = [];
for (const fc of functionCalls) {
const response = await this.#runTool(
pendingSend.signal,
promptId,
fc,
);
const parts = Array.isArray(response) ? response : [response];
for (const part of parts) {
if (typeof part === 'string') {
toolResponseParts.push({ text: part });
} else if (part) {
toolResponseParts.push(part);
}
}
}
nextMessage = { role: 'user', parts: toolResponseParts };
}
}
}
async #runTool(
abortSignal: AbortSignal,
promptId: string,
fc: FunctionCall,
): Promise<PartListUnion> {
const callId = fc.id ?? `${fc.name}-${Date.now()}`;
const args = (fc.args ?? {}) as Record<string, unknown>;
const startTime = Date.now();
const errorResponse = (error: Error) => {
const durationMs = Date.now() - startTime;
logToolCall(this.config, {
'event.name': 'tool_call',
'event.timestamp': new Date().toISOString(),
prompt_id: promptId,
function_name: fc.name ?? '',
function_args: args,
duration_ms: durationMs,
success: false,
error: error.message,
});
return [
{
functionResponse: {
id: callId,
name: fc.name ?? '',
response: { error: error.message },
},
},
];
};
if (!fc.name) {
return errorResponse(new Error('Missing function name'));
}
const toolRegistry: ToolRegistry = await this.config.getToolRegistry();
const tool = toolRegistry.getTool(fc.name as string);
if (!tool) {
return errorResponse(
new Error(`Tool "${fc.name}" not found in registry.`),
);
}
let toolCallId;
const confirmationDetails = await tool.shouldConfirmExecute(
args,
abortSignal,
);
if (confirmationDetails) {
let content: acp.ToolCallContent | null = null;
if (confirmationDetails.type === 'edit') {
content = {
type: 'diff',
path: confirmationDetails.fileName,
oldText: confirmationDetails.originalContent,
newText: confirmationDetails.newContent,
};
}
const result = await this.client.requestToolCallConfirmation({
label: tool.getDescription(args),
icon: tool.icon,
content,
confirmation: toAcpToolCallConfirmation(confirmationDetails),
locations: tool.toolLocations(args),
});
await confirmationDetails.onConfirm(toToolCallOutcome(result.outcome));
switch (result.outcome) {
case 'reject':
return errorResponse(
new Error(`Tool "${fc.name}" not allowed to run by the user.`),
);
case 'cancel':
return errorResponse(
new Error(`Tool "${fc.name}" was canceled by the user.`),
);
case 'allow':
case 'alwaysAllow':
case 'alwaysAllowMcpServer':
case 'alwaysAllowTool':
break;
default: {
const resultOutcome: never = result.outcome;
throw new Error(`Unexpected: ${resultOutcome}`);
}
}
toolCallId = result.id;
} else {
const result = await this.client.pushToolCall({
icon: tool.icon,
label: tool.getDescription(args),
locations: tool.toolLocations(args),
});
toolCallId = result.id;
}
try {
const toolResult: ToolResult = await tool.execute(args, abortSignal);
const toolCallContent = toToolCallContent(toolResult);
await this.client.updateToolCall({
toolCallId,
status: 'finished',
content: toolCallContent,
});
const durationMs = Date.now() - startTime;
logToolCall(this.config, {
'event.name': 'tool_call',
'event.timestamp': new Date().toISOString(),
function_name: fc.name,
function_args: args,
duration_ms: durationMs,
success: true,
prompt_id: promptId,
});
return convertToFunctionResponse(fc.name, callId, toolResult.llmContent);
} catch (e) {
const error = e instanceof Error ? e : new Error(String(e));
await this.client.updateToolCall({
toolCallId,
status: 'error',
content: { type: 'markdown', markdown: error.message },
});
return errorResponse(error);
}
}
async #resolveUserMessage(
message: acp.SendUserMessageParams,
abortSignal: AbortSignal,
): Promise<Part[]> {
const atPathCommandParts = message.chunks.filter((part) => 'path' in part);
if (atPathCommandParts.length === 0) {
return message.chunks.map((chunk) => {
if ('text' in chunk) {
return { text: chunk.text };
} else {
throw new Error('Unexpected chunk type');
}
});
}
// Get centralized file discovery service
const fileDiscovery = this.config.getFileService();
const respectGitIgnore = this.config.getFileFilteringRespectGitIgnore();
const pathSpecsToRead: string[] = [];
const atPathToResolvedSpecMap = new Map<string, string>();
const contentLabelsForDisplay: string[] = [];
const ignoredPaths: string[] = [];
const toolRegistry = await this.config.getToolRegistry();
const readManyFilesTool = toolRegistry.getTool('read_many_files');
const globTool = toolRegistry.getTool('glob');
if (!readManyFilesTool) {
throw new Error('Error: read_many_files tool not found.');
}
for (const atPathPart of atPathCommandParts) {
const pathName = atPathPart.path;
// Check if path should be ignored by git
if (fileDiscovery.shouldGitIgnoreFile(pathName)) {
ignoredPaths.push(pathName);
const reason = respectGitIgnore
? 'git-ignored and will be skipped'
: 'ignored by custom patterns';
console.warn(`Path ${pathName} is ${reason}.`);
continue;
}
let currentPathSpec = pathName;
let resolvedSuccessfully = false;
try {
const absolutePath = path.resolve(this.config.getTargetDir(), pathName);
if (isWithinRoot(absolutePath, this.config.getTargetDir())) {
const stats = await fs.stat(absolutePath);
if (stats.isDirectory()) {
currentPathSpec = pathName.endsWith('/')
? `${pathName}**`
: `${pathName}/**`;
this.#debug(
`Path ${pathName} resolved to directory, using glob: ${currentPathSpec}`,
);
} else {
this.#debug(
`Path ${pathName} resolved to file: ${currentPathSpec}`,
);
}
resolvedSuccessfully = true;
} else {
this.#debug(
`Path ${pathName} is outside the project directory. Skipping.`,
);
}
} catch (error) {
if (isNodeError(error) && error.code === 'ENOENT') {
if (this.config.getEnableRecursiveFileSearch() && globTool) {
this.#debug(
`Path ${pathName} not found directly, attempting glob search.`,
);
try {
const globResult = await globTool.execute(
{
pattern: `**/*${pathName}*`,
path: this.config.getTargetDir(),
},
abortSignal,
);
if (
globResult.llmContent &&
typeof globResult.llmContent === 'string' &&
!globResult.llmContent.startsWith('No files found') &&
!globResult.llmContent.startsWith('Error:')
) {
const lines = globResult.llmContent.split('\n');
if (lines.length > 1 && lines[1]) {
const firstMatchAbsolute = lines[1].trim();
currentPathSpec = path.relative(
this.config.getTargetDir(),
firstMatchAbsolute,
);
this.#debug(
`Glob search for ${pathName} found ${firstMatchAbsolute}, using relative path: ${currentPathSpec}`,
);
resolvedSuccessfully = true;
} else {
this.#debug(
`Glob search for '**/*${pathName}*' did not return a usable path. Path ${pathName} will be skipped.`,
);
}
} else {
this.#debug(
`Glob search for '**/*${pathName}*' found no files or an error. Path ${pathName} will be skipped.`,
);
}
} catch (globError) {
console.error(
`Error during glob search for ${pathName}: ${getErrorMessage(globError)}`,
);
}
} else {
this.#debug(
`Glob tool not found. Path ${pathName} will be skipped.`,
);
}
} else {
console.error(
`Error stating path ${pathName}. Path ${pathName} will be skipped.`,
);
}
}
if (resolvedSuccessfully) {
pathSpecsToRead.push(currentPathSpec);
atPathToResolvedSpecMap.set(pathName, currentPathSpec);
contentLabelsForDisplay.push(pathName);
}
}
// Construct the initial part of the query for the LLM
let initialQueryText = '';
for (let i = 0; i < message.chunks.length; i++) {
const chunk = message.chunks[i];
if ('text' in chunk) {
initialQueryText += chunk.text;
} else {
// type === 'atPath'
const resolvedSpec = atPathToResolvedSpecMap.get(chunk.path);
if (
i > 0 &&
initialQueryText.length > 0 &&
!initialQueryText.endsWith(' ') &&
resolvedSpec
) {
// Add space if previous part was text and didn't end with space, or if previous was @path
const prevPart = message.chunks[i - 1];
if (
'text' in prevPart ||
('path' in prevPart && atPathToResolvedSpecMap.has(prevPart.path))
) {
initialQueryText += ' ';
}
}
if (resolvedSpec) {
initialQueryText += `@${resolvedSpec}`;
} else {
// If not resolved for reading (e.g. lone @ or invalid path that was skipped),
// add the original @-string back, ensuring spacing if it's not the first element.
if (
i > 0 &&
initialQueryText.length > 0 &&
!initialQueryText.endsWith(' ') &&
!chunk.path.startsWith(' ')
) {
initialQueryText += ' ';
}
initialQueryText += `@${chunk.path}`;
}
}
}
initialQueryText = initialQueryText.trim();
// Inform user about ignored paths
if (ignoredPaths.length > 0) {
const ignoreType = respectGitIgnore ? 'git-ignored' : 'custom-ignored';
this.#debug(
`Ignored ${ignoredPaths.length} ${ignoreType} files: ${ignoredPaths.join(', ')}`,
);
}
// Fallback for lone "@" or completely invalid @-commands resulting in empty initialQueryText
if (pathSpecsToRead.length === 0) {
console.warn('No valid file paths found in @ commands to read.');
return [{ text: initialQueryText }];
}
const processedQueryParts: Part[] = [{ text: initialQueryText }];
const toolArgs = {
paths: pathSpecsToRead,
respectGitIgnore, // Use configuration setting
};
const toolCall = await this.client.pushToolCall({
icon: readManyFilesTool.icon,
label: readManyFilesTool.getDescription(toolArgs),
});
try {
const result = await readManyFilesTool.execute(toolArgs, abortSignal);
const content = toToolCallContent(result) || {
type: 'markdown',
markdown: `Successfully read: ${contentLabelsForDisplay.join(', ')}`,
};
await this.client.updateToolCall({
toolCallId: toolCall.id,
status: 'finished',
content,
});
if (Array.isArray(result.llmContent)) {
const fileContentRegex = /^--- (.*?) ---\n\n([\s\S]*?)\n\n$/;
processedQueryParts.push({
text: '\n--- Content from referenced files ---',
});
for (const part of result.llmContent) {
if (typeof part === 'string') {
const match = fileContentRegex.exec(part);
if (match) {
const filePathSpecInContent = match[1]; // This is a resolved pathSpec
const fileActualContent = match[2].trim();
processedQueryParts.push({
text: `\nContent from @${filePathSpecInContent}:\n`,
});
processedQueryParts.push({ text: fileActualContent });
} else {
processedQueryParts.push({ text: part });
}
} else {
// part is a Part object.
processedQueryParts.push(part);
}
}
processedQueryParts.push({ text: '\n--- End of content ---' });
} else {
console.warn(
'read_many_files tool returned no content or empty content.',
);
}
return processedQueryParts;
} catch (error: unknown) {
await this.client.updateToolCall({
toolCallId: toolCall.id,
status: 'error',
content: {
type: 'markdown',
markdown: `Error reading files (${contentLabelsForDisplay.join(', ')}): ${getErrorMessage(error)}`,
},
});
throw error;
}
}
#debug(msg: string) {
if (this.config.getDebugMode()) {
console.warn(msg);
}
}
}
function toToolCallContent(toolResult: ToolResult): acp.ToolCallContent | null {
if (toolResult.returnDisplay) {
if (typeof toolResult.returnDisplay === 'string') {
return {
type: 'markdown',
markdown: toolResult.returnDisplay,
};
} else {
return {
type: 'diff',
path: toolResult.returnDisplay.fileName,
oldText: toolResult.returnDisplay.originalContent,
newText: toolResult.returnDisplay.newContent,
};
}
} else {
return null;
}
}
function toAcpToolCallConfirmation(
confirmationDetails: ToolCallConfirmationDetails,
): acp.ToolCallConfirmation {
switch (confirmationDetails.type) {
case 'edit':
return { type: 'edit' };
case 'exec':
return {
type: 'execute',
rootCommand: confirmationDetails.rootCommand,
command: confirmationDetails.command,
};
case 'mcp':
return {
type: 'mcp',
serverName: confirmationDetails.serverName,
toolName: confirmationDetails.toolName,
toolDisplayName: confirmationDetails.toolDisplayName,
};
case 'info':
return {
type: 'fetch',
urls: confirmationDetails.urls || [],
description: confirmationDetails.urls?.length
? null
: confirmationDetails.prompt,
};
default: {
const unreachable: never = confirmationDetails;
throw new Error(`Unexpected: ${unreachable}`);
}
}
}
function toToolCallOutcome(
outcome: acp.ToolCallConfirmationOutcome,
): ToolConfirmationOutcome {
switch (outcome) {
case 'allow':
return ToolConfirmationOutcome.ProceedOnce;
case 'alwaysAllow':
return ToolConfirmationOutcome.ProceedAlways;
case 'alwaysAllowMcpServer':
return ToolConfirmationOutcome.ProceedAlwaysServer;
case 'alwaysAllowTool':
return ToolConfirmationOutcome.ProceedAlwaysTool;
case 'reject':
case 'cancel':
return ToolConfirmationOutcome.Cancel;
default: {
const unreachable: never = outcome;
throw new Error(`Unexpected: ${unreachable}`);
}
}
}

View File

@@ -10,6 +10,11 @@ import { loadCliConfig, parseArguments, CliArgs } from './config.js';
import { Settings } from './settings.js';
import { Extension } from './extension.js';
import * as ServerConfig from '@qwen-code/qwen-code-core';
import {
TelemetryTarget,
ConfigParameters,
DEFAULT_TELEMETRY_TARGET,
} from '@qwen-code/qwen-code-core';
vi.mock('os', async (importOriginal) => {
const actualOs = await importOriginal<typeof os>();
@@ -37,19 +42,63 @@ vi.mock('@qwen-code/qwen-code-core', async () => {
...actualServer,
loadEnvironment: vi.fn(),
loadServerHierarchicalMemory: vi.fn(
(cwd, debug, fileService, extensionPaths, _maxDirs) =>
(cwd, debug, fileService, extensionPaths) =>
Promise.resolve({
memoryContent: extensionPaths?.join(',') || '',
fileCount: extensionPaths?.length || 0,
}),
),
DEFAULT_MEMORY_FILE_FILTERING_OPTIONS: {
respectGitIgnore: false,
respectGeminiIgnore: true,
},
DEFAULT_FILE_FILTERING_OPTIONS: {
respectGitIgnore: true,
respectGeminiIgnore: true,
Config: class MockConfig extends actualServer.Config {
private enableOpenAILogging: boolean;
constructor(params: ConfigParameters) {
super(params);
this.enableOpenAILogging = params.enableOpenAILogging ?? false;
}
getEnableOpenAILogging(): boolean {
return this.enableOpenAILogging;
}
// Override other methods to ensure they work correctly
getShowMemoryUsage(): boolean {
return (
(this as unknown as { showMemoryUsage?: boolean }).showMemoryUsage ??
false
);
}
getTelemetryEnabled(): boolean {
return (
(this as unknown as { telemetrySettings?: { enabled?: boolean } })
.telemetrySettings?.enabled ?? false
);
}
getTelemetryLogPromptsEnabled(): boolean {
return (
(this as unknown as { telemetrySettings?: { logPrompts?: boolean } })
.telemetrySettings?.logPrompts ?? false
);
}
getTelemetryOtlpEndpoint(): string {
return (
(this as unknown as { telemetrySettings?: { otlpEndpoint?: string } })
.telemetrySettings?.otlpEndpoint ??
'http://tracing-analysis-dc-hz.aliyuncs.com:8090'
);
}
getTelemetryTarget(): TelemetryTarget {
return (
(
this as unknown as {
telemetrySettings?: { target?: TelemetryTarget };
}
).telemetrySettings?.target ?? DEFAULT_TELEMETRY_TARGET
);
}
},
};
});
@@ -195,85 +244,6 @@ describe('loadCliConfig', () => {
const config = await loadCliConfig(settings, [], 'test-session', argv);
expect(config.getShowMemoryUsage()).toBe(true);
});
it(`should leave proxy to empty by default`, async () => {
// Clear all proxy environment variables to ensure clean test
delete process.env.https_proxy;
delete process.env.http_proxy;
delete process.env.HTTPS_PROXY;
delete process.env.HTTP_PROXY;
process.argv = ['node', 'script.js'];
const argv = await parseArguments();
const settings: Settings = {};
const config = await loadCliConfig(settings, [], 'test-session', argv);
expect(config.getProxy()).toBeFalsy();
});
const proxy_url = 'http://localhost:7890';
const testCases = [
{
input: {
env_name: 'https_proxy',
proxy_url,
},
expected: proxy_url,
},
{
input: {
env_name: 'http_proxy',
proxy_url,
},
expected: proxy_url,
},
{
input: {
env_name: 'HTTPS_PROXY',
proxy_url,
},
expected: proxy_url,
},
{
input: {
env_name: 'HTTP_PROXY',
proxy_url,
},
expected: proxy_url,
},
];
testCases.forEach(({ input, expected }) => {
it(`should set proxy to ${expected} according to environment variable [${input.env_name}]`, async () => {
// Clear all proxy environment variables first
delete process.env.https_proxy;
delete process.env.http_proxy;
delete process.env.HTTPS_PROXY;
delete process.env.HTTP_PROXY;
process.env[input.env_name] = input.proxy_url;
process.argv = ['node', 'script.js'];
const argv = await parseArguments();
const settings: Settings = {};
const config = await loadCliConfig(settings, [], 'test-session', argv);
expect(config.getProxy()).toBe(expected);
});
});
it('should set proxy when --proxy flag is present', async () => {
process.argv = ['node', 'script.js', '--proxy', 'http://localhost:7890'];
const argv = await parseArguments();
const settings: Settings = {};
const config = await loadCliConfig(settings, [], 'test-session', argv);
expect(config.getProxy()).toBe('http://localhost:7890');
});
it('should prioritize CLI flag over environment variable for proxy (CLI http://localhost:7890, environment variable http://localhost:7891)', async () => {
process.env['http_proxy'] = 'http://localhost:7891';
process.argv = ['node', 'script.js', '--proxy', 'http://localhost:7890'];
const argv = await parseArguments();
const settings: Settings = {};
const config = await loadCliConfig(settings, [], 'test-session', argv);
expect(config.getProxy()).toBe('http://localhost:7890');
});
});
describe('loadCliConfig telemetry', () => {
@@ -380,7 +350,9 @@ describe('loadCliConfig telemetry', () => {
const argv = await parseArguments();
const settings: Settings = { telemetry: { enabled: true } };
const config = await loadCliConfig(settings, [], 'test-session', argv);
expect(config.getTelemetryOtlpEndpoint()).toBe('http://localhost:4317');
expect(config.getTelemetryOtlpEndpoint()).toBe(
'http://tracing-analysis-dc-hz.aliyuncs.com:8090',
);
});
it('should use telemetry target from settings if CLI flag is not present', async () => {
@@ -439,12 +411,81 @@ describe('loadCliConfig telemetry', () => {
expect(config.getTelemetryLogPromptsEnabled()).toBe(false);
});
it('should use default log prompts (true) if no value is provided via CLI or settings', async () => {
it('should use default log prompts (false) if no value is provided via CLI or settings', async () => {
process.argv = ['node', 'script.js'];
const argv = await parseArguments();
const settings: Settings = { telemetry: { enabled: true } };
const config = await loadCliConfig(settings, [], 'test-session', argv);
expect(config.getTelemetryLogPromptsEnabled()).toBe(true);
expect(config.getTelemetryLogPromptsEnabled()).toBe(false);
});
it('should set enableOpenAILogging to true when --openai-logging flag is present', async () => {
const settings: Settings = {};
const argv = await parseArguments();
argv.openaiLogging = true;
const config = await loadCliConfig(settings, [], 'test-session', argv);
expect(
(
config as unknown as { getEnableOpenAILogging(): boolean }
).getEnableOpenAILogging(),
).toBe(true);
});
it('should set enableOpenAILogging to false when --openai-logging flag is not present', async () => {
const settings: Settings = {};
const argv = await parseArguments();
const config = await loadCliConfig(settings, [], 'test-session', argv);
expect(
(
config as unknown as { getEnableOpenAILogging(): boolean }
).getEnableOpenAILogging(),
).toBe(false);
});
it('should use enableOpenAILogging value from settings if CLI flag is not present (settings true)', async () => {
const settings: Settings = { enableOpenAILogging: true };
const argv = await parseArguments();
const config = await loadCliConfig(settings, [], 'test-session', argv);
expect(
(
config as unknown as { getEnableOpenAILogging(): boolean }
).getEnableOpenAILogging(),
).toBe(true);
});
it('should use enableOpenAILogging value from settings if CLI flag is not present (settings false)', async () => {
const settings: Settings = { enableOpenAILogging: false };
const argv = await parseArguments();
const config = await loadCliConfig(settings, [], 'test-session', argv);
expect(
(
config as unknown as { getEnableOpenAILogging(): boolean }
).getEnableOpenAILogging(),
).toBe(false);
});
it('should prioritize --openai-logging CLI flag (true) over settings (false)', async () => {
const settings: Settings = { enableOpenAILogging: false };
const argv = await parseArguments();
argv.openaiLogging = true;
const config = await loadCliConfig(settings, [], 'test-session', argv);
expect(
(
config as unknown as { getEnableOpenAILogging(): boolean }
).getEnableOpenAILogging(),
).toBe(true);
});
it('should prioritize --openai-logging CLI flag (false) over settings (true)', async () => {
const settings: Settings = { enableOpenAILogging: true };
const argv = await parseArguments();
argv.openaiLogging = false;
const config = await loadCliConfig(settings, [], 'test-session', argv);
expect(
(
config as unknown as { getEnableOpenAILogging(): boolean }
).getEnableOpenAILogging(),
).toBe(false);
});
});
@@ -499,11 +540,6 @@ describe('Hierarchical Memory Loading (config.ts) - Placeholder Suite', () => {
'/path/to/ext3/context1.md',
'/path/to/ext3/context2.md',
],
{
respectGitIgnore: false,
respectGeminiIgnore: true,
},
undefined, // maxDirs
);
});
@@ -561,68 +597,6 @@ describe('mergeMcpServers', () => {
});
});
describe('loadCliConfig systemPromptMappings', () => {
it('should use default systemPromptMappings when not provided in settings', async () => {
const mockSettings: Settings = {
theme: 'dark',
};
const mockExtensions: Extension[] = [];
const mockSessionId = 'test-session';
const mockArgv: CliArgs = {
model: 'test-model',
} as CliArgs;
const config = await loadCliConfig(
mockSettings,
mockExtensions,
mockSessionId,
mockArgv,
);
expect(config.getSystemPromptMappings()).toEqual([
{
baseUrls: [
'https://dashscope.aliyuncs.com/compatible-mode/v1/',
'https://dashscope-intl.aliyuncs.com/compatible-mode/v1/',
],
modelNames: ['qwen3-coder-plus'],
template:
'SYSTEM_TEMPLATE:{"name":"qwen3_coder","params":{"is_git_repository":{RUNTIME_VARS_IS_GIT_REPO},"sandbox":"{RUNTIME_VARS_SANDBOX}"}}',
},
]);
});
it('should use custom systemPromptMappings when provided in settings', async () => {
const customSystemPromptMappings = [
{
baseUrls: ['https://custom-api.com'],
modelNames: ['custom-model'],
template: 'Custom template',
},
];
const mockSettings: Settings = {
theme: 'dark',
systemPromptMappings: customSystemPromptMappings,
};
const mockExtensions: Extension[] = [];
const mockSessionId = 'test-session';
const mockArgv: CliArgs = {
model: 'test-model',
} as CliArgs;
const config = await loadCliConfig(
mockSettings,
mockExtensions,
mockSessionId,
mockArgv,
);
expect(config.getSystemPromptMappings()).toEqual(
customSystemPromptMappings,
);
});
});
describe('mergeExcludeTools', () => {
it('should merge excludeTools from settings and extensions', async () => {
const settings: Settings = { excludeTools: ['tool1', 'tool2'] };
@@ -879,66 +853,6 @@ describe('loadCliConfig with allowed-mcp-server-names', () => {
const config = await loadCliConfig(baseSettings, [], 'test-session', argv);
expect(config.getMcpServers()).toEqual({});
});
it('should read allowMCPServers from settings', async () => {
process.argv = ['node', 'script.js'];
const argv = await parseArguments();
const settings: Settings = {
...baseSettings,
allowMCPServers: ['server1', 'server2'],
};
const config = await loadCliConfig(settings, [], 'test-session', argv);
expect(config.getMcpServers()).toEqual({
server1: { url: 'http://localhost:8080' },
server2: { url: 'http://localhost:8081' },
});
});
it('should read excludeMCPServers from settings', async () => {
process.argv = ['node', 'script.js'];
const argv = await parseArguments();
const settings: Settings = {
...baseSettings,
excludeMCPServers: ['server1', 'server2'],
};
const config = await loadCliConfig(settings, [], 'test-session', argv);
expect(config.getMcpServers()).toEqual({
server3: { url: 'http://localhost:8082' },
});
});
it('should override allowMCPServers with excludeMCPServers if overlapping ', async () => {
process.argv = ['node', 'script.js'];
const argv = await parseArguments();
const settings: Settings = {
...baseSettings,
excludeMCPServers: ['server1'],
allowMCPServers: ['server1', 'server2'],
};
const config = await loadCliConfig(settings, [], 'test-session', argv);
expect(config.getMcpServers()).toEqual({
server2: { url: 'http://localhost:8081' },
});
});
it('should prioritize mcp server flag if set ', async () => {
process.argv = [
'node',
'script.js',
'--allowed-mcp-server-names',
'server1',
];
const argv = await parseArguments();
const settings: Settings = {
...baseSettings,
excludeMCPServers: ['server1'],
allowMCPServers: ['server2'],
};
const config = await loadCliConfig(settings, [], 'test-session', argv);
expect(config.getMcpServers()).toEqual({
server1: { url: 'http://localhost:8080' },
});
});
});
describe('loadCliConfig extensions', () => {
@@ -994,7 +908,6 @@ describe('loadCliConfig ideMode', () => {
// Explicitly delete TERM_PROGRAM and SANDBOX before each test
delete process.env.TERM_PROGRAM;
delete process.env.SANDBOX;
delete process.env.GEMINI_CLI_IDE_SERVER_PORT;
});
afterEach(() => {
@@ -1031,7 +944,6 @@ describe('loadCliConfig ideMode', () => {
process.argv = ['node', 'script.js', '--ide-mode'];
const argv = await parseArguments();
process.env.TERM_PROGRAM = 'vscode';
process.env.GEMINI_CLI_IDE_SERVER_PORT = '3000';
const settings: Settings = {};
const config = await loadCliConfig(settings, [], 'test-session', argv);
expect(config.getIdeMode()).toBe(true);
@@ -1041,7 +953,6 @@ describe('loadCliConfig ideMode', () => {
process.argv = ['node', 'script.js'];
const argv = await parseArguments();
process.env.TERM_PROGRAM = 'vscode';
process.env.GEMINI_CLI_IDE_SERVER_PORT = '3000';
const settings: Settings = { ideMode: true };
const config = await loadCliConfig(settings, [], 'test-session', argv);
expect(config.getIdeMode()).toBe(true);
@@ -1051,7 +962,6 @@ describe('loadCliConfig ideMode', () => {
process.argv = ['node', 'script.js', '--ide-mode'];
const argv = await parseArguments();
process.env.TERM_PROGRAM = 'vscode';
process.env.GEMINI_CLI_IDE_SERVER_PORT = '3000';
const settings: Settings = { ideMode: false };
const config = await loadCliConfig(settings, [], 'test-session', argv);
expect(config.getIdeMode()).toBe(true);
@@ -1085,4 +995,82 @@ describe('loadCliConfig ideMode', () => {
const config = await loadCliConfig(settings, [], 'test-session', argv);
expect(config.getIdeMode()).toBe(false);
});
it('should add _ide_server when ideMode is true', async () => {
process.argv = ['node', 'script.js', '--ide-mode'];
const argv = await parseArguments();
process.env.TERM_PROGRAM = 'vscode';
const settings: Settings = {};
const config = await loadCliConfig(settings, [], 'test-session', argv);
expect(config.getIdeMode()).toBe(true);
const mcpServers = config.getMcpServers();
expect(mcpServers?.['_ide_server']).toBeDefined();
expect(mcpServers?.['_ide_server']?.httpUrl).toBe(
'http://localhost:3000/mcp',
);
expect(mcpServers?.['_ide_server']?.description).toBe('IDE connection');
expect(mcpServers?.['_ide_server']?.trust).toBe(false);
});
});
describe('loadCliConfig systemPromptMappings', () => {
it('should use default systemPromptMappings when not provided in settings', async () => {
const mockSettings: Settings = {
theme: 'dark',
};
const mockExtensions: Extension[] = [];
const mockSessionId = 'test-session';
const mockArgv: CliArgs = {
model: 'test-model',
} as CliArgs;
const config = await loadCliConfig(
mockSettings,
mockExtensions,
mockSessionId,
mockArgv,
);
expect(config.getSystemPromptMappings()).toEqual([
{
baseUrls: [
'https://dashscope.aliyuncs.com/compatible-mode/v1/',
'https://dashscope-intl.aliyuncs.com/compatible-mode/v1/',
],
modelNames: ['qwen3-coder-plus'],
template:
'SYSTEM_TEMPLATE:{"name":"qwen3_coder","params":{"is_git_repository":{RUNTIME_VARS_IS_GIT_REPO},"sandbox":"{RUNTIME_VARS_SANDBOX}"}}',
},
]);
});
it('should use custom systemPromptMappings when provided in settings', async () => {
const customSystemPromptMappings = [
{
baseUrls: ['https://custom-api.com'],
modelNames: ['custom-model'],
template: 'Custom template',
},
];
const mockSettings: Settings = {
theme: 'dark',
systemPromptMappings: customSystemPromptMappings,
};
const mockExtensions: Extension[] = [];
const mockSessionId = 'test-session';
const mockArgv: CliArgs = {
model: 'test-model',
} as CliArgs;
const config = await loadCliConfig(
mockSettings,
mockExtensions,
mockSessionId,
mockArgv,
);
expect(config.getSystemPromptMappings()).toEqual(
customSystemPromptMappings,
);
});
});

View File

@@ -15,15 +15,13 @@ import {
ApprovalMode,
DEFAULT_GEMINI_MODEL,
DEFAULT_GEMINI_EMBEDDING_MODEL,
DEFAULT_MEMORY_FILE_FILTERING_OPTIONS,
FileDiscoveryService,
TelemetryTarget,
FileFilteringOptions,
IdeClient,
MCPServerConfig,
} from '@qwen-code/qwen-code-core';
import { Settings } from './settings.js';
import { Extension, annotateActiveExtensions } from './extension.js';
import { Extension, filterActiveExtensions } from './extension.js';
import { getCliVersion } from '../utils/version.js';
import { loadSandboxConfig } from './sandboxConfig.js';
@@ -54,16 +52,13 @@ export interface CliArgs {
telemetryTarget: string | undefined;
telemetryOtlpEndpoint: string | undefined;
telemetryLogPrompts: boolean | undefined;
telemetryOutfile: string | undefined;
allowedMcpServerNames: string[] | undefined;
experimentalAcp: boolean | undefined;
extensions: string[] | undefined;
listExtensions: boolean | undefined;
ideMode: boolean | undefined;
openaiLogging: boolean | undefined;
openaiApiKey: string | undefined;
openaiBaseUrl: string | undefined;
proxy: string | undefined;
}
export async function parseArguments(): Promise<CliArgs> {
@@ -162,20 +157,12 @@ export async function parseArguments(): Promise<CliArgs> {
description:
'Enable or disable logging of user prompts for telemetry. Overrides settings files.',
})
.option('telemetry-outfile', {
type: 'string',
description: 'Redirect all telemetry output to the specified file.',
})
.option('checkpointing', {
alias: 'c',
type: 'boolean',
description: 'Enables checkpointing of file edits',
default: false,
})
.option('experimental-acp', {
type: 'boolean',
description: 'Starts the agent in ACP mode',
})
.option('allowed-mcp-server-names', {
type: 'array',
string: true,
@@ -210,11 +197,7 @@ export async function parseArguments(): Promise<CliArgs> {
type: 'string',
description: 'OpenAI base URL (for custom endpoints)',
})
.option('proxy', {
type: 'string',
description:
'Proxy for gemini client, like schema://user:password@host:port',
})
.version(await getCliVersion()) // This will enable the --version flag based on package.json
.alias('v', 'version')
.help()
@@ -240,16 +223,13 @@ export async function loadHierarchicalGeminiMemory(
currentWorkingDirectory: string,
debugMode: boolean,
fileService: FileDiscoveryService,
settings: Settings,
extensionContextFilePaths: string[] = [],
fileFilteringOptions?: FileFilteringOptions,
): Promise<{ memoryContent: string; fileCount: number }> {
if (debugMode) {
logger.debug(
`CLI: Delegating hierarchical memory load to server for CWD: ${currentWorkingDirectory}`,
);
}
// Directly call the server function.
// The server function will use its own homedir() for the global path.
return loadServerHierarchicalMemory(
@@ -257,8 +237,6 @@ export async function loadHierarchicalGeminiMemory(
debugMode,
fileService,
extensionContextFilePaths,
fileFilteringOptions,
settings.memoryDiscoveryMaxDirs,
);
}
@@ -279,19 +257,11 @@ export async function loadCliConfig(
process.env.TERM_PROGRAM === 'vscode' &&
!process.env.SANDBOX;
let ideClient: IdeClient | undefined;
if (ideMode) {
ideClient = new IdeClient();
}
const allExtensions = annotateActiveExtensions(
const activeExtensions = filterActiveExtensions(
extensions,
argv.extensions || [],
);
const activeExtensions = extensions.filter(
(_, i) => allExtensions[i].isActive,
);
// Handle OpenAI API key from command line
if (argv.openaiApiKey) {
process.env.OPENAI_API_KEY = argv.openaiApiKey;
@@ -318,72 +288,46 @@ export async function loadCliConfig(
);
const fileService = new FileDiscoveryService(process.cwd());
const fileFiltering = {
...DEFAULT_MEMORY_FILE_FILTERING_OPTIONS,
...settings.fileFiltering,
};
// Call loadHierarchicalGeminiMemory (now a thin wrapper), which calls the server's version
const { memoryContent, fileCount } = await loadHierarchicalGeminiMemory(
process.cwd(),
debugMode,
fileService,
settings,
extensionContextFilePaths,
fileFiltering,
);
let mcpServers = mergeMcpServers(settings, activeExtensions);
const excludeTools = mergeExcludeTools(settings, activeExtensions);
const blockedMcpServers: Array<{ name: string; extensionName: string }> = [];
if (!argv.allowedMcpServerNames) {
if (settings.allowMCPServers) {
const allowedNames = new Set(settings.allowMCPServers.filter(Boolean));
if (allowedNames.size > 0) {
mcpServers = Object.fromEntries(
Object.entries(mcpServers).filter(([key]) => allowedNames.has(key)),
);
}
}
if (settings.excludeMCPServers) {
const excludedNames = new Set(settings.excludeMCPServers.filter(Boolean));
if (excludedNames.size > 0) {
mcpServers = Object.fromEntries(
Object.entries(mcpServers).filter(([key]) => !excludedNames.has(key)),
);
}
}
}
if (argv.allowedMcpServerNames) {
const allowedNames = new Set(argv.allowedMcpServerNames.filter(Boolean));
if (allowedNames.size > 0) {
mcpServers = Object.fromEntries(
Object.entries(mcpServers).filter(([key, server]) => {
const isAllowed = allowedNames.has(key);
if (!isAllowed) {
blockedMcpServers.push({
name: key,
extensionName: server.extensionName || '',
});
}
return isAllowed;
}),
Object.entries(mcpServers).filter(([key]) => allowedNames.has(key)),
);
} else {
blockedMcpServers.push(
...Object.entries(mcpServers).map(([key, server]) => ({
name: key,
extensionName: server.extensionName || '',
})),
);
mcpServers = {};
}
}
if (ideMode) {
mcpServers['_ide_server'] = new MCPServerConfig(
undefined, // command
undefined, // args
undefined, // env
undefined, // cwd
undefined, // url
'http://localhost:3000/mcp', // httpUrl
undefined, // headers
undefined, // tcp
undefined, // timeout
false, // trust
'IDE connection', // description
undefined, // includeTools
undefined, // excludeTools
);
}
const sandboxConfig = await loadSandboxConfig(settings, argv);
return new Config({
@@ -418,19 +362,16 @@ export async function loadCliConfig(
process.env.OTEL_EXPORTER_OTLP_ENDPOINT ??
settings.telemetry?.otlpEndpoint,
logPrompts: argv.telemetryLogPrompts ?? settings.telemetry?.logPrompts,
outfile: argv.telemetryOutfile ?? settings.telemetry?.outfile,
},
usageStatisticsEnabled: settings.usageStatisticsEnabled ?? true,
// Git-aware file filtering settings
fileFiltering: {
respectGitIgnore: settings.fileFiltering?.respectGitIgnore,
respectGeminiIgnore: settings.fileFiltering?.respectGeminiIgnore,
enableRecursiveFileSearch:
settings.fileFiltering?.enableRecursiveFileSearch,
},
checkpointing: argv.checkpointing || settings.checkpointing?.enabled,
proxy:
argv.proxy ||
process.env.HTTPS_PROXY ||
process.env.https_proxy ||
process.env.HTTP_PROXY ||
@@ -443,14 +384,13 @@ export async function loadCliConfig(
maxSessionTurns: settings.maxSessionTurns ?? -1,
sessionTokenLimit: settings.sessionTokenLimit ?? 32000,
maxFolderItems: settings.maxFolderItems ?? 20,
experimentalAcp: argv.experimentalAcp || false,
listExtensions: argv.listExtensions || false,
extensions: allExtensions,
blockedMcpServers,
activeExtensions: activeExtensions.map((e) => ({
name: e.config.name,
version: e.config.version,
})),
noBrowser: !!process.env.NO_BROWSER,
summarizeToolOutput: settings.summarizeToolOutput,
ideMode,
ideClient,
enableOpenAILogging:
(typeof argv.openaiLogging === 'undefined'
? settings.enableOpenAILogging
@@ -467,7 +407,6 @@ export async function loadCliConfig(
'SYSTEM_TEMPLATE:{"name":"qwen3_coder","params":{"is_git_repository":{RUNTIME_VARS_IS_GIT_REPO},"sandbox":"{RUNTIME_VARS_SANDBOX}"}}',
},
],
contentGenerator: settings.contentGenerator,
});
}
@@ -482,10 +421,7 @@ function mergeMcpServers(settings: Settings, extensions: Extension[]) {
);
return;
}
mcpServers[key] = {
...server,
extensionName: extension.config.name,
};
mcpServers[key] = server;
},
);
}

View File

@@ -11,7 +11,7 @@ import * as path from 'path';
import {
EXTENSIONS_CONFIG_FILENAME,
EXTENSIONS_DIRECTORY_NAME,
annotateActiveExtensions,
filterActiveExtensions,
loadExtensions,
} from './extension.js';
@@ -42,7 +42,7 @@ describe('loadExtensions', () => {
fs.rmSync(tempHomeDir, { recursive: true, force: true });
});
it('should load context file path when QWEN.md is present', () => {
it('should load context file path when GEMINI.md is present', () => {
const workspaceExtensionsDir = path.join(
tempWorkspaceDir,
EXTENSIONS_DIRECTORY_NAME,
@@ -86,52 +86,42 @@ describe('loadExtensions', () => {
});
});
describe('annotateActiveExtensions', () => {
describe('filterActiveExtensions', () => {
const extensions = [
{ config: { name: 'ext1', version: '1.0.0' }, contextFiles: [] },
{ config: { name: 'ext2', version: '1.0.0' }, contextFiles: [] },
{ config: { name: 'ext3', version: '1.0.0' }, contextFiles: [] },
];
it('should mark all extensions as active if no enabled extensions are provided', () => {
const activeExtensions = annotateActiveExtensions(extensions, []);
it('should return all extensions if no enabled extensions are provided', () => {
const activeExtensions = filterActiveExtensions(extensions, []);
expect(activeExtensions).toHaveLength(3);
expect(activeExtensions.every((e) => e.isActive)).toBe(true);
});
it('should mark only the enabled extensions as active', () => {
const activeExtensions = annotateActiveExtensions(extensions, [
it('should return only the enabled extensions', () => {
const activeExtensions = filterActiveExtensions(extensions, [
'ext1',
'ext3',
]);
expect(activeExtensions).toHaveLength(3);
expect(activeExtensions.find((e) => e.name === 'ext1')?.isActive).toBe(
true,
);
expect(activeExtensions.find((e) => e.name === 'ext2')?.isActive).toBe(
false,
);
expect(activeExtensions.find((e) => e.name === 'ext3')?.isActive).toBe(
true,
);
expect(activeExtensions).toHaveLength(2);
expect(activeExtensions.some((e) => e.config.name === 'ext1')).toBe(true);
expect(activeExtensions.some((e) => e.config.name === 'ext3')).toBe(true);
});
it('should mark all extensions as inactive when "none" is provided', () => {
const activeExtensions = annotateActiveExtensions(extensions, ['none']);
expect(activeExtensions).toHaveLength(3);
expect(activeExtensions.every((e) => !e.isActive)).toBe(true);
it('should return no extensions when "none" is provided', () => {
const activeExtensions = filterActiveExtensions(extensions, ['none']);
expect(activeExtensions).toHaveLength(0);
});
it('should handle case-insensitivity', () => {
const activeExtensions = annotateActiveExtensions(extensions, ['EXT1']);
expect(activeExtensions.find((e) => e.name === 'ext1')?.isActive).toBe(
true,
);
const activeExtensions = filterActiveExtensions(extensions, ['EXT1']);
expect(activeExtensions).toHaveLength(1);
expect(activeExtensions[0].config.name).toBe('ext1');
});
it('should log an error for unknown extensions', () => {
const consoleSpy = vi.spyOn(console, 'error').mockImplementation(() => {});
annotateActiveExtensions(extensions, ['ext4']);
const consoleSpy = vi.spyOn(console, 'log').mockImplementation(() => {});
filterActiveExtensions(extensions, ['ext4']);
expect(consoleSpy).toHaveBeenCalledWith('Extension not found: ext4');
consoleSpy.mockRestore();
});

View File

@@ -4,7 +4,7 @@
* SPDX-License-Identifier: Apache-2.0
*/
import { MCPServerConfig, GeminiCLIExtension } from '@qwen-code/qwen-code-core';
import { MCPServerConfig } from '@qwen-code/qwen-code-core';
import * as fs from 'fs';
import * as path from 'path';
import * as os from 'os';
@@ -34,6 +34,9 @@ export function loadExtensions(workspaceDir: string): Extension[] {
const uniqueExtensions = new Map<string, Extension>();
for (const extension of allExtensions) {
if (!uniqueExtensions.has(extension.config.name)) {
console.log(
`Loading extension: ${extension.config.name} (version: ${extension.config.version})`,
);
uniqueExtensions.set(extension.config.name, extension);
}
}
@@ -110,18 +113,12 @@ function getContextFileNames(config: ExtensionConfig): string[] {
return config.contextFileName;
}
export function annotateActiveExtensions(
export function filterActiveExtensions(
extensions: Extension[],
enabledExtensionNames: string[],
): GeminiCLIExtension[] {
const annotatedExtensions: GeminiCLIExtension[] = [];
): Extension[] {
if (enabledExtensionNames.length === 0) {
return extensions.map((extension) => ({
name: extension.config.name,
version: extension.config.version,
isActive: true,
}));
return extensions;
}
const lowerCaseEnabledExtensions = new Set(
@@ -132,33 +129,31 @@ export function annotateActiveExtensions(
lowerCaseEnabledExtensions.size === 1 &&
lowerCaseEnabledExtensions.has('none')
) {
return extensions.map((extension) => ({
name: extension.config.name,
version: extension.config.version,
isActive: false,
}));
if (extensions.length > 0) {
console.log('All extensions are disabled.');
}
return [];
}
const activeExtensions: Extension[] = [];
const notFoundNames = new Set(lowerCaseEnabledExtensions);
for (const extension of extensions) {
const lowerCaseName = extension.config.name.toLowerCase();
const isActive = lowerCaseEnabledExtensions.has(lowerCaseName);
if (isActive) {
if (lowerCaseEnabledExtensions.has(lowerCaseName)) {
console.log(
`Activated extension: ${extension.config.name} (version: ${extension.config.version})`,
);
activeExtensions.push(extension);
notFoundNames.delete(lowerCaseName);
} else {
console.log(`Disabled extension: ${extension.config.name}`);
}
annotatedExtensions.push({
name: extension.config.name,
version: extension.config.version,
isActive,
});
}
for (const requestedName of notFoundNames) {
console.error(`Extension not found: ${requestedName}`);
console.log(`Extension not found: ${requestedName}`);
}
return annotatedExtensions;
return activeExtensions;
}

View File

@@ -46,7 +46,7 @@ import stripJsonComments from 'strip-json-comments'; // Will be mocked separatel
import {
loadSettings,
USER_SETTINGS_PATH, // This IS the mocked path.
getSystemSettingsPath,
SYSTEM_SETTINGS_PATH,
SETTINGS_DIRECTORY_NAME, // This is from the original module, but used by the mock.
SettingScope,
} from './settings.js';
@@ -95,16 +95,13 @@ describe('Settings Loading and Merging', () => {
expect(settings.system.settings).toEqual({});
expect(settings.user.settings).toEqual({});
expect(settings.workspace.settings).toEqual({});
expect(settings.merged).toEqual({
customThemes: {},
mcpServers: {},
});
expect(settings.merged).toEqual({});
expect(settings.errors.length).toBe(0);
});
it('should load system settings if only system file exists', () => {
(mockFsExistsSync as Mock).mockImplementation(
(p: fs.PathLike) => p === getSystemSettingsPath(),
(p: fs.PathLike) => p === SYSTEM_SETTINGS_PATH,
);
const systemSettingsContent = {
theme: 'system-default',
@@ -112,7 +109,7 @@ describe('Settings Loading and Merging', () => {
};
(fs.readFileSync as Mock).mockImplementation(
(p: fs.PathOrFileDescriptor) => {
if (p === getSystemSettingsPath())
if (p === SYSTEM_SETTINGS_PATH)
return JSON.stringify(systemSettingsContent);
return '{}';
},
@@ -121,17 +118,13 @@ describe('Settings Loading and Merging', () => {
const settings = loadSettings(MOCK_WORKSPACE_DIR);
expect(fs.readFileSync).toHaveBeenCalledWith(
getSystemSettingsPath(),
SYSTEM_SETTINGS_PATH,
'utf-8',
);
expect(settings.system.settings).toEqual(systemSettingsContent);
expect(settings.user.settings).toEqual({});
expect(settings.workspace.settings).toEqual({});
expect(settings.merged).toEqual({
...systemSettingsContent,
customThemes: {},
mcpServers: {},
});
expect(settings.merged).toEqual(systemSettingsContent);
});
it('should load user settings if only user file exists', () => {
@@ -160,11 +153,7 @@ describe('Settings Loading and Merging', () => {
);
expect(settings.user.settings).toEqual(userSettingsContent);
expect(settings.workspace.settings).toEqual({});
expect(settings.merged).toEqual({
...userSettingsContent,
customThemes: {},
mcpServers: {},
});
expect(settings.merged).toEqual(userSettingsContent);
});
it('should load workspace settings if only workspace file exists', () => {
@@ -191,11 +180,7 @@ describe('Settings Loading and Merging', () => {
);
expect(settings.user.settings).toEqual({});
expect(settings.workspace.settings).toEqual(workspaceSettingsContent);
expect(settings.merged).toEqual({
...workspaceSettingsContent,
customThemes: {},
mcpServers: {},
});
expect(settings.merged).toEqual(workspaceSettingsContent);
});
it('should merge user and workspace settings, with workspace taking precedence', () => {
@@ -230,8 +215,6 @@ describe('Settings Loading and Merging', () => {
sandbox: true,
coreTools: ['tool1'],
contextFileName: 'WORKSPACE_CONTEXT.md',
customThemes: {},
mcpServers: {},
});
});
@@ -240,7 +223,6 @@ describe('Settings Loading and Merging', () => {
const systemSettingsContent = {
theme: 'system-theme',
sandbox: false,
allowMCPServers: ['server1', 'server2'],
telemetry: { enabled: false },
};
const userSettingsContent = {
@@ -252,12 +234,11 @@ describe('Settings Loading and Merging', () => {
sandbox: false,
coreTools: ['tool1'],
contextFileName: 'WORKSPACE_CONTEXT.md',
allowMCPServers: ['server1', 'server2', 'server3'],
};
(fs.readFileSync as Mock).mockImplementation(
(p: fs.PathOrFileDescriptor) => {
if (p === getSystemSettingsPath())
if (p === SYSTEM_SETTINGS_PATH)
return JSON.stringify(systemSettingsContent);
if (p === USER_SETTINGS_PATH)
return JSON.stringify(userSettingsContent);
@@ -278,9 +259,6 @@ describe('Settings Loading and Merging', () => {
telemetry: { enabled: false },
coreTools: ['tool1'],
contextFileName: 'WORKSPACE_CONTEXT.md',
allowMCPServers: ['server1', 'server2'],
customThemes: {},
mcpServers: {},
});
});
@@ -392,134 +370,6 @@ describe('Settings Loading and Merging', () => {
(fs.readFileSync as Mock).mockReturnValue('{}');
const settings = loadSettings(MOCK_WORKSPACE_DIR);
expect(settings.merged.telemetry).toBeUndefined();
expect(settings.merged.customThemes).toEqual({});
expect(settings.merged.mcpServers).toEqual({});
});
it('should merge MCP servers correctly, with workspace taking precedence', () => {
(mockFsExistsSync as Mock).mockReturnValue(true);
const userSettingsContent = {
mcpServers: {
'user-server': {
command: 'user-command',
args: ['--user-arg'],
description: 'User MCP server',
},
'shared-server': {
command: 'user-shared-command',
description: 'User shared server config',
},
},
};
const workspaceSettingsContent = {
mcpServers: {
'workspace-server': {
command: 'workspace-command',
args: ['--workspace-arg'],
description: 'Workspace MCP server',
},
'shared-server': {
command: 'workspace-shared-command',
description: 'Workspace shared server config',
},
},
};
(fs.readFileSync as Mock).mockImplementation(
(p: fs.PathOrFileDescriptor) => {
if (p === USER_SETTINGS_PATH)
return JSON.stringify(userSettingsContent);
if (p === MOCK_WORKSPACE_SETTINGS_PATH)
return JSON.stringify(workspaceSettingsContent);
return '';
},
);
const settings = loadSettings(MOCK_WORKSPACE_DIR);
expect(settings.user.settings).toEqual(userSettingsContent);
expect(settings.workspace.settings).toEqual(workspaceSettingsContent);
expect(settings.merged.mcpServers).toEqual({
'user-server': {
command: 'user-command',
args: ['--user-arg'],
description: 'User MCP server',
},
'workspace-server': {
command: 'workspace-command',
args: ['--workspace-arg'],
description: 'Workspace MCP server',
},
'shared-server': {
command: 'workspace-shared-command',
description: 'Workspace shared server config',
},
});
});
it('should handle MCP servers when only in user settings', () => {
(mockFsExistsSync as Mock).mockImplementation(
(p: fs.PathLike) => p === USER_SETTINGS_PATH,
);
const userSettingsContent = {
mcpServers: {
'user-only-server': {
command: 'user-only-command',
description: 'User only server',
},
},
};
(fs.readFileSync as Mock).mockImplementation(
(p: fs.PathOrFileDescriptor) => {
if (p === USER_SETTINGS_PATH)
return JSON.stringify(userSettingsContent);
return '';
},
);
const settings = loadSettings(MOCK_WORKSPACE_DIR);
expect(settings.merged.mcpServers).toEqual({
'user-only-server': {
command: 'user-only-command',
description: 'User only server',
},
});
});
it('should handle MCP servers when only in workspace settings', () => {
(mockFsExistsSync as Mock).mockImplementation(
(p: fs.PathLike) => p === MOCK_WORKSPACE_SETTINGS_PATH,
);
const workspaceSettingsContent = {
mcpServers: {
'workspace-only-server': {
command: 'workspace-only-command',
description: 'Workspace only server',
},
},
};
(fs.readFileSync as Mock).mockImplementation(
(p: fs.PathOrFileDescriptor) => {
if (p === MOCK_WORKSPACE_SETTINGS_PATH)
return JSON.stringify(workspaceSettingsContent);
return '';
},
);
const settings = loadSettings(MOCK_WORKSPACE_DIR);
expect(settings.merged.mcpServers).toEqual({
'workspace-only-server': {
command: 'workspace-only-command',
description: 'Workspace only server',
},
});
});
it('should have mcpServers as empty object if not in any settings file', () => {
(mockFsExistsSync as Mock).mockReturnValue(false); // No settings files exist
(fs.readFileSync as Mock).mockReturnValue('{}');
const settings = loadSettings(MOCK_WORKSPACE_DIR);
expect(settings.merged.mcpServers).toEqual({});
});
it('should handle JSON parsing errors gracefully', () => {
@@ -557,10 +407,7 @@ describe('Settings Loading and Merging', () => {
// Check that settings are empty due to parsing errors
expect(settings.user.settings).toEqual({});
expect(settings.workspace.settings).toEqual({});
expect(settings.merged).toEqual({
customThemes: {},
mcpServers: {},
});
expect(settings.merged).toEqual({});
// Check that error objects are populated in settings.errors
expect(settings.errors).toBeDefined();
@@ -601,13 +448,10 @@ describe('Settings Loading and Merging', () => {
);
const settings = loadSettings(MOCK_WORKSPACE_DIR);
// @ts-expect-error: dynamic property for test
expect(settings.user.settings.apiKey).toBe('user_api_key_from_env');
// @ts-expect-error: dynamic property for test
expect(settings.user.settings.someUrl).toBe(
'https://test.com/user_api_key_from_env',
);
// @ts-expect-error: dynamic property for test
expect(settings.merged.apiKey).toBe('user_api_key_from_env');
delete process.env.TEST_API_KEY;
});
@@ -636,7 +480,6 @@ describe('Settings Loading and Merging', () => {
expect(settings.workspace.settings.nested.value).toBe(
'workspace_endpoint_from_env',
);
// @ts-expect-error: dynamic property for test
expect(settings.merged.endpoint).toBe('workspace_endpoint_from_env/api');
delete process.env.WORKSPACE_ENDPOINT;
});
@@ -666,16 +509,13 @@ describe('Settings Loading and Merging', () => {
const settings = loadSettings(MOCK_WORKSPACE_DIR);
// @ts-expect-error: dynamic property for test
expect(settings.user.settings.configValue).toBe(
'user_value_for_user_read',
);
// @ts-expect-error: dynamic property for test
expect(settings.workspace.settings.configValue).toBe(
'workspace_value_for_workspace_read',
);
// Merged should take workspace's resolved value
// @ts-expect-error: dynamic property for test
expect(settings.merged.configValue).toBe(
'workspace_value_for_workspace_read',
);
@@ -743,7 +583,7 @@ describe('Settings Loading and Merging', () => {
(fs.readFileSync as Mock).mockImplementation(
(p: fs.PathOrFileDescriptor) => {
if (p === getSystemSettingsPath()) {
if (p === SYSTEM_SETTINGS_PATH) {
process.env.SHARED_VAR = 'system_value_for_system_read'; // Set for system settings read
return JSON.stringify(systemSettingsContent);
}
@@ -757,16 +597,13 @@ describe('Settings Loading and Merging', () => {
const settings = loadSettings(MOCK_WORKSPACE_DIR);
// @ts-expect-error: dynamic property for test
expect(settings.system.settings.configValue).toBe(
'system_value_for_system_read',
);
// @ts-expect-error: dynamic property for test
expect(settings.workspace.settings.configValue).toBe(
'workspace_value_for_workspace_read',
);
// Merged should take system's resolved value
// @ts-expect-error: dynamic property for test
// Merged should take workspace's resolved value
expect(settings.merged.configValue).toBe('system_value_for_system_read');
// Restore original environment variable state
@@ -913,50 +750,6 @@ describe('Settings Loading and Merging', () => {
delete process.env.TEST_HOST;
delete process.env.TEST_PORT;
});
describe('when GEMINI_CLI_SYSTEM_SETTINGS_PATH is set', () => {
const MOCK_ENV_SYSTEM_SETTINGS_PATH = '/mock/env/system/settings.json';
beforeEach(() => {
process.env.GEMINI_CLI_SYSTEM_SETTINGS_PATH =
MOCK_ENV_SYSTEM_SETTINGS_PATH;
});
afterEach(() => {
delete process.env.GEMINI_CLI_SYSTEM_SETTINGS_PATH;
});
it('should load system settings from the path specified in the environment variable', () => {
(mockFsExistsSync as Mock).mockImplementation(
(p: fs.PathLike) => p === MOCK_ENV_SYSTEM_SETTINGS_PATH,
);
const systemSettingsContent = {
theme: 'env-var-theme',
sandbox: true,
};
(fs.readFileSync as Mock).mockImplementation(
(p: fs.PathOrFileDescriptor) => {
if (p === MOCK_ENV_SYSTEM_SETTINGS_PATH)
return JSON.stringify(systemSettingsContent);
return '{}';
},
);
const settings = loadSettings(MOCK_WORKSPACE_DIR);
expect(fs.readFileSync).toHaveBeenCalledWith(
MOCK_ENV_SYSTEM_SETTINGS_PATH,
'utf-8',
);
expect(settings.system.path).toBe(MOCK_ENV_SYSTEM_SETTINGS_PATH);
expect(settings.system.settings).toEqual(systemSettingsContent);
expect(settings.merged).toEqual({
...systemSettingsContent,
customThemes: {},
mcpServers: {},
});
});
});
});
describe('LoadedSettings class', () => {

View File

@@ -19,16 +19,12 @@ import {
import stripJsonComments from 'strip-json-comments';
import { DefaultLight } from '../ui/themes/default-light.js';
import { DefaultDark } from '../ui/themes/default.js';
import { CustomTheme } from '../ui/themes/theme.js';
export const SETTINGS_DIRECTORY_NAME = '.qwen';
export const USER_SETTINGS_DIR = path.join(homedir(), SETTINGS_DIRECTORY_NAME);
export const USER_SETTINGS_PATH = path.join(USER_SETTINGS_DIR, 'settings.json');
export function getSystemSettingsPath(): string {
if (process.env.GEMINI_CLI_SYSTEM_SETTINGS_PATH) {
return process.env.GEMINI_CLI_SYSTEM_SETTINGS_PATH;
}
function getSystemSettingsPath(): string {
if (platform() === 'darwin') {
return '/Library/Application Support/QwenCode/settings.json';
} else if (platform() === 'win32') {
@@ -38,6 +34,8 @@ export function getSystemSettingsPath(): string {
}
}
export const SYSTEM_SETTINGS_PATH = getSystemSettingsPath();
export enum SettingScope {
User = 'User',
Workspace = 'Workspace',
@@ -48,17 +46,12 @@ export interface CheckpointingSettings {
enabled?: boolean;
}
export interface SummarizeToolOutputSettings {
tokenBudget?: number;
}
export interface AccessibilitySettings {
disableLoadingPhrases?: boolean;
}
export interface Settings {
theme?: string;
customThemes?: Record<string, CustomTheme>;
selectedAuthType?: AuthType;
sandbox?: boolean | string;
coreTools?: string[];
@@ -67,8 +60,6 @@ export interface Settings {
toolCallCommand?: string;
mcpServerCommand?: string;
mcpServers?: Record<string, MCPServerConfig>;
allowMCPServers?: string[];
excludeMCPServers?: string[];
showMemoryUsage?: boolean;
contextFileName?: string | string[];
accessibility?: AccessibilitySettings;
@@ -83,12 +74,11 @@ export interface Settings {
// Git-aware file filtering settings
fileFiltering?: {
respectGitIgnore?: boolean;
respectGeminiIgnore?: boolean;
enableRecursiveFileSearch?: boolean;
};
// UI setting. Does not display the ANSI-controlled terminal title.
hideWindowTitle?: boolean;
hideTips?: boolean;
hideBanner?: boolean;
@@ -101,24 +91,26 @@ export interface Settings {
// Setting for maximum number of files and folders to show in folder structure
maxFolderItems?: number;
// A map of tool names to their summarization settings.
summarizeToolOutput?: Record<string, SummarizeToolOutputSettings>;
// Sampling parameters for content generation
sampling_params?: {
top_p?: number;
top_k?: number;
repetition_penalty?: number;
presence_penalty?: number;
frequency_penalty?: number;
temperature?: number;
max_tokens?: number;
};
vimMode?: boolean;
// System prompt mappings for different base URLs and model names
systemPromptMappings?: Array<{
baseUrls?: string[];
modelNames?: string[];
template?: string;
}>;
// Add other settings here.
ideMode?: boolean;
memoryDiscoveryMaxDirs?: number;
sampling_params?: Record<string, unknown>;
systemPromptMappings?: Array<{
baseUrls: string[];
modelNames: string[];
template: string;
}>;
contentGenerator?: {
timeout?: number;
maxRetries?: number;
};
}
export interface SettingsError {
@@ -156,24 +148,10 @@ export class LoadedSettings {
}
private computeMergedSettings(): Settings {
const system = this.system.settings;
const user = this.user.settings;
const workspace = this.workspace.settings;
return {
...user,
...workspace,
...system,
customThemes: {
...(user.customThemes || {}),
...(workspace.customThemes || {}),
...(system.customThemes || {}),
},
mcpServers: {
...(user.mcpServers || {}),
...(workspace.mcpServers || {}),
...(system.mcpServers || {}),
},
...this.user.settings,
...this.workspace.settings,
...this.system.settings,
};
}
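
A minimal sketch of the precedence implied by the spread order above (the sample values are hypothetical): later spreads win, so system settings override workspace settings, which in turn override user settings.

```typescript
// Hypothetical values illustrating the spread order in computeMergedSettings.
const user = { theme: 'light', vimMode: true };
const workspace = { theme: 'dark' };
const system = { sandbox: true };

const merged = { ...user, ...workspace, ...system };
// merged === { theme: 'dark', vimMode: true, sandbox: true }
// workspace overrides user; system overrides both.
```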
@@ -190,12 +168,13 @@ export class LoadedSettings {
}
}
setValue<K extends keyof Settings>(
setValue(
scope: SettingScope,
key: K,
value: Settings[K],
key: keyof Settings,
value: string | Record<string, MCPServerConfig> | undefined,
): void {
const settingsFile = this.forScope(scope);
// @ts-expect-error - value can be string | Record<string, MCPServerConfig>
settingsFile.settings[key] = value;
this._merged = this.computeMergedSettings();
saveSettings(settingsFile);
@@ -317,11 +296,11 @@ export function loadSettings(workspaceDir: string): LoadedSettings {
let userSettings: Settings = {};
let workspaceSettings: Settings = {};
const settingsErrors: SettingsError[] = [];
const systemSettingsPath = getSystemSettingsPath();
// Load system settings
try {
if (fs.existsSync(systemSettingsPath)) {
const systemContent = fs.readFileSync(systemSettingsPath, 'utf-8');
if (fs.existsSync(SYSTEM_SETTINGS_PATH)) {
const systemContent = fs.readFileSync(SYSTEM_SETTINGS_PATH, 'utf-8');
const parsedSystemSettings = JSON.parse(
stripJsonComments(systemContent),
) as Settings;
@@ -330,7 +309,7 @@ export function loadSettings(workspaceDir: string): LoadedSettings {
} catch (error: unknown) {
settingsErrors.push({
message: getErrorMessage(error),
path: systemSettingsPath,
path: SYSTEM_SETTINGS_PATH,
});
}
@@ -388,7 +367,7 @@ export function loadSettings(workspaceDir: string): LoadedSettings {
return new LoadedSettings(
{
path: systemSettingsPath,
path: SYSTEM_SETTINGS_PATH,
settings: systemSettings,
},
{

View File

@@ -6,13 +6,12 @@
import stripAnsi from 'strip-ansi';
import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
import { main, setupUnhandledRejectionHandler } from './gemini.js';
import { main } from './gemini.js';
import {
LoadedSettings,
SettingsFile,
loadSettings,
} from './config/settings.js';
import { appEvents, AppEvent } from './utils/events.js';
// Custom error to identify mock process.exit calls
class MockProcessExitError extends Error {
@@ -56,16 +55,6 @@ vi.mock('update-notifier', () => ({
})),
}));
vi.mock('./utils/events.js', async (importOriginal) => {
const actual = await importOriginal<typeof import('./utils/events.js')>();
return {
...actual,
appEvents: {
emit: vi.fn(),
},
};
});
vi.mock('./utils/sandbox.js', () => ({
sandbox_command: vi.fn(() => ''), // Default to no sandbox command
start_sandbox: vi.fn(() => Promise.resolve()), // Mock as an async function that resolves
@@ -76,8 +65,6 @@ describe('gemini.tsx main function', () => {
let loadSettingsMock: ReturnType<typeof vi.mocked<typeof loadSettings>>;
let originalEnvGeminiSandbox: string | undefined;
let originalEnvSandbox: string | undefined;
let initialUnhandledRejectionListeners: NodeJS.UnhandledRejectionListener[] =
[];
const processExitSpy = vi
.spyOn(process, 'exit')
@@ -95,8 +82,6 @@ describe('gemini.tsx main function', () => {
delete process.env.SANDBOX;
consoleErrorSpy = vi.spyOn(console, 'error').mockImplementation(() => {});
initialUnhandledRejectionListeners =
process.listeners('unhandledRejection');
});
afterEach(() => {
@@ -111,15 +96,6 @@ describe('gemini.tsx main function', () => {
} else {
delete process.env.SANDBOX;
}
const currentListeners = process.listeners('unhandledRejection');
const addedListener = currentListeners.find(
(listener) => !initialUnhandledRejectionListeners.includes(listener),
);
if (addedListener) {
process.removeListener('unhandledRejection', addedListener);
}
vi.restoreAllMocks();
});
@@ -133,7 +109,7 @@ describe('gemini.tsx main function', () => {
settings: {},
};
const workspaceSettingsFile: SettingsFile = {
path: '/workspace/.gemini/settings.json',
path: '/workspace/.qwen/settings.json',
settings: {},
};
const systemSettingsFile: SettingsFile = {
@@ -169,45 +145,7 @@ describe('gemini.tsx main function', () => {
'Please fix /test/settings.json and try again.',
);
// Verify process.exit was called.
// Verify process.exit was called (indirectly, via the thrown error)
expect(processExitSpy).toHaveBeenCalledWith(1);
});
it('should log unhandled promise rejections and open debug console on first error', async () => {
const appEventsMock = vi.mocked(appEvents);
const rejectionError = new Error('Test unhandled rejection');
setupUnhandledRejectionHandler();
// Simulate an unhandled rejection.
// We are not using Promise.reject here as vitest will catch it.
// Instead we will dispatch the event manually.
process.emit('unhandledRejection', rejectionError, Promise.resolve());
// We need to wait for the rejection handler to be called.
await new Promise(process.nextTick);
expect(appEventsMock.emit).toHaveBeenCalledWith(AppEvent.OpenDebugConsole);
expect(appEventsMock.emit).toHaveBeenCalledWith(
AppEvent.LogError,
expect.stringContaining('Unhandled Promise Rejection'),
);
expect(appEventsMock.emit).toHaveBeenCalledWith(
AppEvent.LogError,
expect.stringContaining('Please file a bug report using the /bug tool.'),
);
// Simulate a second rejection
const secondRejectionError = new Error('Second test unhandled rejection');
process.emit('unhandledRejection', secondRejectionError, Promise.resolve());
await new Promise(process.nextTick);
// Ensure emit was only called once for OpenDebugConsole
const openDebugConsoleCalls = appEventsMock.emit.mock.calls.filter(
(call) => call[0] === AppEvent.OpenDebugConsole,
);
expect(openDebugConsoleCalls.length).toBe(1);
// Prevent the process.exit error from being thrown.
processExitSpy.mockRestore();
});
});

View File

@@ -17,6 +17,7 @@ import { start_sandbox } from './utils/sandbox.js';
import {
LoadedSettings,
loadSettings,
USER_SETTINGS_PATH,
SettingScope,
} from './config/settings.js';
import { themeManager } from './ui/themes/theme-manager.js';
@@ -39,8 +40,6 @@ import {
} from '@qwen-code/qwen-code-core';
import { validateAuthMethod } from './config/auth.js';
import { setMaxSizedBoxDebugging } from './ui/components/shared/MaxSizedBox.js';
import { validateNonInteractiveAuth } from './validateNonInterActiveAuth.js';
import { appEvents, AppEvent } from './utils/events.js';
function getNodeMemoryArgs(config: Config): string[] {
const totalMemoryMB = os.totalmem() / (1024 * 1024);
@@ -85,32 +84,8 @@ async function relaunchWithAdditionalArgs(additionalArgs: string[]) {
await new Promise((resolve) => child.on('close', resolve));
process.exit(0);
}
import { runAcpPeer } from './acp/acpPeer.js';
export function setupUnhandledRejectionHandler() {
let unhandledRejectionOccurred = false;
process.on('unhandledRejection', (reason, _promise) => {
const errorMessage = `=========================================
This is an unexpected error. Please file a bug report using the /bug tool.
CRITICAL: Unhandled Promise Rejection!
=========================================
Reason: ${reason}${
reason instanceof Error && reason.stack
? `
Stack trace:
${reason.stack}`
: ''
}`;
appEvents.emit(AppEvent.LogError, errorMessage);
if (!unhandledRejectionOccurred) {
unhandledRejectionOccurred = true;
appEvents.emit(AppEvent.OpenDebugConsole);
}
});
}
export async function main() {
setupUnhandledRejectionHandler();
const workspaceRoot = process.cwd();
const settings = loadSettings(workspaceRoot);
@@ -166,9 +141,6 @@ export async function main() {
await config.initialize();
// Load custom themes from settings
themeManager.loadCustomThemes(settings.merged.customThemes);
if (settings.merged.theme) {
if (!themeManager.setActiveTheme(settings.merged.theme)) {
// If the theme is not found during initial load, log a warning and continue.
@@ -211,16 +183,12 @@ export async function main() {
if (
settings.merged.selectedAuthType === AuthType.LOGIN_WITH_GOOGLE &&
config.isBrowserLaunchSuppressed()
config.getNoBrowser()
) {
// Do oauth before app renders to make copying the link possible.
await getOauthClient(settings.merged.selectedAuthType, config);
}
if (config.getExperimentalAcp()) {
return runAcpPeer(config, settings);
}
let input = config.getQuestion();
const startupWarnings = [
...(await getStartupWarnings()),
@@ -296,6 +264,21 @@ function setWindowTitle(title: string, settings: LoadedSettings) {
}
}
// --- Global Unhandled Rejection Handler ---
process.on('unhandledRejection', (reason, _promise) => {
// Log other unexpected unhandled rejections as critical errors
console.error('=========================================');
console.error('CRITICAL: Unhandled Promise Rejection!');
console.error('=========================================');
console.error('Reason:', reason);
console.error('Stack trace may follow:');
if (!(reason instanceof Error)) {
console.error(reason);
}
// Exit for genuinely unhandled errors
process.exit(1);
});
async function loadNonInteractiveConfig(
config: Config,
extensions: Extension[],
@@ -329,8 +312,51 @@ async function loadNonInteractiveConfig(
await finalConfig.initialize();
}
return await validateNonInteractiveAuth(
return await validateNonInterActiveAuth(
settings.merged.selectedAuthType,
finalConfig,
);
}
async function validateNonInterActiveAuth(
selectedAuthType: AuthType | undefined,
nonInteractiveConfig: Config,
) {
// Special case for the CLI: many headless environments do not have a settings.json,
// so if GEMINI_API_KEY or OPENAI_API_KEY is set we use that. OAuth flows are
// interactive anyway, so they still require an explicit auth setting.
if (
!selectedAuthType &&
!process.env.GEMINI_API_KEY &&
!process.env.OPENAI_API_KEY
) {
console.error(
`Please set an Auth method in your ${USER_SETTINGS_PATH} OR specify GEMINI_API_KEY or OPENAI_API_KEY env variable before running`,
);
process.exit(1);
}
// Determine auth type based on available environment variables
if (!selectedAuthType) {
if (process.env.OPENAI_API_KEY) {
selectedAuthType = AuthType.USE_OPENAI;
} else if (process.env.GEMINI_API_KEY) {
selectedAuthType = AuthType.USE_GEMINI;
}
}
// This should never happen due to the check above, but TypeScript needs assurance
if (!selectedAuthType) {
console.error('No valid authentication method found');
process.exit(1);
}
const err = validateAuthMethod(selectedAuthType);
if (err != null) {
console.error(err);
process.exit(1);
}
await nonInteractiveConfig.refreshAuth(selectedAuthType);
return nonInteractiveConfig;
}
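
The fallback order implemented above can be summarized by the following hedged sketch; the helper name and return values are hypothetical and only restate the checks in validateNonInterActiveAuth (OPENAI_API_KEY is consulted before GEMINI_API_KEY when no auth type is configured).

```typescript
// Hypothetical helper mirroring the env-var fallback order above.
function pickAuthType(env: NodeJS.ProcessEnv): 'openai' | 'gemini' | undefined {
  if (env.OPENAI_API_KEY) return 'openai'; // checked first
  if (env.GEMINI_API_KEY) return 'gemini'; // fallback
  return undefined; // caller reports an error and exits
}
```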

View File

@@ -229,14 +229,14 @@ describe('runNonInteractive', () => {
it('should not exit if a tool is not found, and should send error back to model', async () => {
const functionCall: FunctionCall = {
id: 'fcNotFound',
name: 'nonexistentTool',
name: 'nonExistentTool',
args: {},
};
const errorResponsePart: Part = {
functionResponse: {
name: 'nonexistentTool',
name: 'nonExistentTool',
id: 'fcNotFound',
response: { error: 'Tool "nonexistentTool" not found in registry.' },
response: { error: 'Tool "nonExistentTool" not found in registry.' },
},
};
@@ -246,8 +246,8 @@ describe('runNonInteractive', () => {
vi.mocked(mockCoreExecuteToolCall).mockResolvedValue({
callId: 'fcNotFound',
responseParts: [errorResponsePart],
resultDisplay: 'Tool "nonexistentTool" not found in registry.',
error: new Error('Tool "nonexistentTool" not found in registry.'),
resultDisplay: 'Tool "nonExistentTool" not found in registry.',
error: new Error('Tool "nonExistentTool" not found in registry.'),
});
const stream1 = (async function* () {
@@ -278,7 +278,7 @@ describe('runNonInteractive', () => {
);
expect(consoleErrorSpy).toHaveBeenCalledWith(
'Error executing tool nonexistentTool: Tool "nonexistentTool" not found in registry.',
'Error executing tool nonExistentTool: Tool "nonExistentTool" not found in registry.',
);
expect(mockProcessExit).not.toHaveBeenCalled();

View File

@@ -11,6 +11,7 @@ import {
ToolRegistry,
shutdownTelemetry,
isTelemetrySdkInitialized,
ToolResultDisplay,
} from '@qwen-code/qwen-code-core';
import {
Content,
@@ -43,6 +44,83 @@ function getResponseText(response: GenerateContentResponse): string | null {
return null;
}
// Helper function to format tool call arguments for display
function formatToolArgs(args: Record<string, unknown>): string {
if (!args || Object.keys(args).length === 0) {
return '(no arguments)';
}
const formattedArgs = Object.entries(args)
.map(([key, value]) => {
if (typeof value === 'string') {
return `${key}: "${value}"`;
} else if (typeof value === 'object' && value !== null) {
return `${key}: ${JSON.stringify(value)}`;
} else {
return `${key}: ${value}`;
}
})
.join(', ');
return `(${formattedArgs})`;
}
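
For illustration, under the formatting rules above a call like the following (argument values are made up) renders the arguments as a single parenthesized list:

```typescript
// Illustrative calls to formatToolArgs as defined above.
formatToolArgs({ path: '/tmp/a.txt', limit: 10, opts: { recursive: true } });
// => '(path: "/tmp/a.txt", limit: 10, opts: {"recursive":true})'
formatToolArgs({});
// => '(no arguments)'
```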
// Helper function to display tool call information
function displayToolCallInfo(
toolName: string,
args: Record<string, unknown>,
status: 'start' | 'success' | 'error',
resultDisplay?: ToolResultDisplay,
errorMessage?: string,
): void {
const timestamp = new Date().toLocaleTimeString();
const argsStr = formatToolArgs(args);
switch (status) {
case 'start':
process.stdout.write(
`\n[${timestamp}] 🔧 Executing tool: ${toolName} ${argsStr}\n`,
);
break;
case 'success':
if (resultDisplay) {
if (typeof resultDisplay === 'string' && resultDisplay.trim()) {
process.stdout.write(
`[${timestamp}] ✅ Tool ${toolName} completed successfully\n`,
);
process.stdout.write(`📋 Result:\n${resultDisplay}\n`);
} else if (
typeof resultDisplay === 'object' &&
'fileDiff' in resultDisplay
) {
process.stdout.write(
`[${timestamp}] ✅ Tool ${toolName} completed successfully\n`,
);
process.stdout.write(`📋 File: ${resultDisplay.fileName}\n`);
process.stdout.write(`📋 Diff:\n${resultDisplay.fileDiff}\n`);
} else {
process.stdout.write(
`[${timestamp}] ✅ Tool ${toolName} completed successfully (no output)\n`,
);
}
} else {
process.stdout.write(
`[${timestamp}] ✅ Tool ${toolName} completed successfully (no output)\n`,
);
}
break;
case 'error':
process.stdout.write(
`[${timestamp}] ❌ Tool ${toolName} failed: ${errorMessage}\n`,
);
break;
default:
process.stdout.write(
`[${timestamp}] ⚠️ Tool ${toolName} reported unknown status: ${status}\n`,
);
break;
}
}
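
A brief usage sketch of displayToolCallInfo; the tool name, arguments, and result are illustrative only, and the timestamp is whatever toLocaleTimeString() returns at runtime.

```typescript
// Illustrative lifecycle calls and the stdout lines they produce.
displayToolCallInfo('read_file', { path: 'README.md' }, 'start');
// [10:15:01] 🔧 Executing tool: read_file (path: "README.md")
displayToolCallInfo('read_file', { path: 'README.md' }, 'success', '# Title ...');
// [10:15:02] ✅ Tool read_file completed successfully
// 📋 Result:
// # Title ...
displayToolCallInfo('read_file', { path: 'README.md' }, 'error', undefined, 'ENOENT');
// [10:15:03] ❌ Tool read_file failed: ENOENT
```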
export async function runNonInteractive(
config: Config,
input: string,
@@ -118,6 +196,9 @@ export async function runNonInteractive(
prompt_id,
};
// Display tool call start information
displayToolCallInfo(fc.name as string, fc.args ?? {}, 'start');
const toolResponse = await executeToolCall(
config,
requestInfo,
@@ -126,6 +207,20 @@ export async function runNonInteractive(
);
if (toolResponse.error) {
// Display tool call error information
const errorMessage =
typeof toolResponse.resultDisplay === 'string'
? toolResponse.resultDisplay
: toolResponse.error?.message;
displayToolCallInfo(
fc.name as string,
fc.args ?? {},
'error',
undefined,
errorMessage,
);
const isToolNotFound = toolResponse.error.message.includes(
'not found in registry',
);
@@ -135,6 +230,14 @@ export async function runNonInteractive(
if (!isToolNotFound) {
process.exit(1);
}
} else {
// Display tool call success information
displayToolCallInfo(
fc.name as string,
fc.args ?? {},
'success',
toolResponse.resultDisplay,
);
}
if (toolResponse.responseParts) {

View File

@@ -1,17 +0,0 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
// This is a replacement for the `is-in-ci` package that always returns false.
// We are doing this to avoid the issue where `ink` does not render the UI
// when it detects that it is running in a CI environment.
// This is safe because `ink` (and thus `is-in-ci`) is only used in the
// interactive code path of the CLI.
// See issue #1563 for more details.
const isInCi = false;
// eslint-disable-next-line import/no-default-export
export default isInCi;

View File

@@ -1,127 +0,0 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
vi.mock('../ui/commands/aboutCommand.js', async () => {
const { CommandKind } = await import('../ui/commands/types.js');
return {
aboutCommand: {
name: 'about',
description: 'About the CLI',
kind: CommandKind.BUILT_IN,
},
};
});
vi.mock('../ui/commands/ideCommand.js', () => ({ ideCommand: vi.fn() }));
vi.mock('../ui/commands/restoreCommand.js', () => ({
restoreCommand: vi.fn(),
}));
import { describe, it, expect, vi, beforeEach, type Mock } from 'vitest';
import { BuiltinCommandLoader } from './BuiltinCommandLoader.js';
import { Config } from '@qwen-code/qwen-code-core';
import { CommandKind } from '../ui/commands/types.js';
import { ideCommand } from '../ui/commands/ideCommand.js';
import { restoreCommand } from '../ui/commands/restoreCommand.js';
vi.mock('../ui/commands/authCommand.js', () => ({ authCommand: {} }));
vi.mock('../ui/commands/bugCommand.js', () => ({ bugCommand: {} }));
vi.mock('../ui/commands/chatCommand.js', () => ({ chatCommand: {} }));
vi.mock('../ui/commands/clearCommand.js', () => ({ clearCommand: {} }));
vi.mock('../ui/commands/compressCommand.js', () => ({ compressCommand: {} }));
vi.mock('../ui/commands/corgiCommand.js', () => ({ corgiCommand: {} }));
vi.mock('../ui/commands/docsCommand.js', () => ({ docsCommand: {} }));
vi.mock('../ui/commands/editorCommand.js', () => ({ editorCommand: {} }));
vi.mock('../ui/commands/extensionsCommand.js', () => ({
extensionsCommand: {},
}));
vi.mock('../ui/commands/helpCommand.js', () => ({ helpCommand: {} }));
vi.mock('../ui/commands/memoryCommand.js', () => ({ memoryCommand: {} }));
vi.mock('../ui/commands/privacyCommand.js', () => ({ privacyCommand: {} }));
vi.mock('../ui/commands/quitCommand.js', () => ({ quitCommand: {} }));
vi.mock('../ui/commands/statsCommand.js', () => ({ statsCommand: {} }));
vi.mock('../ui/commands/themeCommand.js', () => ({ themeCommand: {} }));
vi.mock('../ui/commands/toolsCommand.js', () => ({ toolsCommand: {} }));
vi.mock('../ui/commands/mcpCommand.js', () => ({
mcpCommand: {
name: 'mcp',
description: 'MCP command',
kind: 'BUILT_IN',
},
}));
describe('BuiltinCommandLoader', () => {
let mockConfig: Config;
const ideCommandMock = ideCommand as Mock;
const restoreCommandMock = restoreCommand as Mock;
beforeEach(() => {
vi.clearAllMocks();
mockConfig = { some: 'config' } as unknown as Config;
ideCommandMock.mockReturnValue({
name: 'ide',
description: 'IDE command',
kind: CommandKind.BUILT_IN,
});
restoreCommandMock.mockReturnValue({
name: 'restore',
description: 'Restore command',
kind: CommandKind.BUILT_IN,
});
});
it('should correctly pass the config object to command factory functions', async () => {
const loader = new BuiltinCommandLoader(mockConfig);
await loader.loadCommands(new AbortController().signal);
expect(ideCommandMock).toHaveBeenCalledTimes(1);
expect(ideCommandMock).toHaveBeenCalledWith(mockConfig);
expect(restoreCommandMock).toHaveBeenCalledTimes(1);
expect(restoreCommandMock).toHaveBeenCalledWith(mockConfig);
});
it('should filter out null command definitions returned by factories', async () => {
// Override the mock's behavior for this specific test.
ideCommandMock.mockReturnValue(null);
const loader = new BuiltinCommandLoader(mockConfig);
const commands = await loader.loadCommands(new AbortController().signal);
// The 'ide' command should be filtered out.
const ideCmd = commands.find((c) => c.name === 'ide');
expect(ideCmd).toBeUndefined();
// Other commands should still be present.
const aboutCmd = commands.find((c) => c.name === 'about');
expect(aboutCmd).toBeDefined();
});
it('should handle a null config gracefully when calling factories', async () => {
const loader = new BuiltinCommandLoader(null);
await loader.loadCommands(new AbortController().signal);
expect(ideCommandMock).toHaveBeenCalledTimes(1);
expect(ideCommandMock).toHaveBeenCalledWith(null);
expect(restoreCommandMock).toHaveBeenCalledTimes(1);
expect(restoreCommandMock).toHaveBeenCalledWith(null);
});
it('should return a list of all loaded commands', async () => {
const loader = new BuiltinCommandLoader(mockConfig);
const commands = await loader.loadCommands(new AbortController().signal);
const aboutCmd = commands.find((c) => c.name === 'about');
expect(aboutCmd).toBeDefined();
expect(aboutCmd?.kind).toBe(CommandKind.BUILT_IN);
const ideCmd = commands.find((c) => c.name === 'ide');
expect(ideCmd).toBeDefined();
const mcpCmd = commands.find((c) => c.name === 'mcp');
expect(mcpCmd).toBeDefined();
});
});

View File

@@ -1,75 +0,0 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { ICommandLoader } from './types.js';
import { SlashCommand } from '../ui/commands/types.js';
import { Config } from '@qwen-code/qwen-code-core';
import { aboutCommand } from '../ui/commands/aboutCommand.js';
import { authCommand } from '../ui/commands/authCommand.js';
import { bugCommand } from '../ui/commands/bugCommand.js';
import { chatCommand } from '../ui/commands/chatCommand.js';
import { clearCommand } from '../ui/commands/clearCommand.js';
import { compressCommand } from '../ui/commands/compressCommand.js';
import { copyCommand } from '../ui/commands/copyCommand.js';
import { corgiCommand } from '../ui/commands/corgiCommand.js';
import { docsCommand } from '../ui/commands/docsCommand.js';
import { editorCommand } from '../ui/commands/editorCommand.js';
import { extensionsCommand } from '../ui/commands/extensionsCommand.js';
import { helpCommand } from '../ui/commands/helpCommand.js';
import { ideCommand } from '../ui/commands/ideCommand.js';
import { mcpCommand } from '../ui/commands/mcpCommand.js';
import { memoryCommand } from '../ui/commands/memoryCommand.js';
import { privacyCommand } from '../ui/commands/privacyCommand.js';
import { quitCommand } from '../ui/commands/quitCommand.js';
import { restoreCommand } from '../ui/commands/restoreCommand.js';
import { statsCommand } from '../ui/commands/statsCommand.js';
import { themeCommand } from '../ui/commands/themeCommand.js';
import { toolsCommand } from '../ui/commands/toolsCommand.js';
import { vimCommand } from '../ui/commands/vimCommand.js';
/**
* Loads the core, hard-coded slash commands that are an integral part
* of the Gemini CLI application.
*/
export class BuiltinCommandLoader implements ICommandLoader {
constructor(private config: Config | null) {}
/**
* Gathers all raw built-in command definitions, injects dependencies where
* needed (e.g., config) and filters out any that are not available.
*
* @param _signal An AbortSignal (unused for this synchronous loader).
* @returns A promise that resolves to an array of `SlashCommand` objects.
*/
async loadCommands(_signal: AbortSignal): Promise<SlashCommand[]> {
const allDefinitions: Array<SlashCommand | null> = [
aboutCommand,
authCommand,
bugCommand,
chatCommand,
clearCommand,
compressCommand,
copyCommand,
corgiCommand,
docsCommand,
editorCommand,
extensionsCommand,
helpCommand,
ideCommand(this.config),
memoryCommand,
privacyCommand,
mcpCommand,
quitCommand,
restoreCommand(this.config),
statsCommand,
themeCommand,
toolsCommand,
vimCommand,
];
return allDefinitions.filter((cmd): cmd is SlashCommand => cmd !== null);
}
}
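
A short usage sketch, assuming the class as defined above; config may be null (as in the tests), in which case factory-produced commands that return null are filtered out.

```typescript
// Hypothetical usage of BuiltinCommandLoader.
const loader = new BuiltinCommandLoader(null);
const commands = await loader.loadCommands(new AbortController().signal);
console.log(commands.map((cmd) => cmd.name)); // built-ins; null factory results are dropped
```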

View File

@@ -4,177 +4,135 @@
* SPDX-License-Identifier: Apache-2.0
*/
import { vi, describe, it, expect, beforeEach, afterEach } from 'vitest';
import { vi, describe, it, expect, beforeEach } from 'vitest';
import { CommandService } from './CommandService.js';
import { type ICommandLoader } from './types.js';
import { CommandKind, type SlashCommand } from '../ui/commands/types.js';
import { type SlashCommand } from '../ui/commands/types.js';
import { memoryCommand } from '../ui/commands/memoryCommand.js';
import { helpCommand } from '../ui/commands/helpCommand.js';
import { clearCommand } from '../ui/commands/clearCommand.js';
import { authCommand } from '../ui/commands/authCommand.js';
import { themeCommand } from '../ui/commands/themeCommand.js';
import { privacyCommand } from '../ui/commands/privacyCommand.js';
import { aboutCommand } from '../ui/commands/aboutCommand.js';
const createMockCommand = (name: string, kind: CommandKind): SlashCommand => ({
name,
description: `Description for ${name}`,
kind,
action: vi.fn(),
});
const mockCommandA = createMockCommand('command-a', CommandKind.BUILT_IN);
const mockCommandB = createMockCommand('command-b', CommandKind.BUILT_IN);
const mockCommandC = createMockCommand('command-c', CommandKind.FILE);
const mockCommandB_Override = createMockCommand('command-b', CommandKind.FILE);
class MockCommandLoader implements ICommandLoader {
private commandsToLoad: SlashCommand[];
constructor(commandsToLoad: SlashCommand[]) {
this.commandsToLoad = commandsToLoad;
}
loadCommands = vi.fn(
async (): Promise<SlashCommand[]> => Promise.resolve(this.commandsToLoad),
);
}
// Mock the command modules to isolate the service from the command implementations.
vi.mock('../ui/commands/memoryCommand.js', () => ({
memoryCommand: { name: 'memory', description: 'Mock Memory' },
}));
vi.mock('../ui/commands/helpCommand.js', () => ({
helpCommand: { name: 'help', description: 'Mock Help' },
}));
vi.mock('../ui/commands/clearCommand.js', () => ({
clearCommand: { name: 'clear', description: 'Mock Clear' },
}));
vi.mock('../ui/commands/authCommand.js', () => ({
authCommand: { name: 'auth', description: 'Mock Auth' },
}));
vi.mock('../ui/commands/themeCommand.js', () => ({
themeCommand: { name: 'theme', description: 'Mock Theme' },
}));
vi.mock('../ui/commands/privacyCommand.js', () => ({
privacyCommand: { name: 'privacy', description: 'Mock Privacy' },
}));
vi.mock('../ui/commands/aboutCommand.js', () => ({
aboutCommand: { name: 'about', description: 'Mock About' },
}));
describe('CommandService', () => {
beforeEach(() => {
vi.spyOn(console, 'debug').mockImplementation(() => {});
describe('when using default production loader', () => {
let commandService: CommandService;
beforeEach(() => {
commandService = new CommandService();
});
it('should initialize with an empty command tree', () => {
const tree = commandService.getCommands();
expect(tree).toBeInstanceOf(Array);
expect(tree.length).toBe(0);
});
describe('loadCommands', () => {
it('should load the built-in commands into the command tree', async () => {
// Pre-condition check
expect(commandService.getCommands().length).toBe(0);
// Action
await commandService.loadCommands();
const tree = commandService.getCommands();
// Post-condition assertions
expect(tree.length).toBe(7);
const commandNames = tree.map((cmd) => cmd.name);
expect(commandNames).toContain('auth');
expect(commandNames).toContain('memory');
expect(commandNames).toContain('help');
expect(commandNames).toContain('clear');
expect(commandNames).toContain('theme');
expect(commandNames).toContain('privacy');
expect(commandNames).toContain('about');
});
it('should overwrite any existing commands when called again', async () => {
// Load once
await commandService.loadCommands();
expect(commandService.getCommands().length).toBe(7);
// Load again
await commandService.loadCommands();
const tree = commandService.getCommands();
// Should not append, but overwrite
expect(tree.length).toBe(7);
});
});
describe('getCommandTree', () => {
it('should return the current command tree', async () => {
const initialTree = commandService.getCommands();
expect(initialTree).toEqual([]);
await commandService.loadCommands();
const loadedTree = commandService.getCommands();
expect(loadedTree.length).toBe(7);
expect(loadedTree).toEqual([
aboutCommand,
authCommand,
clearCommand,
helpCommand,
memoryCommand,
privacyCommand,
themeCommand,
]);
});
});
});
afterEach(() => {
vi.restoreAllMocks();
});
describe('when initialized with an injected loader function', () => {
it('should use the provided loader instead of the built-in one', async () => {
// Arrange: Create a set of mock commands.
const mockCommands: SlashCommand[] = [
{ name: 'injected-test-1', description: 'injected 1' },
{ name: 'injected-test-2', description: 'injected 2' },
];
it('should load commands from a single loader', async () => {
const mockLoader = new MockCommandLoader([mockCommandA, mockCommandB]);
const service = await CommandService.create(
[mockLoader],
new AbortController().signal,
);
// Arrange: Create a mock loader FUNCTION that resolves with our mock commands.
const mockLoader = vi.fn().mockResolvedValue(mockCommands);
const commands = service.getCommands();
// Act: Instantiate the service WITH the injected loader function.
const commandService = new CommandService(mockLoader);
await commandService.loadCommands();
const tree = commandService.getCommands();
expect(mockLoader.loadCommands).toHaveBeenCalledTimes(1);
expect(commands).toHaveLength(2);
expect(commands).toEqual(
expect.arrayContaining([mockCommandA, mockCommandB]),
);
});
// Assert: The tree should contain ONLY our injected commands.
expect(mockLoader).toHaveBeenCalled(); // Verify our mock loader was actually called.
expect(tree.length).toBe(2);
expect(tree).toEqual(mockCommands);
it('should aggregate commands from multiple loaders', async () => {
const loader1 = new MockCommandLoader([mockCommandA]);
const loader2 = new MockCommandLoader([mockCommandC]);
const service = await CommandService.create(
[loader1, loader2],
new AbortController().signal,
);
const commands = service.getCommands();
expect(loader1.loadCommands).toHaveBeenCalledTimes(1);
expect(loader2.loadCommands).toHaveBeenCalledTimes(1);
expect(commands).toHaveLength(2);
expect(commands).toEqual(
expect.arrayContaining([mockCommandA, mockCommandC]),
);
});
it('should override commands from earlier loaders with those from later loaders', async () => {
const loader1 = new MockCommandLoader([mockCommandA, mockCommandB]);
const loader2 = new MockCommandLoader([
mockCommandB_Override,
mockCommandC,
]);
const service = await CommandService.create(
[loader1, loader2],
new AbortController().signal,
);
const commands = service.getCommands();
expect(commands).toHaveLength(3); // Should be A, C, and the overridden B.
// The final list should contain the override from the *last* loader.
const commandB = commands.find((cmd) => cmd.name === 'command-b');
expect(commandB).toBeDefined();
expect(commandB?.kind).toBe(CommandKind.FILE); // Verify it's the overridden version.
expect(commandB).toEqual(mockCommandB_Override);
// Ensure the other commands are still present.
expect(commands).toEqual(
expect.arrayContaining([
mockCommandA,
mockCommandC,
mockCommandB_Override,
]),
);
});
it('should handle loaders that return an empty array of commands gracefully', async () => {
const loader1 = new MockCommandLoader([mockCommandA]);
const emptyLoader = new MockCommandLoader([]);
const loader3 = new MockCommandLoader([mockCommandB]);
const service = await CommandService.create(
[loader1, emptyLoader, loader3],
new AbortController().signal,
);
const commands = service.getCommands();
expect(emptyLoader.loadCommands).toHaveBeenCalledTimes(1);
expect(commands).toHaveLength(2);
expect(commands).toEqual(
expect.arrayContaining([mockCommandA, mockCommandB]),
);
});
it('should load commands from successful loaders even if one fails', async () => {
const successfulLoader = new MockCommandLoader([mockCommandA]);
const failingLoader = new MockCommandLoader([]);
const error = new Error('Loader failed');
vi.spyOn(failingLoader, 'loadCommands').mockRejectedValue(error);
const service = await CommandService.create(
[successfulLoader, failingLoader],
new AbortController().signal,
);
const commands = service.getCommands();
expect(commands).toHaveLength(1);
expect(commands).toEqual([mockCommandA]);
expect(console.debug).toHaveBeenCalledWith(
'A command loader failed:',
error,
);
});
it('getCommands should return a readonly array that cannot be mutated', async () => {
const service = await CommandService.create(
[new MockCommandLoader([mockCommandA])],
new AbortController().signal,
);
const commands = service.getCommands();
// Expect it to throw a TypeError at runtime because the array is frozen.
expect(() => {
// @ts-expect-error - Testing immutability is intentional here.
commands.push(mockCommandB);
}).toThrow();
// Verify the original array was not mutated.
expect(service.getCommands()).toHaveLength(1);
});
it('should pass the abort signal to all loaders', async () => {
const controller = new AbortController();
const signal = controller.signal;
const loader1 = new MockCommandLoader([mockCommandA]);
const loader2 = new MockCommandLoader([mockCommandB]);
await CommandService.create([loader1, loader2], signal);
expect(loader1.loadCommands).toHaveBeenCalledTimes(1);
expect(loader1.loadCommands).toHaveBeenCalledWith(signal);
expect(loader2.loadCommands).toHaveBeenCalledTimes(1);
expect(loader2.loadCommands).toHaveBeenCalledWith(signal);
const commandNames = tree.map((cmd) => cmd.name);
expect(commandNames).not.toContain('memory'); // Verify it didn't load production commands.
});
});
});

View File

@@ -5,79 +5,40 @@
*/
import { SlashCommand } from '../ui/commands/types.js';
import { ICommandLoader } from './types.js';
import { memoryCommand } from '../ui/commands/memoryCommand.js';
import { helpCommand } from '../ui/commands/helpCommand.js';
import { clearCommand } from '../ui/commands/clearCommand.js';
import { authCommand } from '../ui/commands/authCommand.js';
import { themeCommand } from '../ui/commands/themeCommand.js';
import { privacyCommand } from '../ui/commands/privacyCommand.js';
import { aboutCommand } from '../ui/commands/aboutCommand.js';
const loadBuiltInCommands = async (): Promise<SlashCommand[]> => [
aboutCommand,
authCommand,
clearCommand,
helpCommand,
memoryCommand,
privacyCommand,
themeCommand,
];
/**
* Orchestrates the discovery and loading of all slash commands for the CLI.
*
* This service operates on a provider-based loader pattern. It is initialized
* with an array of `ICommandLoader` instances, each responsible for fetching
* commands from a specific source (e.g., built-in code, local files).
*
* The CommandService is responsible for invoking these loaders, aggregating their
* results, and resolving any name conflicts. This architecture allows the command
* system to be extended with new sources without modifying the service itself.
*/
export class CommandService {
/**
* Private constructor to enforce the use of the async factory.
* @param commands A readonly array of the fully loaded and de-duplicated commands.
*/
private constructor(private readonly commands: readonly SlashCommand[]) {}
private commands: SlashCommand[] = [];
/**
* Asynchronously creates and initializes a new CommandService instance.
*
* This factory method orchestrates the entire command loading process. It
* runs all provided loaders in parallel, aggregates their results, handles
* name conflicts by letting the last-loaded command win, and then returns a
* fully constructed `CommandService` instance.
*
* @param loaders An array of objects that conform to the `ICommandLoader`
* interface. The order of loaders is significant: if multiple loaders
* provide a command with the same name, the command from the loader that
* appears later in the array will take precedence.
* @param signal An AbortSignal to cancel the loading process.
* @returns A promise that resolves to a new, fully initialized `CommandService` instance.
*/
static async create(
loaders: ICommandLoader[],
signal: AbortSignal,
): Promise<CommandService> {
const results = await Promise.allSettled(
loaders.map((loader) => loader.loadCommands(signal)),
);
const allCommands: SlashCommand[] = [];
for (const result of results) {
if (result.status === 'fulfilled') {
allCommands.push(...result.value);
} else {
console.debug('A command loader failed:', result.reason);
}
}
// De-duplicate commands using a Map. The last one found with a given name wins.
// This creates a natural override system based on the order of the loaders
// passed to the constructor.
const commandMap = new Map<string, SlashCommand>();
for (const cmd of allCommands) {
commandMap.set(cmd.name, cmd);
}
const finalCommands = Object.freeze(Array.from(commandMap.values()));
return new CommandService(finalCommands);
constructor(
private commandLoader: () => Promise<SlashCommand[]> = loadBuiltInCommands,
) {
// The constructor can be used for dependency injection in the future.
}
/**
* Retrieves the currently loaded and de-duplicated list of slash commands.
*
* This method is a safe accessor for the service's state. It returns a
* readonly array, preventing consumers from modifying the service's internal state.
*
* @returns A readonly, unified array of available `SlashCommand` objects.
*/
getCommands(): readonly SlashCommand[] {
async loadCommands(): Promise<void> {
// For now, we only load the built-in commands.
// File-based and remote commands will be added later.
this.commands = await this.commandLoader();
}
getCommands(): SlashCommand[] {
return this.commands;
}
}
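
Taken together, the loader-based API documented above can be exercised roughly as follows; the stub loader stands in for a real ICommandLoader, and the command object is simplified for illustration.

```typescript
// Hypothetical usage of the loader-based CommandService factory.
const stubLoader: ICommandLoader = {
  loadCommands: async () => [
    { name: 'hello', description: 'Say hello', kind: CommandKind.BUILT_IN } as SlashCommand,
  ],
};
const service = await CommandService.create([stubLoader], new AbortController().signal);
console.log(service.getCommands().map((cmd) => cmd.name)); // ['hello']
```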

View File

@@ -1,606 +0,0 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { FileCommandLoader } from './FileCommandLoader.js';
import {
Config,
getProjectCommandsDir,
getUserCommandsDir,
} from '@qwen-code/qwen-code-core';
import mock from 'mock-fs';
import { assert, vi } from 'vitest';
import { createMockCommandContext } from '../test-utils/mockCommandContext.js';
import {
SHELL_INJECTION_TRIGGER,
SHORTHAND_ARGS_PLACEHOLDER,
} from './prompt-processors/types.js';
import {
ConfirmationRequiredError,
ShellProcessor,
} from './prompt-processors/shellProcessor.js';
import { ShorthandArgumentProcessor } from './prompt-processors/argumentProcessor.js';
const mockShellProcess = vi.hoisted(() => vi.fn());
vi.mock('./prompt-processors/shellProcessor.js', () => ({
ShellProcessor: vi.fn().mockImplementation(() => ({
process: mockShellProcess,
})),
ConfirmationRequiredError: class extends Error {
constructor(
message: string,
public commandsToConfirm: string[],
) {
super(message);
this.name = 'ConfirmationRequiredError';
}
},
}));
vi.mock('./prompt-processors/argumentProcessor.js', async (importOriginal) => {
const original =
await importOriginal<
typeof import('./prompt-processors/argumentProcessor.js')
>();
return {
ShorthandArgumentProcessor: vi
.fn()
.mockImplementation(() => new original.ShorthandArgumentProcessor()),
DefaultArgumentProcessor: vi
.fn()
.mockImplementation(() => new original.DefaultArgumentProcessor()),
};
});
vi.mock('@qwen-code/qwen-code-core', async (importOriginal) => {
const original =
await importOriginal<typeof import('@qwen-code/qwen-code-core')>();
return {
...original,
isCommandAllowed: vi.fn(),
ShellExecutionService: {
execute: vi.fn(),
},
};
});
describe('FileCommandLoader', () => {
const signal: AbortSignal = new AbortController().signal;
beforeEach(() => {
vi.clearAllMocks();
mockShellProcess.mockImplementation((prompt) => Promise.resolve(prompt));
});
afterEach(() => {
mock.restore();
});
it('loads a single command from a file', async () => {
const userCommandsDir = getUserCommandsDir();
mock({
[userCommandsDir]: {
'test.toml': 'prompt = "This is a test prompt"',
},
});
const loader = new FileCommandLoader(null as unknown as Config);
const commands = await loader.loadCommands(signal);
expect(commands).toHaveLength(1);
const command = commands[0];
expect(command).toBeDefined();
expect(command.name).toBe('test');
const result = await command.action?.(
createMockCommandContext({
invocation: {
raw: '/test',
name: 'test',
args: '',
},
}),
'',
);
if (result?.type === 'submit_prompt') {
expect(result.content).toBe('This is a test prompt');
} else {
assert.fail('Incorrect action type');
}
});
// Symlink creation on Windows requires special permissions that are not
// available in the standard CI environment. Therefore, we skip these tests
// on Windows to prevent CI failures. The core functionality is still
// validated on Linux and macOS.
const itif = (condition: boolean) => (condition ? it : it.skip);
itif(process.platform !== 'win32')(
'loads commands from a symlinked directory',
async () => {
const userCommandsDir = getUserCommandsDir();
const realCommandsDir = '/real/commands';
mock({
[realCommandsDir]: {
'test.toml': 'prompt = "This is a test prompt"',
},
// Symlink the user commands directory to the real one
[userCommandsDir]: mock.symlink({
path: realCommandsDir,
}),
});
const loader = new FileCommandLoader(null as unknown as Config);
const commands = await loader.loadCommands(signal);
expect(commands).toHaveLength(1);
const command = commands[0];
expect(command).toBeDefined();
expect(command.name).toBe('test');
},
);
itif(process.platform !== 'win32')(
'loads commands from a symlinked subdirectory',
async () => {
const userCommandsDir = getUserCommandsDir();
const realNamespacedDir = '/real/namespaced-commands';
mock({
[userCommandsDir]: {
namespaced: mock.symlink({
path: realNamespacedDir,
}),
},
[realNamespacedDir]: {
'my-test.toml': 'prompt = "This is a test prompt"',
},
});
const loader = new FileCommandLoader(null as unknown as Config);
const commands = await loader.loadCommands(signal);
expect(commands).toHaveLength(1);
const command = commands[0];
expect(command).toBeDefined();
expect(command.name).toBe('namespaced:my-test');
},
);
it('loads multiple commands', async () => {
const userCommandsDir = getUserCommandsDir();
mock({
[userCommandsDir]: {
'test1.toml': 'prompt = "Prompt 1"',
'test2.toml': 'prompt = "Prompt 2"',
},
});
const loader = new FileCommandLoader(null as unknown as Config);
const commands = await loader.loadCommands(signal);
expect(commands).toHaveLength(2);
});
it('creates deeply nested namespaces correctly', async () => {
const userCommandsDir = getUserCommandsDir();
mock({
[userCommandsDir]: {
gcp: {
pipelines: {
'run.toml': 'prompt = "run pipeline"',
},
},
},
});
const loader = new FileCommandLoader({
getProjectRoot: () => '/path/to/project',
} as Config);
const commands = await loader.loadCommands(signal);
expect(commands).toHaveLength(1);
expect(commands[0]!.name).toBe('gcp:pipelines:run');
});
it('creates namespaces from nested directories', async () => {
const userCommandsDir = getUserCommandsDir();
mock({
[userCommandsDir]: {
git: {
'commit.toml': 'prompt = "git commit prompt"',
},
},
});
const loader = new FileCommandLoader(null as unknown as Config);
const commands = await loader.loadCommands(signal);
expect(commands).toHaveLength(1);
const command = commands[0];
expect(command).toBeDefined();
expect(command.name).toBe('git:commit');
});
it('overrides user commands with project commands', async () => {
const userCommandsDir = getUserCommandsDir();
const projectCommandsDir = getProjectCommandsDir(process.cwd());
mock({
[userCommandsDir]: {
'test.toml': 'prompt = "User prompt"',
},
[projectCommandsDir]: {
'test.toml': 'prompt = "Project prompt"',
},
});
const loader = new FileCommandLoader({
getProjectRoot: () => process.cwd(),
} as Config);
const commands = await loader.loadCommands(signal);
expect(commands).toHaveLength(1);
const command = commands[0];
expect(command).toBeDefined();
const result = await command.action?.(
createMockCommandContext({
invocation: {
raw: '/test',
name: 'test',
args: '',
},
}),
'',
);
if (result?.type === 'submit_prompt') {
expect(result.content).toBe('Project prompt');
} else {
assert.fail('Incorrect action type');
}
});
it('ignores files with TOML syntax errors', async () => {
const userCommandsDir = getUserCommandsDir();
mock({
[userCommandsDir]: {
'invalid.toml': 'this is not valid toml',
'good.toml': 'prompt = "This one is fine"',
},
});
const loader = new FileCommandLoader(null as unknown as Config);
const commands = await loader.loadCommands(signal);
expect(commands).toHaveLength(1);
expect(commands[0].name).toBe('good');
});
it('ignores files that are semantically invalid (missing prompt)', async () => {
const userCommandsDir = getUserCommandsDir();
mock({
[userCommandsDir]: {
'no_prompt.toml': 'description = "This file is missing a prompt"',
'good.toml': 'prompt = "This one is fine"',
},
});
const loader = new FileCommandLoader(null as unknown as Config);
const commands = await loader.loadCommands(signal);
expect(commands).toHaveLength(1);
expect(commands[0].name).toBe('good');
});
it('handles filename edge cases correctly', async () => {
const userCommandsDir = getUserCommandsDir();
mock({
[userCommandsDir]: {
'test.v1.toml': 'prompt = "Test prompt"',
},
});
const loader = new FileCommandLoader(null as unknown as Config);
const commands = await loader.loadCommands(signal);
const command = commands[0];
expect(command).toBeDefined();
expect(command.name).toBe('test.v1');
});
it('handles file system errors gracefully', async () => {
mock({}); // Mock an empty file system
const loader = new FileCommandLoader(null as unknown as Config);
const commands = await loader.loadCommands(signal);
expect(commands).toHaveLength(0);
});
it('uses a default description if not provided', async () => {
const userCommandsDir = getUserCommandsDir();
mock({
[userCommandsDir]: {
'test.toml': 'prompt = "Test prompt"',
},
});
const loader = new FileCommandLoader(null as unknown as Config);
const commands = await loader.loadCommands(signal);
const command = commands[0];
expect(command).toBeDefined();
expect(command.description).toBe('Custom command from test.toml');
});
it('uses the provided description', async () => {
const userCommandsDir = getUserCommandsDir();
mock({
[userCommandsDir]: {
'test.toml': 'prompt = "Test prompt"\ndescription = "My test command"',
},
});
const loader = new FileCommandLoader(null as unknown as Config);
const commands = await loader.loadCommands(signal);
const command = commands[0];
expect(command).toBeDefined();
expect(command.description).toBe('My test command');
});
it('should sanitize colons in filenames to prevent namespace conflicts', async () => {
const userCommandsDir = getUserCommandsDir();
mock({
[userCommandsDir]: {
'legacy:command.toml': 'prompt = "This is a legacy command"',
},
});
const loader = new FileCommandLoader(null as unknown as Config);
const commands = await loader.loadCommands(signal);
expect(commands).toHaveLength(1);
const command = commands[0];
expect(command).toBeDefined();
// Verify that the ':' in the filename was replaced with an '_'
expect(command.name).toBe('legacy_command');
});
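
The naming behavior exercised by the namespace and sanitization tests above amounts to the rule sketched below; the helper is a hypothetical restatement, not part of the loader itself.

```typescript
import path from 'path';

// Hypothetical restatement of the naming rule: strip '.toml', use directories
// as ':'-separated namespaces, and replace literal ':' in file names with '_'.
function toCommandName(relativePath: string): string {
  return relativePath
    .replace(/\.toml$/, '')
    .split(path.sep)
    .map((segment) => segment.replaceAll(':', '_'))
    .join(':');
}

toCommandName(path.join('gcp', 'pipelines', 'run.toml')); // 'gcp:pipelines:run'
toCommandName('legacy:command.toml');                      // 'legacy_command'
```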
describe('Shorthand Argument Processor Integration', () => {
it('correctly processes a command with {{args}}', async () => {
const userCommandsDir = getUserCommandsDir();
mock({
[userCommandsDir]: {
'shorthand.toml':
'prompt = "The user wants to: {{args}}"\ndescription = "Shorthand test"',
},
});
const loader = new FileCommandLoader(null as unknown as Config);
const commands = await loader.loadCommands(signal);
const command = commands.find((c) => c.name === 'shorthand');
expect(command).toBeDefined();
const result = await command!.action?.(
createMockCommandContext({
invocation: {
raw: '/shorthand do something cool',
name: 'shorthand',
args: 'do something cool',
},
}),
'do something cool',
);
expect(result?.type).toBe('submit_prompt');
if (result?.type === 'submit_prompt') {
expect(result.content).toBe('The user wants to: do something cool');
}
});
});
describe('Default Argument Processor Integration', () => {
it('correctly processes a command without {{args}}', async () => {
const userCommandsDir = getUserCommandsDir();
mock({
[userCommandsDir]: {
'model_led.toml':
'prompt = "This is the instruction."\ndescription = "Default processor test"',
},
});
const loader = new FileCommandLoader(null as unknown as Config);
const commands = await loader.loadCommands(signal);
const command = commands.find((c) => c.name === 'model_led');
expect(command).toBeDefined();
const result = await command!.action?.(
createMockCommandContext({
invocation: {
raw: '/model_led 1.2.0 added "a feature"',
name: 'model_led',
args: '1.2.0 added "a feature"',
},
}),
'1.2.0 added "a feature"',
);
expect(result?.type).toBe('submit_prompt');
if (result?.type === 'submit_prompt') {
const expectedContent =
'This is the instruction.\n\n/model_led 1.2.0 added "a feature"';
expect(result.content).toBe(expectedContent);
}
});
});
describe('Shell Processor Integration', () => {
it('instantiates ShellProcessor if the trigger is present', async () => {
const userCommandsDir = getUserCommandsDir();
mock({
[userCommandsDir]: {
'shell.toml': `prompt = "Run this: ${SHELL_INJECTION_TRIGGER}echo hello}"`,
},
});
const loader = new FileCommandLoader(null as unknown as Config);
await loader.loadCommands(signal);
expect(ShellProcessor).toHaveBeenCalledWith('shell');
});
it('does not instantiate ShellProcessor if trigger is missing', async () => {
const userCommandsDir = getUserCommandsDir();
mock({
[userCommandsDir]: {
'regular.toml': `prompt = "Just a regular prompt"`,
},
});
const loader = new FileCommandLoader(null as unknown as Config);
await loader.loadCommands(signal);
expect(ShellProcessor).not.toHaveBeenCalled();
});
it('returns a "submit_prompt" action if shell processing succeeds', async () => {
const userCommandsDir = getUserCommandsDir();
mock({
[userCommandsDir]: {
'shell.toml': `prompt = "Run !{echo 'hello'}"`,
},
});
mockShellProcess.mockResolvedValue('Run hello');
const loader = new FileCommandLoader(null as unknown as Config);
const commands = await loader.loadCommands(signal);
const command = commands.find((c) => c.name === 'shell');
expect(command).toBeDefined();
const result = await command!.action!(
createMockCommandContext({
invocation: { raw: '/shell', name: 'shell', args: '' },
}),
'',
);
expect(result?.type).toBe('submit_prompt');
if (result?.type === 'submit_prompt') {
expect(result.content).toBe('Run hello');
}
});
it('returns a "confirm_shell_commands" action if shell processing requires it', async () => {
const userCommandsDir = getUserCommandsDir();
const rawInvocation = '/shell rm -rf /';
mock({
[userCommandsDir]: {
'shell.toml': `prompt = "Run !{rm -rf /}"`,
},
});
// Mock the processor to throw the specific error
const error = new ConfirmationRequiredError('Confirmation needed', [
'rm -rf /',
]);
mockShellProcess.mockRejectedValue(error);
const loader = new FileCommandLoader(null as unknown as Config);
const commands = await loader.loadCommands(signal);
const command = commands.find((c) => c.name === 'shell');
expect(command).toBeDefined();
const result = await command!.action!(
createMockCommandContext({
invocation: { raw: rawInvocation, name: 'shell', args: 'rm -rf /' },
}),
'rm -rf /',
);
expect(result?.type).toBe('confirm_shell_commands');
if (result?.type === 'confirm_shell_commands') {
expect(result.commandsToConfirm).toEqual(['rm -rf /']);
expect(result.originalInvocation.raw).toBe(rawInvocation);
}
});
it('re-throws other errors from the processor', async () => {
const userCommandsDir = getUserCommandsDir();
mock({
[userCommandsDir]: {
'shell.toml': `prompt = "Run !{something}"`,
},
});
const genericError = new Error('Something else went wrong');
mockShellProcess.mockRejectedValue(genericError);
const loader = new FileCommandLoader(null as unknown as Config);
const commands = await loader.loadCommands(signal);
const command = commands.find((c) => c.name === 'shell');
expect(command).toBeDefined();
await expect(
command!.action!(
createMockCommandContext({
invocation: { raw: '/shell', name: 'shell', args: '' },
}),
'',
),
).rejects.toThrow('Something else went wrong');
});
it('assembles the processor pipeline in the correct order (Shell -> Argument)', async () => {
const userCommandsDir = getUserCommandsDir();
mock({
[userCommandsDir]: {
'pipeline.toml': `
prompt = "Shell says: ${SHELL_INJECTION_TRIGGER}echo foo} and user says: ${SHORTHAND_ARGS_PLACEHOLDER}"
`,
},
});
// Mock the process methods to track call order
const argProcessMock = vi
.fn()
.mockImplementation((p) => `${p}-arg-processed`);
// Redefine the mock for this specific test
mockShellProcess.mockImplementation((p) =>
Promise.resolve(`${p}-shell-processed`),
);
vi.mocked(ShorthandArgumentProcessor).mockImplementation(
() =>
({
process: argProcessMock,
}) as unknown as ShorthandArgumentProcessor,
);
const loader = new FileCommandLoader(null as unknown as Config);
const commands = await loader.loadCommands(signal);
const command = commands.find((c) => c.name === 'pipeline');
expect(command).toBeDefined();
await command!.action!(
createMockCommandContext({
invocation: {
raw: '/pipeline bar',
name: 'pipeline',
args: 'bar',
},
}),
'bar',
);
// Verify that the shell processor was called before the argument processor
expect(mockShellProcess.mock.invocationCallOrder[0]).toBeLessThan(
argProcessMock.mock.invocationCallOrder[0],
);
// Also verify the flow of the prompt through the processors
expect(mockShellProcess).toHaveBeenCalledWith(
expect.any(String),
expect.any(Object),
);
expect(argProcessMock).toHaveBeenCalledWith(
expect.stringContaining('-shell-processed'), // It receives the output of the shell processor
expect.any(Object),
);
});
});
});

View File

@@ -1,240 +0,0 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { promises as fs } from 'fs';
import path from 'path';
import toml from '@iarna/toml';
import { glob } from 'glob';
import { z } from 'zod';
import {
Config,
getProjectCommandsDir,
getUserCommandsDir,
} from '@qwen-code/qwen-code-core';
import { ICommandLoader } from './types.js';
import {
CommandContext,
CommandKind,
SlashCommand,
SlashCommandActionReturn,
} from '../ui/commands/types.js';
import {
DefaultArgumentProcessor,
ShorthandArgumentProcessor,
} from './prompt-processors/argumentProcessor.js';
import {
IPromptProcessor,
SHORTHAND_ARGS_PLACEHOLDER,
SHELL_INJECTION_TRIGGER,
} from './prompt-processors/types.js';
import {
ConfirmationRequiredError,
ShellProcessor,
} from './prompt-processors/shellProcessor.js';
/**
* Defines the Zod schema for a command definition file. This serves as the
* single source of truth for both validation and type inference.
*/
const TomlCommandDefSchema = z.object({
prompt: z.string({
required_error: "The 'prompt' field is required.",
invalid_type_error: "The 'prompt' field must be a string.",
}),
description: z.string().optional(),
});
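
Under this schema a command file needs at least a `prompt` string and may carry an optional `description`. A sketch of how a parsed TOML object validates; the sample objects stand in for the result of `toml.parse()`.

```typescript
// Hypothetical validation against TomlCommandDefSchema.
const ok = TomlCommandDefSchema.safeParse({
  prompt: 'Summarize the staged changes',
  description: 'Summarize staged changes',
});
console.log(ok.success); // true

const missingPrompt = TomlCommandDefSchema.safeParse({ description: 'no prompt here' });
console.log(missingPrompt.success); // false ("The 'prompt' field is required.")
```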
/**
* Discovers and loads custom slash commands from .toml files in both the
* user's global config directory and the current project's directory.
*
* This loader is responsible for:
* - Recursively scanning command directories.
* - Parsing and validating TOML files.
* - Adapting valid definitions into executable SlashCommand objects.
* - Handling file system errors and malformed files gracefully.
*/
export class FileCommandLoader implements ICommandLoader {
private readonly projectRoot: string;
constructor(private readonly config: Config | null) {
this.projectRoot = config?.getProjectRoot() || process.cwd();
}
/**
* Loads all commands, applying the precedence rule where project-level
* commands override user-level commands with the same name.
* @param signal An AbortSignal to cancel the loading process.
* @returns A promise that resolves to an array of loaded SlashCommands.
*/
async loadCommands(signal: AbortSignal): Promise<SlashCommand[]> {
const commandMap = new Map<string, SlashCommand>();
const globOptions = {
nodir: true,
dot: true,
signal,
follow: true,
};
try {
// User Commands
const userDir = getUserCommandsDir();
const userFiles = await glob('**/*.toml', {
...globOptions,
cwd: userDir,
});
const userCommandPromises = userFiles.map((file) =>
this.parseAndAdaptFile(path.join(userDir, file), userDir),
);
const userCommands = (await Promise.all(userCommandPromises)).filter(
(cmd): cmd is SlashCommand => cmd !== null,
);
for (const cmd of userCommands) {
commandMap.set(cmd.name, cmd);
}
// Project Commands (these intentionally override user commands)
const projectDir = getProjectCommandsDir(this.projectRoot);
const projectFiles = await glob('**/*.toml', {
...globOptions,
cwd: projectDir,
});
const projectCommandPromises = projectFiles.map((file) =>
this.parseAndAdaptFile(path.join(projectDir, file), projectDir),
);
const projectCommands = (
await Promise.all(projectCommandPromises)
).filter((cmd): cmd is SlashCommand => cmd !== null);
for (const cmd of projectCommands) {
commandMap.set(cmd.name, cmd);
}
} catch (error) {
console.error(`[FileCommandLoader] Error during file search:`, error);
}
return Array.from(commandMap.values());
}
/**
* Parses a single .toml file and transforms it into a SlashCommand object.
* @param filePath The absolute path to the .toml file.
* @param baseDir The root command directory for name calculation.
* @returns A promise resolving to a SlashCommand, or null if the file is invalid.
*/
private async parseAndAdaptFile(
filePath: string,
baseDir: string,
): Promise<SlashCommand | null> {
let fileContent: string;
try {
fileContent = await fs.readFile(filePath, 'utf-8');
} catch (error: unknown) {
console.error(
`[FileCommandLoader] Failed to read file ${filePath}:`,
error instanceof Error ? error.message : String(error),
);
return null;
}
let parsed: unknown;
try {
parsed = toml.parse(fileContent);
} catch (error: unknown) {
console.error(
`[FileCommandLoader] Failed to parse TOML file ${filePath}:`,
error instanceof Error ? error.message : String(error),
);
return null;
}
const validationResult = TomlCommandDefSchema.safeParse(parsed);
if (!validationResult.success) {
console.error(
`[FileCommandLoader] Skipping invalid command file: ${filePath}. Validation errors:`,
validationResult.error.flatten(),
);
return null;
}
const validDef = validationResult.data;
const relativePathWithExt = path.relative(baseDir, filePath);
const relativePath = relativePathWithExt.substring(
0,
relativePathWithExt.length - 5, // length of '.toml'
);
const commandName = relativePath
.split(path.sep)
// Sanitize each path segment to prevent ambiguity. Since ':' is our
// namespace separator, we replace any literal colons in filenames
// with underscores to avoid naming conflicts.
.map((segment) => segment.replaceAll(':', '_'))
.join(':');
const processors: IPromptProcessor[] = [];
// Add the Shell Processor if needed.
if (validDef.prompt.includes(SHELL_INJECTION_TRIGGER)) {
processors.push(new ShellProcessor(commandName));
}
// The presence of '{{args}}' is the switch that determines the behavior.
if (validDef.prompt.includes(SHORTHAND_ARGS_PLACEHOLDER)) {
processors.push(new ShorthandArgumentProcessor());
} else {
processors.push(new DefaultArgumentProcessor());
}
return {
name: commandName,
description:
validDef.description ||
`Custom command from ${path.basename(filePath)}`,
kind: CommandKind.FILE,
action: async (
context: CommandContext,
_args: string,
): Promise<SlashCommandActionReturn> => {
if (!context.invocation) {
console.error(
`[FileCommandLoader] Critical error: Command '${commandName}' was executed without invocation context.`,
);
return {
type: 'submit_prompt',
content: validDef.prompt, // Fallback to unprocessed prompt
};
}
try {
let processedPrompt = validDef.prompt;
for (const processor of processors) {
processedPrompt = await processor.process(processedPrompt, context);
}
return {
type: 'submit_prompt',
content: processedPrompt,
};
} catch (e) {
// Check if it's our specific error type
if (e instanceof ConfirmationRequiredError) {
// Halt and request confirmation from the UI layer.
return {
type: 'confirm_shell_commands',
commandsToConfirm: e.commandsToConfirm,
originalInvocation: {
raw: context.invocation.raw,
},
};
}
// Re-throw other errors to be handled by the global error handler.
throw e;
}
},
};
}
}
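As a standalone illustration of the name derivation performed above (directory separators become ':' namespaces, literal colons become underscores), a minimal helper might look like the following sketch; it is not part of the loader itself.

import path from 'path';

// 'git/commit.toml' -> 'git:commit'; 'odd:name.toml' -> 'odd_name'
function toCommandName(relativePathWithExt: string): string {
  const relativePath = relativePathWithExt.slice(0, -'.toml'.length);
  return relativePath
    .split(path.sep)
    .map((segment) => segment.replaceAll(':', '_'))
    .join(':');
}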

View File

@@ -1,231 +0,0 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import {
Config,
getErrorMessage,
getMCPServerPrompts,
} from '@qwen-code/qwen-code-core';
import {
CommandContext,
CommandKind,
SlashCommand,
SlashCommandActionReturn,
} from '../ui/commands/types.js';
import { ICommandLoader } from './types.js';
import { PromptArgument } from '@modelcontextprotocol/sdk/types.js';
/**
* Discovers and loads executable slash commands from prompts exposed by
* Model-Context-Protocol (MCP) servers.
*/
export class McpPromptLoader implements ICommandLoader {
constructor(private readonly config: Config | null) {}
/**
* Loads all available prompts from all configured MCP servers and adapts
* them into executable SlashCommand objects.
*
* @param _signal An AbortSignal (unused for this synchronous loader).
* @returns A promise that resolves to an array of loaded SlashCommands.
*/
loadCommands(_signal: AbortSignal): Promise<SlashCommand[]> {
const promptCommands: SlashCommand[] = [];
if (!this.config) {
return Promise.resolve([]);
}
const mcpServers = this.config.getMcpServers() || {};
for (const serverName in mcpServers) {
const prompts = getMCPServerPrompts(this.config, serverName) || [];
for (const prompt of prompts) {
const commandName = `${prompt.name}`;
const newPromptCommand: SlashCommand = {
name: commandName,
description: prompt.description || `Invoke prompt ${prompt.name}`,
kind: CommandKind.MCP_PROMPT,
subCommands: [
{
name: 'help',
description: 'Show help for this prompt',
kind: CommandKind.MCP_PROMPT,
action: async (): Promise<SlashCommandActionReturn> => {
if (!prompt.arguments || prompt.arguments.length === 0) {
return {
type: 'message',
messageType: 'info',
content: `Prompt "${prompt.name}" has no arguments.`,
};
}
let helpMessage = `Arguments for "${prompt.name}":\n\n`;
if (prompt.arguments && prompt.arguments.length > 0) {
helpMessage += `You can provide arguments by name (e.g., --argName="value") or by position.\n\n`;
helpMessage += `e.g., ${prompt.name} ${prompt.arguments?.map((_) => `"foo"`)} is equivalent to ${prompt.name} ${prompt.arguments?.map((arg) => `--${arg.name}="foo"`)}\n\n`;
}
for (const arg of prompt.arguments) {
helpMessage += ` --${arg.name}\n`;
if (arg.description) {
helpMessage += ` ${arg.description}\n`;
}
helpMessage += ` (required: ${
arg.required ? 'yes' : 'no'
})\n\n`;
}
return {
type: 'message',
messageType: 'info',
content: helpMessage,
};
},
},
],
action: async (
context: CommandContext,
args: string,
): Promise<SlashCommandActionReturn> => {
if (!this.config) {
return {
type: 'message',
messageType: 'error',
content: 'Config not loaded.',
};
}
const promptInputs = this.parseArgs(args, prompt.arguments);
if (promptInputs instanceof Error) {
return {
type: 'message',
messageType: 'error',
content: promptInputs.message,
};
}
try {
const mcpServers = this.config.getMcpServers() || {};
const mcpServerConfig = mcpServers[serverName];
if (!mcpServerConfig) {
return {
type: 'message',
messageType: 'error',
content: `MCP server config not found for '${serverName}'.`,
};
}
const result = await prompt.invoke(promptInputs);
if (result.error) {
return {
type: 'message',
messageType: 'error',
content: `Error invoking prompt: ${result.error}`,
};
}
if (!result.messages?.[0]?.content?.text) {
return {
type: 'message',
messageType: 'error',
content:
'Received an empty or invalid prompt response from the server.',
};
}
return {
type: 'submit_prompt',
content: JSON.stringify(result.messages[0].content.text),
};
} catch (error) {
return {
type: 'message',
messageType: 'error',
content: `Error: ${getErrorMessage(error)}`,
};
}
},
completion: async (_: CommandContext, partialArg: string) => {
if (!prompt || !prompt.arguments) {
return [];
}
const suggestions: string[] = [];
const usedArgNames = new Set(
(partialArg.match(/--([^=]+)/g) || []).map((s) => s.substring(2)),
);
for (const arg of prompt.arguments) {
if (!usedArgNames.has(arg.name)) {
suggestions.push(`--${arg.name}=""`);
}
}
return suggestions;
},
};
promptCommands.push(newPromptCommand);
}
}
return Promise.resolve(promptCommands);
}
private parseArgs(
userArgs: string,
promptArgs: PromptArgument[] | undefined,
): Record<string, unknown> | Error {
const argValues: { [key: string]: string } = {};
const promptInputs: Record<string, unknown> = {};
// arg parsing: --key="value" or --key=value
const namedArgRegex = /--([^=]+)=(?:"((?:\\.|[^"\\])*)"|([^ ]*))/g;
let match;
const remainingArgs: string[] = [];
let lastIndex = 0;
while ((match = namedArgRegex.exec(userArgs)) !== null) {
const key = match[1];
const value = match[2] ?? match[3]; // Quoted or unquoted value
argValues[key] = value;
// Capture text between matches as potential positional args
if (match.index > lastIndex) {
remainingArgs.push(userArgs.substring(lastIndex, match.index).trim());
}
lastIndex = namedArgRegex.lastIndex;
}
// Capture any remaining text after the last named arg
if (lastIndex < userArgs.length) {
remainingArgs.push(userArgs.substring(lastIndex).trim());
}
const positionalArgs = remainingArgs.join(' ').split(/ +/);
if (!promptArgs) {
return promptInputs;
}
for (const arg of promptArgs) {
if (argValues[arg.name]) {
promptInputs[arg.name] = argValues[arg.name];
}
}
const unfilledArgs = promptArgs.filter(
(arg) => arg.required && !promptInputs[arg.name],
);
const missingArgs: string[] = [];
for (let i = 0; i < unfilledArgs.length; i++) {
if (positionalArgs.length > i && positionalArgs[i]) {
promptInputs[unfilledArgs[i].name] = positionalArgs[i];
} else {
missingArgs.push(unfilledArgs[i].name);
}
}
if (missingArgs.length > 0) {
const missingArgNames = missingArgs.map((name) => `--${name}`).join(', ');
return new Error(`Missing required argument(s): ${missingArgNames}`);
}
return promptInputs;
}
}
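The named-argument syntax accepted by `parseArgs` can be exercised in isolation with the same regular expression; the helper below is a sketch for illustration and is not exported by the loader.

// Matches --key="quoted value" or --key=value, as in parseArgs above.
const NAMED_ARG_REGEX = /--([^=]+)=(?:"((?:\\.|[^"\\])*)"|([^ ]*))/g;

function extractNamedArgs(userArgs: string): Record<string, string> {
  const values: Record<string, string> = {};
  for (const match of userArgs.matchAll(NAMED_ARG_REGEX)) {
    values[match[1]] = match[2] ?? match[3]; // quoted or unquoted value
  }
  return values;
}

// extractNamedArgs('--title="Q3 report" --draft=true')
//   -> { title: 'Q3 report', draft: 'true' }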

View File

@@ -1,99 +0,0 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import {
DefaultArgumentProcessor,
ShorthandArgumentProcessor,
} from './argumentProcessor.js';
import { createMockCommandContext } from '../../test-utils/mockCommandContext.js';
describe('Argument Processors', () => {
describe('ShorthandArgumentProcessor', () => {
const processor = new ShorthandArgumentProcessor();
it('should replace a single {{args}} instance', async () => {
const prompt = 'Refactor the following code: {{args}}';
const context = createMockCommandContext({
invocation: {
raw: '/refactor make it faster',
name: 'refactor',
args: 'make it faster',
},
});
const result = await processor.process(prompt, context);
expect(result).toBe('Refactor the following code: make it faster');
});
it('should replace multiple {{args}} instances', async () => {
const prompt = 'User said: {{args}}. I repeat: {{args}}!';
const context = createMockCommandContext({
invocation: {
raw: '/repeat hello world',
name: 'repeat',
args: 'hello world',
},
});
const result = await processor.process(prompt, context);
expect(result).toBe('User said: hello world. I repeat: hello world!');
});
it('should handle an empty args string', async () => {
const prompt = 'The user provided no input: {{args}}.';
const context = createMockCommandContext({
invocation: {
raw: '/input',
name: 'input',
args: '',
},
});
const result = await processor.process(prompt, context);
expect(result).toBe('The user provided no input: .');
});
it('should not change the prompt if {{args}} is not present', async () => {
const prompt = 'This is a static prompt.';
const context = createMockCommandContext({
invocation: {
raw: '/static some arguments',
name: 'static',
args: 'some arguments',
},
});
const result = await processor.process(prompt, context);
expect(result).toBe('This is a static prompt.');
});
});
describe('DefaultArgumentProcessor', () => {
const processor = new DefaultArgumentProcessor();
it('should append the full command if args are provided', async () => {
const prompt = 'Parse the command.';
const context = createMockCommandContext({
invocation: {
raw: '/mycommand arg1 "arg two"',
name: 'mycommand',
args: 'arg1 "arg two"',
},
});
const result = await processor.process(prompt, context);
expect(result).toBe('Parse the command.\n\n/mycommand arg1 "arg two"');
});
it('should NOT append the full command if no args are provided', async () => {
const prompt = 'Parse the command.';
const context = createMockCommandContext({
invocation: {
raw: '/mycommand',
name: 'mycommand',
args: '',
},
});
const result = await processor.process(prompt, context);
expect(result).toBe('Parse the command.');
});
});
});

View File

@@ -1,34 +0,0 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { IPromptProcessor, SHORTHAND_ARGS_PLACEHOLDER } from './types.js';
import { CommandContext } from '../../ui/commands/types.js';
/**
* Replaces all instances of `{{args}}` in a prompt with the user-provided
* argument string.
*/
export class ShorthandArgumentProcessor implements IPromptProcessor {
async process(prompt: string, context: CommandContext): Promise<string> {
return prompt.replaceAll(
SHORTHAND_ARGS_PLACEHOLDER,
context.invocation!.args,
);
}
}
/**
* Appends the user's full command invocation to the prompt if arguments are
* provided, allowing the model to perform its own argument parsing.
*/
export class DefaultArgumentProcessor implements IPromptProcessor {
async process(prompt: string, context: CommandContext): Promise<string> {
if (context.invocation!.args) {
return `${prompt}\n\n${context.invocation!.raw}`;
}
return prompt;
}
}
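Taken together, the two processors behave as in the sketch below, which mirrors the unit tests above; the command name and arguments are hypothetical.

import {
  DefaultArgumentProcessor,
  ShorthandArgumentProcessor,
} from './argumentProcessor.js';
import { createMockCommandContext } from '../../test-utils/mockCommandContext.js';

// Hypothetical invocation of '/review make it faster'.
async function demo() {
  const context = createMockCommandContext({
    invocation: {
      raw: '/review make it faster',
      name: 'review',
      args: 'make it faster',
    },
  });

  // '{{args}}' present: substituted in place.
  const a = await new ShorthandArgumentProcessor().process(
    'Refactor: {{args}}',
    context,
  );
  // a === 'Refactor: make it faster'

  // No '{{args}}': the raw invocation is appended for the model to parse.
  const b = await new DefaultArgumentProcessor().process(
    'Refactor the code.',
    context,
  );
  // b === 'Refactor the code.\n\n/review make it faster'
  return [a, b];
}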

View File

@@ -1,300 +0,0 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { vi, describe, it, expect, beforeEach } from 'vitest';
import { ConfirmationRequiredError, ShellProcessor } from './shellProcessor.js';
import { createMockCommandContext } from '../../test-utils/mockCommandContext.js';
import { CommandContext } from '../../ui/commands/types.js';
import { Config } from '@qwen-code/qwen-code-core';
const mockCheckCommandPermissions = vi.hoisted(() => vi.fn());
const mockShellExecute = vi.hoisted(() => vi.fn());
vi.mock('@qwen-code/qwen-code-core', async (importOriginal) => {
const original = await importOriginal<object>();
return {
...original,
checkCommandPermissions: mockCheckCommandPermissions,
ShellExecutionService: {
execute: mockShellExecute,
},
};
});
describe('ShellProcessor', () => {
let context: CommandContext;
let mockConfig: Partial<Config>;
beforeEach(() => {
vi.clearAllMocks();
mockConfig = {
getTargetDir: vi.fn().mockReturnValue('/test/dir'),
};
context = createMockCommandContext({
services: {
config: mockConfig as Config,
},
session: {
sessionShellAllowlist: new Set(),
},
});
mockShellExecute.mockReturnValue({
result: Promise.resolve({
output: 'default shell output',
}),
});
mockCheckCommandPermissions.mockReturnValue({
allAllowed: true,
disallowedCommands: [],
});
});
it('should not change the prompt if no shell injections are present', async () => {
const processor = new ShellProcessor('test-command');
const prompt = 'This is a simple prompt with no injections.';
const result = await processor.process(prompt, context);
expect(result).toBe(prompt);
expect(mockShellExecute).not.toHaveBeenCalled();
});
it('should process a single valid shell injection if allowed', async () => {
const processor = new ShellProcessor('test-command');
const prompt = 'The current status is: !{git status}';
mockCheckCommandPermissions.mockReturnValue({
allAllowed: true,
disallowedCommands: [],
});
mockShellExecute.mockReturnValue({
result: Promise.resolve({ output: 'On branch main' }),
});
const result = await processor.process(prompt, context);
expect(mockCheckCommandPermissions).toHaveBeenCalledWith(
'git status',
expect.any(Object),
context.session.sessionShellAllowlist,
);
expect(mockShellExecute).toHaveBeenCalledWith(
'git status',
expect.any(String),
expect.any(Function),
expect.any(Object),
);
expect(result).toBe('The current status is: On branch main');
});
it('should process multiple valid shell injections if all are allowed', async () => {
const processor = new ShellProcessor('test-command');
const prompt = '!{git status} in !{pwd}';
mockCheckCommandPermissions.mockReturnValue({
allAllowed: true,
disallowedCommands: [],
});
mockShellExecute
.mockReturnValueOnce({
result: Promise.resolve({ output: 'On branch main' }),
})
.mockReturnValueOnce({
result: Promise.resolve({ output: '/usr/home' }),
});
const result = await processor.process(prompt, context);
expect(mockCheckCommandPermissions).toHaveBeenCalledTimes(2);
expect(mockShellExecute).toHaveBeenCalledTimes(2);
expect(result).toBe('On branch main in /usr/home');
});
it('should throw ConfirmationRequiredError if a command is not allowed', async () => {
const processor = new ShellProcessor('test-command');
const prompt = 'Do something dangerous: !{rm -rf /}';
mockCheckCommandPermissions.mockReturnValue({
allAllowed: false,
disallowedCommands: ['rm -rf /'],
});
await expect(processor.process(prompt, context)).rejects.toThrow(
ConfirmationRequiredError,
);
});
it('should throw ConfirmationRequiredError with the correct command', async () => {
const processor = new ShellProcessor('test-command');
const prompt = 'Do something dangerous: !{rm -rf /}';
mockCheckCommandPermissions.mockReturnValue({
allAllowed: false,
disallowedCommands: ['rm -rf /'],
});
try {
await processor.process(prompt, context);
// Fail if it doesn't throw
expect(true).toBe(false);
} catch (e) {
expect(e).toBeInstanceOf(ConfirmationRequiredError);
if (e instanceof ConfirmationRequiredError) {
expect(e.commandsToConfirm).toEqual(['rm -rf /']);
}
}
expect(mockShellExecute).not.toHaveBeenCalled();
});
it('should throw ConfirmationRequiredError with multiple commands if multiple are disallowed', async () => {
const processor = new ShellProcessor('test-command');
const prompt = '!{cmd1} and !{cmd2}';
mockCheckCommandPermissions.mockImplementation((cmd) => {
if (cmd === 'cmd1') {
return { allAllowed: false, disallowedCommands: ['cmd1'] };
}
if (cmd === 'cmd2') {
return { allAllowed: false, disallowedCommands: ['cmd2'] };
}
return { allAllowed: true, disallowedCommands: [] };
});
try {
await processor.process(prompt, context);
// Fail if it doesn't throw
expect(true).toBe(false);
} catch (e) {
expect(e).toBeInstanceOf(ConfirmationRequiredError);
if (e instanceof ConfirmationRequiredError) {
expect(e.commandsToConfirm).toEqual(['cmd1', 'cmd2']);
}
}
});
it('should not execute any commands if at least one requires confirmation', async () => {
const processor = new ShellProcessor('test-command');
const prompt = 'First: !{echo "hello"}, Second: !{rm -rf /}';
mockCheckCommandPermissions.mockImplementation((cmd) => {
if (cmd.includes('rm')) {
return { allAllowed: false, disallowedCommands: [cmd] };
}
return { allAllowed: true, disallowedCommands: [] };
});
await expect(processor.process(prompt, context)).rejects.toThrow(
ConfirmationRequiredError,
);
// Ensure no commands were executed because the pipeline was halted.
expect(mockShellExecute).not.toHaveBeenCalled();
});
it('should only request confirmation for disallowed commands in a mixed prompt', async () => {
const processor = new ShellProcessor('test-command');
const prompt = 'Allowed: !{ls -l}, Disallowed: !{rm -rf /}';
mockCheckCommandPermissions.mockImplementation((cmd) => ({
allAllowed: !cmd.includes('rm'),
disallowedCommands: cmd.includes('rm') ? [cmd] : [],
}));
try {
await processor.process(prompt, context);
expect.fail('Should have thrown ConfirmationRequiredError');
} catch (e) {
expect(e).toBeInstanceOf(ConfirmationRequiredError);
if (e instanceof ConfirmationRequiredError) {
expect(e.commandsToConfirm).toEqual(['rm -rf /']);
}
}
});
it('should execute all commands if they are on the session allowlist', async () => {
const processor = new ShellProcessor('test-command');
const prompt = 'Run !{cmd1} and !{cmd2}';
// Add commands to the session allowlist
context.session.sessionShellAllowlist = new Set(['cmd1', 'cmd2']);
// checkCommandPermissions should now pass for these
mockCheckCommandPermissions.mockReturnValue({
allAllowed: true,
disallowedCommands: [],
});
mockShellExecute
.mockReturnValueOnce({ result: Promise.resolve({ output: 'output1' }) })
.mockReturnValueOnce({ result: Promise.resolve({ output: 'output2' }) });
const result = await processor.process(prompt, context);
expect(mockCheckCommandPermissions).toHaveBeenCalledWith(
'cmd1',
expect.any(Object),
context.session.sessionShellAllowlist,
);
expect(mockCheckCommandPermissions).toHaveBeenCalledWith(
'cmd2',
expect.any(Object),
context.session.sessionShellAllowlist,
);
expect(mockShellExecute).toHaveBeenCalledTimes(2);
expect(result).toBe('Run output1 and output2');
});
it('should trim whitespace from the command inside the injection', async () => {
const processor = new ShellProcessor('test-command');
const prompt = 'Files: !{ ls -l }';
mockCheckCommandPermissions.mockReturnValue({
allAllowed: true,
disallowedCommands: [],
});
mockShellExecute.mockReturnValue({
result: Promise.resolve({ output: 'total 0' }),
});
await processor.process(prompt, context);
expect(mockCheckCommandPermissions).toHaveBeenCalledWith(
'ls -l', // Verifies that the command was trimmed
expect.any(Object),
context.session.sessionShellAllowlist,
);
expect(mockShellExecute).toHaveBeenCalledWith(
'ls -l',
expect.any(String),
expect.any(Function),
expect.any(Object),
);
});
it('should handle an empty command inside the injection gracefully', async () => {
const processor = new ShellProcessor('test-command');
const prompt = 'This is weird: !{}';
mockCheckCommandPermissions.mockReturnValue({
allAllowed: true,
disallowedCommands: [],
});
mockShellExecute.mockReturnValue({
result: Promise.resolve({ output: 'empty output' }),
});
const result = await processor.process(prompt, context);
expect(mockCheckCommandPermissions).toHaveBeenCalledWith(
'',
expect.any(Object),
context.session.sessionShellAllowlist,
);
expect(mockShellExecute).toHaveBeenCalledWith(
'',
expect.any(String),
expect.any(Function),
expect.any(Object),
);
expect(result).toBe('This is weird: empty output');
});
});

View File

@@ -1,106 +0,0 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import {
checkCommandPermissions,
ShellExecutionService,
} from '@qwen-code/qwen-code-core';
import { CommandContext } from '../../ui/commands/types.js';
import { IPromptProcessor } from './types.js';
export class ConfirmationRequiredError extends Error {
constructor(
message: string,
public commandsToConfirm: string[],
) {
super(message);
this.name = 'ConfirmationRequiredError';
}
}
/**
* Finds all instances of shell command injections (`!{...}`) in a prompt,
* executes them, and replaces the injection site with the command's output.
*
* This processor ensures that only allowlisted commands are executed. If a
* disallowed command is found, it halts execution and reports an error.
*/
export class ShellProcessor implements IPromptProcessor {
/**
* A regular expression to find all instances of `!{...}`. The inner
* capture group extracts the command itself.
*/
private static readonly SHELL_INJECTION_REGEX = /!\{([^}]*)\}/g;
/**
* @param commandName The name of the custom command being executed, used
* for logging and error messages.
*/
constructor(private readonly commandName: string) {}
async process(prompt: string, context: CommandContext): Promise<string> {
const { config, sessionShellAllowlist } = {
...context.services,
...context.session,
};
const commandsToExecute: Array<{ fullMatch: string; command: string }> = [];
const commandsToConfirm = new Set<string>();
const matches = [...prompt.matchAll(ShellProcessor.SHELL_INJECTION_REGEX)];
if (matches.length === 0) {
return prompt; // No shell commands, nothing to do.
}
// Discover all commands and check permissions.
for (const match of matches) {
const command = match[1].trim();
const { allAllowed, disallowedCommands, blockReason, isHardDenial } =
checkCommandPermissions(command, config!, sessionShellAllowlist);
if (!allAllowed) {
// If it's a hard denial, this is a non-recoverable security error.
if (isHardDenial) {
throw new Error(
`${this.commandName} cannot be run. ${blockReason || 'A shell command in this custom command is explicitly blocked in your config settings.'}`,
);
}
// Add each soft denial disallowed command to the set for confirmation.
disallowedCommands.forEach((uc) => commandsToConfirm.add(uc));
}
commandsToExecute.push({ fullMatch: match[0], command });
}
// If any commands require confirmation, throw a special error to halt the
// pipeline and trigger the UI flow.
if (commandsToConfirm.size > 0) {
throw new ConfirmationRequiredError(
'Shell command confirmation required',
Array.from(commandsToConfirm),
);
}
// Execute all commands (only runs if no confirmation was needed).
let processedPrompt = prompt;
for (const { fullMatch, command } of commandsToExecute) {
const { result } = ShellExecutionService.execute(
command,
config!.getTargetDir(),
() => {}, // No streaming needed.
new AbortController().signal, // For now, we don't support cancellation from here.
);
const executionResult = await result;
processedPrompt = processedPrompt.replace(
fullMatch,
executionResult.output,
);
}
return processedPrompt;
}
}
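A caller typically distinguishes the confirmation flow from genuine failures; the sketch below assumes a pre-built `CommandContext` and mirrors the handling done by `FileCommandLoader` earlier in this diff.

import { ConfirmationRequiredError, ShellProcessor } from './shellProcessor.js';
import { CommandContext } from '../../ui/commands/types.js';

async function expandShellInjections(
  prompt: string,
  context: CommandContext,
): Promise<{ prompt?: string; commandsToConfirm?: string[] }> {
  try {
    // e.g. 'Current status:\n!{git status}' -> stdout substituted in place.
    const processed = await new ShellProcessor('my-command').process(
      prompt,
      context,
    );
    return { prompt: processed };
  } catch (e) {
    if (e instanceof ConfirmationRequiredError) {
      // Hand the soft-denied commands to the UI and retry once approved.
      return { commandsToConfirm: e.commandsToConfirm };
    }
    throw e; // Hard denials and unexpected errors propagate.
  }
}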

View File

@@ -1,42 +0,0 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { CommandContext } from '../../ui/commands/types.js';
/**
* Defines the interface for a prompt processor, a module that can transform
* a prompt string before it is sent to the model. Processors are chained
* together to create a processing pipeline.
*/
export interface IPromptProcessor {
/**
* Processes a prompt string, applying a specific transformation as part of a pipeline.
*
* Each processor in a command's pipeline receives the output of the previous
* processor. This method provides the full command context, allowing for
* complex transformations that may require access to invocation details,
* application services, or UI state.
*
* @param prompt The current state of the prompt string. This may have been
* modified by previous processors in the pipeline.
* @param context The full command context, providing access to invocation
* details (like `context.invocation.raw` and `context.invocation.args`),
* application services, and UI handlers.
* @returns A promise that resolves to the transformed prompt string, which
* will be passed to the next processor or, if it's the last one, sent to the model.
*/
process(prompt: string, context: CommandContext): Promise<string>;
}
/**
* The placeholder string for shorthand argument injection in custom commands.
*/
export const SHORTHAND_ARGS_PLACEHOLDER = '{{args}}';
/**
* The trigger string for shell command injection in custom commands.
*/
export const SHELL_INJECTION_TRIGGER = '!{';
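Any transformation that fits the pipeline only needs to satisfy this interface; the toy processor below (not part of the codebase) trims surrounding whitespace and would compose with the shell and argument processors above.

import { IPromptProcessor } from './types.js';
import { CommandContext } from '../../ui/commands/types.js';

export class TrimProcessor implements IPromptProcessor {
  async process(prompt: string, _context: CommandContext): Promise<string> {
    // Purely illustrative: strip leading/trailing whitespace between stages.
    return prompt.trim();
  }
}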

View File

@@ -1,24 +0,0 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { SlashCommand } from '../ui/commands/types.js';
/**
* Defines the contract for any class that can load and provide slash commands.
* This allows the CommandService to be extended with new command sources
* (e.g., file-based, remote APIs) without modification.
*
* Loaders should receive any necessary dependencies (like Config) via their
* constructor.
*/
export interface ICommandLoader {
/**
* Discovers and returns a list of slash commands from the loader's source.
* @param signal An AbortSignal to allow cancellation.
* @returns A promise that resolves to an array of SlashCommand objects.
*/
loadCommands(signal: AbortSignal): Promise<SlashCommand[]>;
}
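A minimal loader satisfying this contract could hard-code a single command; the example below is illustrative only and reuses `CommandKind.FILE` simply because it is a kind that already exists in the codebase.

import { ICommandLoader } from './types.js';
import { CommandKind, SlashCommand } from '../ui/commands/types.js';

export class StaticCommandLoader implements ICommandLoader {
  loadCommands(_signal: AbortSignal): Promise<SlashCommand[]> {
    return Promise.resolve([
      {
        name: 'ping',
        description: 'Replies with pong (example only)',
        kind: CommandKind.FILE, // reusing an existing kind for the sketch
        action: async () => ({
          type: 'message',
          messageType: 'info',
          content: 'pong',
        }),
      },
    ]);
  }
}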

View File

@@ -7,7 +7,7 @@
import { vi } from 'vitest';
import { CommandContext } from '../ui/commands/types.js';
import { LoadedSettings } from '../config/settings.js';
import { GitService } from '@qwen-code/qwen-code-core';
import { GitService } from '@google/gemini-cli-core';
import { SessionStatsState } from '../ui/contexts/SessionContext.js';
// A utility type to make all properties of an object, and its nested objects, partial.
@@ -28,11 +28,6 @@ export const createMockCommandContext = (
overrides: DeepPartial<CommandContext> = {},
): CommandContext => {
const defaultMocks: CommandContext = {
invocation: {
raw: '',
name: '',
args: '',
},
services: {
config: null,
settings: { merged: {} } as LoadedSettings,
@@ -49,10 +44,6 @@ export const createMockCommandContext = (
addItem: vi.fn(),
clear: vi.fn(),
setDebugMessage: vi.fn(),
pendingItem: null,
setPendingItem: vi.fn(),
loadHistory: vi.fn(),
toggleCorgiMode: vi.fn(),
},
session: {
stats: {
@@ -85,13 +76,15 @@ export const createMockCommandContext = (
const targetValue = output[key];
if (
// We only want to recursively merge plain objects
Object.prototype.toString.call(sourceValue) === '[object Object]' &&
Object.prototype.toString.call(targetValue) === '[object Object]'
sourceValue &&
typeof sourceValue === 'object' &&
!Array.isArray(sourceValue) &&
targetValue &&
typeof targetValue === 'object' &&
!Array.isArray(targetValue)
) {
output[key] = merge(targetValue, sourceValue);
} else {
// If not, we do a direct assignment. This preserves Date objects and others.
output[key] = sourceValue;
}
}

View File

@@ -15,13 +15,11 @@ import {
AccessibilitySettings,
SandboxConfig,
GeminiClient,
ideContext,
} from '@qwen-code/qwen-code-core';
import { LoadedSettings, SettingsFile, Settings } from '../config/settings.js';
import process from 'node:process';
import { useGeminiStream } from './hooks/useGeminiStream.js';
import { useConsoleMessages } from './hooks/useConsoleMessages.js';
import { StreamingState, ConsoleMessageItem } from './types.js';
import { StreamingState } from './types.js';
import { Tips } from './components/Tips.js';
// Define a more complete mock server config based on actual Config
@@ -60,12 +58,6 @@ interface MockServerConfig {
getToolCallCommand: Mock<() => string | undefined>;
getMcpServerCommand: Mock<() => string | undefined>;
getMcpServers: Mock<() => Record<string, MCPServerConfig> | undefined>;
getExtensions: Mock<
() => Array<{ name: string; version: string; isActive: boolean }>
>;
getBlockedMcpServers: Mock<
() => Array<{ name: string; extensionName: string }>
>;
getUserAgent: Mock<() => string>;
getUserMemory: Mock<() => string>;
setUserMemory: Mock<(newUserMemory: string) => void>;
@@ -126,9 +118,6 @@ vi.mock('@qwen-code/qwen-code-core', async (importOriginal) => {
getToolCallCommand: vi.fn(() => opts.toolCallCommand),
getMcpServerCommand: vi.fn(() => opts.mcpServerCommand),
getMcpServers: vi.fn(() => opts.mcpServers),
getPromptRegistry: vi.fn(),
getExtensions: vi.fn(() => []),
getBlockedMcpServers: vi.fn(() => []),
getUserAgent: vi.fn(() => opts.userAgent || 'test-agent'),
getUserMemory: vi.fn(() => opts.userMemory || ''),
setUserMemory: vi.fn(),
@@ -140,29 +129,19 @@ vi.mock('@qwen-code/qwen-code-core', async (importOriginal) => {
getShowMemoryUsage: vi.fn(() => opts.showMemoryUsage ?? false),
getAccessibility: vi.fn(() => opts.accessibility ?? {}),
getProjectRoot: vi.fn(() => opts.targetDir),
getGeminiClient: vi.fn(() => ({
getUserTier: vi.fn(),
})),
getGeminiClient: vi.fn(() => ({})),
getCheckpointingEnabled: vi.fn(() => opts.checkpointing ?? true),
getAllGeminiMdFilenames: vi.fn(() => ['GEMINI.md']),
setFlashFallbackHandler: vi.fn(),
getSessionId: vi.fn(() => 'test-session-id'),
getUserTier: vi.fn().mockResolvedValue(undefined),
getIdeMode: vi.fn(() => false),
};
});
const ideContextMock = {
getOpenFilesContext: vi.fn(),
subscribeToOpenFiles: vi.fn(() => vi.fn()), // subscribe returns an unsubscribe function
};
return {
...actualCore,
Config: ConfigClassMock,
MCPServerConfig: actualCore.MCPServerConfig,
getAllGeminiMdFilenames: vi.fn(() => ['GEMINI.md']),
ideContext: ideContextMock,
};
});
@@ -193,14 +172,6 @@ vi.mock('./hooks/useLogger', () => ({
})),
}));
vi.mock('./hooks/useConsoleMessages.js', () => ({
useConsoleMessages: vi.fn(() => ({
consoleMessages: [],
handleNewMessage: vi.fn(),
clearConsoleMessages: vi.fn(),
})),
}));
vi.mock('../config/config.js', async (importOriginal) => {
const actual = await importOriginal();
return {
@@ -242,7 +213,7 @@ describe('App UI', () => {
settings: settings.user || {},
};
const workspaceSettingsFile: SettingsFile = {
path: '/workspace/.gemini/settings.json',
path: '/workspace/.qwen/settings.json',
settings: settings.workspace || {},
};
return new LoadedSettings(
@@ -277,7 +248,6 @@ describe('App UI', () => {
// Ensure a theme is set so the theme dialog does not appear.
mockSettings = createMockSettings({ workspace: { theme: 'Default' } });
vi.mocked(ideContext.getOpenFilesContext).mockReturnValue(undefined);
});
afterEach(() => {
@@ -288,68 +258,8 @@ describe('App UI', () => {
vi.clearAllMocks(); // Clear mocks after each test
});
it('should display active file when available', async () => {
vi.mocked(ideContext.getOpenFilesContext).mockReturnValue({
activeFile: '/path/to/my-file.ts',
recentOpenFiles: [{ filePath: '/path/to/my-file.ts', content: 'hello' }],
selectedText: 'hello',
});
const { lastFrame, unmount } = render(
<App
config={mockConfig as unknown as ServerConfig}
settings={mockSettings}
version={mockVersion}
/>,
);
currentUnmount = unmount;
await Promise.resolve();
expect(lastFrame()).toContain('1 recent file (ctrl+e to view)');
});
it('should not display active file when not available', async () => {
vi.mocked(ideContext.getOpenFilesContext).mockReturnValue({
activeFile: '',
});
const { lastFrame, unmount } = render(
<App
config={mockConfig as unknown as ServerConfig}
settings={mockSettings}
version={mockVersion}
/>,
);
currentUnmount = unmount;
await Promise.resolve();
expect(lastFrame()).not.toContain('Open File');
});
it('should display active file and other context', async () => {
vi.mocked(ideContext.getOpenFilesContext).mockReturnValue({
activeFile: '/path/to/my-file.ts',
recentOpenFiles: [{ filePath: '/path/to/my-file.ts', content: 'hello' }],
selectedText: 'hello',
});
mockConfig.getGeminiMdFileCount.mockReturnValue(1);
mockConfig.getAllGeminiMdFilenames.mockReturnValue(['GEMINI.md']);
const { lastFrame, unmount } = render(
<App
config={mockConfig as unknown as ServerConfig}
settings={mockSettings}
version={mockVersion}
/>,
);
currentUnmount = unmount;
await Promise.resolve();
expect(lastFrame()).toContain(
'Using: 1 recent file (ctrl+e to view) | 1 GEMINI.md file',
);
});
it('should display default "GEMINI.md" in footer when contextFileName is not set and count is 1', async () => {
mockConfig.getGeminiMdFileCount.mockReturnValue(1);
mockConfig.getAllGeminiMdFilenames.mockReturnValue(['GEMINI.md']);
// For this test, ensure showMemoryUsage is false or debugMode is false if it relies on that
mockConfig.getDebugMode.mockReturnValue(false);
mockConfig.getShowMemoryUsage.mockReturnValue(false);
@@ -363,15 +273,11 @@ describe('App UI', () => {
);
currentUnmount = unmount;
await Promise.resolve(); // Wait for any async updates
expect(lastFrame()).toContain('Using: 1 GEMINI.md file');
expect(lastFrame()).toContain('Using 1 GEMINI.md file');
});
it('should display default "GEMINI.md" with plural when contextFileName is not set and count is > 1', async () => {
mockConfig.getGeminiMdFileCount.mockReturnValue(2);
mockConfig.getAllGeminiMdFilenames.mockReturnValue([
'GEMINI.md',
'GEMINI.md',
]);
mockConfig.getDebugMode.mockReturnValue(false);
mockConfig.getShowMemoryUsage.mockReturnValue(false);
@@ -384,7 +290,7 @@ describe('App UI', () => {
);
currentUnmount = unmount;
await Promise.resolve();
expect(lastFrame()).toContain('Using: 2 GEMINI.md files');
expect(lastFrame()).toContain('Using 2 GEMINI.md files');
});
it('should display custom contextFileName in footer when set and count is 1', async () => {
@@ -392,7 +298,6 @@ describe('App UI', () => {
workspace: { contextFileName: 'AGENTS.md', theme: 'Default' },
});
mockConfig.getGeminiMdFileCount.mockReturnValue(1);
mockConfig.getAllGeminiMdFilenames.mockReturnValue(['AGENTS.md']);
mockConfig.getDebugMode.mockReturnValue(false);
mockConfig.getShowMemoryUsage.mockReturnValue(false);
@@ -405,7 +310,7 @@ describe('App UI', () => {
);
currentUnmount = unmount;
await Promise.resolve();
expect(lastFrame()).toContain('Using: 1 AGENTS.md file');
expect(lastFrame()).toContain('Using 1 AGENTS.md file');
});
it('should display a generic message when multiple context files with different names are provided', async () => {
@@ -416,10 +321,6 @@ describe('App UI', () => {
},
});
mockConfig.getGeminiMdFileCount.mockReturnValue(2);
mockConfig.getAllGeminiMdFilenames.mockReturnValue([
'AGENTS.md',
'CONTEXT.md',
]);
mockConfig.getDebugMode.mockReturnValue(false);
mockConfig.getShowMemoryUsage.mockReturnValue(false);
@@ -432,7 +333,7 @@ describe('App UI', () => {
);
currentUnmount = unmount;
await Promise.resolve();
expect(lastFrame()).toContain('Using: 2 context files');
expect(lastFrame()).toContain('Using 2 context files');
});
it('should display custom contextFileName with plural when set and count is > 1', async () => {
@@ -440,11 +341,6 @@ describe('App UI', () => {
workspace: { contextFileName: 'MY_NOTES.TXT', theme: 'Default' },
});
mockConfig.getGeminiMdFileCount.mockReturnValue(3);
mockConfig.getAllGeminiMdFilenames.mockReturnValue([
'MY_NOTES.TXT',
'MY_NOTES.TXT',
'MY_NOTES.TXT',
]);
mockConfig.getDebugMode.mockReturnValue(false);
mockConfig.getShowMemoryUsage.mockReturnValue(false);
@@ -457,7 +353,7 @@ describe('App UI', () => {
);
currentUnmount = unmount;
await Promise.resolve();
expect(lastFrame()).toContain('Using: 3 MY_NOTES.TXT files');
expect(lastFrame()).toContain('Using 3 MY_NOTES.TXT files');
});
it('should not display context file message if count is 0, even if contextFileName is set', async () => {
@@ -465,7 +361,6 @@ describe('App UI', () => {
workspace: { contextFileName: 'ANY_FILE.MD', theme: 'Default' },
});
mockConfig.getGeminiMdFileCount.mockReturnValue(0);
mockConfig.getAllGeminiMdFilenames.mockReturnValue([]);
mockConfig.getDebugMode.mockReturnValue(false);
mockConfig.getShowMemoryUsage.mockReturnValue(false);
@@ -483,10 +378,6 @@ describe('App UI', () => {
it('should display GEMINI.md and MCP server count when both are present', async () => {
mockConfig.getGeminiMdFileCount.mockReturnValue(2);
mockConfig.getAllGeminiMdFilenames.mockReturnValue([
'GEMINI.md',
'GEMINI.md',
]);
mockConfig.getMcpServers.mockReturnValue({
server1: {} as MCPServerConfig,
});
@@ -502,12 +393,11 @@ describe('App UI', () => {
);
currentUnmount = unmount;
await Promise.resolve();
expect(lastFrame()).toContain('1 MCP server');
expect(lastFrame()).toContain('server');
});
it('should display only MCP server count when GEMINI.md count is 0', async () => {
mockConfig.getGeminiMdFileCount.mockReturnValue(0);
mockConfig.getAllGeminiMdFilenames.mockReturnValue([]);
mockConfig.getMcpServers.mockReturnValue({
server1: {} as MCPServerConfig,
server2: {} as MCPServerConfig,
@@ -524,7 +414,7 @@ describe('App UI', () => {
);
currentUnmount = unmount;
await Promise.resolve();
expect(lastFrame()).toContain('Using: 2 MCP servers (ctrl+t to view)');
expect(lastFrame()).toContain('Using 2 MCP servers');
});
it('should display Tips component by default', async () => {
@@ -637,7 +527,7 @@ describe('App UI', () => {
);
currentUnmount = unmount;
expect(lastFrame()).toContain("I'm Feeling Lucky (esc to cancel");
expect(lastFrame()).toContain('Select Theme');
});
it('should display a message if NO_COLOR is set', async () => {
@@ -652,43 +542,13 @@ describe('App UI', () => {
);
currentUnmount = unmount;
expect(lastFrame()).toContain("I'm Feeling Lucky (esc to cancel");
expect(lastFrame()).toContain(
'Theme configuration unavailable due to NO_COLOR env variable.',
);
expect(lastFrame()).not.toContain('Select Theme');
});
});
it('should render the initial UI correctly', () => {
const { lastFrame, unmount } = render(
<App
config={mockConfig as unknown as ServerConfig}
settings={mockSettings}
version={mockVersion}
/>,
);
currentUnmount = unmount;
expect(lastFrame()).toMatchSnapshot();
});
it('should render correctly with the prompt input box', () => {
vi.mocked(useGeminiStream).mockReturnValue({
streamingState: StreamingState.Idle,
submitQuery: vi.fn(),
initError: null,
pendingHistoryItems: [],
thought: null,
});
const { lastFrame, unmount } = render(
<App
config={mockConfig as unknown as ServerConfig}
settings={mockSettings}
version={mockVersion}
/>,
);
currentUnmount = unmount;
expect(lastFrame()).toMatchSnapshot();
});
describe('with initial prompt from --prompt-interactive', () => {
it('should submit the initial prompt automatically', async () => {
const mockSubmitQuery = vi.fn();
@@ -705,7 +565,6 @@ describe('App UI', () => {
mockConfig.getGeminiClient.mockReturnValue({
isInitialized: vi.fn(() => true),
getUserTier: vi.fn(),
} as unknown as GeminiClient);
const { unmount, rerender } = render(
@@ -733,35 +592,4 @@ describe('App UI', () => {
);
});
});
describe('errorCount', () => {
it('should correctly sum the counts of error messages', async () => {
const mockConsoleMessages: ConsoleMessageItem[] = [
{ type: 'error', content: 'First error', count: 1 },
{ type: 'log', content: 'some log', count: 1 },
{ type: 'error', content: 'Second error', count: 3 },
{ type: 'warn', content: 'a warning', count: 1 },
{ type: 'error', content: 'Third error', count: 1 },
];
vi.mocked(useConsoleMessages).mockReturnValue({
consoleMessages: mockConsoleMessages,
handleNewMessage: vi.fn(),
clearConsoleMessages: vi.fn(),
});
const { lastFrame, unmount } = render(
<App
config={mockConfig as unknown as ServerConfig}
settings={mockSettings}
version={mockVersion}
/>,
);
currentUnmount = unmount;
await Promise.resolve();
// Total error count should be 1 + 3 + 1 = 5
expect(lastFrame()).toContain('5 errors');
});
});
});

View File

@@ -36,7 +36,6 @@ import { ThemeDialog } from './components/ThemeDialog.js';
import { AuthDialog } from './components/AuthDialog.js';
import { AuthInProgress } from './components/AuthInProgress.js';
import { EditorSettingsDialog } from './components/EditorSettingsDialog.js';
import { ShellConfirmationDialog } from './components/ShellConfirmationDialog.js';
import { Colors } from './colors.js';
import { Help } from './components/Help.js';
import { loadHierarchicalGeminiMemory } from '../config/config.js';
@@ -47,7 +46,6 @@ import { registerCleanup } from '../utils/cleanup.js';
import { DetailedMessagesDisplay } from './components/DetailedMessagesDisplay.js';
import { HistoryItemDisplay } from './components/HistoryItemDisplay.js';
import { ContextSummaryDisplay } from './components/ContextSummaryDisplay.js';
import { IDEContextDetailDisplay } from './components/IDEContextDetailDisplay.js';
import { useHistory } from './hooks/useHistoryManager.js';
import process from 'node:process';
import {
@@ -59,9 +57,6 @@ import {
EditorType,
FlashFallbackEvent,
logFlashFallback,
AuthType,
type OpenFiles,
ideContext,
} from '@qwen-code/qwen-code-core';
import { validateAuthMethod } from '../config/auth.js';
import { useLogger } from './hooks/useLogger.js';
@@ -71,11 +66,8 @@ import {
useSessionStats,
} from './contexts/SessionContext.js';
import { useGitBranchName } from './hooks/useGitBranchName.js';
import { useFocus } from './hooks/useFocus.js';
import { useBracketedPaste } from './hooks/useBracketedPaste.js';
import { useTextBuffer } from './components/shared/text-buffer.js';
import { useVimMode, VimModeProvider } from './contexts/VimModeContext.js';
import { useVim } from './hooks/vim.js';
import * as fs from 'fs';
import { UpdateNotification } from './components/UpdateNotification.js';
import {
@@ -88,7 +80,6 @@ import ansiEscapes from 'ansi-escapes';
import { OverflowProvider } from './contexts/OverflowContext.js';
import { ShowMoreLines } from './components/ShowMoreLines.js';
import { PrivacyNotice } from './privacy/PrivacyNotice.js';
import { appEvents, AppEvent } from '../utils/events.js';
const CTRL_EXIT_PROMPT_DURATION_MS = 1000;
@@ -101,14 +92,11 @@ interface AppProps {
export const AppWrapper = (props: AppProps) => (
<SessionStatsProvider>
<VimModeProvider settings={props.settings}>
<App {...props} />
</VimModeProvider>
<App {...props} />
</SessionStatsProvider>
);
const App = ({ config, settings, startupWarnings = [], version }: AppProps) => {
const isFocused = useFocus();
useBracketedPaste();
const [updateMessage, setUpdateMessage] = useState<string | null>(null);
const { stdout } = useStdout();
@@ -155,8 +143,6 @@ const App = ({ config, settings, startupWarnings = [], version }: AppProps) => {
const [showErrorDetails, setShowErrorDetails] = useState<boolean>(false);
const [showToolDescriptions, setShowToolDescriptions] =
useState<boolean>(false);
const [showIDEContextDetail, setShowIDEContextDetail] =
useState<boolean>(false);
const [ctrlCPressedOnce, setCtrlCPressedOnce] = useState(false);
const [quittingMessages, setQuittingMessages] = useState<
HistoryItem[] | null
@@ -169,37 +155,6 @@ const App = ({ config, settings, startupWarnings = [], version }: AppProps) => {
const [modelSwitchedFromQuotaError, setModelSwitchedFromQuotaError] =
useState<boolean>(false);
const [userTier, setUserTier] = useState<UserTierId | undefined>(undefined);
const [openFiles, setOpenFiles] = useState<OpenFiles | undefined>();
const [isProcessing, setIsProcessing] = useState<boolean>(false);
useEffect(() => {
const unsubscribe = ideContext.subscribeToOpenFiles(setOpenFiles);
// Set the initial value
setOpenFiles(ideContext.getOpenFilesContext());
return unsubscribe;
}, []);
useEffect(() => {
const openDebugConsole = () => {
setShowErrorDetails(true);
setConstrainHeight(false); // Make sure the user sees the full message.
};
appEvents.on(AppEvent.OpenDebugConsole, openDebugConsole);
const logErrorHandler = (errorMessage: unknown) => {
handleNewMessage({
type: 'error',
content: String(errorMessage),
count: 1,
});
};
appEvents.on(AppEvent.LogError, logErrorHandler);
return () => {
appEvents.off(AppEvent.OpenDebugConsole, openDebugConsole);
appEvents.off(AppEvent.LogError, logErrorHandler);
};
}, [handleNewMessage]);
const openPrivacyNotice = useCallback(() => {
setShowPrivacyNotice(true);
@@ -207,10 +162,7 @@ const App = ({ config, settings, startupWarnings = [], version }: AppProps) => {
const initialPromptSubmitted = useRef(false);
const errorCount = useMemo(
() =>
consoleMessages
.filter((msg) => msg.type === 'error')
.reduce((total, msg) => total + msg.count, 0),
() => consoleMessages.filter((msg) => msg.type === 'error').length,
[consoleMessages],
);
@@ -241,11 +193,26 @@ const App = ({ config, settings, startupWarnings = [], version }: AppProps) => {
// Sync user tier from config when authentication changes
useEffect(() => {
const syncUserTier = async () => {
try {
const configUserTier = await config.getUserTier();
if (configUserTier !== userTier) {
setUserTier(configUserTier);
}
} catch (error) {
// Silently fail - this is not critical functionality
// Only log in debug mode to avoid cluttering the console
if (config.getDebugMode()) {
console.debug('Failed to sync user tier:', error);
}
}
};
// Only sync when not currently authenticating
if (!isAuthenticating) {
setUserTier(config.getGeminiClient()?.getUserTier());
syncUserTier();
}
}, [config, isAuthenticating]);
}, [config, userTier, isAuthenticating]);
const {
isEditorDialogOpen,
@@ -271,11 +238,8 @@ const App = ({ config, settings, startupWarnings = [], version }: AppProps) => {
process.cwd(),
config.getDebugMode(),
config.getFileService(),
settings.merged,
config.getExtensionContextFilePaths(),
config.getFileFilteringOptions(),
);
config.setUserMemory(memoryContent);
config.setGeminiMdFileCount(fileCount);
setGeminiMdFileCount(fileCount);
@@ -303,7 +267,7 @@ const App = ({ config, settings, startupWarnings = [], version }: AppProps) => {
);
console.error('Error refreshing memory:', error);
}
}, [config, addItem, settings.merged]);
}, [config, addItem]);
// Watch for model changes (e.g., from Flash fallback)
useEffect(() => {
@@ -330,70 +294,64 @@ const App = ({ config, settings, startupWarnings = [], version }: AppProps) => {
): Promise<boolean> => {
let message: string;
if (
config.getContentGeneratorConfig().authType ===
AuthType.LOGIN_WITH_GOOGLE
) {
// Use actual user tier if available; otherwise, default to FREE tier behavior (safe default)
const isPaidTier =
userTier === UserTierId.LEGACY || userTier === UserTierId.STANDARD;
// Use actual user tier if available, otherwise default to FREE tier behavior (safe default)
const isPaidTier =
userTier === UserTierId.LEGACY || userTier === UserTierId.STANDARD;
// Check if this is a Pro quota exceeded error
if (error && isProQuotaExceededError(error)) {
if (isPaidTier) {
message = `⚡ You have reached your daily ${currentModel} quota limit.
// Check if this is a Pro quota exceeded error
if (error && isProQuotaExceededError(error)) {
if (isPaidTier) {
message = `⚡ You have reached your daily ${currentModel} quota limit.
⚡ Automatically switching from ${currentModel} to ${fallbackModel} for the remainder of this session.
⚡ To continue accessing the ${currentModel} model today, consider using /auth to switch to using a paid API key from AI Studio at https://aistudio.google.com/apikey`;
} else {
message = `⚡ You have reached your daily ${currentModel} quota limit.
⚡ Automatically switching from ${currentModel} to ${fallbackModel} for the remainder of this session.
⚡ To increase your limits, upgrade to a Gemini Code Assist Standard or Enterprise plan with higher limits at https://goo.gle/set-up-gemini-code-assist
⚡ Or you can utilize a Gemini API Key. See: https://goo.gle/gemini-cli-docs-auth#gemini-api-key
⚡ You can switch authentication methods by typing /auth`;
}
} else if (error && isGenericQuotaExceededError(error)) {
if (isPaidTier) {
message = `⚡ You have reached your daily quota limit.
⚡ Automatically switching from ${currentModel} to ${fallbackModel} for the remainder of this session.
⚡ To continue accessing the ${currentModel} model today, consider using /auth to switch to using a paid API key from AI Studio at https://aistudio.google.com/apikey`;
} else {
message = `⚡ You have reached your daily quota limit.
⚡ Automatically switching from ${currentModel} to ${fallbackModel} for the remainder of this session.
⚡ To increase your limits, upgrade to a Gemini Code Assist Standard or Enterprise plan with higher limits at https://goo.gle/set-up-gemini-code-assist
⚡ Or you can utilize a Gemini API Key. See: https://goo.gle/gemini-cli-docs-auth#gemini-api-key
⚡ You can switch authentication methods by typing /auth`;
}
} else {
if (isPaidTier) {
// Default fallback message for other cases (like consecutive 429s)
message = `⚡ Automatically switching from ${currentModel} to ${fallbackModel} for faster responses for the remainder of this session.
message = `⚡ You have reached your daily ${currentModel} quota limit.
⚡ Automatically switching from ${currentModel} to ${fallbackModel} for the remainder of this session.
⚡ To increase your limits, upgrade to a Gemini Code Assist Standard or Enterprise plan with higher limits at https://goo.gle/set-up-gemini-code-assist
⚡ Or you can utilize a Gemini API Key. See: https://goo.gle/gemini-cli-docs-auth#gemini-api-key
⚡ You can switch authentication methods by typing /auth`;
}
} else if (error && isGenericQuotaExceededError(error)) {
if (isPaidTier) {
message = `⚡ You have reached your daily quota limit.
⚡ Automatically switching from ${currentModel} to ${fallbackModel} for the remainder of this session.
⚡ To continue accessing the ${currentModel} model today, consider using /auth to switch to using a paid API key from AI Studio at https://aistudio.google.com/apikey`;
} else {
message = `⚡ You have reached your daily quota limit.
⚡ Automatically switching from ${currentModel} to ${fallbackModel} for the remainder of this session.
⚡ To increase your limits, upgrade to a Gemini Code Assist Standard or Enterprise plan with higher limits at https://goo.gle/set-up-gemini-code-assist
⚡ Or you can utilize a Gemini API Key. See: https://goo.gle/gemini-cli-docs-auth#gemini-api-key
⚡ You can switch authentication methods by typing /auth`;
}
} else {
if (isPaidTier) {
// Default fallback message for other cases (like consecutive 429s)
message = `⚡ Automatically switching from ${currentModel} to ${fallbackModel} for faster responses for the remainder of this session.
⚡ Possible reasons for this are that you have received multiple consecutive capacity errors or you have reached your daily ${currentModel} quota limit
⚡ To continue accessing the ${currentModel} model today, consider using /auth to switch to using a paid API key from AI Studio at https://aistudio.google.com/apikey`;
} else {
// Default fallback message for other cases (like consecutive 429s)
message = `⚡ Automatically switching from ${currentModel} to ${fallbackModel} for faster responses for the remainder of this session.
} else {
// Default fallback message for other cases (like consecutive 429s)
message = `⚡ Automatically switching from ${currentModel} to ${fallbackModel} for faster responses for the remainder of this session.
⚡ Possible reasons for this are that you have received multiple consecutive capacity errors or you have reached your daily ${currentModel} quota limit
⚡ To increase your limits, upgrade to a Gemini Code Assist Standard or Enterprise plan with higher limits at https://goo.gle/set-up-gemini-code-assist
⚡ Or you can utilize a Gemini API Key. See: https://goo.gle/gemini-cli-docs-auth#gemini-api-key
⚡ You can switch authentication methods by typing /auth`;
}
}
// Add message to UI history
addItem(
{
type: MessageType.INFO,
text: message,
},
Date.now(),
);
// Set the flag to prevent tool continuation
setModelSwitchedFromQuotaError(true);
// Set global quota error flag to prevent Flash model calls
config.setQuotaErrorOccurred(true);
}
// Add message to UI history
addItem(
{
type: MessageType.INFO,
text: message,
},
Date.now(),
);
// Set the flag to prevent tool continuation
setModelSwitchedFromQuotaError(true);
// Set global quota error flag to prevent Flash model calls
config.setQuotaErrorOccurred(true);
// Switch model for future use but return false to stop current retry
config.setModel(fallbackModel);
logFlashFallback(
@@ -406,58 +364,15 @@ const App = ({ config, settings, startupWarnings = [], version }: AppProps) => {
config.setFlashFallbackHandler(flashFallbackHandler);
}, [config, addItem, userTier]);
// Terminal and UI setup
const { rows: terminalHeight, columns: terminalWidth } = useTerminalSize();
const { stdin, setRawMode } = useStdin();
const isInitialMount = useRef(true);
const widthFraction = 0.9;
const inputWidth = Math.max(
20,
Math.floor(terminalWidth * widthFraction) - 3,
);
const suggestionsWidth = Math.max(60, Math.floor(terminalWidth * 0.8));
// Utility callbacks
const isValidPath = useCallback((filePath: string): boolean => {
try {
return fs.existsSync(filePath) && fs.statSync(filePath).isFile();
} catch (_e) {
return false;
}
}, []);
const getPreferredEditor = useCallback(() => {
const editorType = settings.merged.preferredEditor;
const isValidEditor = isEditorAvailable(editorType);
if (!isValidEditor) {
openEditorDialog();
return;
}
return editorType as EditorType;
}, [settings, openEditorDialog]);
const onAuthError = useCallback(() => {
setAuthError('reauth required');
openAuthDialog();
}, [openAuthDialog, setAuthError]);
// Core hooks and processors
const {
vimEnabled: vimModeEnabled,
vimMode,
toggleVimEnabled,
} = useVimMode();
const {
handleSlashCommand,
slashCommands,
pendingHistoryItems: pendingSlashCommandHistoryItems,
commandContext,
shellConfirmationRequest,
} = useSlashCommandProcessor(
config,
settings,
history,
addItem,
clearItems,
loadHistory,
@@ -468,44 +383,29 @@ const App = ({ config, settings, startupWarnings = [], version }: AppProps) => {
openAuthDialog,
openEditorDialog,
toggleCorgiMode,
showToolDescriptions,
setQuittingMessages,
openPrivacyNotice,
toggleVimEnabled,
setIsProcessing,
);
const pendingHistoryItems = [...pendingSlashCommandHistoryItems];
const {
streamingState,
submitQuery,
initError,
pendingHistoryItems: pendingGeminiHistoryItems,
thought,
} = useGeminiStream(
config.getGeminiClient(),
history,
addItem,
setShowHelp,
config,
setDebugMessage,
handleSlashCommand,
shellModeActive,
getPreferredEditor,
onAuthError,
performMemoryRefresh,
modelSwitchedFromQuotaError,
setModelSwitchedFromQuotaError,
);
const { rows: terminalHeight, columns: terminalWidth } = useTerminalSize();
const isInitialMount = useRef(true);
const { stdin, setRawMode } = useStdin();
const isValidPath = useCallback((filePath: string): boolean => {
try {
return fs.existsSync(filePath) && fs.statSync(filePath).isFile();
} catch (_e) {
return false;
}
}, []);
// Input handling
const handleFinalSubmit = useCallback(
(submittedValue: string) => {
const trimmedValue = submittedValue.trim();
if (trimmedValue.length > 0) {
submitQuery(trimmedValue);
}
},
[submitQuery],
const widthFraction = 0.9;
const inputWidth = Math.max(
20,
Math.floor(terminalWidth * widthFraction) - 3,
);
const suggestionsWidth = Math.max(60, Math.floor(terminalWidth * 0.8));
const buffer = useTextBuffer({
initialText: '',
@@ -516,14 +416,6 @@ const App = ({ config, settings, startupWarnings = [], version }: AppProps) => {
shellModeActive,
});
const { handleInput: vimHandleInput } = useVim(buffer, handleFinalSubmit);
const pendingHistoryItems = [...pendingSlashCommandHistoryItems];
pendingHistoryItems.push(...pendingGeminiHistoryItems);
const { elapsedTime, currentLoadingPhrase } =
useLoadingIndicator(streamingState);
const showAutoAcceptIndicator = useAutoAcceptIndicator({ config });
const handleExit = useCallback(
(
pressedOnce: boolean,
@@ -534,8 +426,15 @@ const App = ({ config, settings, startupWarnings = [], version }: AppProps) => {
if (timerRef.current) {
clearTimeout(timerRef.current);
}
// Directly invoke the central command handler.
handleSlashCommand('/quit');
const quitCommand = slashCommands.find(
(cmd) => cmd.name === 'quit' || cmd.altName === 'exit',
);
if (quitCommand && quitCommand.action) {
quitCommand.action(commandContext, '');
} else {
// This is unlikely to be needed but added for an additional fallback.
process.exit(0);
}
} else {
setPressedOnce(true);
timerRef.current = setTimeout(() => {
@@ -544,7 +443,8 @@ const App = ({ config, settings, startupWarnings = [], version }: AppProps) => {
}, CTRL_EXIT_PROMPT_DURATION_MS);
}
},
[handleSlashCommand],
// Add commandContext to the dependency array here!
[slashCommands, commandContext],
);
useInput((input: string, key: InkKeyType) => {
@@ -568,8 +468,6 @@ const App = ({ config, settings, startupWarnings = [], version }: AppProps) => {
if (Object.keys(mcpServers || {}).length > 0) {
handleSlashCommand(newValue ? '/mcp desc' : '/mcp nodesc');
}
} else if (key.ctrl && input === 'e' && ideContext) {
setShowIDEContextDetail((prev) => !prev);
} else if (key.ctrl && (input === 'c' || input === 'C')) {
handleExit(ctrlCPressedOnce, setCtrlCPressedOnce, ctrlCTimerRef);
} else if (key.ctrl && (input === 'd' || input === 'D')) {
@@ -589,6 +487,57 @@ const App = ({ config, settings, startupWarnings = [], version }: AppProps) => {
}
}, [config]);
const getPreferredEditor = useCallback(() => {
const editorType = settings.merged.preferredEditor;
const isValidEditor = isEditorAvailable(editorType);
if (!isValidEditor) {
openEditorDialog();
return;
}
return editorType as EditorType;
}, [settings, openEditorDialog]);
const onAuthError = useCallback(() => {
setAuthError('reauth required');
openAuthDialog();
}, [openAuthDialog, setAuthError]);
const {
streamingState,
submitQuery,
initError,
pendingHistoryItems: pendingGeminiHistoryItems,
thought,
} = useGeminiStream(
config.getGeminiClient(),
history,
addItem,
setShowHelp,
config,
setDebugMessage,
handleSlashCommand,
shellModeActive,
getPreferredEditor,
onAuthError,
performMemoryRefresh,
modelSwitchedFromQuotaError,
setModelSwitchedFromQuotaError,
);
pendingHistoryItems.push(...pendingGeminiHistoryItems);
const { elapsedTime, currentLoadingPhrase } =
useLoadingIndicator(streamingState);
const showAutoAcceptIndicator = useAutoAcceptIndicator({ config });
const handleFinalSubmit = useCallback(
(submittedValue: string) => {
const trimmedValue = submittedValue.trim();
if (trimmedValue.length > 0) {
submitQuery(trimmedValue);
}
},
[submitQuery],
);
const logger = useLogger();
const [userMessages, setUserMessages] = useState<string[]>([]);
@@ -628,8 +577,7 @@ const App = ({ config, settings, startupWarnings = [], version }: AppProps) => {
fetchUserMessages();
}, [history, logger]);
const isInputActive =
streamingState === StreamingState.Idle && !initError && !isProcessing;
const isInputActive = streamingState === StreamingState.Idle && !initError;
const handleClearScreen = useCallback(() => {
clearItems();
@@ -747,13 +695,9 @@ const App = ({ config, settings, startupWarnings = [], version }: AppProps) => {
// Arbitrary threshold to ensure that items in the static area are large
// enough but not too large to make the terminal hard to use.
const staticAreaMaxItemHeight = Math.max(terminalHeight * 4, 100);
const placeholder = vimModeEnabled
? " Press 'i' for INSERT mode and 'Esc' for NORMAL mode."
: ' Type your message or @path/to/file';
return (
<StreamingContext.Provider value={streamingState}>
<Box flexDirection="column" width="90%">
<Box flexDirection="column" marginBottom={1} width="90%">
{/* Move UpdateNotification outside Static so it can re-render when updateMessage changes */}
{updateMessage && <UpdateNotification message={updateMessage} />}
@@ -835,9 +779,7 @@ const App = ({ config, settings, startupWarnings = [], version }: AppProps) => {
</Box>
)}
{shellConfirmationRequest ? (
<ShellConfirmationDialog request={shellConfirmationRequest} />
) : isThemeDialogOpen ? (
{isThemeDialogOpen ? (
<Box flexDirection="column">
{themeError && (
<Box marginBottom={1}>
@@ -922,7 +864,6 @@ const App = ({ config, settings, startupWarnings = [], version }: AppProps) => {
}
elapsedTime={elapsedTime}
/>
<Box
marginTop={1}
display="flex"
@@ -943,11 +884,9 @@ const App = ({ config, settings, startupWarnings = [], version }: AppProps) => {
</Text>
) : (
<ContextSummaryDisplay
openFiles={openFiles}
geminiMdFileCount={geminiMdFileCount}
contextFileNames={contextFileNames}
mcpServers={config.getMcpServers()}
blockedMcpServers={config.getBlockedMcpServers()}
showToolDescriptions={showToolDescriptions}
/>
)}
@@ -962,9 +901,7 @@ const App = ({ config, settings, startupWarnings = [], version }: AppProps) => {
{shellModeActive && <ShellModeIndicator />}
</Box>
</Box>
{showIDEContextDetail && (
<IDEContextDetailDisplay openFiles={openFiles} />
)}
{showErrorDetails && (
<OverflowProvider>
<Box flexDirection="column">
@@ -993,9 +930,6 @@ const App = ({ config, settings, startupWarnings = [], version }: AppProps) => {
commandContext={commandContext}
shellModeActive={shellModeActive}
setShellModeActive={setShellModeActive}
focus={isFocused}
vimHandleInput={vimHandleInput}
placeholder={placeholder}
/>
)}
</>
@@ -1047,7 +981,6 @@ const App = ({ config, settings, startupWarnings = [], version }: AppProps) => {
}
promptTokenCount={sessionStats.lastPromptTokenCount}
nightly={nightly}
vimMode={vimModeEnabled ? vimMode : undefined}
/>
</Box>
</Box>


@@ -1,18 +0,0 @@
// Vitest Snapshot v1, https://vitest.dev/guide/snapshot.html
exports[`App UI > should render correctly with the prompt input box 1`] = `
"
╭────────────────────────────────────────────────────────────────────────────────────────╮
│ > Type your message or @path/to/file │
╰────────────────────────────────────────────────────────────────────────────────────────╯
/test/dir no sandbox (see /docs) model (100% context left)"
`;
exports[`App UI > should render the initial UI correctly 1`] = `
"
I'm Feeling Lucky (esc to cancel, 0s)
/test/dir no sandbox (see /docs) model (100% context left)"
`;


@@ -38,12 +38,6 @@ export const Colors: ColorsTheme = {
get AccentRed() {
return themeManager.getActiveTheme().colors.AccentRed;
},
get DiffAdded() {
return themeManager.getActiveTheme().colors.DiffAdded;
},
get DiffRemoved() {
return themeManager.getActiveTheme().colors.DiffRemoved;
},
get Comment() {
return themeManager.getActiveTheme().colors.Comment;
},


@@ -5,14 +5,13 @@
*/
import { getCliVersion } from '../../utils/version.js';
import { CommandKind, SlashCommand } from './types.js';
import { SlashCommand } from './types.js';
import process from 'node:process';
import { MessageType, type HistoryItemAbout } from '../types.js';
export const aboutCommand: SlashCommand = {
name: 'about',
description: 'show version info',
kind: CommandKind.BUILT_IN,
action: async (context) => {
const osVersion = process.platform;
let sandboxEnv = 'no sandbox';


@@ -4,12 +4,11 @@
* SPDX-License-Identifier: Apache-2.0
*/
import { CommandKind, OpenDialogActionReturn, SlashCommand } from './types.js';
import { OpenDialogActionReturn, SlashCommand } from './types.js';
export const authCommand: SlashCommand = {
name: 'auth',
description: 'change the auth method',
kind: CommandKind.BUILT_IN,
action: (_context, _args): OpenDialogActionReturn => ({
type: 'dialog',
dialog: 'auth',


@@ -1,98 +0,0 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
import open from 'open';
import { bugCommand } from './bugCommand.js';
import { createMockCommandContext } from '../../test-utils/mockCommandContext.js';
import { getCliVersion } from '../../utils/version.js';
import { GIT_COMMIT_INFO } from '../../generated/git-commit.js';
import { formatMemoryUsage } from '../utils/formatters.js';
// Mock dependencies
vi.mock('open');
vi.mock('../../utils/version.js');
vi.mock('../utils/formatters.js');
vi.mock('node:process', () => ({
default: {
platform: 'test-platform',
version: 'v20.0.0',
// Keep other necessary process properties if needed by other parts of the code
env: process.env,
memoryUsage: () => ({ rss: 0 }),
},
}));
describe('bugCommand', () => {
beforeEach(() => {
vi.mocked(getCliVersion).mockResolvedValue('0.1.0');
vi.mocked(formatMemoryUsage).mockReturnValue('100 MB');
vi.stubEnv('SANDBOX', 'qwen-test');
});
afterEach(() => {
vi.unstubAllEnvs();
vi.clearAllMocks();
});
it('should generate the default GitHub issue URL', async () => {
const mockContext = createMockCommandContext({
services: {
config: {
getModel: () => 'qwen3-coder-plus',
getBugCommand: () => undefined,
},
},
});
if (!bugCommand.action) throw new Error('Action is not defined');
await bugCommand.action(mockContext, 'A test bug');
const expectedInfo = `
* **CLI Version:** 0.1.0
* **Git Commit:** ${GIT_COMMIT_INFO}
* **Operating System:** test-platform v20.0.0
* **Sandbox Environment:** test
* **Model Version:** qwen3-coder-plus
* **Memory Usage:** 100 MB
`;
const expectedUrl =
'https://github.com/QwenLM/qwen-code/issues/new?template=bug_report.yml&title=A%20test%20bug&info=' +
encodeURIComponent(expectedInfo);
expect(open).toHaveBeenCalledWith(expectedUrl);
});
it('should use a custom URL template from config if provided', async () => {
const customTemplate =
'https://internal.bug-tracker.com/new?desc={title}&details={info}';
const mockContext = createMockCommandContext({
services: {
config: {
getModel: () => 'qwen3-coder-plus',
getBugCommand: () => ({ urlTemplate: customTemplate }),
},
},
});
if (!bugCommand.action) throw new Error('Action is not defined');
await bugCommand.action(mockContext, 'A custom bug');
const expectedInfo = `
* **CLI Version:** 0.1.0
* **Git Commit:** ${GIT_COMMIT_INFO}
* **Operating System:** test-platform v20.0.0
* **Sandbox Environment:** test
* **Model Version:** qwen3-coder-plus
* **Memory Usage:** 100 MB
`;
const expectedUrl = customTemplate
.replace('{title}', encodeURIComponent('A custom bug'))
.replace('{info}', encodeURIComponent(expectedInfo));
expect(open).toHaveBeenCalledWith(expectedUrl);
});
});


@@ -1,83 +0,0 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import open from 'open';
import process from 'node:process';
import {
type CommandContext,
type SlashCommand,
CommandKind,
} from './types.js';
import { MessageType } from '../types.js';
import { GIT_COMMIT_INFO } from '../../generated/git-commit.js';
import { formatMemoryUsage } from '../utils/formatters.js';
import { getCliVersion } from '../../utils/version.js';
export const bugCommand: SlashCommand = {
name: 'bug',
description: 'submit a bug report',
kind: CommandKind.BUILT_IN,
action: async (context: CommandContext, args?: string): Promise<void> => {
const bugDescription = (args || '').trim();
const { config } = context.services;
const osVersion = `${process.platform} ${process.version}`;
let sandboxEnv = 'no sandbox';
if (process.env.SANDBOX && process.env.SANDBOX !== 'sandbox-exec') {
sandboxEnv = process.env.SANDBOX.replace(/^qwen-(?:code-)?/, '');
} else if (process.env.SANDBOX === 'sandbox-exec') {
sandboxEnv = `sandbox-exec (${
process.env.SEATBELT_PROFILE || 'unknown'
})`;
}
const modelVersion = config?.getModel() || 'Unknown';
const cliVersion = await getCliVersion();
const memoryUsage = formatMemoryUsage(process.memoryUsage().rss);
const info = `
* **CLI Version:** ${cliVersion}
* **Git Commit:** ${GIT_COMMIT_INFO}
* **Operating System:** ${osVersion}
* **Sandbox Environment:** ${sandboxEnv}
* **Model Version:** ${modelVersion}
* **Memory Usage:** ${memoryUsage}
`;
let bugReportUrl =
'https://github.com/QwenLM/qwen-code/issues/new?template=bug_report.yml&title={title}&info={info}';
const bugCommandSettings = config?.getBugCommand();
if (bugCommandSettings?.urlTemplate) {
bugReportUrl = bugCommandSettings.urlTemplate;
}
bugReportUrl = bugReportUrl
.replace('{title}', encodeURIComponent(bugDescription))
.replace('{info}', encodeURIComponent(info));
context.ui.addItem(
{
type: MessageType.INFO,
text: `To submit your bug report, please open the following URL in your browser:\n${bugReportUrl}`,
},
Date.now(),
);
try {
await open(bugReportUrl);
} catch (error) {
const errorMessage =
error instanceof Error ? error.message : String(error);
context.ui.addItem(
{
type: MessageType.ERROR,
text: `Could not open URL in browser: ${errorMessage}`,
},
Date.now(),
);
}
},
};


@@ -1,300 +0,0 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import {
vi,
describe,
it,
expect,
beforeEach,
afterEach,
Mocked,
} from 'vitest';
import {
type CommandContext,
MessageActionReturn,
SlashCommand,
} from './types.js';
import { createMockCommandContext } from '../../test-utils/mockCommandContext.js';
import { Content } from '@google/genai';
import { GeminiClient } from '@qwen-code/qwen-code-core';
import * as fsPromises from 'fs/promises';
import { chatCommand } from './chatCommand.js';
import { Stats } from 'fs';
import { HistoryItemWithoutId } from '../types.js';
vi.mock('fs/promises', () => ({
stat: vi.fn(),
readdir: vi.fn().mockResolvedValue(['file1.txt', 'file2.txt'] as string[]),
}));
describe('chatCommand', () => {
const mockFs = fsPromises as Mocked<typeof fsPromises>;
let mockContext: CommandContext;
let mockGetChat: ReturnType<typeof vi.fn>;
let mockSaveCheckpoint: ReturnType<typeof vi.fn>;
let mockLoadCheckpoint: ReturnType<typeof vi.fn>;
let mockGetHistory: ReturnType<typeof vi.fn>;
const getSubCommand = (name: 'list' | 'save' | 'resume'): SlashCommand => {
const subCommand = chatCommand.subCommands?.find(
(cmd) => cmd.name === name,
);
if (!subCommand) {
throw new Error(`/chat ${name} command not found.`);
}
return subCommand;
};
beforeEach(() => {
mockGetHistory = vi.fn().mockReturnValue([]);
mockGetChat = vi.fn().mockResolvedValue({
getHistory: mockGetHistory,
});
mockSaveCheckpoint = vi.fn().mockResolvedValue(undefined);
mockLoadCheckpoint = vi.fn().mockResolvedValue([]);
mockContext = createMockCommandContext({
services: {
config: {
getProjectTempDir: () => '/tmp/gemini',
getGeminiClient: () =>
({
getChat: mockGetChat,
}) as unknown as GeminiClient,
},
logger: {
saveCheckpoint: mockSaveCheckpoint,
loadCheckpoint: mockLoadCheckpoint,
initialize: vi.fn().mockResolvedValue(undefined),
},
},
});
});
afterEach(() => {
vi.restoreAllMocks();
});
it('should have the correct main command definition', () => {
expect(chatCommand.name).toBe('chat');
expect(chatCommand.description).toBe('Manage conversation history.');
expect(chatCommand.subCommands).toHaveLength(3);
});
describe('list subcommand', () => {
let listCommand: SlashCommand;
beforeEach(() => {
listCommand = getSubCommand('list');
});
it('should inform when no checkpoints are found', async () => {
mockFs.readdir.mockImplementation(
(async (_: string): Promise<string[]> =>
[] as string[]) as unknown as typeof fsPromises.readdir,
);
const result = await listCommand?.action?.(mockContext, '');
expect(result).toEqual({
type: 'message',
messageType: 'info',
content: 'No saved conversation checkpoints found.',
});
});
it('should list found checkpoints', async () => {
const fakeFiles = ['checkpoint-test1.json', 'checkpoint-test2.json'];
const date = new Date();
mockFs.readdir.mockImplementation(
(async (_: string): Promise<string[]> =>
fakeFiles as string[]) as unknown as typeof fsPromises.readdir,
);
mockFs.stat.mockImplementation((async (path: string): Promise<Stats> => {
if (path.endsWith('test1.json')) {
return { mtime: date } as Stats;
}
return { mtime: new Date(date.getTime() + 1000) } as Stats;
}) as unknown as typeof fsPromises.stat);
const result = (await listCommand?.action?.(
mockContext,
'',
)) as MessageActionReturn;
const content = result?.content ?? '';
expect(result?.type).toBe('message');
expect(content).toContain('List of saved conversations:');
const isoDate = date
.toISOString()
.match(/(\d{4}-\d{2}-\d{2})T(\d{2}:\d{2}:\d{2})/);
const formattedDate = isoDate ? `${isoDate[1]} ${isoDate[2]}` : '';
expect(content).toContain(formattedDate);
const index1 = content.indexOf('- \u001b[36mtest1\u001b[0m');
const index2 = content.indexOf('- \u001b[36mtest2\u001b[0m');
expect(index1).toBeGreaterThanOrEqual(0);
expect(index2).toBeGreaterThan(index1);
});
it('should handle invalid date formats gracefully', async () => {
const fakeFiles = ['checkpoint-baddate.json'];
const badDate = {
toISOString: () => 'an-invalid-date-string',
} as Date;
mockFs.readdir.mockResolvedValue(fakeFiles);
mockFs.stat.mockResolvedValue({ mtime: badDate } as Stats);
const result = (await listCommand?.action?.(
mockContext,
'',
)) as MessageActionReturn;
const content = result?.content ?? '';
expect(content).toContain('(saved on Invalid Date)');
});
});
describe('save subcommand', () => {
let saveCommand: SlashCommand;
const tag = 'my-tag';
beforeEach(() => {
saveCommand = getSubCommand('save');
});
it('should return an error if tag is missing', async () => {
const result = await saveCommand?.action?.(mockContext, ' ');
expect(result).toEqual({
type: 'message',
messageType: 'error',
content: 'Missing tag. Usage: /chat save <tag>',
});
});
it('should inform if conversation history is empty', async () => {
mockGetHistory.mockReturnValue([]);
const result = await saveCommand?.action?.(mockContext, tag);
expect(result).toEqual({
type: 'message',
messageType: 'info',
content: 'No conversation found to save.',
});
});
it('should save the conversation', async () => {
const history: HistoryItemWithoutId[] = [
{
type: 'user',
text: 'hello',
},
];
mockGetHistory.mockReturnValue(history);
const result = await saveCommand?.action?.(mockContext, tag);
expect(mockSaveCheckpoint).toHaveBeenCalledWith(history, tag);
expect(result).toEqual({
type: 'message',
messageType: 'info',
content: `Conversation checkpoint saved with tag: ${tag}.`,
});
});
});
describe('resume subcommand', () => {
const goodTag = 'good-tag';
const badTag = 'bad-tag';
let resumeCommand: SlashCommand;
beforeEach(() => {
resumeCommand = getSubCommand('resume');
});
it('should return an error if tag is missing', async () => {
const result = await resumeCommand?.action?.(mockContext, '');
expect(result).toEqual({
type: 'message',
messageType: 'error',
content: 'Missing tag. Usage: /chat resume <tag>',
});
});
it('should inform if checkpoint is not found', async () => {
mockLoadCheckpoint.mockResolvedValue([]);
const result = await resumeCommand?.action?.(mockContext, badTag);
expect(result).toEqual({
type: 'message',
messageType: 'info',
content: `No saved checkpoint found with tag: ${badTag}.`,
});
});
it('should resume a conversation', async () => {
const conversation: Content[] = [
{ role: 'user', parts: [{ text: 'hello gemini' }] },
{ role: 'model', parts: [{ text: 'hello world' }] },
];
mockLoadCheckpoint.mockResolvedValue(conversation);
const result = await resumeCommand?.action?.(mockContext, goodTag);
expect(result).toEqual({
type: 'load_history',
history: [
{ type: 'user', text: 'hello gemini' },
{ type: 'gemini', text: 'hello world' },
] as HistoryItemWithoutId[],
clientHistory: conversation,
});
});
describe('completion', () => {
it('should provide completion suggestions', async () => {
const fakeFiles = ['checkpoint-alpha.json', 'checkpoint-beta.json'];
mockFs.readdir.mockImplementation(
(async (_: string): Promise<string[]> =>
fakeFiles as string[]) as unknown as typeof fsPromises.readdir,
);
mockFs.stat.mockImplementation(
(async (_: string): Promise<Stats> =>
({
mtime: new Date(),
}) as Stats) as unknown as typeof fsPromises.stat,
);
const result = await resumeCommand?.completion?.(mockContext, 'a');
expect(result).toEqual(['alpha']);
});
it('should suggest filenames sorted by modified time (newest first)', async () => {
const fakeFiles = ['checkpoint-test1.json', 'checkpoint-test2.json'];
const date = new Date();
mockFs.readdir.mockImplementation(
(async (_: string): Promise<string[]> =>
fakeFiles as string[]) as unknown as typeof fsPromises.readdir,
);
mockFs.stat.mockImplementation((async (
path: string,
): Promise<Stats> => {
if (path.endsWith('test1.json')) {
return { mtime: date } as Stats;
}
return { mtime: new Date(date.getTime() + 1000) } as Stats;
}) as unknown as typeof fsPromises.stat);
const result = await resumeCommand?.completion?.(mockContext, '');
// Sort items by last modified time (newest first)
expect(result).toEqual(['test2', 'test1']);
});
});
});
});


@@ -1,214 +0,0 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import * as fsPromises from 'fs/promises';
import {
CommandContext,
SlashCommand,
MessageActionReturn,
CommandKind,
} from './types.js';
import path from 'path';
import { HistoryItemWithoutId, MessageType } from '../types.js';
interface ChatDetail {
name: string;
mtime: Date;
}
const getSavedChatTags = async (
context: CommandContext,
mtSortDesc: boolean,
): Promise<ChatDetail[]> => {
const geminiDir = context.services.config?.getProjectTempDir();
if (!geminiDir) {
return [];
}
try {
const file_head = 'checkpoint-';
const file_tail = '.json';
const files = await fsPromises.readdir(geminiDir);
const chatDetails: Array<{ name: string; mtime: Date }> = [];
for (const file of files) {
if (file.startsWith(file_head) && file.endsWith(file_tail)) {
const filePath = path.join(geminiDir, file);
const stats = await fsPromises.stat(filePath);
chatDetails.push({
name: file.slice(file_head.length, -file_tail.length),
mtime: stats.mtime,
});
}
}
chatDetails.sort((a, b) =>
mtSortDesc
? b.mtime.getTime() - a.mtime.getTime()
: a.mtime.getTime() - b.mtime.getTime(),
);
return chatDetails;
} catch (_err) {
return [];
}
};
const listCommand: SlashCommand = {
name: 'list',
description: 'List saved conversation checkpoints',
kind: CommandKind.BUILT_IN,
action: async (context): Promise<MessageActionReturn> => {
const chatDetails = await getSavedChatTags(context, false);
if (chatDetails.length === 0) {
return {
type: 'message',
messageType: 'info',
content: 'No saved conversation checkpoints found.',
};
}
const maxNameLength = Math.max(
...chatDetails.map((chat) => chat.name.length),
);
let message = 'List of saved conversations:\n\n';
for (const chat of chatDetails) {
const paddedName = chat.name.padEnd(maxNameLength, ' ');
const isoString = chat.mtime.toISOString();
const match = isoString.match(/(\d{4}-\d{2}-\d{2})T(\d{2}:\d{2}:\d{2})/);
const formattedDate = match ? `${match[1]} ${match[2]}` : 'Invalid Date';
message += ` - \u001b[36m${paddedName}\u001b[0m \u001b[90m(saved on ${formattedDate})\u001b[0m\n`;
}
message += `\n\u001b[90mNote: Newest last, oldest first\u001b[0m`;
return {
type: 'message',
messageType: 'info',
content: message,
};
},
};
const saveCommand: SlashCommand = {
name: 'save',
description:
'Save the current conversation as a checkpoint. Usage: /chat save <tag>',
kind: CommandKind.BUILT_IN,
action: async (context, args): Promise<MessageActionReturn> => {
const tag = args.trim();
if (!tag) {
return {
type: 'message',
messageType: 'error',
content: 'Missing tag. Usage: /chat save <tag>',
};
}
const { logger, config } = context.services;
await logger.initialize();
const chat = await config?.getGeminiClient()?.getChat();
if (!chat) {
return {
type: 'message',
messageType: 'error',
content: 'No chat client available to save conversation.',
};
}
const history = chat.getHistory();
if (history.length > 0) {
await logger.saveCheckpoint(history, tag);
return {
type: 'message',
messageType: 'info',
content: `Conversation checkpoint saved with tag: ${tag}.`,
};
} else {
return {
type: 'message',
messageType: 'info',
content: 'No conversation found to save.',
};
}
},
};
const resumeCommand: SlashCommand = {
name: 'resume',
altNames: ['load'],
description:
'Resume a conversation from a checkpoint. Usage: /chat resume <tag>',
kind: CommandKind.BUILT_IN,
action: async (context, args) => {
const tag = args.trim();
if (!tag) {
return {
type: 'message',
messageType: 'error',
content: 'Missing tag. Usage: /chat resume <tag>',
};
}
const { logger } = context.services;
await logger.initialize();
const conversation = await logger.loadCheckpoint(tag);
if (conversation.length === 0) {
return {
type: 'message',
messageType: 'info',
content: `No saved checkpoint found with tag: ${tag}.`,
};
}
const rolemap: { [key: string]: MessageType } = {
user: MessageType.USER,
model: MessageType.GEMINI,
};
const uiHistory: HistoryItemWithoutId[] = [];
let hasSystemPrompt = false;
let i = 0;
for (const item of conversation) {
i += 1;
const text =
item.parts
?.filter((m) => !!m.text)
.map((m) => m.text)
.join('') || '';
if (!text) {
continue;
}
if (i === 1 && text.match(/context for our chat/)) {
hasSystemPrompt = true;
}
if (i > 2 || !hasSystemPrompt) {
uiHistory.push({
type: (item.role && rolemap[item.role]) || MessageType.GEMINI,
text,
} as HistoryItemWithoutId);
}
}
return {
type: 'load_history',
history: uiHistory,
clientHistory: conversation,
};
},
completion: async (context, partialArg) => {
const chatDetails = await getSavedChatTags(context, true);
return chatDetails
.map((chat) => chat.name)
.filter((name) => name.startsWith(partialArg));
},
};
export const chatCommand: SlashCommand = {
name: 'chat',
description: 'Manage conversation history.',
kind: CommandKind.BUILT_IN,
subCommands: [listCommand, saveCommand, resumeCommand],
};


@@ -8,19 +8,7 @@ import { vi, describe, it, expect, beforeEach, Mock } from 'vitest';
import { clearCommand } from './clearCommand.js';
import { type CommandContext } from './types.js';
import { createMockCommandContext } from '../../test-utils/mockCommandContext.js';
// Mock the telemetry service
vi.mock('@qwen-code/qwen-code-core', async () => {
const actual = await vi.importActual('@qwen-code/qwen-code-core');
return {
...actual,
uiTelemetryService: {
resetLastPromptTokenCount: vi.fn(),
},
};
});
import { GeminiClient, uiTelemetryService } from '@qwen-code/qwen-code-core';
import { GeminiClient } from '@google/gemini-cli-core';
describe('clearCommand', () => {
let mockContext: CommandContext;
@@ -28,7 +16,6 @@ describe('clearCommand', () => {
beforeEach(() => {
mockResetChat = vi.fn().mockResolvedValue(undefined);
vi.clearAllMocks();
mockContext = createMockCommandContext({
services: {
@@ -42,7 +29,7 @@ describe('clearCommand', () => {
});
});
it('should set debug message, reset chat, reset telemetry, and clear UI when config is available', async () => {
it('should set debug message, reset chat, and clear UI when config is available', async () => {
if (!clearCommand.action) {
throw new Error('clearCommand must have an action.');
}
@@ -55,10 +42,9 @@ describe('clearCommand', () => {
expect(mockContext.ui.setDebugMessage).toHaveBeenCalledTimes(1);
expect(mockResetChat).toHaveBeenCalledTimes(1);
expect(mockContext.session.resetSession).toHaveBeenCalledTimes(1);
expect(uiTelemetryService.resetLastPromptTokenCount).toHaveBeenCalledTimes(
1,
);
expect(mockContext.ui.clear).toHaveBeenCalledTimes(1);
// Check the order of operations.
@@ -67,15 +53,10 @@ describe('clearCommand', () => {
const resetChatOrder = mockResetChat.mock.invocationCallOrder[0];
const resetSessionOrder = (mockContext.session.resetSession as Mock).mock
.invocationCallOrder[0];
const resetTelemetryOrder = (
uiTelemetryService.resetLastPromptTokenCount as Mock
).mock.invocationCallOrder[0];
const clearOrder = (mockContext.ui.clear as Mock).mock
.invocationCallOrder[0];
expect(setDebugMessageOrder).toBeLessThan(resetChatOrder);
expect(resetChatOrder).toBeLessThan(resetTelemetryOrder);
expect(resetTelemetryOrder).toBeLessThan(clearOrder);
expect(resetChatOrder).toBeLessThan(resetSessionOrder);
expect(resetSessionOrder).toBeLessThan(clearOrder);
});
@@ -94,13 +75,10 @@ describe('clearCommand', () => {
await clearCommand.action(nullConfigContext, '');
expect(nullConfigContext.ui.setDebugMessage).toHaveBeenCalledWith(
'Clearing terminal.',
'Clearing terminal and resetting chat.',
);
expect(mockResetChat).not.toHaveBeenCalled();
expect(nullConfigContext.session.resetSession).toHaveBeenCalledTimes(1);
expect(uiTelemetryService.resetLastPromptTokenCount).toHaveBeenCalledTimes(
1,
);
expect(nullConfigContext.ui.clear).toHaveBeenCalledTimes(1);
});
});


@@ -4,26 +4,14 @@
* SPDX-License-Identifier: Apache-2.0
*/
import { uiTelemetryService } from '@qwen-code/qwen-code-core';
import { CommandKind, SlashCommand } from './types.js';
import { SlashCommand } from './types.js';
export const clearCommand: SlashCommand = {
name: 'clear',
description: 'clear the screen and conversation history',
kind: CommandKind.BUILT_IN,
action: async (context, _args) => {
const geminiClient = context.services.config?.getGeminiClient();
if (geminiClient) {
context.ui.setDebugMessage('Clearing terminal and resetting chat.');
// If resetChat fails, the exception will propagate and halt the command,
// which is the correct behavior to signal a failure to the user.
await geminiClient.resetChat();
} else {
context.ui.setDebugMessage('Clearing terminal.');
}
uiTelemetryService.resetLastPromptTokenCount();
context.ui.setDebugMessage('Clearing terminal and resetting chat.');
await context.services.config?.getGeminiClient()?.resetChat();
context.session.resetSession();
context.ui.clear();
},


@@ -1,129 +0,0 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { GeminiClient } from '@qwen-code/qwen-code-core';
import { vi, describe, it, expect, beforeEach } from 'vitest';
import { compressCommand } from './compressCommand.js';
import { createMockCommandContext } from '../../test-utils/mockCommandContext.js';
import { MessageType } from '../types.js';
describe('compressCommand', () => {
let context: ReturnType<typeof createMockCommandContext>;
let mockTryCompressChat: ReturnType<typeof vi.fn>;
beforeEach(() => {
mockTryCompressChat = vi.fn();
context = createMockCommandContext({
services: {
config: {
getGeminiClient: () =>
({
tryCompressChat: mockTryCompressChat,
}) as unknown as GeminiClient,
},
},
});
});
it('should do nothing if a compression is already pending', async () => {
context.ui.pendingItem = {
type: MessageType.COMPRESSION,
compression: {
isPending: true,
originalTokenCount: null,
newTokenCount: null,
},
};
await compressCommand.action!(context, '');
expect(context.ui.addItem).toHaveBeenCalledWith(
expect.objectContaining({
type: MessageType.ERROR,
text: 'Already compressing, wait for previous request to complete',
}),
expect.any(Number),
);
expect(context.ui.setPendingItem).not.toHaveBeenCalled();
expect(mockTryCompressChat).not.toHaveBeenCalled();
});
it('should set pending item, call tryCompressChat, and add result on success', async () => {
const compressedResult = {
originalTokenCount: 200,
newTokenCount: 100,
};
mockTryCompressChat.mockResolvedValue(compressedResult);
await compressCommand.action!(context, '');
expect(context.ui.setPendingItem).toHaveBeenNthCalledWith(
1,
expect.objectContaining({
type: MessageType.COMPRESSION,
compression: {
isPending: true,
originalTokenCount: null,
newTokenCount: null,
},
}),
);
expect(mockTryCompressChat).toHaveBeenCalledWith(
expect.stringMatching(/^compress-\d+$/),
true,
);
expect(context.ui.addItem).toHaveBeenCalledWith(
expect.objectContaining({
type: MessageType.COMPRESSION,
compression: {
isPending: false,
originalTokenCount: 200,
newTokenCount: 100,
},
}),
expect.any(Number),
);
expect(context.ui.setPendingItem).toHaveBeenNthCalledWith(2, null);
});
it('should add an error message if tryCompressChat returns falsy', async () => {
mockTryCompressChat.mockResolvedValue(null);
await compressCommand.action!(context, '');
expect(context.ui.addItem).toHaveBeenCalledWith(
expect.objectContaining({
type: MessageType.ERROR,
text: 'Failed to compress chat history.',
}),
expect.any(Number),
);
expect(context.ui.setPendingItem).toHaveBeenCalledWith(null);
});
it('should add an error message if tryCompressChat throws', async () => {
const error = new Error('Compression failed');
mockTryCompressChat.mockRejectedValue(error);
await compressCommand.action!(context, '');
expect(context.ui.addItem).toHaveBeenCalledWith(
expect.objectContaining({
type: MessageType.ERROR,
text: `Failed to compress chat history: ${error.message}`,
}),
expect.any(Number),
);
expect(context.ui.setPendingItem).toHaveBeenCalledWith(null);
});
it('should clear the pending item in a finally block', async () => {
mockTryCompressChat.mockRejectedValue(new Error('some error'));
await compressCommand.action!(context, '');
expect(context.ui.setPendingItem).toHaveBeenCalledWith(null);
});
});


@@ -1,78 +0,0 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { HistoryItemCompression, MessageType } from '../types.js';
import { CommandKind, SlashCommand } from './types.js';
export const compressCommand: SlashCommand = {
name: 'compress',
altNames: ['summarize'],
description: 'Compresses the context by replacing it with a summary.',
kind: CommandKind.BUILT_IN,
action: async (context) => {
const { ui } = context;
if (ui.pendingItem) {
ui.addItem(
{
type: MessageType.ERROR,
text: 'Already compressing, wait for previous request to complete',
},
Date.now(),
);
return;
}
const pendingMessage: HistoryItemCompression = {
type: MessageType.COMPRESSION,
compression: {
isPending: true,
originalTokenCount: null,
newTokenCount: null,
},
};
try {
ui.setPendingItem(pendingMessage);
const promptId = `compress-${Date.now()}`;
const compressed = await context.services.config
?.getGeminiClient()
?.tryCompressChat(promptId, true);
if (compressed) {
ui.addItem(
{
type: MessageType.COMPRESSION,
compression: {
isPending: false,
originalTokenCount: compressed.originalTokenCount,
newTokenCount: compressed.newTokenCount,
},
} as HistoryItemCompression,
Date.now(),
);
} else {
ui.addItem(
{
type: MessageType.ERROR,
text: 'Failed to compress chat history.',
},
Date.now(),
);
}
} catch (e) {
ui.addItem(
{
type: MessageType.ERROR,
text: `Failed to compress chat history: ${
e instanceof Error ? e.message : String(e)
}`,
},
Date.now(),
);
} finally {
ui.setPendingItem(null);
}
},
};


@@ -1,296 +0,0 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { vi, describe, it, expect, beforeEach, Mock } from 'vitest';
import { copyCommand } from './copyCommand.js';
import { type CommandContext } from './types.js';
import { createMockCommandContext } from '../../test-utils/mockCommandContext.js';
import { copyToClipboard } from '../utils/commandUtils.js';
vi.mock('../utils/commandUtils.js', () => ({
copyToClipboard: vi.fn(),
}));
describe('copyCommand', () => {
let mockContext: CommandContext;
let mockCopyToClipboard: Mock;
let mockGetChat: Mock;
let mockGetHistory: Mock;
beforeEach(() => {
vi.clearAllMocks();
mockCopyToClipboard = vi.mocked(copyToClipboard);
mockGetChat = vi.fn();
mockGetHistory = vi.fn();
mockContext = createMockCommandContext({
services: {
config: {
getGeminiClient: () => ({
getChat: mockGetChat,
}),
},
},
});
mockGetChat.mockReturnValue({
getHistory: mockGetHistory,
});
});
it('should return info message when no history is available', async () => {
if (!copyCommand.action) throw new Error('Command has no action');
mockGetChat.mockReturnValue(undefined);
const result = await copyCommand.action(mockContext, '');
expect(result).toEqual({
type: 'message',
messageType: 'info',
content: 'No output in history',
});
expect(mockCopyToClipboard).not.toHaveBeenCalled();
});
it('should return info message when history is empty', async () => {
if (!copyCommand.action) throw new Error('Command has no action');
mockGetHistory.mockReturnValue([]);
const result = await copyCommand.action(mockContext, '');
expect(result).toEqual({
type: 'message',
messageType: 'info',
content: 'No output in history',
});
expect(mockCopyToClipboard).not.toHaveBeenCalled();
});
it('should return info message when no AI messages are found in history', async () => {
if (!copyCommand.action) throw new Error('Command has no action');
const historyWithUserOnly = [
{
role: 'user',
parts: [{ text: 'Hello' }],
},
];
mockGetHistory.mockReturnValue(historyWithUserOnly);
const result = await copyCommand.action(mockContext, '');
expect(result).toEqual({
type: 'message',
messageType: 'info',
content: 'No output in history',
});
expect(mockCopyToClipboard).not.toHaveBeenCalled();
});
it('should copy last AI message to clipboard successfully', async () => {
if (!copyCommand.action) throw new Error('Command has no action');
const historyWithAiMessage = [
{
role: 'user',
parts: [{ text: 'Hello' }],
},
{
role: 'model',
parts: [{ text: 'Hi there! How can I help you?' }],
},
];
mockGetHistory.mockReturnValue(historyWithAiMessage);
mockCopyToClipboard.mockResolvedValue(undefined);
const result = await copyCommand.action(mockContext, '');
expect(result).toEqual({
type: 'message',
messageType: 'info',
content: 'Last output copied to the clipboard',
});
expect(mockCopyToClipboard).toHaveBeenCalledWith(
'Hi there! How can I help you?',
);
});
it('should handle multiple text parts in AI message', async () => {
if (!copyCommand.action) throw new Error('Command has no action');
const historyWithMultipleParts = [
{
role: 'model',
parts: [{ text: 'Part 1: ' }, { text: 'Part 2: ' }, { text: 'Part 3' }],
},
];
mockGetHistory.mockReturnValue(historyWithMultipleParts);
mockCopyToClipboard.mockResolvedValue(undefined);
const result = await copyCommand.action(mockContext, '');
expect(mockCopyToClipboard).toHaveBeenCalledWith('Part 1: Part 2: Part 3');
expect(result).toEqual({
type: 'message',
messageType: 'info',
content: 'Last output copied to the clipboard',
});
});
it('should filter out non-text parts', async () => {
if (!copyCommand.action) throw new Error('Command has no action');
const historyWithMixedParts = [
{
role: 'model',
parts: [
{ text: 'Text part' },
{ image: 'base64data' }, // Non-text part
{ text: ' more text' },
],
},
];
mockGetHistory.mockReturnValue(historyWithMixedParts);
mockCopyToClipboard.mockResolvedValue(undefined);
const result = await copyCommand.action(mockContext, '');
expect(mockCopyToClipboard).toHaveBeenCalledWith('Text part more text');
expect(result).toEqual({
type: 'message',
messageType: 'info',
content: 'Last output copied to the clipboard',
});
});
it('should get the last AI message when multiple AI messages exist', async () => {
if (!copyCommand.action) throw new Error('Command has no action');
const historyWithMultipleAiMessages = [
{
role: 'model',
parts: [{ text: 'First AI response' }],
},
{
role: 'user',
parts: [{ text: 'User message' }],
},
{
role: 'model',
parts: [{ text: 'Second AI response' }],
},
];
mockGetHistory.mockReturnValue(historyWithMultipleAiMessages);
mockCopyToClipboard.mockResolvedValue(undefined);
const result = await copyCommand.action(mockContext, '');
expect(mockCopyToClipboard).toHaveBeenCalledWith('Second AI response');
expect(result).toEqual({
type: 'message',
messageType: 'info',
content: 'Last output copied to the clipboard',
});
});
it('should handle clipboard copy error', async () => {
if (!copyCommand.action) throw new Error('Command has no action');
const historyWithAiMessage = [
{
role: 'model',
parts: [{ text: 'AI response' }],
},
];
mockGetHistory.mockReturnValue(historyWithAiMessage);
const clipboardError = new Error('Clipboard access denied');
mockCopyToClipboard.mockRejectedValue(clipboardError);
const result = await copyCommand.action(mockContext, '');
expect(result).toEqual({
type: 'message',
messageType: 'error',
content: 'Failed to copy to the clipboard.',
});
});
it('should handle non-Error clipboard errors', async () => {
if (!copyCommand.action) throw new Error('Command has no action');
const historyWithAiMessage = [
{
role: 'model',
parts: [{ text: 'AI response' }],
},
];
mockGetHistory.mockReturnValue(historyWithAiMessage);
mockCopyToClipboard.mockRejectedValue('String error');
const result = await copyCommand.action(mockContext, '');
expect(result).toEqual({
type: 'message',
messageType: 'error',
content: 'Failed to copy to the clipboard.',
});
});
it('should return info message when no text parts found in AI message', async () => {
if (!copyCommand.action) throw new Error('Command has no action');
const historyWithEmptyParts = [
{
role: 'model',
parts: [{ image: 'base64data' }], // No text parts
},
];
mockGetHistory.mockReturnValue(historyWithEmptyParts);
const result = await copyCommand.action(mockContext, '');
expect(result).toEqual({
type: 'message',
messageType: 'info',
content: 'Last AI output contains no text to copy.',
});
expect(mockCopyToClipboard).not.toHaveBeenCalled();
});
it('should handle unavailable config service', async () => {
if (!copyCommand.action) throw new Error('Command has no action');
const nullConfigContext = createMockCommandContext({
services: { config: null },
});
const result = await copyCommand.action(nullConfigContext, '');
expect(result).toEqual({
type: 'message',
messageType: 'info',
content: 'No output in history',
});
expect(mockCopyToClipboard).not.toHaveBeenCalled();
});
});


@@ -1,67 +0,0 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { copyToClipboard } from '../utils/commandUtils.js';
import {
CommandKind,
SlashCommand,
SlashCommandActionReturn,
} from './types.js';
export const copyCommand: SlashCommand = {
name: 'copy',
description: 'Copy the last result or code snippet to clipboard',
kind: CommandKind.BUILT_IN,
action: async (context, _args): Promise<SlashCommandActionReturn | void> => {
const chat = await context.services.config?.getGeminiClient()?.getChat();
const history = chat?.getHistory();
// Get the last message from the AI (model role)
const lastAiMessage = history
? history.filter((item) => item.role === 'model').pop()
: undefined;
if (!lastAiMessage) {
return {
type: 'message',
messageType: 'info',
content: 'No output in history',
};
}
// Extract text from the parts
const lastAiOutput = lastAiMessage.parts
?.filter((part) => part.text)
.map((part) => part.text)
.join('');
if (lastAiOutput) {
try {
await copyToClipboard(lastAiOutput);
return {
type: 'message',
messageType: 'info',
content: 'Last output copied to the clipboard',
};
} catch (error) {
const message = error instanceof Error ? error.message : String(error);
console.debug(message);
return {
type: 'message',
messageType: 'error',
content: 'Failed to copy to the clipboard.',
};
}
} else {
return {
type: 'message',
messageType: 'info',
content: 'Last AI output contains no text to copy.',
};
}
},
};


@@ -1,34 +0,0 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { describe, it, expect, beforeEach, vi } from 'vitest';
import { corgiCommand } from './corgiCommand.js';
import { type CommandContext } from './types.js';
import { createMockCommandContext } from '../../test-utils/mockCommandContext.js';
describe('corgiCommand', () => {
let mockContext: CommandContext;
beforeEach(() => {
mockContext = createMockCommandContext();
vi.spyOn(mockContext.ui, 'toggleCorgiMode');
});
it('should call the toggleCorgiMode function on the UI context', async () => {
if (!corgiCommand.action) {
throw new Error('The corgi command must have an action.');
}
await corgiCommand.action(mockContext, '');
expect(mockContext.ui.toggleCorgiMode).toHaveBeenCalledTimes(1);
});
it('should have the correct name and description', () => {
expect(corgiCommand.name).toBe('corgi');
expect(corgiCommand.description).toBe('Toggles corgi mode.');
});
});


@@ -1,16 +0,0 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { CommandKind, type SlashCommand } from './types.js';
export const corgiCommand: SlashCommand = {
name: 'corgi',
description: 'Toggles corgi mode.',
kind: CommandKind.BUILT_IN,
action: (context, _args) => {
context.ui.toggleCorgiMode();
},
};


@@ -1,99 +0,0 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { vi, describe, it, expect, beforeEach, afterEach } from 'vitest';
import open from 'open';
import { docsCommand } from './docsCommand.js';
import { type CommandContext } from './types.js';
import { createMockCommandContext } from '../../test-utils/mockCommandContext.js';
import { MessageType } from '../types.js';
// Mock the 'open' library
vi.mock('open', () => ({
default: vi.fn(),
}));
describe('docsCommand', () => {
let mockContext: CommandContext;
beforeEach(() => {
// Create a fresh mock context before each test
mockContext = createMockCommandContext();
// Reset the `open` mock
vi.mocked(open).mockClear();
});
afterEach(() => {
// Restore any stubbed environment variables
vi.unstubAllEnvs();
});
it("should add an info message and call 'open' in a non-sandbox environment", async () => {
if (!docsCommand.action) {
throw new Error('docsCommand must have an action.');
}
const docsUrl = 'https://github.com/QwenLM/qwen-code';
await docsCommand.action(mockContext, '');
expect(mockContext.ui.addItem).toHaveBeenCalledWith(
{
type: MessageType.INFO,
text: `Opening documentation in your browser: ${docsUrl}`,
},
expect.any(Number),
);
expect(open).toHaveBeenCalledWith(docsUrl);
});
it('should only add an info message in a sandbox environment', async () => {
if (!docsCommand.action) {
throw new Error('docsCommand must have an action.');
}
// Simulate a sandbox environment
process.env.SANDBOX = 'gemini-sandbox';
const docsUrl = 'https://github.com/QwenLM/qwen-code';
await docsCommand.action(mockContext, '');
expect(mockContext.ui.addItem).toHaveBeenCalledWith(
{
type: MessageType.INFO,
text: `Please open the following URL in your browser to view the documentation:\n${docsUrl}`,
},
expect.any(Number),
);
// Ensure 'open' was not called in the sandbox
expect(open).not.toHaveBeenCalled();
});
it("should not open browser for 'sandbox-exec'", async () => {
if (!docsCommand.action) {
throw new Error('docsCommand must have an action.');
}
// Simulate the specific 'sandbox-exec' environment
process.env.SANDBOX = 'sandbox-exec';
const docsUrl = 'https://github.com/QwenLM/qwen-code';
await docsCommand.action(mockContext, '');
// The logic should fall through to the 'else' block
expect(mockContext.ui.addItem).toHaveBeenCalledWith(
{
type: MessageType.INFO,
text: `Opening documentation in your browser: ${docsUrl}`,
},
expect.any(Number),
);
// 'open' should be called in this specific sandbox case
expect(open).toHaveBeenCalledWith(docsUrl);
});
});

View File

@@ -1,42 +0,0 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import open from 'open';
import process from 'node:process';
import {
type CommandContext,
type SlashCommand,
CommandKind,
} from './types.js';
import { MessageType } from '../types.js';
export const docsCommand: SlashCommand = {
name: 'docs',
description: 'open full Qwen Code documentation in your browser',
kind: CommandKind.BUILT_IN,
action: async (context: CommandContext): Promise<void> => {
const docsUrl = 'https://github.com/QwenLM/qwen-code';
if (process.env.SANDBOX && process.env.SANDBOX !== 'sandbox-exec') {
context.ui.addItem(
{
type: MessageType.INFO,
text: `Please open the following URL in your browser to view the documentation:\n${docsUrl}`,
},
Date.now(),
);
} else {
context.ui.addItem(
{
type: MessageType.INFO,
text: `Opening documentation in your browser: ${docsUrl}`,
},
Date.now(),
);
await open(docsUrl);
}
},
};


@@ -1,30 +0,0 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { describe, it, expect } from 'vitest';
import { editorCommand } from './editorCommand.js';
// 1. Import the mock context utility
import { createMockCommandContext } from '../../test-utils/mockCommandContext.js';
describe('editorCommand', () => {
it('should return a dialog action to open the editor dialog', () => {
if (!editorCommand.action) {
throw new Error('The editor command must have an action.');
}
const mockContext = createMockCommandContext();
const result = editorCommand.action(mockContext, '');
expect(result).toEqual({
type: 'dialog',
dialog: 'editor',
});
});
it('should have the correct name and description', () => {
expect(editorCommand.name).toBe('editor');
expect(editorCommand.description).toBe('set external editor preference');
});
});


@@ -1,21 +0,0 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import {
CommandKind,
type OpenDialogActionReturn,
type SlashCommand,
} from './types.js';
export const editorCommand: SlashCommand = {
name: 'editor',
description: 'set external editor preference',
kind: CommandKind.BUILT_IN,
action: (): OpenDialogActionReturn => ({
type: 'dialog',
dialog: 'editor',
}),
};


@@ -1,67 +0,0 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { describe, it, expect } from 'vitest';
import { extensionsCommand } from './extensionsCommand.js';
import { type CommandContext } from './types.js';
import { createMockCommandContext } from '../../test-utils/mockCommandContext.js';
import { MessageType } from '../types.js';
describe('extensionsCommand', () => {
let mockContext: CommandContext;
it('should display "No active extensions." when none are found', async () => {
mockContext = createMockCommandContext({
services: {
config: {
getExtensions: () => [],
},
},
});
if (!extensionsCommand.action) throw new Error('Action not defined');
await extensionsCommand.action(mockContext, '');
expect(mockContext.ui.addItem).toHaveBeenCalledWith(
{
type: MessageType.INFO,
text: 'No active extensions.',
},
expect.any(Number),
);
});
it('should list active extensions when they are found', async () => {
const mockExtensions = [
{ name: 'ext-one', version: '1.0.0', isActive: true },
{ name: 'ext-two', version: '2.1.0', isActive: true },
{ name: 'ext-three', version: '3.0.0', isActive: false },
];
mockContext = createMockCommandContext({
services: {
config: {
getExtensions: () => mockExtensions,
},
},
});
if (!extensionsCommand.action) throw new Error('Action not defined');
await extensionsCommand.action(mockContext, '');
const expectedMessage =
'Active extensions:\n\n' +
` - \u001b[36mext-one (v1.0.0)\u001b[0m\n` +
` - \u001b[36mext-two (v2.1.0)\u001b[0m\n`;
expect(mockContext.ui.addItem).toHaveBeenCalledWith(
{
type: MessageType.INFO,
text: expectedMessage,
},
expect.any(Number),
);
});
});


@@ -1,46 +0,0 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import {
type CommandContext,
type SlashCommand,
CommandKind,
} from './types.js';
import { MessageType } from '../types.js';
export const extensionsCommand: SlashCommand = {
name: 'extensions',
description: 'list active extensions',
kind: CommandKind.BUILT_IN,
action: async (context: CommandContext): Promise<void> => {
const activeExtensions = context.services.config
?.getExtensions()
.filter((ext) => ext.isActive);
if (!activeExtensions || activeExtensions.length === 0) {
context.ui.addItem(
{
type: MessageType.INFO,
text: 'No active extensions.',
},
Date.now(),
);
return;
}
const extensionLines = activeExtensions.map(
(ext) => ` - \u001b[36m${ext.name} (v${ext.version})\u001b[0m`,
);
const message = `Active extensions:\n\n${extensionLines.join('\n')}\n`;
context.ui.addItem(
{
type: MessageType.INFO,
text: message,
},
Date.now(),
);
},
};


@@ -32,9 +32,9 @@ describe('helpCommand', () => {
});
it("should also be triggered by its alternative name '?'", () => {
// This test is more conceptual. The routing of altNames to the command
// This test is more conceptual. The routing of altName to the command
// is handled by the slash command processor, but we can assert the
// altNames is correctly defined on the command object itself.
expect(helpCommand.altNames).toContain('?');
// altName is correctly defined on the command object itself.
expect(helpCommand.altName).toBe('?');
});
});


@@ -4,13 +4,12 @@
* SPDX-License-Identifier: Apache-2.0
*/
import { CommandKind, OpenDialogActionReturn, SlashCommand } from './types.js';
import { OpenDialogActionReturn, SlashCommand } from './types.js';
export const helpCommand: SlashCommand = {
name: 'help',
altNames: ['?'],
description: 'for help on Qwen Code',
kind: CommandKind.BUILT_IN,
altName: '?',
description: 'for help on qwen code',
action: (_context, _args): OpenDialogActionReturn => {
console.debug('Opening help UI ...');
return {


@@ -1,270 +0,0 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import {
MockInstance,
vi,
describe,
it,
expect,
beforeEach,
afterEach,
} from 'vitest';
import { ideCommand } from './ideCommand.js';
import { type CommandContext } from './types.js';
import { type Config } from '@qwen-code/qwen-code-core';
import * as child_process from 'child_process';
import { glob } from 'glob';
import { IDEConnectionStatus } from '@qwen-code/qwen-code-core/index.js';
vi.mock('child_process');
vi.mock('glob');
function regexEscape(value: string) {
return value.replace(/[.*+?^${}()|[\]\\]/g, '\\$&');
}
describe('ideCommand', () => {
let mockContext: CommandContext;
let mockConfig: Config;
let execSyncSpy: MockInstance;
let globSyncSpy: MockInstance;
let platformSpy: MockInstance;
beforeEach(() => {
mockContext = {
ui: {
addItem: vi.fn(),
},
} as unknown as CommandContext;
mockConfig = {
getIdeMode: vi.fn(),
getIdeClient: vi.fn(),
} as unknown as Config;
execSyncSpy = vi.spyOn(child_process, 'execSync');
globSyncSpy = vi.spyOn(glob, 'sync');
platformSpy = vi.spyOn(process, 'platform', 'get');
});
afterEach(() => {
vi.restoreAllMocks();
});
it('should return null if ideMode is not enabled', () => {
vi.mocked(mockConfig.getIdeMode).mockReturnValue(false);
const command = ideCommand(mockConfig);
expect(command).toBeNull();
});
it('should return the ide command if ideMode is enabled', () => {
vi.mocked(mockConfig.getIdeMode).mockReturnValue(true);
const command = ideCommand(mockConfig);
expect(command).not.toBeNull();
expect(command?.name).toBe('ide');
expect(command?.subCommands).toHaveLength(2);
expect(command?.subCommands?.[0].name).toBe('status');
expect(command?.subCommands?.[1].name).toBe('install');
});
describe('status subcommand', () => {
const mockGetConnectionStatus = vi.fn();
beforeEach(() => {
vi.mocked(mockConfig.getIdeMode).mockReturnValue(true);
vi.mocked(mockConfig.getIdeClient).mockReturnValue({
getConnectionStatus: mockGetConnectionStatus,
} as ReturnType<Config['getIdeClient']>);
});
it('should show connected status', () => {
mockGetConnectionStatus.mockReturnValue({
status: IDEConnectionStatus.Connected,
});
const command = ideCommand(mockConfig);
const result = command!.subCommands![0].action!(mockContext, '');
expect(mockGetConnectionStatus).toHaveBeenCalled();
expect(result).toEqual({
type: 'message',
messageType: 'info',
content: '🟢 Connected',
});
});
it('should show connecting status', () => {
mockGetConnectionStatus.mockReturnValue({
status: IDEConnectionStatus.Connecting,
});
const command = ideCommand(mockConfig);
const result = command!.subCommands![0].action!(mockContext, '');
expect(mockGetConnectionStatus).toHaveBeenCalled();
expect(result).toEqual({
type: 'message',
messageType: 'info',
content: `🟡 Connecting...`,
});
});
it('should show disconnected status', () => {
mockGetConnectionStatus.mockReturnValue({
status: IDEConnectionStatus.Disconnected,
});
const command = ideCommand(mockConfig);
const result = command!.subCommands![0].action!(mockContext, '');
expect(mockGetConnectionStatus).toHaveBeenCalled();
expect(result).toEqual({
type: 'message',
messageType: 'error',
content: `🔴 Disconnected`,
});
});
it('should show disconnected status with details', () => {
const details = 'Something went wrong';
mockGetConnectionStatus.mockReturnValue({
status: IDEConnectionStatus.Disconnected,
details,
});
const command = ideCommand(mockConfig);
const result = command!.subCommands![0].action!(mockContext, '');
expect(mockGetConnectionStatus).toHaveBeenCalled();
expect(result).toEqual({
type: 'message',
messageType: 'error',
content: `🔴 Disconnected: ${details}`,
});
});
});
describe('install subcommand', () => {
beforeEach(() => {
vi.mocked(mockConfig.getIdeMode).mockReturnValue(true);
platformSpy.mockReturnValue('linux');
});
it('should show an error if VSCode is not installed', async () => {
execSyncSpy.mockImplementation(() => {
throw new Error('Command not found');
});
const command = ideCommand(mockConfig);
await command!.subCommands![1].action!(mockContext, '');
expect(mockContext.ui.addItem).toHaveBeenCalledWith(
expect.objectContaining({
type: 'error',
text: expect.stringMatching(/VS Code command-line tool .* not found/),
}),
expect.any(Number),
);
});
it('should show an error if the VSIX file is not found', async () => {
execSyncSpy.mockReturnValue(''); // VSCode is installed
globSyncSpy.mockReturnValue([]); // No .vsix file found
const command = ideCommand(mockConfig);
await command!.subCommands![1].action!(mockContext, '');
expect(mockContext.ui.addItem).toHaveBeenCalledWith(
expect.objectContaining({
type: 'error',
text: 'Could not find the required VS Code companion extension. Please file a bug via /bug.',
}),
expect.any(Number),
);
});
it('should install the extension if found in the bundle directory', async () => {
const vsixPath = '/path/to/bundle/gemini.vsix';
execSyncSpy.mockReturnValue(''); // VSCode is installed
globSyncSpy.mockReturnValue([vsixPath]); // Found .vsix file
const command = ideCommand(mockConfig);
await command!.subCommands![1].action!(mockContext, '');
expect(globSyncSpy).toHaveBeenCalledWith(
expect.stringContaining('.vsix'),
);
expect(execSyncSpy).toHaveBeenCalledWith(
expect.stringMatching(
new RegExp(
`code(.cmd)? --install-extension ${regexEscape(vsixPath)} --force`,
),
),
{ stdio: 'pipe' },
);
expect(mockContext.ui.addItem).toHaveBeenCalledWith(
expect.objectContaining({
type: 'info',
text: `Installing VS Code companion extension...`,
}),
expect.any(Number),
);
expect(mockContext.ui.addItem).toHaveBeenCalledWith(
expect.objectContaining({
type: 'info',
text: 'VS Code companion extension installed successfully. Restart gemini-cli in a fresh terminal window.',
}),
expect.any(Number),
);
});
it('should install the extension if found in the dev directory', async () => {
const vsixPath = '/path/to/dev/gemini.vsix';
execSyncSpy.mockReturnValue(''); // VSCode is installed
// First glob call for bundle returns nothing, second for dev returns path.
globSyncSpy.mockReturnValueOnce([]).mockReturnValueOnce([vsixPath]);
const command = ideCommand(mockConfig);
await command!.subCommands![1].action!(mockContext, '');
expect(globSyncSpy).toHaveBeenCalledTimes(2);
expect(execSyncSpy).toHaveBeenCalledWith(
expect.stringMatching(
new RegExp(
`code(.cmd)? --install-extension ${regexEscape(vsixPath)} --force`,
),
),
{ stdio: 'pipe' },
);
expect(mockContext.ui.addItem).toHaveBeenCalledWith(
expect.objectContaining({
type: 'info',
text: 'VS Code companion extension installed successfully. Restart gemini-cli in a fresh terminal window.',
}),
expect.any(Number),
);
});
it('should show an error if installation fails', async () => {
const vsixPath = '/path/to/bundle/gemini.vsix';
const errorMessage = 'Installation failed';
execSyncSpy
.mockReturnValueOnce('') // VSCode is installed check
.mockImplementation(() => {
// Installation command
const error: Error & { stderr?: Buffer } = new Error(
'Command failed',
);
error.stderr = Buffer.from(errorMessage);
throw error;
});
globSyncSpy.mockReturnValue([vsixPath]);
const command = ideCommand(mockConfig);
await command!.subCommands![1].action!(mockContext, '');
expect(mockContext.ui.addItem).toHaveBeenCalledWith(
expect.objectContaining({
type: 'error',
text: `Failed to install VS Code companion extension.`,
}),
expect.any(Number),
);
});
});
});

View File

@@ -1,157 +0,0 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { fileURLToPath } from 'url';
import { Config, IDEConnectionStatus } from '@qwen-code/qwen-code-core';
import {
CommandContext,
SlashCommand,
SlashCommandActionReturn,
CommandKind,
} from './types.js';
import * as child_process from 'child_process';
import * as process from 'process';
import { glob } from 'glob';
import * as path from 'path';
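// On Windows the VS Code CLI on PATH is the `code.cmd` wrapper script; on other platforms it is the plain `code` binary.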
const VSCODE_COMMAND = process.platform === 'win32' ? 'code.cmd' : 'code';
const VSCODE_COMPANION_EXTENSION_FOLDER = 'vscode-ide-companion';
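// Returns true when the VS Code CLI can be resolved on PATH (via where.exe on Windows, command -v elsewhere).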
function isVSCodeInstalled(): boolean {
try {
child_process.execSync(
process.platform === 'win32'
? `where.exe ${VSCODE_COMMAND}`
: `command -v ${VSCODE_COMMAND}`,
{ stdio: 'ignore' },
);
return true;
} catch {
return false;
}
}
export const ideCommand = (config: Config | null): SlashCommand | null => {
if (!config?.getIdeMode()) {
return null;
}
return {
name: 'ide',
description: 'manage IDE integration',
kind: CommandKind.BUILT_IN,
subCommands: [
{
name: 'status',
description: 'check status of IDE integration',
kind: CommandKind.BUILT_IN,
action: (_context: CommandContext): SlashCommandActionReturn => {
const connection = config.getIdeClient()?.getConnectionStatus();
switch (connection?.status) {
case IDEConnectionStatus.Connected:
return {
type: 'message',
messageType: 'info',
content: `🟢 Connected`,
} as const;
case IDEConnectionStatus.Connecting:
return {
type: 'message',
messageType: 'info',
content: `🟡 Connecting...`,
} as const;
default: {
let content = `🔴 Disconnected`;
if (connection?.details) {
content += `: ${connection.details}`;
}
return {
type: 'message',
messageType: 'error',
content,
} as const;
}
}
},
},
{
name: 'install',
description: 'install required VS Code companion extension',
kind: CommandKind.BUILT_IN,
action: async (context) => {
if (!isVSCodeInstalled()) {
context.ui.addItem(
{
type: 'error',
text: `VS Code command-line tool "${VSCODE_COMMAND}" not found in your PATH.`,
},
Date.now(),
);
return;
}
const bundleDir = path.dirname(fileURLToPath(import.meta.url));
// The VSIX file is copied to the bundle directory as part of the build.
let vsixFiles = glob.sync(path.join(bundleDir, '*.vsix'));
if (vsixFiles.length === 0) {
// If the VSIX file is not in the bundle, it might be a dev
// environment running with `npm start`. Look for it in the original
// package location, relative to the bundle dir.
const devPath = path.join(
bundleDir,
'..',
'..',
'..',
'..',
'..',
VSCODE_COMPANION_EXTENSION_FOLDER,
'*.vsix',
);
vsixFiles = glob.sync(devPath);
}
if (vsixFiles.length === 0) {
context.ui.addItem(
{
type: 'error',
text: 'Could not find the required VS Code companion extension. Please file a bug via /bug.',
},
Date.now(),
);
return;
}
const vsixPath = vsixFiles[0];
const command = `${VSCODE_COMMAND} --install-extension ${vsixPath} --force`;
context.ui.addItem(
{
type: 'info',
text: `Installing VS Code companion extension...`,
},
Date.now(),
);
try {
child_process.execSync(command, { stdio: 'pipe' });
context.ui.addItem(
{
type: 'info',
text: 'VS Code companion extension installed successfully. Restart gemini-cli in a fresh terminal window.',
},
Date.now(),
);
} catch (_error) {
context.ui.addItem(
{
type: 'error',
text: `Failed to install VS Code companion extension.`,
},
Date.now(),
);
}
},
},
],
};
};

File diff suppressed because it is too large

View File

@@ -1,524 +0,0 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import {
SlashCommand,
SlashCommandActionReturn,
CommandContext,
CommandKind,
MessageActionReturn,
} from './types.js';
import {
DiscoveredMCPPrompt,
DiscoveredMCPTool,
getMCPDiscoveryState,
getMCPServerStatus,
MCPDiscoveryState,
MCPServerStatus,
mcpServerRequiresOAuth,
getErrorMessage,
} from '@qwen-code/qwen-code-core';
import open from 'open';
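// ANSI escape sequences used to colorize the /mcp status output in the terminal.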
const COLOR_GREEN = '\u001b[32m';
const COLOR_YELLOW = '\u001b[33m';
const COLOR_RED = '\u001b[31m';
const COLOR_CYAN = '\u001b[36m';
const COLOR_GREY = '\u001b[90m';
const RESET_COLOR = '\u001b[0m';
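// Builds the formatted /mcp status report: per-server connection state, OAuth status, tool and prompt counts,
// plus optional descriptions, parameter schemas, and usage tips.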
const getMcpStatus = async (
context: CommandContext,
showDescriptions: boolean,
showSchema: boolean,
showTips: boolean = false,
): Promise<SlashCommandActionReturn> => {
const { config } = context.services;
if (!config) {
return {
type: 'message',
messageType: 'error',
content: 'Config not loaded.',
};
}
const toolRegistry = await config.getToolRegistry();
if (!toolRegistry) {
return {
type: 'message',
messageType: 'error',
content: 'Could not retrieve tool registry.',
};
}
const mcpServers = config.getMcpServers() || {};
const serverNames = Object.keys(mcpServers);
const blockedMcpServers = config.getBlockedMcpServers() || [];
if (serverNames.length === 0 && blockedMcpServers.length === 0) {
const docsUrl = 'https://goo.gle/gemini-cli-docs-mcp';
if (process.env.SANDBOX && process.env.SANDBOX !== 'sandbox-exec') {
return {
type: 'message',
messageType: 'info',
content: `No MCP servers configured. Please open the following URL in your browser to view documentation:\n${docsUrl}`,
};
} else {
// Open the URL in the browser
await open(docsUrl);
return {
type: 'message',
messageType: 'info',
content: `No MCP servers configured. Opening documentation in your browser: ${docsUrl}`,
};
}
}
// Check if any servers are still connecting
const connectingServers = serverNames.filter(
(name) => getMCPServerStatus(name) === MCPServerStatus.CONNECTING,
);
const discoveryState = getMCPDiscoveryState();
let message = '';
// Add overall discovery status message if needed
if (
discoveryState === MCPDiscoveryState.IN_PROGRESS ||
connectingServers.length > 0
) {
message += `${COLOR_YELLOW}⏳ MCP servers are starting up (${connectingServers.length} initializing)...${RESET_COLOR}\n`;
message += `${COLOR_CYAN}Note: First startup may take longer. Tool availability will update automatically.${RESET_COLOR}\n\n`;
}
message += 'Configured MCP servers:\n\n';
const allTools = toolRegistry.getAllTools();
for (const serverName of serverNames) {
const serverTools = allTools.filter(
(tool) =>
tool instanceof DiscoveredMCPTool && tool.serverName === serverName,
) as DiscoveredMCPTool[];
const promptRegistry = await config.getPromptRegistry();
const serverPrompts = promptRegistry.getPromptsByServer(serverName) || [];
const status = getMCPServerStatus(serverName);
// Add status indicator with descriptive text
let statusIndicator = '';
let statusText = '';
switch (status) {
case MCPServerStatus.CONNECTED:
statusIndicator = '🟢';
statusText = 'Ready';
break;
case MCPServerStatus.CONNECTING:
statusIndicator = '🔄';
statusText = 'Starting... (first startup may take longer)';
break;
case MCPServerStatus.DISCONNECTED:
default:
statusIndicator = '🔴';
statusText = 'Disconnected';
break;
}
// Get server description if available
const server = mcpServers[serverName];
let serverDisplayName = serverName;
if (server.extensionName) {
serverDisplayName += ` (from ${server.extensionName})`;
}
// Format server header with bold formatting and status
message += `${statusIndicator} \u001b[1m${serverDisplayName}\u001b[0m - ${statusText}`;
let needsAuthHint = mcpServerRequiresOAuth.get(serverName) || false;
// Add OAuth status if applicable
if (server?.oauth?.enabled) {
needsAuthHint = true;
try {
const { MCPOAuthTokenStorage } = await import(
'@qwen-code/qwen-code-core'
);
const hasToken = await MCPOAuthTokenStorage.getToken(serverName);
if (hasToken) {
const isExpired = MCPOAuthTokenStorage.isTokenExpired(hasToken.token);
if (isExpired) {
message += ` ${COLOR_YELLOW}(OAuth token expired)${RESET_COLOR}`;
} else {
message += ` ${COLOR_GREEN}(OAuth authenticated)${RESET_COLOR}`;
needsAuthHint = false;
}
} else {
message += ` ${COLOR_RED}(OAuth not authenticated)${RESET_COLOR}`;
}
} catch (_err) {
// If we can't check OAuth status, just continue
}
}
// Add tool count with conditional messaging
if (status === MCPServerStatus.CONNECTED) {
const parts = [];
if (serverTools.length > 0) {
parts.push(
`${serverTools.length} ${serverTools.length === 1 ? 'tool' : 'tools'}`,
);
}
if (serverPrompts.length > 0) {
parts.push(
`${serverPrompts.length} ${
serverPrompts.length === 1 ? 'prompt' : 'prompts'
}`,
);
}
if (parts.length > 0) {
message += ` (${parts.join(', ')})`;
} else {
message += ` (0 tools)`;
}
} else if (status === MCPServerStatus.CONNECTING) {
message += ` (tools and prompts will appear when ready)`;
} else {
message += ` (${serverTools.length} tools cached)`;
}
// Add server description with proper handling of multi-line descriptions
if (showDescriptions && server?.description) {
const descLines = server.description.trim().split('\n');
if (descLines) {
message += ':\n';
for (const descLine of descLines) {
message += ` ${COLOR_GREEN}${descLine}${RESET_COLOR}\n`;
}
} else {
message += '\n';
}
} else {
message += '\n';
}
// Reset formatting after server entry
message += RESET_COLOR;
if (serverTools.length > 0) {
message += ` ${COLOR_CYAN}Tools:${RESET_COLOR}\n`;
serverTools.forEach((tool) => {
if (showDescriptions && tool.description) {
// Format tool name in cyan using simple ANSI cyan color
message += ` - ${COLOR_CYAN}${tool.name}${RESET_COLOR}`;
// Handle multi-line descriptions by properly indenting and preserving formatting
const descLines = tool.description.trim().split('\n');
if (descLines) {
message += ':\n';
for (const descLine of descLines) {
message += ` ${COLOR_GREEN}${descLine}${RESET_COLOR}\n`;
}
} else {
message += '\n';
}
// Reset is handled inline with each line now
} else {
// Use cyan color for the tool name even when not showing descriptions
message += ` - ${COLOR_CYAN}${tool.name}${RESET_COLOR}\n`;
}
const parameters =
tool.schema.parametersJsonSchema ?? tool.schema.parameters;
if (showSchema && parameters) {
// Prefix the parameters in cyan
message += ` ${COLOR_CYAN}Parameters:${RESET_COLOR}\n`;
const paramsLines = JSON.stringify(parameters, null, 2)
.trim()
.split('\n');
if (paramsLines) {
for (const paramsLine of paramsLines) {
message += ` ${COLOR_GREEN}${paramsLine}${RESET_COLOR}\n`;
}
}
}
});
}
if (serverPrompts.length > 0) {
if (serverTools.length > 0) {
message += '\n';
}
message += ` ${COLOR_CYAN}Prompts:${RESET_COLOR}\n`;
serverPrompts.forEach((prompt: DiscoveredMCPPrompt) => {
if (showDescriptions && prompt.description) {
message += ` - ${COLOR_CYAN}${prompt.name}${RESET_COLOR}`;
const descLines = prompt.description.trim().split('\n');
if (descLines) {
message += ':\n';
for (const descLine of descLines) {
message += ` ${COLOR_GREEN}${descLine}${RESET_COLOR}\n`;
}
} else {
message += '\n';
}
} else {
message += ` - ${COLOR_CYAN}${prompt.name}${RESET_COLOR}\n`;
}
});
}
if (serverTools.length === 0 && serverPrompts.length === 0) {
message += ' No tools or prompts available\n';
} else if (serverTools.length === 0) {
message += ' No tools available';
if (status === MCPServerStatus.DISCONNECTED && needsAuthHint) {
message += ` ${COLOR_GREY}(type: "/mcp auth ${serverName}" to authenticate this server)${RESET_COLOR}`;
}
message += '\n';
} else if (status === MCPServerStatus.DISCONNECTED && needsAuthHint) {
// This case is for when serverTools.length > 0
message += ` ${COLOR_GREY}(type: "/mcp auth ${serverName}" to authenticate this server)${RESET_COLOR}\n`;
}
message += '\n';
}
for (const server of blockedMcpServers) {
let serverDisplayName = server.name;
if (server.extensionName) {
serverDisplayName += ` (from ${server.extensionName})`;
}
message += `🔴 \u001b[1m${serverDisplayName}\u001b[0m - Blocked\n\n`;
}
// Add helpful tips when no arguments are provided
if (showTips) {
message += '\n';
message += `${COLOR_CYAN}💡 Tips:${RESET_COLOR}\n`;
message += ` • Use ${COLOR_CYAN}/mcp desc${RESET_COLOR} to show server and tool descriptions\n`;
message += ` • Use ${COLOR_CYAN}/mcp schema${RESET_COLOR} to show tool parameter schemas\n`;
message += ` • Use ${COLOR_CYAN}/mcp nodesc${RESET_COLOR} to hide descriptions\n`;
message += ` • Use ${COLOR_CYAN}/mcp auth <server-name>${RESET_COLOR} to authenticate with OAuth-enabled servers\n`;
message += ` • Press ${COLOR_CYAN}Ctrl+T${RESET_COLOR} to toggle tool descriptions on/off\n`;
message += '\n';
}
// Make sure to reset any ANSI formatting at the end to prevent it from affecting the terminal
message += RESET_COLOR;
return {
type: 'message',
messageType: 'info',
content: message,
};
};
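// /mcp auth: with no arguments, lists the servers configured for OAuth; otherwise runs the OAuth flow
// for the named server and then re-discovers its tools.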
const authCommand: SlashCommand = {
name: 'auth',
description: 'Authenticate with an OAuth-enabled MCP server',
kind: CommandKind.BUILT_IN,
action: async (
context: CommandContext,
args: string,
): Promise<MessageActionReturn> => {
const serverName = args.trim();
const { config } = context.services;
if (!config) {
return {
type: 'message',
messageType: 'error',
content: 'Config not loaded.',
};
}
const mcpServers = config.getMcpServers() || {};
if (!serverName) {
// List servers that support OAuth
const oauthServers = Object.entries(mcpServers)
.filter(([_, server]) => server.oauth?.enabled)
.map(([name, _]) => name);
if (oauthServers.length === 0) {
return {
type: 'message',
messageType: 'info',
content: 'No MCP servers configured with OAuth authentication.',
};
}
return {
type: 'message',
messageType: 'info',
content: `MCP servers with OAuth authentication:\n${oauthServers.map((s) => ` - ${s}`).join('\n')}\n\nUse /mcp auth <server-name> to authenticate.`,
};
}
const server = mcpServers[serverName];
if (!server) {
return {
type: 'message',
messageType: 'error',
content: `MCP server '${serverName}' not found.`,
};
}
// Always attempt OAuth authentication, even if not explicitly configured
// The authentication process will discover OAuth requirements automatically
try {
context.ui.addItem(
{
type: 'info',
text: `Starting OAuth authentication for MCP server '${serverName}'...`,
},
Date.now(),
);
// Import dynamically to avoid circular dependencies
const { MCPOAuthProvider } = await import('@qwen-code/qwen-code-core');
let oauthConfig = server.oauth;
if (!oauthConfig) {
oauthConfig = { enabled: false };
}
// Pass the MCP server URL for OAuth discovery
const mcpServerUrl = server.httpUrl || server.url;
await MCPOAuthProvider.authenticate(
serverName,
oauthConfig,
mcpServerUrl,
);
context.ui.addItem(
{
type: 'info',
text: `✅ Successfully authenticated with MCP server '${serverName}'!`,
},
Date.now(),
);
// Trigger tool re-discovery to pick up authenticated server
const toolRegistry = await config.getToolRegistry();
if (toolRegistry) {
context.ui.addItem(
{
type: 'info',
text: `Re-discovering tools from '${serverName}'...`,
},
Date.now(),
);
await toolRegistry.discoverToolsForServer(serverName);
}
// Update the client with the new tools
const geminiClient = config.getGeminiClient();
if (geminiClient) {
await geminiClient.setTools();
}
return {
type: 'message',
messageType: 'info',
content: `Successfully authenticated and refreshed tools for '${serverName}'.`,
};
} catch (error) {
return {
type: 'message',
messageType: 'error',
content: `Failed to authenticate with MCP server '${serverName}': ${getErrorMessage(error)}`,
};
}
},
completion: async (context: CommandContext, partialArg: string) => {
const { config } = context.services;
if (!config) return [];
const mcpServers = config.getMcpServers() || {};
return Object.keys(mcpServers).filter((name) =>
name.startsWith(partialArg),
);
},
};
const listCommand: SlashCommand = {
name: 'list',
description: 'List configured MCP servers and tools',
kind: CommandKind.BUILT_IN,
action: async (context: CommandContext, args: string) => {
const lowerCaseArgs = args.toLowerCase().split(/\s+/).filter(Boolean);
const hasDesc =
lowerCaseArgs.includes('desc') || lowerCaseArgs.includes('descriptions');
const hasNodesc =
lowerCaseArgs.includes('nodesc') ||
lowerCaseArgs.includes('nodescriptions');
const showSchema = lowerCaseArgs.includes('schema');
// Show descriptions if `desc` or `schema` is present,
// but `nodesc` takes precedence and disables them.
const showDescriptions = !hasNodesc && (hasDesc || showSchema);
// Show tips only when no arguments are provided
const showTips = lowerCaseArgs.length === 0;
return getMcpStatus(context, showDescriptions, showSchema, showTips);
},
};
const refreshCommand: SlashCommand = {
name: 'refresh',
description: 'Refresh the list of MCP servers and tools',
kind: CommandKind.BUILT_IN,
action: async (
context: CommandContext,
): Promise<SlashCommandActionReturn> => {
const { config } = context.services;
if (!config) {
return {
type: 'message',
messageType: 'error',
content: 'Config not loaded.',
};
}
const toolRegistry = await config.getToolRegistry();
if (!toolRegistry) {
return {
type: 'message',
messageType: 'error',
content: 'Could not retrieve tool registry.',
};
}
context.ui.addItem(
{
type: 'info',
text: 'Refreshing MCP servers and tools...',
},
Date.now(),
);
await toolRegistry.discoverMcpTools();
// Update the client with the new tools
const geminiClient = config.getGeminiClient();
if (geminiClient) {
await geminiClient.setTools();
}
return getMcpStatus(context, false, false, false);
},
};
export const mcpCommand: SlashCommand = {
name: 'mcp',
description:
'list configured MCP servers and tools, or authenticate with OAuth-enabled servers',
kind: CommandKind.BUILT_IN,
subCommands: [listCommand, authCommand, refreshCommand],
// Default action when no subcommand is provided
action: async (context: CommandContext, args: string) =>
// If no subcommand, run the list command
listCommand.action!(context, args),
};

Some files were not shown because too many files have changed in this diff