Merge tag 'v0.3.0' into chore/sync-gemini-cli-v0.3.0

Author: mingholy.lmh
Date: 2025-09-10 21:01:40 +08:00
583 changed files with 30160 additions and 10770 deletions


@@ -0,0 +1 @@
action: 'log'


@@ -26,15 +26,11 @@ steps:
- |-
SHELL_TAG_NAME="$TAG_NAME"
FINAL_TAG="$SHORT_SHA" # Default to SHA
if [[ "$$SHELL_TAG_NAME" == *"-nightly"* ]]; then
echo "Nightly release detected."
FINAL_TAG="$${SHELL_TAG_NAME#v}"
# Also escape the variable in the regex match
elif [[ "$$SHELL_TAG_NAME" =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
echo "Official release detected."
if [[ "$$SHELL_TAG_NAME" =~ ^v[0-9]+\.[0-9]+\.[0-9]+(-[a-zA-Z0-9.-]+)?$ ]]; then
echo "Release detected."
FINAL_TAG="$${SHELL_TAG_NAME#v}"
else
echo "Development/RC release detected. Using commit SHA as tag."
echo "Development release detected. Using commit SHA as tag."
fi
echo "Determined image tag: $$FINAL_TAG"
echo "$$FINAL_TAG" > /workspace/image_tag.txt


@@ -32,6 +32,9 @@ body:
description: 'Please paste the full text from the `/about` command run from Qwen Code. Also include which platform (macOS, Windows, Linux).'
value: |
<details>
<summary>Client Information</summary>
Run `qwen` to enter the interactive CLI, then run the `/about` command.
```console
$ qwen /about

.github/dependabot.yml (new file)

@@ -0,0 +1,35 @@
# See https://docs.github.com/en/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file
version: 2
updates:
- package-ecosystem: 'npm'
directory: '/'
schedule:
interval: 'daily'
target-branch: 'main'
commit-message:
prefix: 'chore(deps)'
include: 'scope'
reviewers:
- 'google-gemini/gemini-cli-askmode-approvers'
groups:
# Group all non-major updates together.
# This is to reduce the number of PRs that need to be reviewed.
# Major updates will still be created as separate PRs.
npm-minor-patch:
applies-to: 'version-updates'
update-types:
- 'minor'
- 'patch'
open-pull-requests-limit: 0
- package-ecosystem: 'github-actions'
directory: '/'
schedule:
interval: 'daily'
target-branch: 'main'
commit-message:
prefix: 'chore(deps)'
include: 'scope'
reviewers:
- 'google-gemini/gemini-cli-askmode-approvers'
open-pull-requests-limit: 0


@@ -30,6 +30,10 @@ jobs:
with:
app-id: '${{ secrets.APP_ID }}'
private-key: '${{ secrets.PRIVATE_KEY }}'
permission-issues: 'write'
permission-pull-requests: 'read'
permission-discussions: 'read'
permission-contents: 'read'
- name: 'Generate Report 📜'
id: 'report'
@@ -164,7 +168,7 @@ jobs:
- name: '🤖 Get Insights from Report'
if: |-
${{ steps.report.outputs.report_body != '' }}
uses: 'google-github-actions/run-gemini-cli@06123c6a203eb7a964ce3be7c48479cc66059f23' # ratchet:google-github-actions/run-gemini-cli@v0
uses: 'google-github-actions/run-gemini-cli@a3bf79042542528e91937b3a3a6fbc4967ee3c31' # ratchet:google-github-actions/run-gemini-cli@v0
env:
GITHUB_TOKEN: '${{ steps.generate_token.outputs.token }}'
REPOSITORY: '${{ github.repository }}'

.github/workflows/eval.yml (new file)

@@ -0,0 +1,29 @@
name: 'Eval'
on:
workflow_dispatch:
jobs:
eval:
name: 'Eval'
runs-on: 'ubuntu-latest'
strategy:
matrix:
node-version:
- '20.x'
- '22.x'
- '24.x'
steps:
- name: 'Set up Node.js ${{ matrix.node-version }}'
uses: 'actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020' # ratchet:actions/setup-node@v4
with:
node-version: '${{ matrix.node-version }}'
cache: 'npm'
- name: 'Set up Python'
uses: 'actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065' # ratchet:actions/setup-python@v5
with:
python-version: '3.11'
- name: 'Install and configure Poetry'
uses: 'snok/install-poetry@76e04a911780d5b312d89783f7b1cd627778900a' # ratchet:snok/install-poetry@v1


@@ -0,0 +1,262 @@
name: '🏷️ Gemini Automated Issue Deduplication'
on:
issues:
types:
- 'opened'
- 'reopened'
issue_comment:
types:
- 'created'
workflow_dispatch:
inputs:
issue_number:
description: 'issue number to dedup'
required: true
type: 'number'
concurrency:
group: '${{ github.workflow }}-${{ github.event.issue.number }}'
cancel-in-progress: true
defaults:
run:
shell: 'bash'
jobs:
find-duplicates:
if: |-
github.repository == 'google-gemini/gemini-cli' &&
vars.TRIAGE_DEDUPLICATE_ISSUES != '' &&
(github.event_name == 'issues' ||
github.event_name == 'workflow_dispatch' ||
(github.event_name == 'issue_comment' &&
contains(github.event.comment.body, '@gemini-cli /deduplicate') &&
(github.event.comment.author_association == 'OWNER' ||
github.event.comment.author_association == 'MEMBER' ||
github.event.comment.author_association == 'COLLABORATOR')))
permissions:
contents: 'read'
id-token: 'write' # Required for WIF, see https://docs.github.com/en/actions/how-tos/secure-your-work/security-harden-deployments/oidc-in-google-cloud-platform#adding-permissions-settings
issues: 'read'
statuses: 'read'
packages: 'read'
timeout-minutes: 20
runs-on: 'ubuntu-latest'
outputs:
duplicate_issues_csv: '${{ env.DUPLICATE_ISSUES_CSV }}'
steps:
- name: 'Checkout'
uses: 'actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8' # ratchet:actions/checkout@v5
- name: 'Log in to GitHub Container Registry'
uses: 'docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1' # ratchet:docker/login-action@v3
with:
registry: 'ghcr.io'
username: '${{ github.actor }}'
password: '${{ secrets.GITHUB_TOKEN }}'
- name: 'Find Duplicate Issues'
uses: 'google-github-actions/run-gemini-cli@a3bf79042542528e91937b3a3a6fbc4967ee3c31' # ratchet:google-github-actions/run-gemini-cli@v0
id: 'gemini_issue_deduplication'
env:
GITHUB_TOKEN: '${{ secrets.GITHUB_TOKEN }}'
ISSUE_TITLE: '${{ github.event.issue.title }}'
ISSUE_BODY: '${{ github.event.issue.body }}'
ISSUE_NUMBER: '${{ github.event.issue.number }}'
REPOSITORY: '${{ github.repository }}'
FIRESTORE_PROJECT: '${{ vars.FIRESTORE_PROJECT }}'
with:
gcp_workload_identity_provider: '${{ vars.GCP_WIF_PROVIDER }}'
gcp_project_id: '${{ vars.GOOGLE_CLOUD_PROJECT }}'
gcp_location: '${{ vars.GOOGLE_CLOUD_LOCATION }}'
gcp_service_account: '${{ vars.SERVICE_ACCOUNT_EMAIL }}'
gemini_api_key: '${{ secrets.GEMINI_API_KEY }}'
use_vertex_ai: '${{ vars.GOOGLE_GENAI_USE_VERTEXAI }}'
use_gemini_code_assist: '${{ vars.GOOGLE_GENAI_USE_GCA }}'
settings: |-
{
"mcpServers": {
"issue_deduplication": {
"command": "docker",
"args": [
"run",
"-i",
"--rm",
"--network", "host",
"-e", "GITHUB_TOKEN",
"-e", "GEMINI_API_KEY",
"-e", "DATABASE_TYPE",
"-e", "FIRESTORE_DATABASE_ID",
"-e", "GCP_PROJECT",
"-e", "GOOGLE_APPLICATION_CREDENTIALS=/app/gcp-credentials.json",
"-v", "${GOOGLE_APPLICATION_CREDENTIALS}:/app/gcp-credentials.json",
"ghcr.io/google-gemini/gemini-cli-issue-triage@sha256:e3de1523f6c83aabb3c54b76d08940a2bf42febcb789dd2da6f95169641f94d3"
],
"env": {
"GITHUB_TOKEN": "${GITHUB_TOKEN}",
"GEMINI_API_KEY": "${{ secrets.GEMINI_API_KEY }}",
"DATABASE_TYPE":"firestore",
"GCP_PROJECT": "${FIRESTORE_PROJECT}",
"FIRESTORE_DATABASE_ID": "(default)",
"GOOGLE_APPLICATION_CREDENTIALS": "${GOOGLE_APPLICATION_CREDENTIALS}"
},
"enabled": true,
"timeout": 600000
}
},
"maxSessionTurns": 25,
"coreTools": [
"run_shell_command(echo)",
"run_shell_command(gh issue view)"
],
"telemetry": {
"enabled": true,
"target": "gcp"
}
}
prompt: |-
## Role
You are an issue de-duplication assistant. Your goal is to find
duplicate issues for a given issue.
## Steps
1. **Find Potential Duplicates:**
- The repository is ${{ github.repository }} and the issue number is ${{ github.event.issue.number }}.
- Use the `duplicates` tool with the `repo` and `issue_number` to find potential duplicates for the current issue. Do not use the `threshold` parameter.
- If no duplicates are found, you are done.
- Print the JSON output from the `duplicates` tool to the logs.
2. **Refine Duplicates List (if necessary):**
- If the `duplicates` tool returns between 1 and 14 results, you must refine the list.
- For each potential duplicate issue, run `gh issue view <issue-number> --json title,body,comments` to fetch its content.
- Also fetch the content of the original issue: `gh issue view "${ISSUE_NUMBER}" --json title,body,comments`.
- Carefully analyze the content (title, body, comments) of the original issue and all potential duplicates.
- It is very important that, if the comments on either issue mention they are not duplicates of each other, you treat them as not duplicates.
- Based on your analysis, create a final list containing only the issues you are highly confident are actual duplicates.
- If your final list is empty, you are done.
- Print to the logs if you omitted any potential duplicates based on your analysis.
- If the `duplicates` tool returned 15+ results, use the top 15 matches (based on descending similarity score value) to perform this step.
3. **Output final duplicates list as CSV:**
- Convert the list of appropriate duplicate issue numbers into a comma-separated list (CSV). If there are no appropriate duplicates, use the empty string.
- Use the "echo" shell command to append the CSV of issue numbers into the filepath referenced by the environment variable "${GITHUB_ENV}":
echo "DUPLICATE_ISSUES_CSV=[DUPLICATE_ISSUES_AS_CSV]" >> "${GITHUB_ENV}"
## Guidelines
- Only use the `duplicates` and `run_shell_command` tools.
- The `run_shell_command` tool can be used with `gh issue view`.
- Do not download or read media files like images, videos, or links. The `--json` flag for `gh issue view` will prevent this.
- Do not modify the issue content or status.
- Do not add comments or labels.
- Reference all shell variables as "${VAR}" (with quotes and braces).
add-comment-and-label:
needs: 'find-duplicates'
if: |-
github.repository == 'google-gemini/gemini-cli' &&
vars.TRIAGE_DEDUPLICATE_ISSUES != '' &&
needs.find-duplicates.outputs.duplicate_issues_csv != '' &&
(
github.event_name == 'issues' ||
github.event_name == 'workflow_dispatch' ||
(
github.event_name == 'issue_comment' &&
contains(github.event.comment.body, '@gemini-cli /deduplicate') &&
(
github.event.comment.author_association == 'OWNER' ||
github.event.comment.author_association == 'MEMBER' ||
github.event.comment.author_association == 'COLLABORATOR'
)
)
)
permissions:
issues: 'write'
timeout-minutes: 5
runs-on: 'ubuntu-latest'
steps:
- name: 'Generate GitHub App Token'
id: 'generate_token'
uses: 'actions/create-github-app-token@a8d616148505b5069dccd32f177bb87d7f39123b' # ratchet:actions/create-github-app-token@v2
with:
app-id: '${{ secrets.APP_ID }}'
private-key: '${{ secrets.PRIVATE_KEY }}'
permission-issues: 'write'
- name: 'Comment and Label Duplicate Issue'
uses: 'actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea'
env:
DUPLICATES_OUTPUT: '${{ needs.find-duplicates.outputs.duplicate_issues_csv }}'
with:
github-token: '${{ steps.generate_token.outputs.token || secrets.GITHUB_TOKEN }}'
script: |-
const rawCsv = process.env.DUPLICATES_OUTPUT;
core.info(`Raw duplicates CSV: ${rawCsv}`);
const duplicateIssues = rawCsv.split(',').map(s => s.trim()).filter(s => s);
if (duplicateIssues.length === 0) {
core.info('No duplicate issues found. Nothing to do.');
return;
}
const issueNumber = ${{ github.event.issue.number }};
function formatCommentBody(issues, updated = false) {
const header = updated
? 'Found possible duplicate issues (updated):'
: 'Found possible duplicate issues:';
const issuesList = issues.map(num => `- #${num}`).join('\n');
const footer = 'If you believe this is not a duplicate, please remove the `status/possible-duplicate` label.';
const magicComment = '<!-- gemini-cli-deduplication -->';
return `${header}\n\n${issuesList}\n\n${footer}\n${magicComment}`;
}
const newCommentBody = formatCommentBody(duplicateIssues);
const newUpdatedCommentBody = formatCommentBody(duplicateIssues, true);
const { data: comments } = await github.rest.issues.listComments({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: issueNumber,
});
const magicComment = '<!-- gemini-cli-deduplication -->';
const existingComment = comments.find(comment =>
comment.user.type === 'Bot' && comment.body.includes(magicComment)
);
let commentMade = false;
if (existingComment) {
// To check whether the lists are the same, compare the formatted bodies without their headers.
const existingBodyForCompare = existingComment.body.substring(existingComment.body.indexOf('- #'));
const newBodyForCompare = newCommentBody.substring(newCommentBody.indexOf('- #'));
if (existingBodyForCompare.trim() !== newBodyForCompare.trim()) {
core.info(`Updating existing comment ${existingComment.id}`);
await github.rest.issues.updateComment({
owner: context.repo.owner,
repo: context.repo.repo,
comment_id: existingComment.id,
body: newUpdatedCommentBody,
});
commentMade = true;
} else {
core.info('Existing comment is up-to-date. Nothing to do.');
}
} else {
core.info('Creating new comment.');
await github.rest.issues.createComment({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: issueNumber,
body: newCommentBody,
});
commentMade = true;
}
if (commentMade) {
core.info('Adding "status/possible-duplicate" label.');
await github.rest.issues.addLabels({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: issueNumber,
labels: ['status/possible-duplicate'],
});
}


@@ -46,6 +46,7 @@ jobs:
ISSUE_BODY: '${{ github.event.issue.body }}'
ISSUE_NUMBER: '${{ github.event.issue.number }}'
REPOSITORY: '${{ github.repository }}'
AVAILABLE_LABELS: '${{ steps.get_labels.outputs.available_labels }}'
with:
OPENAI_API_KEY: '${{ secrets.OPENAI_API_KEY }}'
OPENAI_BASE_URL: '${{ secrets.OPENAI_BASE_URL }}'
@@ -61,8 +62,10 @@ jobs:
prompt: |-
## Role
You are an issue triage assistant. Analyze the current GitHub issue and apply the most appropriate existing labels. Use the available
tools to gather information; do not ask for information to be provided. Do not remove labels titled help wanted or good first issue.
You are an issue triage assistant. Analyze the current GitHub issue
and identify the most appropriate existing labels. Use the available
tools to gather information; do not ask for information to be
provided. Do not remove labels titled help wanted or good first issue.
## Steps
@@ -77,13 +80,17 @@ jobs:
## Guidelines
- Only use labels that already exist in the repository.
- Do not add comments or modify the issue content.
- Triage only the current issue.
- Apply only one area/ label.
- Apply only one kind/ label.
- Apply all applicable sub-area/* and priority/* labels based on the issue content. It's ok to have multiple of these.
- Once you categorize the issue if it needs information bump down the priority by 1 eg.. a p0 would become a p1 a p1 would become a p2. P2 and P3 can stay as is in this scenario.
- Only use labels that already exist in the repository
- Do not add comments or modify the issue content
- Triage only the current issue
- Identify only one area/ label
- Identify only one kind/ label
- Identify all applicable sub-area/* and priority/* labels based on the issue content. It's ok to have multiple of these
- Once you categorize the issue, if it needs information, bump the priority down by 1, e.g. a p0 would become a p1 and a p1 would become a p2. P2 and P3 can stay as is in this scenario
- Reference all shell variables as "${VAR}" (with quotes and braces)
- Output only valid JSON format
- Do not include any explanation or additional text, just the JSON
Categorization Guidelines:
P0: Critical / Blocker
- A P0 bug is a catastrophic failure that demands immediate attention. It represents a complete showstopper for a significant portion of users or for the development process itself.


@@ -0,0 +1,116 @@
name: '📋 Gemini Scheduled Issue Deduplication'
on:
schedule:
- cron: '0 * * * *' # Runs every hour
workflow_dispatch:
concurrency:
group: '${{ github.workflow }}'
cancel-in-progress: true
defaults:
run:
shell: 'bash'
jobs:
refresh-embeddings:
if: |-
${{ vars.TRIAGE_DEDUPLICATE_ISSUES != '' && github.repository == 'google-gemini/gemini-cli' }}
permissions:
contents: 'read'
id-token: 'write' # Required for WIF, see https://docs.github.com/en/actions/how-tos/secure-your-work/security-harden-deployments/oidc-in-google-cloud-platform#adding-permissions-settings
issues: 'read'
statuses: 'read'
packages: 'read'
timeout-minutes: 20
runs-on: 'ubuntu-latest'
steps:
- name: 'Checkout'
uses: 'actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8' # ratchet:actions/checkout@v5
- name: 'Log in to GitHub Container Registry'
uses: 'docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1' # ratchet:docker/login-action@v3
with:
registry: 'ghcr.io'
username: '${{ github.actor }}'
password: '${{ secrets.GITHUB_TOKEN }}'
- name: 'Run Gemini Issue Deduplication Refresh'
uses: 'google-github-actions/run-gemini-cli@a3bf79042542528e91937b3a3a6fbc4967ee3c31' # ratchet:google-github-actions/run-gemini-cli@v0
id: 'gemini_refresh_embeddings'
env:
GITHUB_TOKEN: '${{ secrets.GITHUB_TOKEN }}'
ISSUE_TITLE: '${{ github.event.issue.title }}'
ISSUE_BODY: '${{ github.event.issue.body }}'
ISSUE_NUMBER: '${{ github.event.issue.number }}'
REPOSITORY: '${{ github.repository }}'
FIRESTORE_PROJECT: '${{ vars.FIRESTORE_PROJECT }}'
with:
gcp_workload_identity_provider: '${{ vars.GCP_WIF_PROVIDER }}'
gcp_project_id: '${{ vars.GOOGLE_CLOUD_PROJECT }}'
gcp_location: '${{ vars.GOOGLE_CLOUD_LOCATION }}'
gcp_service_account: '${{ vars.SERVICE_ACCOUNT_EMAIL }}'
gemini_api_key: '${{ secrets.GEMINI_API_KEY }}'
use_vertex_ai: '${{ vars.GOOGLE_GENAI_USE_VERTEXAI }}'
use_gemini_code_assist: '${{ vars.GOOGLE_GENAI_USE_GCA }}'
settings: |-
{
"mcpServers": {
"issue_deduplication": {
"command": "docker",
"args": [
"run",
"-i",
"--rm",
"--network", "host",
"-e", "GITHUB_TOKEN",
"-e", "GEMINI_API_KEY",
"-e", "DATABASE_TYPE",
"-e", "FIRESTORE_DATABASE_ID",
"-e", "GCP_PROJECT",
"-e", "GOOGLE_APPLICATION_CREDENTIALS=/app/gcp-credentials.json",
"-v", "${GOOGLE_APPLICATION_CREDENTIALS}:/app/gcp-credentials.json",
"ghcr.io/google-gemini/gemini-cli-issue-triage@sha256:e3de1523f6c83aabb3c54b76d08940a2bf42febcb789dd2da6f95169641f94d3"
],
"env": {
"GITHUB_TOKEN": "${GITHUB_TOKEN}",
"GEMINI_API_KEY": "${{ secrets.GEMINI_API_KEY }}",
"DATABASE_TYPE":"firestore",
"GCP_PROJECT": "${FIRESTORE_PROJECT}",
"FIRESTORE_DATABASE_ID": "(default)",
"GOOGLE_APPLICATION_CREDENTIALS": "${GOOGLE_APPLICATION_CREDENTIALS}"
},
"enabled": true,
"timeout": 600000
}
},
"maxSessionTurns": 25,
"coreTools": [
"run_shell_command(echo)"
],
"telemetry": {
"enabled": true,
"target": "gcp"
}
}
prompt: |-
## Role
You are a database maintenance assistant for a GitHub issue deduplication system.
## Goal
Your sole responsibility is to refresh the embeddings for all open issues in the repository to ensure the deduplication database is up-to-date.
## Steps
1. **Extract Repository Information:** The repository is ${{ github.repository }}.
2. **Refresh Embeddings:** Call the `refresh` tool with the correct `repo`. Do not use the `force` parameter.
3. **Log Output:** Print the JSON output from the `refresh` tool to the logs.
## Guidelines
- Only use the `refresh` tool.
- Do not attempt to find duplicates or modify any issues.
- Your only task is to call the `refresh` tool and log its output.


@@ -14,11 +14,8 @@ defaults:
shell: 'bash'
permissions:
contents: 'read'
id-token: 'write'
issues: 'write'
statuses: 'write'
packages: 'read'
jobs:
triage-issues:
@@ -62,6 +59,7 @@ jobs:
GITHUB_TOKEN: '${{ secrets.GITHUB_TOKEN }}'
ISSUES_TO_TRIAGE: '${{ steps.find_issues.outputs.issues_to_triage }}'
REPOSITORY: '${{ github.repository }}'
AVAILABLE_LABELS: '${{ steps.get_labels.outputs.available_labels }}'
with:
OPENAI_API_KEY: '${{ secrets.OPENAI_API_KEY }}'
OPENAI_BASE_URL: '${{ secrets.OPENAI_BASE_URL }}'
@@ -70,18 +68,14 @@ jobs:
{
"maxSessionTurns": 25,
"coreTools": [
"run_shell_command(echo)",
"run_shell_command(gh label list)",
"run_shell_command(gh issue edit)",
"run_shell_command(gh issue view)",
"run_shell_command(gh issue list)"
"run_shell_command(echo)"
],
"sandbox": false
}
prompt: |-
## Role
You are an issue triage assistant. Analyze issues and apply
You are an issue triage assistant. Analyze issues and identify
appropriate labels. Use the available tools to gather information;
do not ask for information to be provided.
@@ -114,13 +108,15 @@ jobs:
## Guidelines
- Output only valid JSON format
- Do not include any explanation or additional text, just the JSON
- Only use labels that already exist in the repository.
- Do not add comments or modify the issue content.
- Do not remove labels titled help wanted or good first issue.
- Triage only the current issue.
- Apply only one area/ label
- Apply only one kind/ label (Do not apply kind/duplicate or kind/parent-issue)
- Apply all applicable sub-area/* and priority/* labels based on the issue content. It's ok to have multiple of these.
- Identify only one area/ label
- Identify only one kind/ label (Do not apply kind/duplicate or kind/parent-issue)
- Identify all applicable sub-area/* and priority/* labels based on the issue content. It's ok to have multiple of these.
- Once you categorize the issue, if it needs information, bump the priority down by 1, e.g. a p0 would become a p1 and a p1 would become a p2. P2 and P3 can stay as is in this scenario.
Categorization Guidelines:
P0: Critical / Blocker


@@ -0,0 +1,98 @@
name: 'Assign Issue on Comment'
on:
issue_comment:
types:
- 'created'
concurrency:
group: '${{ github.workflow }}-${{ github.event.issue.number }}'
cancel-in-progress: true
defaults:
run:
shell: 'bash'
permissions:
contents: 'read'
id-token: 'write'
issues: 'write'
statuses: 'write'
packages: 'read'
jobs:
self-assign-issue:
if: |-
github.repository == 'google-gemini/gemini-cli' &&
github.event_name == 'issue_comment' &&
contains(github.event.comment.body, '/assign')
runs-on: 'ubuntu-latest'
steps:
- name: 'Generate GitHub App Token'
id: 'generate_token'
uses: 'actions/create-github-app-token@a8d616148505b5069dccd32f177bb87d7f39123b'
with:
app-id: '${{ secrets.APP_ID }}'
private-key: '${{ secrets.PRIVATE_KEY }}'
# Add 'assignments' write permission
permission-issues: 'write'
- name: 'Assign issue to user'
uses: 'actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea'
with:
github-token: '${{ steps.generate_token.outputs.token }}'
script: |
const issueNumber = context.issue.number;
const commenter = context.actor;
const owner = context.repo.owner;
const repo = context.repo.repo;
const MAX_ISSUES_ASSIGNED = 3;
// Search for open issues already assigned to the commenter in this repo
const { data: assignedIssues } = await github.rest.search.issuesAndPullRequests({
q: `is:issue repo:${owner}/${repo} assignee:${commenter} is:open`
});
if (assignedIssues.total_count >= MAX_ISSUES_ASSIGNED) {
await github.rest.issues.createComment({
owner: owner,
repo: repo,
issue_number: issueNumber,
body: `👋 @${commenter}! You currently have ${assignedIssues.total_count} issues assigned to you. Our policy allows a maximum of ${MAX_ISSUES_ASSIGNED} issues assigned at once. Once you close out an existing issue, it will open up space to take another. You can also unassign yourself from an existing issue, but please work on a hand-off if someone is expecting work on that issue.`
});
return; // exit
}
// Check if the issue is already assigned
const issue = await github.rest.issues.get({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: issueNumber,
});
if (issue.data.assignees.length > 0) {
// Comment that it's already assigned
await github.rest.issues.createComment({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: issueNumber,
body: `@${commenter} Thanks for taking an interest, but this issue is already assigned. We'd still love to have you contribute. Check out our [Help Wanted](https://github.com/google-gemini/gemini-cli/issues?q=is%3Aissue%20state%3Aopen%20label%3A%22help%20wanted%22) list for issues where we need some extra attention.`
});
return;
}
// If not taken, assign the user who commented
await github.rest.issues.addAssignees({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: issueNumber,
assignees: [commenter]
});
// Post a comment to confirm assignment
await github.rest.issues.createComment({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: issueNumber,
body: `👋 @${commenter}, you've been assigned to this issue! Thank you for taking the time to contribute. Make sure to check out our [contributing guidelines](https://github.com/google-gemini/gemini-cli/blob/main/CONTRIBUTING.md).`
});


@@ -4,6 +4,8 @@ on:
schedule:
# Runs every day at midnight UTC for the nightly release.
- cron: '0 0 * * *'
# Runs every Tuesday at 23:59 UTC for the preview release.
- cron: '59 23 * * 2'
workflow_dispatch:
inputs:
version:
@@ -25,6 +27,11 @@ on:
required: false
type: 'boolean'
default: false
create_preview_release:
description: 'Auto-apply the preview release tag; the input version is ignored.'
required: false
type: 'boolean'
default: false
force_skip_tests:
description: 'Select to skip the "Run Tests" step in testing. Prod releases should run tests'
required: false
@@ -51,22 +58,30 @@ jobs:
- name: 'Checkout'
uses: 'actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8' # ratchet:actions/checkout@v5
with:
ref: '${{ github.sha }}'
ref: '${{ github.event.inputs.ref || github.sha }}'
fetch-depth: 0
- name: 'Set booleans for simplified logic'
env:
CREATE_NIGHTLY_RELEASE: '${{ github.event.inputs.create_nightly_release }}'
CREATE_PREVIEW_RELEASE: '${{ github.event.inputs.create_preview_release }}'
EVENT_NAME: '${{ github.event_name }}'
CRON: '${{ github.event.schedule }}'
DRY_RUN_INPUT: '${{ github.event.inputs.dry_run }}'
id: 'vars'
run: |-
is_nightly="false"
if [[ "${EVENT_NAME}" == "schedule" || "${CREATE_NIGHTLY_RELEASE}" == "true" ]]; then
if [[ "${CRON}" == "0 0 * * *" || "${CREATE_NIGHTLY_RELEASE}" == "true" ]]; then
is_nightly="true"
fi
echo "is_nightly=${is_nightly}" >> "${GITHUB_OUTPUT}"
is_preview="false"
if [[ "${CRON}" == "59 23 * * 2" || "${CREATE_PREVIEW_RELEASE}" == "true" ]]; then
is_preview="true"
fi
echo "is_preview=${is_preview}" >> "${GITHUB_OUTPUT}"
is_dry_run="false"
if [[ "${DRY_RUN_INPUT}" == "true" ]]; then
is_dry_run="true"
@@ -96,7 +111,9 @@ jobs:
PREVIOUS_TAG=$(node scripts/get-previous-tag.js "$CURRENT_TAG" || echo "")
echo "PREVIOUS_TAG=${PREVIOUS_TAG}" >> "$GITHUB_OUTPUT"
env:
GITHUB_TOKEN: '${{ secrets.GITHUB_TOKEN }}'
IS_NIGHTLY: '${{ steps.vars.outputs.is_nightly }}'
IS_PREVIEW: '${{ steps.vars.outputs.is_preview }}'
MANUAL_VERSION: '${{ inputs.version }}'
- name: 'Run Tests'

.prettierignore (new file)

@@ -0,0 +1,20 @@
**/bundle
**/coverage
**/dist
**/.git
**/node_modules
.docker
.DS_Store
.env
.gemini/
.idea
.integration-tests/
*.iml
*.tsbuildinfo
*.vsix
bower_components
eslint.config.js
**/generated
gha-creds-*.json
junit.xml
Thumbs.db


@@ -24,12 +24,12 @@ Our development is guided by the following principles:
## How the Roadmap Works
Our roadmap is managed directly through Github Issues. See our entry point Roadmap Issue [here](https://github.com/google-gemini/gemini-cli/issues/4191). This approach allows for transparency and gives you a direct way to learn more or get involved with any specific initiative. All our roadmap items will be tagged as Type:`Feature` and Label:`maintainer` for features we are actively working on, or Type:`Task` and Label:`maintainer` for a more detailed list of tasks.
Our roadmap is managed directly through GitHub Issues. See our entry point Roadmap Issue [here](https://github.com/google-gemini/gemini-cli/issues/4191). This approach allows for transparency and gives you a direct way to learn more or get involved with any specific initiative. All our roadmap items will be tagged as Type:`Feature` and Label:`maintainer` for features we are actively working on, or Type:`Task` and Label:`maintainer` for a more detailed list of tasks.
Issues are organized to provide key information at a glance:
- **Target Quarter:** `Milestone` denotes the anticipated delivery timeline.
- **Feature Area:** Labels such as `area/model` or `area/tooling` categorizes the work.
- **Feature Area:** Labels such as `area/model` or `area/tooling` categorize the work.
- **Issue Type:** _Workstream_ => _Epics_ => _Features_ => _Tasks|Bugs_
To see what we're working on, you can filter our issues by these dimensions. See all our items [here](https://github.com/orgs/google-gemini/projects/11/views/19)
@@ -39,7 +39,7 @@ To see what we're working on, you can filter our issues by these dimensions. See
To better organize our efforts, we categorize our work into several key feature areas. These labels are used on our GitHub Issues to help you filter and
find initiatives that interest you.
- **Authentication:** Secure user access via API keys, Gemini Code Assist login etc.
- **Authentication:** Secure user access via API keys, Gemini Code Assist login, etc.
- **Model:** Support new Gemini models, multi-modality, local execution, and performance tuning.
- **User Experience:** Improve the CLI's usability, performance, interactive features, and documentation.
- **Tooling:** Built-in tools and the MCP ecosystem.

Binary file not shown (image added; 381 KiB).


@@ -18,8 +18,8 @@ Slash commands provide meta-level control over the CLI itself.
- **Description:** Saves the current conversation history. You must add a `<tag>` for identifying the conversation state.
- **Usage:** `/chat save <tag>`
- **Details on Checkpoint Location:** The default locations for saved chat checkpoints are:
- Linux/macOS: `~/.config/google-generative-ai/checkpoints/`
- Windows: `C:\Users\<YourUsername>\AppData\Roaming\google-generative-ai\checkpoints\`
- Linux/macOS: `~/.qwen/tmp/<project_hash>/`
- Windows: `C:\Users\<YourUsername>\.qwen\tmp\<project_hash>\`
- When you run `/chat list`, the CLI only scans these specific directories to find available checkpoints.
- **Note:** These checkpoints are for manually saving and resuming conversation states. For automatic checkpoints created before file modifications, see the [Checkpointing documentation](../checkpointing.md).
- **`resume`**
@@ -118,6 +118,7 @@ Slash commands provide meta-level control over the CLI itself.
- [**`/tools`**](../tools/index.md)
- **Description:** Display a list of tools that are currently available within Qwen Code.
- **Usage:** `/tools [desc]`
- **Sub-commands:**
- **`desc`** or **`descriptions`**:
- **Description:** Show detailed descriptions of each tool, including each tool's name with its full description as provided to the model.
@@ -278,7 +279,7 @@ When a custom command attempts to execute a shell command, Qwen Code will now pr
1. **Inject Commands:** Use the `!{...}` syntax.
2. **Argument Substitution:** If `{{args}}` is present inside the block, it is automatically shell-escaped (see [Context-Aware Injection](#1-context-aware-injection-with-args) above).
3. **Robust Parsing:** The parser correctly handles complex shell commands that include nested braces, such as JSON payloads.
3. **Robust Parsing:** The parser correctly handles complex shell commands that include nested braces, such as JSON payloads. **Note:** The content inside `!{...}` must have balanced braces (`{` and `}`). If you need to execute a command containing unbalanced braces, consider wrapping it in an external script file and calling the script within the `!{...}` block (see the sketch after this list).
4. **Security Check and Confirmation:** The CLI performs a security check on the final, resolved command (after arguments are escaped and substituted). A dialog will appear showing the exact command(s) to be executed.
5. **Execution and Error Reporting:** The command is executed. If the command fails, the output injected into the prompt will include the error messages (stderr) followed by a status line, e.g., `[Shell command exited with code 1]`. This helps the model understand the context of the failure.
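For instance, here is a hedged sketch of that workaround (the file names and commands are illustrative, not part of the CLI): the brace-heavy command lives in a standalone script, and the command file invokes only the script.

```toml
# In: <project>/.qwen/commands/braces.toml (hypothetical)
description = "Counts opening braces in JSON sources via a helper script."
prompt = """
Summarize the following brace counts per file:

!{./scripts/count-braces.sh}
"""
```

```bash
#!/usr/bin/env bash
# scripts/count-braces.sh (hypothetical)
# The literal '{' below is unbalanced, so this command could not be
# written directly inside a !{...} block.
grep -c '{' src/*.json
```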
@@ -306,6 +307,41 @@ Please generate a Conventional Commit message based on the following git diff:
When you run `/git:commit`, the CLI first executes `git diff --staged`, then replaces `!{git diff --staged}` with the output of that command before sending the final, complete prompt to the model.
##### 4. Injecting File Content with `@{...}`
You can directly embed the content of a file or a directory listing into your prompt using the `@{...}` syntax. This is useful for creating commands that operate on specific files.
**How It Works:**
- **File Injection**: `@{path/to/file.txt}` is replaced by the content of `file.txt`.
- **Multimodal Support**: If the path points to a supported image (e.g., PNG, JPEG), PDF, audio, or video file, it will be correctly encoded and injected as multimodal input. Other binary files are handled gracefully and skipped.
- **Directory Listing**: `@{path/to/dir}` is traversed, and every file within the directory and its subdirectories is inserted into the prompt. This respects `.gitignore` and `.geminiignore` if enabled.
- **Workspace-Aware**: The command searches for the path in the current directory and any other workspace directories. Absolute paths are allowed if they are within the workspace.
- **Processing Order**: File content injection with `@{...}` is processed _before_ shell commands (`!{...}`) and argument substitution (`{{args}}`).
- **Parsing**: The parser requires the content inside `@{...}` (the path) to have balanced braces (`{` and `}`).
**Example (`review.toml`):**
This command injects the content of a _fixed_ best practices file (`docs/best-practices.md`) and uses the user's arguments to provide context for the review.
```toml
# In: <project>/.qwen/commands/review.toml
# Invoked via: /review FileCommandLoader.ts
description = "Reviews the provided context using a best practice guide."
prompt = """
You are an expert code reviewer.
Your task is to review {{args}}.
Use the following best practices when providing your review:
@{docs/best-practices.md}
"""
```
When you run `/review FileCommandLoader.ts`, the `@{docs/best-practices.md}` placeholder is replaced by the content of that file, and `{{args}}` is replaced by the text you provided, before the final prompt is sent to the model.
---
#### Example: A "Pure Function" Refactoring Command


@@ -7,16 +7,20 @@ Qwen Code offers several ways to configure its behavior, including environment v
Configuration is applied in the following order of precedence (lower numbers are overridden by higher numbers):
1. **Default values:** Hardcoded defaults within the application.
2. **User settings file:** Global settings for the current user.
3. **Project settings file:** Project-specific settings.
4. **System settings file:** System-wide settings.
5. **Environment variables:** System-wide or session-specific variables, potentially loaded from `.env` files.
6. **Command-line arguments:** Values passed when launching the CLI.
2. **System defaults file:** System-wide default settings that can be overridden by other settings files.
3. **User settings file:** Global settings for the current user.
4. **Project settings file:** Project-specific settings.
5. **System settings file:** System-wide settings that override all other settings files.
6. **Environment variables:** System-wide or session-specific variables, potentially loaded from `.env` files.
7. **Command-line arguments:** Values passed when launching the CLI.
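For example (an illustrative sketch with invented values): suppose the user settings file sets `"theme": "dark"`, the project settings file sets `"theme": "light"`, and the session is launched with `--model qwen3-coder-plus`. The project file overrides the user file, and the command-line argument overrides every settings file, so the session effectively runs with:

```json
{
  "theme": "light",
  "model": "qwen3-coder-plus"
}
```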
## Settings files
Qwen Code uses `settings.json` files for persistent configuration. There are three locations for these files:
Qwen Code uses JSON settings files for persistent configuration. There are four locations for these files:
- **System defaults file:**
- **Location:** `/etc/qwen-code/system-defaults.json` (Linux), `C:\ProgramData\qwen-code\system-defaults.json` (Windows) or `/Library/Application Support/QwenCode/system-defaults.json` (macOS). The path can be overridden using the `QWEN_CODE_SYSTEM_DEFAULTS_PATH` environment variable.
- **Scope:** Provides a base layer of system-wide default settings. These settings have the lowest precedence and are intended to be overridden by user, project, or system override settings.
- **User settings file:**
- **Location:** `~/.qwen/settings.json` (where `~` is your home directory).
- **Scope:** Applies to all Qwen Code sessions for the current user.
@@ -24,7 +28,7 @@ Qwen Code uses `settings.json` files for persistent configuration. There are thr
- **Location:** `.qwen/settings.json` within your project's root directory.
- **Scope:** Applies only when running Qwen Code from that specific project. Project settings override user settings.
- **System settings file:**
- **Location:** `/etc/gemini-cli/settings.json` (Linux), `C:\ProgramData\gemini-cli\settings.json` (Windows) or `/Library/Application Support/GeminiCli/settings.json` (macOS). The path can be overridden using the `GEMINI_CLI_SYSTEM_SETTINGS_PATH` environment variable.
- **Location:** `/etc/qwen-code/settings.json` (Linux), `C:\ProgramData\qwen-code\settings.json` (Windows) or `/Library/Application Support/QwenCode/settings.json` (macOS). The path can be overridden using the `QWEN_CODE_SYSTEM_SETTINGS_PATH` environment variable.
- **Scope:** Applies to all Qwen Code sessions on the system, for all users. System settings override user and project settings. This can be useful for enterprise system administrators who need control over users' Qwen Code setups.
**Note on environment variables in settings:** String values within your `settings.json` files can reference environment variables using either `$VAR_NAME` or `${VAR_NAME}` syntax. These variables will be automatically resolved when the settings are loaded. For example, if you have an environment variable `MY_API_TOKEN`, you could use it in `settings.json` like this: `"apiKey": "$MY_API_TOKEN"`.
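A minimal sketch (the variable names are illustrative) showing both reference syntaxes inside a `settings.json`:

```json
{
  "apiKey": "$MY_API_TOKEN",
  "telemetry": {
    "otlpEndpoint": "${OTLP_ENDPOINT}"
  }
}
```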
@@ -60,19 +64,36 @@ In addition to a project settings file, a project's `.qwen` directory can contai
- **Properties:**
- **`respectGitIgnore`** (boolean): Whether to respect .gitignore patterns when discovering files. When set to `true`, git-ignored files (like `node_modules/`, `dist/`, `.env`) are automatically excluded from @ commands and file listing operations.
- **`enableRecursiveFileSearch`** (boolean): Whether to enable searching recursively for filenames under the current tree when completing @ prefixes in the prompt.
- **`disableFuzzySearch`** (boolean): When `true`, disables the fuzzy search capabilities when searching for files, which can improve performance on projects with a large number of files.
- **Example:**
```json
"fileFiltering": {
"respectGitIgnore": true,
"enableRecursiveFileSearch": false
"enableRecursiveFileSearch": false,
"disableFuzzySearch": true
}
```
### Troubleshooting File Search Performance
If you are experiencing performance issues with file searching (e.g., with `@` completions), especially in projects with a very large number of files, here are a few things you can try in order of recommendation:
1. **Use `.geminiignore`:** Create a `.geminiignore` file in your project root to exclude directories that contain a large number of files that you don't need to reference (e.g., build artifacts, logs, `node_modules`). Reducing the total number of files crawled is the most effective way to improve performance. A sample file is sketched after this list.
2. **Disable Fuzzy Search:** If ignoring files is not enough, you can disable fuzzy search by setting `disableFuzzySearch` to `true` in your `settings.json` file. This will use a simpler, non-fuzzy matching algorithm, which can be faster.
3. **Disable Recursive File Search:** As a last resort, you can disable recursive file search entirely by setting `enableRecursiveFileSearch` to `false`. This will be the fastest option as it avoids a recursive crawl of your project. However, it means you will need to type the full path to files when using `@` completions.
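As a sketch of the first recommendation above (the entries are illustrative), a `.geminiignore` at the project root might contain:

```
# Exclude bulky, rarely-referenced directories from file crawling
node_modules/
dist/
coverage/
*.log
```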
- **`coreTools`** (array of strings):
- **Description:** Allows you to specify a list of core tool names that should be made available to the model. This can be used to restrict the set of built-in tools. See [Built-in Tools](../core/tools-api.md#built-in-tools) for a list of core tools. You can also specify command-specific restrictions for tools that support it, like the `ShellTool`. For example, `"coreTools": ["ShellTool(ls -l)"]` will only allow the `ls -l` command to be executed.
- **Default:** All tools available for use by the model.
- **Example:** `"coreTools": ["ReadFileTool", "GlobTool", "ShellTool(ls)"]`.
- **`allowedTools`** (array of strings):
- **Default:** `undefined`
- **Description:** A list of tool names that will bypass the confirmation dialog. This is useful for tools that you trust and use frequently. The match semantics are the same as `coreTools`.
- **Example:** `"allowedTools": ["ShellTool(git status)"]`.
- **`excludeTools`** (array of strings):
- **Description:** Allows you to specify a list of core tool names that should be excluded from the model. A tool listed in both `excludeTools` and `coreTools` is excluded. You can also specify command-specific restrictions for tools that support it, like the `ShellTool`. For example, `"excludeTools": ["ShellTool(rm -rf)"]` will block the `rm -rf` command.
- **Default**: No tools excluded.
@@ -267,7 +288,7 @@ In addition to a project settings file, a project's `.qwen` directory can contai
```
- **`includeDirectories`** (array of strings):
- **Description:** Specifies an array of additional absolute or relative paths to include in the workspace context. This allows you to work with files across multiple directories as if they were one. Paths can use `~` to refer to the user's home directory. This setting can be combined with the `--include-directories` command-line flag.
- **Description:** Specifies an array of additional absolute or relative paths to include in the workspace context. Missing directories will be skipped with a warning by default. Paths can use `~` to refer to the user's home directory. This setting can be combined with the `--include-directories` command-line flag.
- **Default:** `[]`
- **Example:**
```json
@@ -310,6 +331,20 @@ In addition to a project settings file, a project's `.qwen` directory can contai
"showLineNumbers": false
```
- **`accessibility`** (object):
- **Description:** Configures accessibility features for the CLI.
- **Properties:**
- **`screenReader`** (boolean): Enables screen reader mode, which adjusts the TUI for better compatibility with screen readers. This can also be enabled with the `--screen-reader` command-line flag, which will take precedence over the setting.
- **`disableLoadingPhrases`** (boolean): Disables the display of loading phrases during operations.
- **Default:** `{"screenReader": false, "disableLoadingPhrases": false}`
- **Example:**
```json
"accessibility": {
"screenReader": true,
"disableLoadingPhrases": true
}
```
### Example `settings.json`:
```json
@@ -427,8 +462,8 @@ The CLI automatically loads environment variables from an `.env` file. The loadi
Arguments passed directly when running the CLI can override other configurations for that specific session.
- **`--model <model_name>`** (**`-m <model_name>`**):
- Specifies the Gemini model to use for this session.
- Example: `npm start -- --model gemini-1.5-pro-latest`
- Specifies the model to use for this session.
- Example: `npm start -- --model qwen3-coder-plus`
- **`--prompt <your_prompt>`** (**`-p <your_prompt>`**):
- Used to pass a prompt directly to the command. This invokes Qwen Code in a non-interactive mode.
- **`--prompt-interactive <your_prompt>`** (**`-i <your_prompt>`**):
@@ -457,6 +492,9 @@ Arguments passed directly when running the CLI can override other configurations
- `yolo`: Automatically approve all tool calls (equivalent to `--yolo`)
- Cannot be used together with `--yolo`. Use `--approval-mode=yolo` instead of `--yolo` for the new unified approach.
- Example: `qwen --approval-mode auto_edit`
- **`--allowed-tools <tool1,tool2,...>`**:
- A comma-separated list of tool names that will bypass the confirmation dialog.
- Example: `qwen --allowed-tools "ShellTool(git status)"`
- **`--telemetry`**:
- Enables [telemetry](../telemetry.md).
- **`--telemetry-target`**:
@@ -483,6 +521,8 @@ Arguments passed directly when running the CLI can override other configurations
- Can be specified multiple times or as comma-separated values.
- 5 directories can be added at maximum.
- Example: `--include-directories /path/to/project1,/path/to/project2` or `--include-directories /path/to/project1 --include-directories /path/to/project2`
- **`--screen-reader`**:
- Enables screen reader mode for accessibility.
- **`--version`**:
- Displays the version of the CLI.
- **`--openai-logging`**:
@@ -495,7 +535,7 @@ Arguments passed directly when running the CLI can override other configurations
While not strictly configuration for the CLI's _behavior_, context files (defaulting to `QWEN.md` but configurable via the `contextFileName` setting) are crucial for configuring the _instructional context_ (also referred to as "memory"). This powerful feature allows you to give project-specific instructions, coding style guides, or any relevant background information to the AI, making its responses more tailored and accurate to your needs. The CLI includes UI elements, such as an indicator in the footer showing the number of loaded context files, to keep you informed about the active context.
- **Purpose:** These Markdown files contain instructions, guidelines, or context that you want the Gemini model to be aware of during your interactions. The system is designed to manage this instructional context hierarchically.
- **Purpose:** These Markdown files contain instructions, guidelines, or context that you want the model to be aware of during your interactions. The system is designed to manage this instructional context hierarchically.
### Example Context File Content (e.g., `QWEN.md`)

docs/cli/enterprise.md (new file)

@@ -0,0 +1,336 @@
# Gemini CLI for the Enterprise
This document outlines configuration patterns and best practices for deploying and managing Gemini CLI in an enterprise environment. By leveraging system-level settings, administrators can enforce security policies, manage tool access, and ensure a consistent experience for all users.
> **A Note on Security:** The patterns described in this document are intended to help administrators create a more controlled and secure environment for using Gemini CLI. However, they should not be considered a foolproof security boundary. A determined user with sufficient privileges on their local machine may still be able to circumvent these configurations. These measures are designed to prevent accidental misuse and enforce corporate policy in a managed environment, not to defend against a malicious actor with local administrative rights.
## Centralized Configuration: The System Settings File
The most powerful tools for enterprise administration are the system-wide settings files. These files allow you to define a baseline configuration (`system-defaults.json`) and a set of overrides (`settings.json`) that apply to all users on a machine. For a complete overview of configuration options, see the [Configuration documentation](./configuration.md).
Settings are merged from four files. The precedence order for single-value settings (like `theme`) is:
1. System Defaults (`system-defaults.json`)
2. User Settings (`~/.gemini/settings.json`)
3. Workspace Settings (`<project>/.gemini/settings.json`)
4. System Overrides (`settings.json`)
This means the System Overrides file has the final say. For settings that are arrays (`includeDirectories`) or objects (`mcpServers`), the values are merged.
**Example of Merging and Precedence:**
Here is how settings from different levels are combined.
- **System Defaults `system-defaults.json`:**
```json
{
"theme": "default-corporate-theme",
"includeDirectories": ["/etc/gemini-cli/common-context"]
}
```
- **User `settings.json` (`~/.gemini/settings.json`):**
```json
{
"theme": "user-preferred-dark-theme",
"mcpServers": {
"corp-server": {
"command": "/usr/local/bin/corp-server-dev"
},
"user-tool": {
"command": "npm start --prefix ~/tools/my-tool"
}
},
"includeDirectories": ["~/gemini-context"]
}
```
- **Workspace `settings.json` (`<project>/.gemini/settings.json`):**
```json
{
"theme": "project-specific-light-theme",
"mcpServers": {
"project-tool": {
"command": "npm start"
}
},
"includeDirectories": ["./project-context"]
}
```
- **System Overrides `settings.json`:**
```json
{
"theme": "system-enforced-theme",
"mcpServers": {
"corp-server": {
"command": "/usr/local/bin/corp-server-prod"
}
},
"includeDirectories": ["/etc/gemini-cli/global-context"]
}
```
This results in the following merged configuration:
- **Final Merged Configuration:**
```json
{
"theme": "system-enforced-theme",
"mcpServers": {
"corp-server": {
"command": "/usr/local/bin/corp-server-prod"
},
"user-tool": {
"command": "npm start --prefix ~/tools/my-tool"
},
"project-tool": {
"command": "npm start"
}
},
"includeDirectories": [
"/etc/gemini-cli/common-context",
"~/gemini-context",
"./project-context",
"/etc/gemini-cli/global-context"
]
}
```
**Why:**
- **`theme`**: The value from the system overrides (`system-enforced-theme`) is used, as it has the highest precedence.
- **`mcpServers`**: The objects are merged. The `corp-server` definition from the system overrides takes precedence over the user's definition. The unique `user-tool` and `project-tool` are included.
- **`includeDirectories`**: The arrays are concatenated in the order of System Defaults, User, Workspace, and then System Overrides.
- **Location**:
- **Linux**: `/etc/gemini-cli/settings.json`
- **Windows**: `C:\ProgramData\gemini-cli\settings.json`
- **macOS**: `/Library/Application Support/GeminiCli/settings.json`
- The path can be overridden using the `GEMINI_CLI_SYSTEM_SETTINGS_PATH` environment variable.
- **Control**: This file should be managed by system administrators and protected with appropriate file permissions to prevent unauthorized modification by users.
By using the system settings file, you can enforce the security and configuration patterns described below.
## Restricting Tool Access
You can significantly enhance security by controlling which tools the Gemini model can use. This is achieved through the `coreTools` and `excludeTools` settings. For a list of available tools, see the [Tools documentation](../tools/index.md).
### Allowlisting with `coreTools`
The most secure approach is to explicitly add the tools and commands that users are permitted to execute to an allowlist. This prevents the use of any tool not on the approved list.
**Example:** Allow only safe, read-only file operations and listing files.
```json
{
"coreTools": ["ReadFileTool", "GlobTool", "ShellTool(ls)"]
}
```
### Blocklisting with `excludeTools`
Alternatively, you can add specific tools that are considered dangerous in your environment to a blocklist.
**Example:** Prevent the use of the shell tool for removing files.
```json
{
"excludeTools": ["ShellTool(rm -rf)"]
}
```
**Security Note:** Blocklisting with `excludeTools` is less secure than allowlisting with `coreTools`, as it relies on blocking known-bad commands, and clever users may find ways to bypass simple string-based blocks. **Allowlisting is the recommended approach.**
## Managing Custom Tools (MCP Servers)
If your organization uses custom tools via [Model-Context Protocol (MCP) servers](../core/tools-api.md), it is crucial to understand how server configurations are managed to apply security policies effectively.
### How MCP Server Configurations are Merged
Gemini CLI loads `settings.json` files from three levels: System, Workspace, and User. When it comes to the `mcpServers` object, these configurations are **merged**:
1. **Merging:** The lists of servers from all three levels are combined into a single list.
2. **Precedence:** If a server with the **same name** is defined at multiple levels (e.g., a server named `corp-api` exists in both system and user settings), the definition from the highest-precedence level is used. The order of precedence is: **System > Workspace > User**.
This means a user **cannot** override the definition of a server that is already defined in the system-level settings. However, they **can** add new servers with unique names.
### Enforcing a Catalog of Tools
The security of your MCP tool ecosystem depends on a combination of defining the canonical servers and adding their names to an allowlist.
### Restricting Tools Within an MCP Server
For even greater security, especially when dealing with third-party MCP servers, you can restrict which specific tools from a server are exposed to the model. This is done using the `includeTools` and `excludeTools` properties within a server's definition. This allows you to use a subset of tools from a server without allowing potentially dangerous ones.
Following the principle of least privilege, it is highly recommended to use `includeTools` to create an allowlist of only the necessary tools.
**Example:** Only allow the `code-search` and `get-ticket-details` tools from a third-party MCP server, even if the server offers other tools like `delete-ticket`.
```json
{
"allowMCPServers": ["third-party-analyzer"],
"mcpServers": {
"third-party-analyzer": {
"command": "/usr/local/bin/start-3p-analyzer.sh",
"includeTools": ["code-search", "get-ticket-details"]
}
}
}
```
#### More Secure Pattern: Define and Add to Allowlist in System Settings
To create a secure, centrally-managed catalog of tools, the system administrator **must** do both of the following in the system-level `settings.json` file:
1. **Define the full configuration** for every approved server in the `mcpServers` object. This ensures that even if a user defines a server with the same name, the secure system-level definition will take precedence.
2. **Add the names** of those servers to an allowlist using the `allowMCPServers` setting. This is a critical security step that prevents users from running any servers that are not on this list. If this setting is omitted, the CLI will merge and allow any server defined by the user.
**Example System `settings.json`:**
1. Add the _names_ of all approved servers to an allowlist.
This will prevent users from adding their own servers.
2. Provide the canonical _definition_ for each server on the allowlist.
```json
{
"allowMCPServers": ["corp-data-api", "source-code-analyzer"],
"mcpServers": {
"corp-data-api": {
"command": "/usr/local/bin/start-corp-api.sh",
"timeout": 5000
},
"source-code-analyzer": {
"command": "/usr/local/bin/start-analyzer.sh"
}
}
}
```
This pattern is more secure because it combines canonical definitions with an allowlist. Any server a user defines will either be overridden by the system definition (if it has the same name) or blocked because its name is not in the `allowMCPServers` list.
### Less Secure Pattern: Omitting the Allowlist
If the administrator defines the `mcpServers` object but fails to also specify the `allowMCPServers` allowlist, users may add their own servers.
**Example System `settings.json`:**
This configuration defines servers but does not enforce the allowlist.
The administrator has NOT included the "allowMCPServers" setting.
```json
{
"mcpServers": {
"corp-data-api": {
"command": "/usr/local/bin/start-corp-api.sh"
}
}
}
```
In this scenario, a user can add their own server in their local `settings.json`. Because there is no `allowMCPServers` list to filter the merged results, the user's server will be added to the list of available tools and allowed to run.
## Enforcing Sandboxing for Security
To mitigate the risk of potentially harmful operations, you can enforce the use of sandboxing for all tool execution. The sandbox isolates tool execution in a containerized environment.
**Example:** Force all tool execution to happen within a Docker sandbox.
```json
{
"sandbox": "docker"
}
```
You can also specify a custom, hardened Docker image for the sandbox using the `--sandbox-image` command-line argument or by building a custom `sandbox.Dockerfile` as described in the [Sandboxing documentation](./configuration.md#sandboxing).
## Controlling Network Access via Proxy
In corporate environments with strict network policies, you can configure Gemini CLI to route all outbound traffic through a corporate proxy. This can be set via an environment variable, but it can also be enforced for custom tools via the `mcpServers` configuration.
**Example (for an MCP Server):**
```json
{
"mcpServers": {
"proxied-server": {
"command": "node",
"args": ["mcp_server.js"],
"env": {
"HTTP_PROXY": "http://proxy.example.com:8080",
"HTTPS_PROXY": "http://proxy.example.com:8080"
}
}
}
}
```
## Telemetry and Auditing
For auditing and monitoring purposes, you can configure Gemini CLI to send telemetry data to a central location. This allows you to track tool usage and other events. For more information, see the [telemetry documentation](../telemetry.md).
**Example:** Enable telemetry with prompt logging disabled. With `"target": "gcp"`, data is sent to Google Cloud; if you instead point at an OTLP collector and `otlpEndpoint` is not specified, it defaults to `http://localhost:4317`.
```json
{
"telemetry": {
"enabled": true,
"target": "gcp",
"logPrompts": false
}
}
```
**Note:** Ensure that `logPrompts` is set to `false` in an enterprise setting to avoid collecting potentially sensitive information from user prompts.
## Putting It All Together: Example System `settings.json`
Here is an example of a system `settings.json` file that combines several of the patterns discussed above to create a secure, controlled environment for Gemini CLI.
```json
{
"sandbox": "docker",
"coreTools": [
"ReadFileTool",
"GlobTool",
"ShellTool(ls)",
"ShellTool(cat)",
"ShellTool(grep)"
],
"mcpServers": {
"corp-tools": {
"command": "/opt/gemini-tools/start.sh",
"timeout": 5000
}
},
"allowMCPServers": ["corp-tools"],
"telemetry": {
"enabled": true,
"target": "gcp",
"otlpEndpoint": "https://telemetry-prod.example.com:4317",
"logPrompts": false
},
"bugCommand": {
"urlTemplate": "https://servicedesk.example.com/new-ticket?title={title}&details={info}"
},
"usageStatisticsEnabled": false
}
```
This configuration:
- Forces all tool execution into a Docker sandbox.
- Strictly uses an allowlist for a small set of safe shell commands and file tools.
- Defines and allows a single corporate MCP server for custom tools.
- Enables telemetry for auditing, without logging prompt content.
- Redirects the `/bug` command to an internal ticketing system.
- Disables general usage statistics collection.

View File

@@ -28,6 +28,8 @@ Qwen Code comes with a selection of pre-defined themes, which you can list using
3. Using the arrow keys, select a theme. Some interfaces might offer a live preview or highlight as you select.
4. Confirm your selection to apply the theme.
**Note:** If a theme is defined in your `settings.json` file (either by name or by a file path), you must remove the `"theme"` setting from the file before you can change the theme using the `/theme` command.
### Theme Persistence
Selected themes are saved in Qwen Code's [configuration](./configuration.md) so your preference is remembered across sessions.
@@ -105,6 +107,46 @@ You can use either hex codes (e.g., `#FF0000`) **or** standard CSS color names (
You can define multiple custom themes by adding more entries to the `customThemes` object.
### Loading Themes from a File
In addition to defining custom themes in `settings.json`, you can also load a theme directly from a JSON file by specifying the file path in your `settings.json`. This is useful for sharing themes or keeping them separate from your main configuration.
To load a theme from a file, set the `theme` property in your `settings.json` to the path of your theme file:
```json
{
"theme": "/path/to/your/theme.json"
}
```
The theme file must be a valid JSON file that follows the same structure as a custom theme defined in `settings.json`.
**Example `my-theme.json`:**
```json
{
"name": "My File Theme",
"type": "custom",
"Background": "#282A36",
"Foreground": "#F8F8F2",
"LightBlue": "#82AAFF",
"AccentBlue": "#61AFEF",
"AccentPurple": "#BD93F9",
"AccentCyan": "#8BE9FD",
"AccentGreen": "#50FA7B",
"AccentYellow": "#F1FA8C",
"AccentRed": "#FF5555",
"Comment": "#6272A4",
"Gray": "#ABB2BF",
"DiffAdded": "#A6E3A1",
"DiffRemoved": "#F38BA8",
"DiffModified": "#89B4FA",
"GradientColors": ["#4796E4", "#847ACE", "#C3677F"]
}
```
**Security Note:** For your safety, Gemini CLI will only load theme files that are located within your home directory. If you attempt to load a theme from outside your home directory, a warning will be displayed and the theme will not be loaded. This is to prevent loading potentially malicious theme files from untrusted sources.
### Example Custom Theme
<img src="../assets/theme-custom.png" alt="Custom theme example" width="600" />

View File

@@ -15,10 +15,10 @@ The following is an example of a proxy script that can be used with the `GEMINI_
// Set `GEMINI_SANDBOX_PROXY_COMMAND=scripts/example-proxy.js` to run proxy alongside sandbox
// Test via `curl https://example.com` inside sandbox (in shell mode or via shell tool)
import http from 'http';
import net from 'net';
import { URL } from 'url';
import console from 'console';
import http from 'node:http';
import net from 'node:net';
import { URL } from 'node:url';
import console from 'node:console';
const PROXY_PORT = 8877;
const ALLOWED_DOMAINS = ['example.com', 'googleapis.com'];

View File

@@ -74,3 +74,27 @@ For example, if both a user and the `gcp` extension define a `deploy` command:
- `/deploy` - Executes the user's deploy command
- `/gcp.deploy` - Executes the extension's deploy command (marked with `[gcp]` tag)
## Installing Extensions
You can install extensions using the `install` command. This command allows you to install extensions from a Git repository or a local path.
### Usage
`gemini extensions install <source> | --path <path>`
### Options
- `<source>` (positional): The URL of a Git repository to install the extension from. The repository must contain a `gemini-extension.json` file in its root.
- `--path <path>`: The path to a local directory to install as an extension. The directory must contain a `gemini-extension.json` file.
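### Examples
Illustrative invocations (the repository URL and local path below are placeholders):
```bash
# Install from a Git repository
gemini extensions install https://github.com/example-org/example-extension

# Install from a local directory
gemini extensions install --path ./my-extension
```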
## Variables
Gemini CLI extensions allow variable substitution in `gemini-extension.json`. This can be useful if, for example, you need the extension's directory to run an MCP server, using `"cwd": "${extensionPath}${/}run.ts"`.
**Supported variables:**
| variable | description |
| -------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `${extensionPath}` | The fully-qualified path of the extension in the user's filesystem e.g., '/Users/username/.gemini/extensions/example-extension'. This will not unwrap symlinks. |
| `${/}` or `${pathSeparator}` | The path separator (differs per OS). |
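As a sketch, a `gemini-extension.json` that starts an MCP server from inside the extension's own directory might look like this (the extension and server names are illustrative):
```json
{
  "name": "example-extension",
  "version": "1.0.0",
  "mcpServers": {
    "local-tools": {
      "command": "node",
      "args": ["${extensionPath}${/}run.js"],
      "cwd": "${extensionPath}"
    }
  }
}
```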

View File

@@ -44,7 +44,10 @@ You can also install the extension directly from a marketplace.
- **For Visual Studio Code:** Install from the [VS Code Marketplace](https://marketplace.visualstudio.com/items?itemName=google.gemini-cli-vscode-ide-companion).
- **For VS Code Forks:** To support forks of VS Code, the extension is also published on the [Open VSX Registry](https://open-vsx.org/extension/google/gemini-cli-vscode-ide-companion). Follow your editor's instructions for installing extensions from this registry.
After any installation method, it's recommended to open a new terminal window to ensure the integration is activated correctly. Once installed, you can use `/ide enable` to connect.
> NOTE:
> The "Gemini CLI Companion" extension may appear towards the bottom of search results. If you don't see it immediately, try scrolling down or sorting by "Newly Published".
>
> After manually installing the extension, you must run `/ide enable` in the CLI to activate the integration.
## Usage
@@ -110,7 +113,7 @@ If you encounter issues with IDE integration, here are some common error message
### Connection Errors
- **Message:** `🔴 Disconnected: Failed to connect to IDE companion extension for [IDE Name]. Please ensure the extension is running and try restarting your terminal. To install the extension, run /ide install.`
- **Message:** `🔴 Disconnected: Failed to connect to IDE companion extension in [IDE Name]. Please ensure the extension is running. To install the extension, run /ide install.`
- **Cause:** Gemini CLI could not find the necessary environment variables (`GEMINI_CLI_IDE_WORKSPACE_PATH` or `GEMINI_CLI_IDE_SERVER_PORT`) to connect to the IDE. This usually means the IDE companion extension is not running or did not initialize correctly.
- **Solution:**
1. Make sure you have installed the **Gemini CLI Companion** extension in your IDE and that it is enabled.
@@ -122,13 +125,13 @@ If you encounter issues with IDE integration, here are some common error message
### Configuration Errors
- **Message:** `🔴 Disconnected: Directory mismatch. Gemini CLI is running in a different location than the open workspace in [IDE Name]. Please run the CLI from the same directory as your project's root folder.`
- **Cause:** The CLI's current working directory is outside the folder or workspace you have open in your IDE.
- **Message:** `🔴 Disconnected: Directory mismatch. Gemini CLI is running in a different location than the open workspace in [IDE Name]. Please run the CLI from one of the following directories: [List of directories]`
- **Cause:** The CLI's current working directory is outside the workspace you have open in your IDE.
- **Solution:** `cd` into the same directory that is open in your IDE and restart the CLI.
- **Message:** `🔴 Disconnected: To use this feature, please open a single workspace folder in [IDE Name] and try again.`
- **Cause:** You have multiple workspace folders open in your IDE, or no folder is open at all. The IDE integration requires a single root workspace folder to operate correctly.
- **Solution:** Open a single project folder in your IDE and restart the CLI.
- **Message:** `🔴 Disconnected: To use this feature, please open a workspace folder in [IDE Name] and try again.`
- **Cause:** You have no workspace open in your IDE.
- **Solution:** Open a workspace in your IDE and restart the CLI.
### General Errors
@@ -136,6 +139,6 @@ If you encounter issues with IDE integration, here are some common error message
- **Cause:** You are running Gemini CLI in a terminal or environment that is not a supported IDE.
- **Solution:** Run Gemini CLI from the integrated terminal of a supported IDE, like VS Code.
- **Message:** `No installer is available for [IDE Name]. Please install the IDE companion manually from its marketplace.`
- **Message:** `No installer is available for IDE. Please install the Gemini CLI Companion extension manually from the marketplace.`
- **Cause:** You ran `/ide install`, but the CLI does not have an automated installer for your specific IDE.
- **Solution:** Open your IDE's extension marketplace, search for "Gemini CLI Companion", and install it manually.
- **Solution:** Open your IDE's extension marketplace, search for "Gemini CLI Companion", and [install it manually](#3-manual-installation-from-a-marketplace).

View File

@@ -32,7 +32,7 @@ This documentation is organized into the following sections:
- **[Web Search Tool](./tools/web-search.md):** Documentation for the `web_search` tool.
- **[Memory Tool](./tools/memory.md):** Documentation for the `save_memory` tool.
- **[Contributing & Development Guide](../CONTRIBUTING.md):** Information for contributors and developers, including setup, building, testing, and coding conventions.
- **[NPM Workspaces and Publishing](./npm.md):** Details on how the project's packages are managed and published.
- **[NPM](./npm.md):** Details on how the project's packages are structured
- **[Troubleshooting Guide](./troubleshooting.md):** Find solutions to common problems and FAQs.
- **[Terms of Service and Privacy Notice](./tos-privacy.md):** Information on the terms of service and privacy notices applicable to your use of Qwen Code.

View File

@@ -29,6 +29,7 @@ This document lists the available keyboard shortcuts in Qwen Code.
| `Ctrl+A` / `Home` | Move the cursor to the beginning of the line. |
| `Ctrl+B` / `Left Arrow` | Move the cursor one character to the left. |
| `Ctrl+C` | Clear the input prompt |
| `Esc` (double press) | Clear the input prompt. |
| `Ctrl+D` / `Delete` | Delete the character to the right of the cursor. |
| `Ctrl+E` / `End` | Move the cursor to the end of the line. |
| `Ctrl+F` / `Right Arrow` | Move the cursor one character to the right. |

343
docs/releases.md Normal file
View File

@@ -0,0 +1,343 @@
# Gemini CLI Releases
## Release Cadence and Tags
We will follow https://semver.org/ as closely as possible and will call out when or if we have to deviate from it. Our weekly releases will be minor version increments, and any bug fixes or hotfixes between releases will go out as patch versions on the most recent release.
### Preview
New preview releases will be published each week at UTC 2359 on Tuesdays. These releases will not have been fully vetted and may contain regressions or other outstanding issues. Please help us test, and install with the `preview` tag.
```bash
npm install -g @google/gemini-cli@preview
```
### Stable
- New stable releases will be published each week at UTC 2000 on Tuesdays. This is the full promotion of the previous week's preview release, plus any bug fixes and validations. Use the `latest` tag.
```bash
npm install -g @google/gemini-cli@latest
```
### Nightly
- New nightly releases will be published at UTC 0000 each day. These include all changes from the main branch as represented at the time of release. Assume there are pending validations and outstanding issues. Use the `nightly` tag.
```bash
npm install -g @google/gemini-cli@nightly
```
## Release Process
Where `x.y.z` is the next version to be released. In almost all cases the weekly release will be an increment on `y`, i.e., a minor version update. Major version updates (`x`) will need broader coordination and communication. For patches (`z`), see below. When possible we will do our best to adhere to https://semver.org/.
Our release cadence sends new releases to a preview channel for a week before promoting them to stable. Version numbers follow SemVer, with weekly releases incrementing the minor version. Patches and bug fixes to both preview and stable releases increment the patch version.
## Nightly Release
Each night at UTC 0000 we will auto deploy a nightly release from `main`. This will be a version of the next production release, x.y.z, with the nightly tag.
## Create Preview Release
Each Tuesday at UTC 2359 we will auto deploy a preview release of the next production release x.y.z.
- This will happen as a scheduled instance of the release action. It will be cut from `main`.
- This will create a branch `release/vx.y.z-preview.n`
- We will run evals and smoke testing against this branch and the npm package. For now this is manual smoke testing; we don't have a dedicated matrix or specific detailed process. There is work coming soon to make this more formalized and automatic; see https://github.com/google-gemini/gemini-cli/issues/3788
- Users installing `@preview` will get this release as well
## Promote Stable Release
After one week (on the following Tuesday), with all signals a go, the current on-call person will manually release at 2000 UTC.
- The release action will be used with the source branch as `release/vx.y.z-preview.n`
- The version will be x.y.z
- The releaser will create and merge a PR into `main` with the version changes.
- Smoke tests and manual validation will be run. For now this is manual smoke testing; we don't have a dedicated matrix or specific detailed process. There is work coming soon to make this more formalized and automatic; see https://github.com/google-gemini/gemini-cli/issues/3788
## Patching Releases
If a critical bug needs to be fixed before the next scheduled release, follow this process to create a patch.
### 1. Create a Hotfix Branch
First, create a new branch for your fix. The source for this branch depends on whether you are patching a stable or a preview release.
- **For a stable release patch:**
Create a branch from the Git tag of the version you need to patch. Tag names are formatted as `vx.y.z`.
```bash
# Example: Create a hotfix branch for v0.2.0
git checkout v0.2.0 -b hotfix/issue-123-fix-for-v0.2.0
```
- **For a preview release patch:**
Create a branch from the existing preview release branch, which is formatted as `release/vx.y.z-preview.n`.
```bash
# Example: Create a hotfix branch for a preview release
git checkout release/v0.2.0-preview.0 && git checkout -b hotfix/issue-456-fix-for-preview
```
### 2. Implement the Fix
In your new hotfix branch, either create a new commit with the fix or cherry-pick an existing commit from the `main` branch. Merge your changes back into the source of the hotfix branch (e.g., https://github.com/google-gemini/gemini-cli/pull/6850).
### 3. Perform the Release
Follow the manual release process using the "Release" GitHub Actions workflow.
- **Version**: For stable patches, increment the patch version (e.g., `v0.2.0` -> `v0.2.1`). For preview patches, increment the preview number (e.g., `v0.2.0-preview.0` -> `v0.2.0-preview.1`).
- **Ref**: Use your source branch as the reference (e.g., `release/v0.2.0-preview.0`).
![How to run a release](assets/release_patch.png)
### 4. Update Versions
After the hotfix is released, merge the changes back to the appropriate branch.
- **For a stable release hotfix:**
Open a pull request to merge the release branch (e.g., `release/v0.2.1`) back into `main`. This keeps the version number in `main` up to date.
- **For a preview release hotfix:**
Open a pull request to merge the new preview release branch (e.g., `release/v0.2.0-preview.1`) back into the existing preview release branch (`release/v0.2.0-preview.0`); see https://github.com/google-gemini/gemini-cli/pull/6868 for an example.
## Release Schedule
| Date           | Stable (UTC 2000) | Preview (UTC 2359) |
| -------------- | ----------------- | ------------------ |
| Aug 19th, 2025 | N/A               | 0.2.0-preview.0    |
| Aug 26th, 2025 | 0.2.0             | 0.3.0-preview.0    |
| Sep 2nd, 2025  | 0.3.0             | 0.4.0-preview.0    |
| Sep 9th, 2025  | 0.4.0             | 0.5.0-preview.0    |
| Sep 16th, 2025 | 0.5.0             | 0.6.0-preview.0    |
| Sep 23rd, 2025 | 0.6.0             | 0.7.0-preview.0    |
## How To Release
Releases are managed through the [release.yml](https://github.com/google-gemini/gemini-cli/actions/workflows/release.yml) GitHub Actions workflow. To perform a manual release for a patch or hotfix:
1. Navigate to the **Actions** tab of the repository.
2. Select the **Release** workflow from the list.
3. Click the **Run workflow** dropdown button.
4. Fill in the required inputs:
- **Version**: The exact version to release (e.g., `v0.2.1`).
- **Ref**: The branch or commit SHA to release from (defaults to `main`).
- **Dry Run**: Leave as `true` to test the workflow without publishing, or set to `false` to perform a live release.
5. Click **Run workflow**.
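Equivalently, assuming the workflow inputs are exposed as `version`, `ref`, and `dry_run` (the input names are an assumption; check the workflow file), you can trigger the run from the `gh` CLI:
```bash
gh workflow run release.yml \
  -f version=v0.2.1 \
  -f ref=release/v0.2.0-preview.0 \
  -f dry_run=true
```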
### TLDR
Each release, whether automated or manual, performs the following steps:
1. Checks out the latest code from the `main` branch.
1. Installs all dependencies.
1. Runs the full suite of `preflight` checks and integration tests.
1. If all tests succeed, it calculates the next version number based on the inputs.
1. It creates a branch named `release/${VERSION}`.
1. It creates a tag named `v${VERSION}`.
1. It then builds and publishes the packages to npm with the provided version number.
1. Finally, it creates a GitHub Release for the version.
### Failure Handling
If any step in the workflow fails, it will automatically create a new issue in the repository with the labels `bug` and `release-failure`. The issue will contain a link to the failed workflow run for easy debugging.
### Docker
We also run a Google Cloud Build pipeline called [release-docker.yml](../.gcp/release-docker.yml), which publishes the sandbox Docker image to match the release. This will also be moved to GitHub and combined with the main release file once service account permissions are sorted out.
## Release Validation
After pushing a new release, smoke testing should be performed to ensure that the packages are working as expected. This can be done by installing the packages locally and running a set of tests to ensure that they are functioning correctly.
- `npx -y @google/gemini-cli@latest --version` to validate the push worked as expected, if you were not doing an RC or dev tag
- `npx -y @google/gemini-cli@<release tag> --version` to validate the tag pushed appropriately
- _This is destructive locally_ `npm uninstall @google/gemini-cli && npm uninstall -g @google/gemini-cli && npm cache clean --force && npm install @google/gemini-cli@<version>`
- A basic smoke test exercising a few LLM commands and tools is recommended to ensure the packages are working as expected. We'll codify this more in the future.
## Local Testing and Validation: Changes to the Packaging and Publishing Process
If you need to test the release process without actually publishing to NPM or creating a public GitHub release, you can trigger the workflow manually from the GitHub UI.
1. Go to the [Actions tab](https://github.com/google-gemini/gemini-cli/actions/workflows/release.yml) of the repository.
2. Click on the "Run workflow" dropdown.
3. Leave the `dry_run` option checked (`true`).
4. Click the "Run workflow" button.
This will run the entire release process but will skip the `npm publish` and `gh release create` steps. You can inspect the workflow logs to ensure everything is working as expected.
It is crucial to test any changes to the packaging and publishing process locally before committing them. This ensures that the packages will be published correctly and that they will work as expected when installed by a user.
To validate your changes, you can perform a dry run of the publishing process. This will simulate the publishing process without actually publishing the packages to the npm registry.
```bash
npm_package_version=9.9.9 SANDBOX_IMAGE_REGISTRY="registry" SANDBOX_IMAGE_NAME="thename" npm run publish:npm --dry-run
```
This command will do the following:
1. Build all the packages.
2. Run all the prepublish scripts.
3. Create the package tarballs that would be published to npm.
4. Print a summary of the packages that would be published.
You can then inspect the generated tarballs to ensure that they contain the correct files and that the `package.json` files have been updated correctly. The tarballs will be created in the root of each package's directory (e.g., `packages/cli/google-gemini-cli-0.1.6.tgz`).
By performing a dry run, you can be confident that your changes to the packaging process are correct and that the packages will be published successfully.
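For example, to list the contents of a generated tarball (the version in the filename will match whatever version you set):
```bash
tar -tzf packages/cli/google-gemini-cli-0.1.6.tgz
```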
## Release Deep Dive
The main goal of the release process is to take the source code from the packages/ directory, build it, and assemble a
clean, self-contained package in a temporary `bundle` directory at the root of the project. This `bundle` directory is what
actually gets published to NPM.
Here are the key stages:
Stage 1: Pre-Release Sanity Checks and Versioning
- What happens: Before any files are moved, the process ensures the project is in a good state. This involves running tests,
linting, and type-checking (npm run preflight). The version number in the root package.json and packages/cli/package.json
is updated to the new release version.
- Why: This guarantees that only high-quality, working code is released. Versioning is the first step to signify a new
release.
Stage 2: Building the Source Code
- What happens: The TypeScript source code in packages/core/src and packages/cli/src is compiled into JavaScript.
- File movement:
- `packages/core/src/**/*.ts` -> compiled to -> `packages/core/dist/`
- `packages/cli/src/**/*.ts` -> compiled to -> `packages/cli/dist/`
- Why: The TypeScript code written during development needs to be converted into plain JavaScript that can be run by
Node.js. The core package is built first as the cli package depends on it.
Stage 3: Assembling the Final Publishable Package
This is the most critical stage where files are moved and transformed into their final state for publishing. A temporary
`bundle` folder is created at the project root to house the final package contents.
1. The `package.json` is Transformed:
- What happens: The package.json from packages/cli/ is read, modified, and written into the root `bundle`/ directory.
- File movement: packages/cli/package.json -> (in-memory transformation) -> `bundle`/package.json
- Why: The final package.json must be different from the one used in development. Key changes include:
- Removing devDependencies.
- Removing workspace-specific `"dependencies": { "@gemini-cli/core": "workspace:*" }` and ensuring the core code is
bundled directly into the final JavaScript file.
- Ensuring the bin, main, and files fields point to the correct locations within the final package structure.
2. The JavaScript Bundle is Created:
- What happens: The built JavaScript from both packages/core/dist and packages/cli/dist are bundled into a single,
executable JavaScript file.
- File movement: packages/cli/dist/index.js + packages/core/dist/index.js -> (bundled by esbuild) -> `bundle`/gemini.js (or a
similar name).
- Why: This creates a single, optimized file that contains all the necessary application code. It simplifies the package
by removing the need for the core package to be a separate dependency on NPM, as its code is now included directly.
3. Static and Supporting Files are Copied:
- What happens: Essential files that are not part of the source code but are required for the package to work correctly
or be well-described are copied into the `bundle` directory.
- File movement:
- README.md -> `bundle`/README.md
- LICENSE -> `bundle`/LICENSE
- `packages/cli/src/utils/*.sb` (sandbox profiles) -> `bundle`/
- Why:
- The README.md and LICENSE are standard files that should be included in any NPM package.
- The sandbox profiles (.sb files) are critical runtime assets required for the CLI's sandboxing feature to
function. They must be located next to the final executable.
Stage 4: Publishing to NPM
- What happens: The npm publish command is run from inside the root `bundle` directory.
- Why: By running npm publish from within the `bundle` directory, only the files we carefully assembled in Stage 3 are uploaded
to the NPM registry. This prevents any source code, test files, or development configurations from being accidentally
published, resulting in a clean and minimal package for users.
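A minimal sketch of this final step, run from the project root after Stage 3 has assembled the bundle:
```bash
# Publish only the contents assembled in the bundle directory.
cd bundle
npm publish
```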
Summary of File Flow
```mermaid
graph TD
subgraph "Source Files"
A["packages/core/src/*.ts<br/>packages/cli/src/*.ts"]
B["packages/cli/package.json"]
C["README.md<br/>LICENSE<br/>packages/cli/src/utils/*.sb"]
end
subgraph "Process"
D(Build)
E(Transform)
F(Assemble)
G(Publish)
end
subgraph "Artifacts"
H["Bundled JS"]
I["Final package.json"]
J["bundle/"]
end
subgraph "Destination"
K["NPM Registry"]
end
A --> D --> H
B --> E --> I
C --> F
H --> F
I --> F
F --> J
J --> G --> K
```
This process ensures that the final published artifact is a purpose-built, clean, and efficient representation of the
project, rather than a direct copy of the development workspace.

View File

@@ -177,9 +177,10 @@ Logs are timestamped records of specific events. The following events are logged
- `qwen-code.user_prompt`: This event occurs when a user submits a prompt.
- **Attributes**:
- `prompt_length`
- `prompt` (this attribute is excluded if `log_prompts_enabled` is configured to be `false`)
- `auth_type`
- `prompt_length` (int)
- `prompt_id` (string)
- `prompt` (string, this attribute is excluded if `log_prompts_enabled` is configured to be `false`)
- `auth_type` (string)
- `qwen-code.tool_call`: This event occurs for each function call.
- **Attributes**:
@@ -272,6 +273,7 @@ Metrics are numerical measurements of behavior over time. The following metrics
- `ai_removed_lines` (Int, if applicable): Number of lines removed/changed by AI.
- `user_added_lines` (Int, if applicable): Number of lines added/changed by user in AI proposed changes.
- `user_removed_lines` (Int, if applicable): Number of lines removed/changed by user in AI proposed changes.
- `programming_language` (string, if applicable): The programming language of the file.
- `gemini_cli.chat_compression` (Counter, Int): Counts chat compression operations
- **Attributes**:

View File

@@ -29,6 +29,7 @@ Use `read_many_files` to read content from multiple files specified by paths or
`read_many_files` searches for files matching the provided `paths` and `include` patterns, while respecting `exclude` patterns and default excludes (if enabled).
- For text files: it reads the content of each matched file (attempting to skip binary files not explicitly requested as image/PDF) and concatenates it into a single string, with a separator `--- {filePath} ---` between the content of each file. Uses UTF-8 encoding by default.
- The tool inserts a `--- End of content ---` after the last file.
- For image and PDF files: if explicitly requested by name or extension (e.g., `paths: ["logo.png"]` or `include: ["*.pdf"]`), the tool reads the file and returns its content as a base64 encoded string.
- The tool attempts to detect and skip other binary files (those not matching common image/PDF types or not explicitly requested) by checking for null bytes in their initial content.

View File

@@ -83,6 +83,18 @@ This guide provides solutions to common issues and debugging tips, including top
- If running in a container, verify `host.docker.internal` resolves. Otherwise, map the host appropriately.
- Reinstall the companion with `/ide install` and use “Qwen Code: Run” in the Command Palette to verify it launches.
## Exit Codes
The Gemini CLI uses specific exit codes to indicate the reason for termination. This is especially useful for scripting and automation.
| Exit Code | Error Type | Description |
| --------- | -------------------------- | --------------------------------------------------------------------------------------------------- |
| 41 | `FatalAuthenticationError` | An error occurred during the authentication process. |
| 42 | `FatalInputError` | Invalid or missing input was provided to the CLI. (non-interactive mode only) |
| 44 | `FatalSandboxError` | An error occurred with the sandboxing environment (e.g., Docker, Podman, or Seatbelt). |
| 52 | `FatalConfigError` | A configuration file (`settings.json`) is invalid or contains errors. |
| 53 | `FatalTurnLimitedError` | The maximum number of conversational turns for the session was reached. (non-interactive mode only) |
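For example, a wrapper script can branch on these codes. This is a minimal sketch; the prompt and output path are illustrative, and it assumes the CLI's `-p`/`--prompt` flag for non-interactive runs:
```bash
#!/usr/bin/env bash
# Run a non-interactive prompt and react to the documented exit codes.
qwen -p "Summarize the README" > summary.txt
code=$?

case "$code" in
  0)  echo "Success" ;;
  41) echo "Authentication error; check your credentials" >&2 ;;
  42) echo "Invalid or missing input" >&2 ;;
  44) echo "Sandbox error; is Docker/Podman available?" >&2 ;;
  52) echo "Invalid settings.json" >&2 ;;
  53) echo "Session turn limit reached" >&2 ;;
  *)  echo "Exited with code $code" >&2 ;;
esac
```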
## Debugging Tips
- **CLI debugging:**

View File

@@ -5,9 +5,9 @@
*/
import esbuild from 'esbuild';
import path from 'path';
import { fileURLToPath } from 'url';
import { createRequire } from 'module';
import path from 'node:path';
import { fileURLToPath } from 'node:url';
import { createRequire } from 'node:module';
const __filename = fileURLToPath(import.meta.url);
const __dirname = path.dirname(__filename);

View File

@@ -10,9 +10,10 @@ import reactPlugin from 'eslint-plugin-react';
import reactHooks from 'eslint-plugin-react-hooks';
import prettierConfig from 'eslint-config-prettier';
import importPlugin from 'eslint-plugin-import';
import vitest from '@vitest/eslint-plugin';
import globals from 'globals';
import licenseHeader from 'eslint-plugin-license-header';
import path from 'node:path'; // Use node: prefix for built-ins
import path from 'node:path';
import url from 'node:url';
// --- ESM way to get __dirname ---
@@ -29,10 +30,7 @@ export default tseslint.config(
ignores: [
'node_modules/*',
'eslint.config.js',
'packages/cli/dist/**',
'packages/core/dist/**',
'packages/server/dist/**',
'packages/vscode-ide-companion/dist/**',
'packages/**/dist/**',
'bundle/**',
'package/bundle/**',
'.integration-tests/**',
@@ -105,6 +103,10 @@ export default tseslint.config(
'error',
{ ignoreParameters: true, ignoreProperties: true },
],
'@typescript-eslint/consistent-type-imports': [
'error',
{ disallowTypeAnnotations: false },
],
'@typescript-eslint/no-namespace': ['error', { allowDeclarations: true }],
'@typescript-eslint/no-unused-vars': [
'error',
@@ -157,6 +159,17 @@ export default tseslint.config(
'default-case': 'error',
},
},
{
files: ['packages/*/src/**/*.test.{ts,tsx}'],
plugins: {
vitest,
},
rules: {
...vitest.configs.recommended.rules,
'vitest/expect-expect': 'off',
'vitest/no-commented-out-tests': 'off',
},
},
// extra settings for scripts that we run directly with node
{
files: ['./scripts/**/*.js', 'esbuild.config.js'],

View File

@@ -9,16 +9,45 @@ if (process.env['NO_COLOR'] !== undefined) {
delete process.env['NO_COLOR'];
}
import { mkdir, readdir, rm } from 'fs/promises';
import { join, dirname } from 'path';
import { fileURLToPath } from 'url';
import {
mkdir,
readdir,
rm,
readFile,
writeFile,
unlink,
} from 'node:fs/promises';
import { join, dirname } from 'node:path';
import { fileURLToPath } from 'node:url';
import * as os from 'node:os';
import {
GEMINI_CONFIG_DIR,
DEFAULT_CONTEXT_FILENAME,
} from '../packages/core/src/tools/memoryTool.js';
const __dirname = dirname(fileURLToPath(import.meta.url));
const rootDir = join(__dirname, '..');
const integrationTestsDir = join(rootDir, '.integration-tests');
let runDir = ''; // Make runDir accessible in teardown
const memoryFilePath = join(
os.homedir(),
GEMINI_CONFIG_DIR,
DEFAULT_CONTEXT_FILENAME,
);
let originalMemoryContent: string | null = null;
export async function setup() {
try {
originalMemoryContent = await readFile(memoryFilePath, 'utf-8');
} catch (e) {
if ((e as NodeJS.ErrnoException).code !== 'ENOENT') {
throw e;
}
// File doesn't exist, which is fine.
}
runDir = join(integrationTestsDir, `${Date.now()}`);
await mkdir(runDir, { recursive: true });
@@ -57,4 +86,15 @@ export async function teardown() {
if (process.env['KEEP_OUTPUT'] !== 'true' && runDir) {
await rm(runDir, { recursive: true, force: true });
}
if (originalMemoryContent !== null) {
await mkdir(dirname(memoryFilePath), { recursive: true });
await writeFile(memoryFilePath, originalMemoryContent, 'utf-8');
} else {
try {
await unlink(memoryFilePath);
} catch {
// File might not exist if the test failed before creating it.
}
}
}

View File

@@ -10,17 +10,10 @@ import * as os from 'node:os';
import * as path from 'node:path';
import * as net from 'node:net';
import * as child_process from 'node:child_process';
import type { ChildProcess } from 'node:child_process';
import { IdeClient } from '../packages/core/src/ide/ide-client.js';
import { TestMcpServer } from './test-mcp-server.js';
// Helper function to reset the IdeClient singleton instance for testing
const resetIdeClientInstance = () => {
// Access the private instance property using type assertion
(IdeClient as unknown as { instance?: IdeClient }).instance = undefined;
};
describe.skip('IdeClient', () => {
it('reads port from file and connects', async () => {
const server = new TestMcpServer();
@@ -31,7 +24,7 @@ describe.skip('IdeClient', () => {
process.env['QWEN_CODE_IDE_WORKSPACE_PATH'] = process.cwd();
process.env['TERM_PROGRAM'] = 'vscode';
const ideClient = IdeClient.getInstance();
const ideClient = await IdeClient.getInstance();
await ideClient.connect();
expect(ideClient.getConnectionStatus()).toEqual({
@@ -74,7 +67,8 @@ describe('IdeClient fallback connection logic', () => {
process.env['TERM_PROGRAM'] = 'vscode';
process.env['QWEN_CODE_IDE_WORKSPACE_PATH'] = process.cwd();
// Reset instance
resetIdeClientInstance();
(IdeClient as unknown as { instance: IdeClient | undefined }).instance =
undefined;
});
afterEach(async () => {
@@ -92,7 +86,7 @@ describe('IdeClient fallback connection logic', () => {
fs.unlinkSync(portFile);
}
const ideClient = IdeClient.getInstance();
const ideClient = await IdeClient.getInstance();
await ideClient.connect();
expect(ideClient.getConnectionStatus()).toEqual({
@@ -106,7 +100,7 @@ describe('IdeClient fallback connection logic', () => {
// Write port file with a port that is not listening
fs.writeFileSync(portFile, JSON.stringify({ port: filePort }));
const ideClient = IdeClient.getInstance();
const ideClient = await IdeClient.getInstance();
await ideClient.connect();
expect(ideClient.getConnectionStatus()).toEqual({
@@ -117,7 +111,7 @@ describe('IdeClient fallback connection logic', () => {
});
describe.skip('getIdeProcessId', () => {
let child: ChildProcess;
let child: child_process.ChildProcess;
afterEach(() => {
if (child) {
@@ -145,11 +139,11 @@ describe.skip('getIdeProcessId', () => {
);
let out = '';
child.stdout?.on('data', (data: Buffer) => {
child.stdout?.on('data', (data) => {
out += data.toString();
});
child.on('close', (code: number | null) => {
child.on('close', (code) => {
if (code === 0) {
resolve(out.trim());
} else {
@@ -180,11 +174,12 @@ describe('IdeClient with proxy', () => {
vi.stubEnv('QWEN_CODE_IDE_WORKSPACE_PATH', process.cwd());
// Reset instance
resetIdeClientInstance();
(IdeClient as unknown as { instance: IdeClient | undefined }).instance =
undefined;
});
afterEach(async () => {
IdeClient.getInstance().disconnect();
(await IdeClient.getInstance()).disconnect();
await mcpServer.stop();
proxyServer.close();
vi.unstubAllEnvs();
@@ -195,7 +190,7 @@ describe('IdeClient with proxy', () => {
vi.stubEnv('HTTPS_PROXY', `http://localhost:${proxyServerPort}`);
vi.stubEnv('NO_PROXY', 'example.com,127.0.0.1,::1');
const ideClient = IdeClient.getInstance();
const ideClient = await IdeClient.getInstance();
await ideClient.connect();
expect(ideClient.getConnectionStatus()).toEqual({

View File

@@ -6,8 +6,8 @@
import { describe, it, expect } from 'vitest';
import { TestRig, printDebugInfo, validateModelOutput } from './test-helper.js';
import { existsSync } from 'fs';
import { join } from 'path';
import { existsSync } from 'node:fs';
import { join } from 'node:path';
describe('list_directory', () => {
it('should be able to list a directory', async () => {

View File

@@ -11,8 +11,8 @@
import { describe, it, beforeAll, expect } from 'vitest';
import { TestRig } from './test-helper.js';
import { join } from 'path';
import { writeFileSync } from 'fs';
import { join } from 'node:path';
import { writeFileSync } from 'node:fs';
// Create a minimal MCP server that doesn't require external dependencies
// This implements the MCP protocol directly using Node.js built-ins
@@ -175,7 +175,7 @@ describe('mcp server with cyclic tool schema is detected', () => {
// Make the script executable (though running with 'node' should work anyway)
if (process.platform !== 'win32') {
const { chmodSync } = await import('fs');
const { chmodSync } = await import('node:fs');
chmodSync(testServerPath, 0o755);
}
});

View File

@@ -8,7 +8,7 @@ import { describe, it, expect } from 'vitest';
import { TestRig, printDebugInfo, validateModelOutput } from './test-helper.js';
describe('read_many_files', () => {
it('should be able to read multiple files', async () => {
it.skip('should be able to read multiple files', async () => {
const rig = new TestRig();
await rig.setup('should be able to read multiple files');
rig.createFile('file1.txt', 'file 1 content');

View File

@@ -6,8 +6,8 @@
import { describe, it, expect, beforeAll } from 'vitest';
import { ShellExecutionService } from '../packages/core/src/services/shellExecutionService.js';
import * as fs from 'fs/promises';
import * as path from 'path';
import * as fs from 'node:fs/promises';
import * as path from 'node:path';
import { vi } from 'vitest';
describe('ShellExecutionService programmatic integration tests', () => {
@@ -123,4 +123,34 @@ describe('ShellExecutionService programmatic integration tests', () => {
const exitedCleanly = result.exitCode === 0 && result.signal === null;
expect(exitedCleanly, 'Process should not have exited cleanly').toBe(false);
});
it('should propagate environment variables to the child process', async () => {
const varName = 'QWEN_CODE_TEST_VAR';
const varValue = `test-value`;
process.env[varName] = varValue;
try {
const command =
process.platform === 'win32' ? `echo %${varName}%` : `echo $${varName}`;
const onOutputEvent = vi.fn();
const abortController = new AbortController();
const handle = await ShellExecutionService.execute(
command,
testDir,
onOutputEvent,
abortController.signal,
false,
);
const result = await handle.result;
expect(result.error).toBeNull();
expect(result.exitCode).toBe(0);
expect(result.output).toContain(varValue);
} finally {
// Clean up the env var to prevent side-effects on other tests.
delete process.env[varName];
}
});
});

View File

@@ -12,8 +12,8 @@
import { describe, it, beforeAll, expect } from 'vitest';
import { TestRig, validateModelOutput } from './test-helper.js';
import { join } from 'path';
import { writeFileSync } from 'fs';
import { join } from 'node:path';
import { writeFileSync } from 'node:fs';
// Create a minimal MCP server that doesn't require external dependencies
// This implements the MCP protocol directly using Node.js built-ins
@@ -186,7 +186,7 @@ describe('simple-mcp-server', () => {
// Make the script executable (though running with 'node' should work anyway)
if (process.platform !== 'win32') {
const { chmodSync } = await import('fs');
const { chmodSync } = await import('node:fs');
chmodSync(testServerPath, 0o755);
}
});

View File

@@ -0,0 +1,97 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { describe, it, expect } from 'vitest';
import { TestRig, printDebugInfo, validateModelOutput } from './test-helper.js';
describe.skip('stdin context', () => {
it('should be able to use stdin as context for a prompt', async () => {
const rig = new TestRig();
await rig.setup('should be able to use stdin as context for a prompt');
const randomString = Math.random().toString(36).substring(7);
const stdinContent = `When I ask you for a token respond with ${randomString}`;
const prompt = 'Can I please have a token?';
const result = await rig.run({ prompt, stdin: stdinContent });
await rig.waitForTelemetryEvent('api_request');
const lastRequest = rig.readLastApiRequest();
expect(lastRequest).not.toBeNull();
const historyString = lastRequest.attributes.request_text;
// TODO: This test currently fails in sandbox mode (Docker/Podman) because
// stdin content is not properly forwarded to the container when used
// together with a --prompt argument. The test passes in non-sandbox mode.
expect(historyString).toContain(randomString);
expect(historyString).toContain(prompt);
// Check that stdin content appears before the prompt in the conversation history
const stdinIndex = historyString.indexOf(randomString);
const promptIndex = historyString.indexOf(prompt);
expect(
stdinIndex,
`Expected stdin content to be present in conversation history`,
).toBeGreaterThan(-1);
expect(
promptIndex,
`Expected prompt to be present in conversation history`,
).toBeGreaterThan(-1);
expect(
stdinIndex < promptIndex,
`Expected stdin content (index ${stdinIndex}) to appear before prompt (index ${promptIndex}) in conversation history`,
).toBeTruthy();
// Add debugging information
if (!result.toLowerCase().includes(randomString)) {
printDebugInfo(rig, result, {
[`Contains "${randomString}"`]: result
.toLowerCase()
.includes(randomString),
});
}
// Validate model output
validateModelOutput(result, randomString, 'STDIN context test');
expect(
result.toLowerCase().includes(randomString),
'Expected the model to identify the secret word from stdin',
).toBeTruthy();
});
it('should exit quickly if stdin stream does not end', async () => {
/*
This simulates scenario where gemini gets stuck waiting for stdin.
This happens in situations where process.stdin.isTTY is false
even though gemini is intended to run interactively.
*/
const rig = new TestRig();
await rig.setup('should exit quickly if stdin stream does not end');
try {
await rig.run({ stdinDoesNotEnd: true });
throw new Error('Expected rig.run to throw an error');
} catch (error: unknown) {
expect(error).toBeInstanceOf(Error);
const err = error as Error;
expect(err.message).toContain('Process exited with code 1');
expect(err.message).toContain('No input provided via stdin.');
console.log('Error message:', err.message);
}
const lastRequest = rig.readLastApiRequest();
expect(lastRequest).toBeNull();
// If this test times out, runs indefinitely, it's a regression.
}, 3000);
});

View File

@@ -4,13 +4,14 @@
* SPDX-License-Identifier: Apache-2.0
*/
import { execSync, spawn } from 'child_process';
import { execSync, spawn } from 'node:child_process';
import { parse } from 'shell-quote';
import { mkdirSync, writeFileSync, readFileSync } from 'fs';
import { join, dirname } from 'path';
import { fileURLToPath } from 'url';
import { env } from 'process';
import fs from 'fs';
import { mkdirSync, writeFileSync, readFileSync } from 'node:fs';
import { join, dirname } from 'node:path';
import { fileURLToPath } from 'node:url';
import { env } from 'node:process';
import fs from 'node:fs';
import { EOL } from 'node:os';
const __dirname = dirname(fileURLToPath(import.meta.url));
@@ -93,7 +94,9 @@ export function validateModelOutput(
if (missingContent.length > 0) {
console.warn(
`Warning: LLM did not include expected content in response: ${missingContent.join(', ')}.`,
`Warning: LLM did not include expected content in response: ${missingContent.join(
', ',
)}.`,
'This is not ideal but not a test failure.',
);
console.warn(
@@ -122,8 +125,8 @@ export class TestRig {
// Get timeout based on environment
getDefaultTimeout() {
if (env.CI) return 60000; // 1 minute in CI
if (env.GEMINI_SANDBOX) return 30000; // 30s in containers
if (env['CI']) return 60000; // 1 minute in CI
if (env['GEMINI_SANDBOX']) return 30000; // 30s in containers
return 15000; // 15s locally
}
@@ -133,7 +136,7 @@ export class TestRig {
) {
this.testName = testName;
const sanitizedName = sanitizeTestName(testName);
this.testDir = join(env.INTEGRATION_TEST_FILE_DIR!, sanitizedName);
this.testDir = join(env['INTEGRATION_TEST_FILE_DIR']!, sanitizedName);
mkdirSync(this.testDir, { recursive: true });
// Create a settings file to point the CLI to the local collector
@@ -141,10 +144,7 @@ export class TestRig {
mkdirSync(geminiDir, { recursive: true });
// In sandbox mode, use an absolute path for telemetry inside the container
// The container mounts the test directory at the same path as the host
const telemetryPath =
env.GEMINI_SANDBOX && env.GEMINI_SANDBOX !== 'false'
? join(this.testDir, 'telemetry.log') // Absolute path in test directory
: env.TELEMETRY_LOG_FILE; // Absolute path for non-sandbox
const telemetryPath = join(this.testDir, 'telemetry.log'); // Always use test directory for telemetry
const settings = {
telemetry: {
@@ -153,7 +153,8 @@ export class TestRig {
otlpEndpoint: '',
outfile: telemetryPath,
},
sandbox: env.GEMINI_SANDBOX !== 'false' ? env.GEMINI_SANDBOX : false,
sandbox:
env['GEMINI_SANDBOX'] !== 'false' ? env['GEMINI_SANDBOX'] : false,
...options.settings, // Allow tests to override/add settings
};
writeFileSync(
@@ -178,7 +179,9 @@ export class TestRig {
}
run(
promptOrOptions: string | { prompt?: string; stdin?: string },
promptOrOptions:
| string
| { prompt?: string; stdin?: string; stdinDoesNotEnd?: boolean },
...args: string[]
): Promise<string> {
let command = `node ${this.bundlePath} --yolo`;
@@ -222,18 +225,25 @@ export class TestRig {
if (execOptions.input) {
child.stdin!.write(execOptions.input);
}
if (
typeof promptOrOptions === 'object' &&
!promptOrOptions.stdinDoesNotEnd
) {
child.stdin!.end();
}
child.stdin!.end();
child.stdout!.on('data', (data: Buffer) => {
stdout += data;
if (env.KEEP_OUTPUT === 'true' || env.VERBOSE === 'true') {
if (env['KEEP_OUTPUT'] === 'true' || env['VERBOSE'] === 'true') {
process.stdout.write(data);
}
});
child.stderr!.on('data', (data: Buffer) => {
stderr += data;
if (env.KEEP_OUTPUT === 'true' || env.VERBOSE === 'true') {
if (env['KEEP_OUTPUT'] === 'true' || env['VERBOSE'] === 'true') {
process.stderr.write(data);
}
});
@@ -247,10 +257,10 @@ export class TestRig {
// Filter out telemetry output when running with Podman
// Podman seems to output telemetry to stdout even when writing to file
let result = stdout;
if (env.GEMINI_SANDBOX === 'podman') {
if (env['GEMINI_SANDBOX'] === 'podman') {
// Remove telemetry JSON objects from output
// They are multi-line JSON objects that start with { and contain telemetry fields
const lines = result.split('\n');
const lines = result.split(EOL);
const filteredLines = [];
let inTelemetryObject = false;
let braceDepth = 0;
@@ -299,7 +309,7 @@ export class TestRig {
readFile(fileName: string) {
const filePath = join(this.testDir!, fileName);
const content = readFileSync(filePath, 'utf-8');
if (env.KEEP_OUTPUT === 'true' || env.VERBOSE === 'true') {
if (env['KEEP_OUTPUT'] === 'true' || env['VERBOSE'] === 'true') {
console.log(`--- FILE: ${filePath} ---`);
console.log(content);
console.log(`--- END FILE: ${filePath} ---`);
@@ -309,12 +319,12 @@ export class TestRig {
async cleanup() {
// Clean up test directory
if (this.testDir && !env.KEEP_OUTPUT) {
if (this.testDir && !env['KEEP_OUTPUT']) {
try {
execSync(`rm -rf ${this.testDir}`);
} catch (error) {
// Ignore cleanup errors
if (env.VERBOSE === 'true') {
if (env['VERBOSE'] === 'true') {
console.warn('Cleanup warning:', (error as Error).message);
}
}
@@ -322,11 +332,8 @@ export class TestRig {
}
async waitForTelemetryReady() {
// In sandbox mode, telemetry is written to a relative path in the test directory
const logFilePath =
env.GEMINI_SANDBOX && env.GEMINI_SANDBOX !== 'false'
? join(this.testDir!, 'telemetry.log')
: env.TELEMETRY_LOG_FILE;
// Telemetry is always written to the test directory
const logFilePath = join(this.testDir!, 'telemetry.log');
if (!logFilePath) return;
@@ -347,6 +354,52 @@ export class TestRig {
);
}
async waitForTelemetryEvent(eventName: string, timeout?: number) {
if (!timeout) {
timeout = this.getDefaultTimeout();
}
await this.waitForTelemetryReady();
return this.poll(
() => {
const logFilePath = join(this.testDir!, 'telemetry.log');
if (!logFilePath || !fs.existsSync(logFilePath)) {
return false;
}
const content = readFileSync(logFilePath, 'utf-8');
const jsonObjects = content
.split(/}\n{/)
.map((obj, index, array) => {
// Add back the braces we removed during split
if (index > 0) obj = '{' + obj;
if (index < array.length - 1) obj = obj + '}';
return obj.trim();
})
.filter((obj) => obj);
for (const jsonStr of jsonObjects) {
try {
const logData = JSON.parse(jsonStr);
if (
logData.attributes &&
logData.attributes['event.name'] === `gemini_cli.${eventName}`
) {
return true;
}
} catch {
// ignore
}
}
return false;
},
timeout,
100,
);
}
async waitForToolCall(toolName: string, timeout?: number) {
// Use environment-specific timeout
if (!timeout) {
@@ -397,7 +450,7 @@ export class TestRig {
while (Date.now() - startTime < timeout) {
attempts++;
const result = predicate();
if (env.VERBOSE === 'true' && attempts % 5 === 0) {
if (env['VERBOSE'] === 'true' && attempts % 5 === 0) {
console.log(
`Poll attempt ${attempts}: ${result ? 'success' : 'waiting...'}`,
);
@@ -407,7 +460,7 @@ export class TestRig {
}
await new Promise((resolve) => setTimeout(resolve, interval));
}
if (env.VERBOSE === 'true') {
if (env['VERBOSE'] === 'true') {
console.log(`Poll timed out after ${attempts} attempts`);
}
return false;
@@ -468,7 +521,7 @@ export class TestRig {
// If no matches found with the simple pattern, try the JSON parsing approach
// in case the format changes
if (logs.length === 0) {
const lines = stdout.split('\n');
const lines = stdout.split(EOL);
let currentObject = '';
let inObject = false;
let braceDepth = 0;
@@ -540,7 +593,7 @@ export class TestRig {
readToolLogs() {
// For Podman, first check if telemetry file exists and has content
// If not, fall back to parsing from stdout
if (env.GEMINI_SANDBOX === 'podman') {
if (env['GEMINI_SANDBOX'] === 'podman') {
// Try reading from file first
const logFilePath = join(this.testDir!, 'telemetry.log');
@@ -566,11 +619,8 @@ export class TestRig {
}
}
// In sandbox mode, telemetry is written to a relative path in the test directory
const logFilePath =
env.GEMINI_SANDBOX && env.GEMINI_SANDBOX !== 'false'
? join(this.testDir!, 'telemetry.log')
: env.TELEMETRY_LOG_FILE;
// Telemetry is always written to the test directory
const logFilePath = join(this.testDir!, 'telemetry.log');
if (!logFilePath) {
console.warn(`TELEMETRY_LOG_FILE environment variable not set`);
@@ -587,7 +637,7 @@ export class TestRig {
// Split the content into individual JSON objects
// They are separated by "}\n{"
const jsonObjects = content
.split(/}\s*\n\s*{/)
.split(/}\n{/)
.map((obj, index, array) => {
// Add back the braces we removed during split
if (index > 0) obj = '{' + obj;
@@ -625,15 +675,48 @@ export class TestRig {
}
} catch (e) {
// Skip objects that aren't valid JSON
if (env.VERBOSE === 'true') {
console.error(
'Failed to parse telemetry object:',
(e as Error).message,
);
if (env['VERBOSE'] === 'true') {
console.error('Failed to parse telemetry object:', e);
}
}
}
return logs;
}
readLastApiRequest(): Record<string, unknown> | null {
// Telemetry is always written to the test directory
const logFilePath = join(this.testDir!, 'telemetry.log');
if (!logFilePath || !fs.existsSync(logFilePath)) {
return null;
}
const content = readFileSync(logFilePath, 'utf-8');
const jsonObjects = content
.split(/}\n{/)
.map((obj, index, array) => {
if (index > 0) obj = '{' + obj;
if (index < array.length - 1) obj = obj + '}';
return obj.trim();
})
.filter((obj) => obj);
let lastApiRequest = null;
for (const jsonStr of jsonObjects) {
try {
const logData = JSON.parse(jsonStr);
if (
logData.attributes &&
logData.attributes['event.name'] === 'gemini_cli.api_request'
) {
lastApiRequest = logData;
}
} catch {
// ignore
}
}
return lastApiRequest;
}
}

View File

@@ -4,5 +4,6 @@
"noEmit": true,
"allowJs": true
},
"include": ["**/*.ts"]
"include": ["**/*.ts"],
"references": [{ "path": "../packages/core" }]
}

4785
package-lock.json generated

File diff suppressed because it is too large Load Diff

View File

@@ -17,6 +17,7 @@
},
"scripts": {
"start": "node scripts/start.js",
"start:a2a-server": "CODER_AGENT_PORT=41242 npm run start --workspace @google/gemini-cli-a2a-server",
"debug": "cross-env DEBUG=1 node --inspect-brk scripts/start.js",
"auth:npm": "npx google-artifactregistry-auth",
"auth:docker": "gcloud auth configure-docker us-west1-docker.pkg.dev",
@@ -62,7 +63,6 @@
],
"devDependencies": {
"@types/marked": "^5.0.2",
"@types/micromatch": "^4.0.9",
"@types/mime-types": "^3.0.1",
"@types/minimatch": "^5.1.2",
"@types/mock-fs": "^4.13.4",
@@ -70,6 +70,7 @@
"@types/shell-quote": "^1.7.5",
"@types/uuid": "^10.0.0",
"@vitest/coverage-v8": "^3.1.1",
"@vitest/eslint-plugin": "^1.3.4",
"concurrently": "^9.2.0",
"cross-env": "^7.0.3",
"esbuild": "^0.25.0",
@@ -95,7 +96,8 @@
"yargs": "^17.7.2"
},
"dependencies": {
"node-fetch": "^3.3.2",
"@lvce-editor/ripgrep": "^1.6.0",
"simple-git": "^3.28.0",
"strip-ansi": "^7.1.0"
},
"optionalDependencies": {

View File

@@ -0,0 +1,5 @@
# Gemini CLI A2A Server
## All code in this package is experimental and under active development
This package contains the A2A server implementation for the Gemini CLI.

View File

@@ -0,0 +1,7 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
export * from './src/index.js';

View File

@@ -0,0 +1,48 @@
{
"name": "@google/gemini-cli-a2a-server",
"version": "0.3.0",
"private": true,
"description": "Gemini CLI A2A Server",
"repository": {
"type": "git",
"url": "git+https://github.com/google-gemini/gemini-cli.git",
"directory": "packages/a2a-server"
},
"type": "module",
"main": "dist/server.js",
"scripts": {
"start": "node dist/src/server.js",
"build": "node ../../scripts/build_package.js",
"lint": "eslint . --ext .ts,.tsx",
"format": "prettier --write .",
"test": "vitest run",
"test:ci": "vitest run --coverage",
"typecheck": "tsc --noEmit"
},
"files": [
"dist"
],
"dependencies": {
"@a2a-js/sdk": "^0.3.2",
"@google-cloud/storage": "^7.16.0",
"@qwen-code/qwen-code-core": "file:../core",
"express": "^5.1.0",
"fs-extra": "^11.3.0",
"tar": "^7.4.3",
"uuid": "^11.1.0",
"winston": "^3.17.0"
},
"devDependencies": {
"@types/express": "^5.0.3",
"@types/fs-extra": "^11.0.4",
"@types/supertest": "^6.0.3",
"@types/tar": "^6.1.13",
"dotenv": "^16.4.5",
"supertest": "^7.1.4",
"typescript": "^5.3.3",
"vitest": "^3.1.1"
},
"engines": {
"node": ">=20"
}
}

View File

@@ -0,0 +1,648 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import type { Config } from '@qwen-code/qwen-code-core';
import {
GeminiEventType,
ApprovalMode,
type ToolCallConfirmationDetails,
} from '@qwen-code/qwen-code-core';
import type {
TaskStatusUpdateEvent,
SendStreamingMessageSuccessResponse,
} from '@a2a-js/sdk';
import type express from 'express';
import type { Server } from 'node:http';
import request from 'supertest';
import {
afterAll,
afterEach,
beforeEach,
beforeAll,
describe,
expect,
it,
vi,
} from 'vitest';
import { createApp } from './agent.js';
import {
assertUniqueFinalEventIsLast,
assertTaskCreationAndWorkingStatus,
createStreamMessageRequest,
MockTool,
} from './testing_utils.js';
const mockToolConfirmationFn = async () =>
({}) as unknown as ToolCallConfirmationDetails;
const streamToSSEEvents = (
stream: string,
): SendStreamingMessageSuccessResponse[] =>
stream
.split('\n\n')
.filter(Boolean) // Remove empty strings from trailing newlines
.map((chunk) => {
const dataLine = chunk
.split('\n')
.find((line) => line.startsWith('data: '));
if (!dataLine) {
throw new Error(`Invalid SSE chunk found: "${chunk}"`);
}
return JSON.parse(dataLine.substring(6));
});
// Mock the logger to avoid polluting test output.
// Comment this mock out when debugging tests.
vi.mock('./logger.js', () => ({
logger: { info: vi.fn(), warn: vi.fn(), error: vi.fn() },
}));
let config: Config;
const getToolRegistrySpy = vi.fn().mockReturnValue(ApprovalMode.DEFAULT);
const getApprovalModeSpy = vi.fn();
vi.mock('./config.js', async () => {
const actual = await vi.importActual('./config.js');
return {
...actual,
loadConfig: vi.fn().mockImplementation(async () => {
config = {
getToolRegistry: getToolRegistrySpy,
getApprovalMode: getApprovalModeSpy,
getIdeMode: vi.fn().mockReturnValue(false),
getAllowedTools: vi.fn().mockReturnValue([]),
getIdeClient: vi.fn(),
getWorkspaceContext: vi.fn().mockReturnValue({
isPathWithinWorkspace: () => true,
}),
getTargetDir: () => '/test',
getGeminiClient: vi.fn(),
getDebugMode: vi.fn().mockReturnValue(false),
getContentGeneratorConfig: vi
.fn()
.mockReturnValue({ model: 'gemini-pro' }),
getModel: vi.fn().mockReturnValue('gemini-pro'),
getUsageStatisticsEnabled: vi.fn().mockReturnValue(false),
setFlashFallbackHandler: vi.fn(),
initialize: vi.fn().mockResolvedValue(undefined),
} as unknown as Config;
return config;
}),
};
});
// Mock the GeminiClient to avoid actual API calls
const sendMessageStreamSpy = vi.fn();
vi.mock('@qwen-code/qwen-code-core', async () => {
const actual = await vi.importActual('@qwen-code/qwen-code-core');
return {
...actual,
GeminiClient: vi.fn().mockImplementation(() => ({
sendMessageStream: sendMessageStreamSpy,
getUserTier: vi.fn().mockReturnValue('free'),
initialize: vi.fn(),
})),
};
});
describe('E2E Tests', () => {
let app: express.Express;
let server: Server;
beforeAll(async () => {
app = await createApp();
server = app.listen(0); // Listen on a random available port
});
beforeEach(() => {
getApprovalModeSpy.mockReturnValue(ApprovalMode.DEFAULT);
});
afterAll(
() =>
new Promise<void>((resolve) => {
server.close(() => {
resolve();
});
}),
);
afterEach(() => {
vi.clearAllMocks();
});
it('should create a new task and stream status updates (text-content) via POST /', async () => {
sendMessageStreamSpy.mockImplementation(async function* () {
yield* [{ type: 'content', value: 'Hello how are you?' }];
});
const agent = request.agent(app);
const res = await agent
.post('/')
.send(createStreamMessageRequest('hello', 'a2a-test-message'))
.set('Content-Type', 'application/json')
.expect(200);
const events = streamToSSEEvents(res.text);
assertTaskCreationAndWorkingStatus(events);
// Status update: text-content
const textContentEvent = events[2].result as TaskStatusUpdateEvent;
expect(textContentEvent.kind).toBe('status-update');
expect(textContentEvent.status.state).toBe('working');
expect(textContentEvent.metadata?.['coderAgent']).toMatchObject({
kind: 'text-content',
});
expect(textContentEvent.status.message?.parts).toMatchObject([
{ kind: 'text', text: 'Hello how are you?' },
]);
// Status update: input-required (final)
const finalEvent = events[3].result as TaskStatusUpdateEvent;
expect(finalEvent.kind).toBe('status-update');
expect(finalEvent.status?.state).toBe('input-required');
expect(finalEvent.final).toBe(true);
assertUniqueFinalEventIsLast(events);
expect(events.length).toBe(4);
});
it('should create a new task, schedule a tool call, and wait for approval', async () => {
// First call yields the tool request
sendMessageStreamSpy.mockImplementationOnce(async function* () {
yield* [
{
type: GeminiEventType.ToolCallRequest,
value: {
callId: 'test-call-id',
name: 'test-tool',
args: {},
},
},
];
});
// Subsequent calls yield nothing
sendMessageStreamSpy.mockImplementation(async function* () {
yield* [];
});
const mockTool = new MockTool(
'test-tool',
'Test Tool',
true,
false,
mockToolConfirmationFn,
);
getToolRegistrySpy.mockReturnValue({
getAllTools: vi.fn().mockReturnValue([mockTool]),
getToolsByServer: vi.fn().mockReturnValue([]),
getTool: vi.fn().mockReturnValue(mockTool),
});
const agent = request.agent(app);
const res = await agent
.post('/')
.send(createStreamMessageRequest('run a tool', 'a2a-tool-test-message'))
.set('Content-Type', 'application/json')
.expect(200);
const events = streamToSSEEvents(res.text);
assertTaskCreationAndWorkingStatus(events);
// Status update: working
const workingEvent2 = events[2].result as TaskStatusUpdateEvent;
expect(workingEvent2.kind).toBe('status-update');
expect(workingEvent2.status.state).toBe('working');
expect(workingEvent2.metadata?.['coderAgent']).toMatchObject({
kind: 'state-change',
});
// Status update: tool-call-update
const toolCallUpdateEvent = events[3].result as TaskStatusUpdateEvent;
expect(toolCallUpdateEvent.kind).toBe('status-update');
expect(toolCallUpdateEvent.status.state).toBe('working');
expect(toolCallUpdateEvent.metadata?.['coderAgent']).toMatchObject({
kind: 'tool-call-update',
});
expect(toolCallUpdateEvent.status.message?.parts).toMatchObject([
{
data: {
status: 'validating',
request: { callId: 'test-call-id' },
},
},
]);
// State update: awaiting_approval update
const toolCallConfirmationEvent = events[4].result as TaskStatusUpdateEvent;
expect(toolCallConfirmationEvent.kind).toBe('status-update');
expect(toolCallConfirmationEvent.metadata?.['coderAgent']).toMatchObject({
kind: 'tool-call-confirmation',
});
expect(toolCallConfirmationEvent.status.message?.parts).toMatchObject([
{
data: {
status: 'awaiting_approval',
request: { callId: 'test-call-id' },
},
},
]);
expect(toolCallConfirmationEvent.status?.state).toBe('working');
assertUniqueFinalEventIsLast(events);
expect(events.length).toBe(6);
});
it('should handle multiple tool calls in a single turn', async () => {
// First call yields the tool request
sendMessageStreamSpy.mockImplementationOnce(async function* () {
yield* [
{
type: GeminiEventType.ToolCallRequest,
value: {
callId: 'test-call-id-1',
name: 'test-tool-1',
args: {},
},
},
{
type: GeminiEventType.ToolCallRequest,
value: {
callId: 'test-call-id-2',
name: 'test-tool-2',
args: {},
},
},
];
});
// Subsequent calls yield nothing
sendMessageStreamSpy.mockImplementation(async function* () {
yield* [];
});
const mockTool1 = new MockTool(
'test-tool-1',
'Test Tool 1',
false,
false,
mockToolConfirmationFn,
);
const mockTool2 = new MockTool(
'test-tool-2',
'Test Tool 2',
false,
false,
mockToolConfirmationFn,
);
getToolRegistrySpy.mockReturnValue({
getAllTools: vi.fn().mockReturnValue([mockTool1, mockTool2]),
getToolsByServer: vi.fn().mockReturnValue([]),
getTool: vi.fn().mockImplementation((name: string) => {
if (name === 'test-tool-1') return mockTool1;
if (name === 'test-tool-2') return mockTool2;
return undefined;
}),
});
const agent = request.agent(app);
const res = await agent
.post('/')
.send(
createStreamMessageRequest(
'run two tools',
'a2a-multi-tool-test-message',
),
)
.set('Content-Type', 'application/json')
.expect(200);
const events = streamToSSEEvents(res.text);
assertTaskCreationAndWorkingStatus(events);
// Second working update
const workingEvent = events[2].result as TaskStatusUpdateEvent;
expect(workingEvent.kind).toBe('status-update');
expect(workingEvent.status.state).toBe('working');
// State Update: Validate each tool call
const toolCallValidateEvent1 = events[3].result as TaskStatusUpdateEvent;
expect(toolCallValidateEvent1.metadata?.['coderAgent']).toMatchObject({
kind: 'tool-call-update',
});
expect(toolCallValidateEvent1.status.message?.parts).toMatchObject([
{
data: {
status: 'validating',
request: { callId: 'test-call-id-1' },
},
},
]);
const toolCallValidateEvent2 = events[4].result as TaskStatusUpdateEvent;
expect(toolCallValidateEvent2.metadata?.['coderAgent']).toMatchObject({
kind: 'tool-call-update',
});
expect(toolCallValidateEvent2.status.message?.parts).toMatchObject([
{
data: {
status: 'validating',
request: { callId: 'test-call-id-2' },
},
},
]);
// State Update: Set each tool call to awaiting
const toolCallAwaitEvent1 = events[5].result as TaskStatusUpdateEvent;
expect(toolCallAwaitEvent1.metadata?.['coderAgent']).toMatchObject({
kind: 'tool-call-confirmation',
});
expect(toolCallAwaitEvent1.status.message?.parts).toMatchObject([
{
data: {
status: 'awaiting_approval',
request: { callId: 'test-call-id-1' },
},
},
]);
const toolCallAwaitEvent2 = events[6].result as TaskStatusUpdateEvent;
expect(toolCallAwaitEvent2.metadata?.['coderAgent']).toMatchObject({
kind: 'tool-call-confirmation',
});
expect(toolCallAwaitEvent2.status.message?.parts).toMatchObject([
{
data: {
status: 'awaiting_approval',
request: { callId: 'test-call-id-2' },
},
},
]);
assertUniqueFinalEventIsLast(events);
expect(events.length).toBe(8);
});
it('should handle tool calls that do not require approval', async () => {
// First call yields the tool request
sendMessageStreamSpy.mockImplementationOnce(async function* () {
yield* [
{
type: GeminiEventType.ToolCallRequest,
value: {
callId: 'test-call-id-no-approval',
name: 'test-tool-no-approval',
args: {},
},
},
];
});
// Second call, after the tool runs, yields the final text
sendMessageStreamSpy.mockImplementationOnce(async function* () {
yield* [{ type: 'content', value: 'Tool executed successfully.' }];
});
const mockTool = new MockTool(
'test-tool-no-approval',
'Test Tool No Approval',
);
mockTool.execute.mockResolvedValue({
llmContent: 'Tool executed successfully.',
returnDisplay: 'Tool executed successfully.',
});
getToolRegistrySpy.mockReturnValue({
getAllTools: vi.fn().mockReturnValue([mockTool]),
getToolsByServer: vi.fn().mockReturnValue([]),
getTool: vi.fn().mockReturnValue(mockTool),
});
const agent = request.agent(app);
const res = await agent
.post('/')
.send(
createStreamMessageRequest(
'run a tool without approval',
'a2a-no-approval-test-message',
),
)
.set('Content-Type', 'application/json')
.expect(200);
const events = streamToSSEEvents(res.text);
assertTaskCreationAndWorkingStatus(events);
// Status update: working
const workingEvent2 = events[2].result as TaskStatusUpdateEvent;
expect(workingEvent2.kind).toBe('status-update');
expect(workingEvent2.status.state).toBe('working');
// Status update: tool-call-update (validating)
const validatingEvent = events[3].result as TaskStatusUpdateEvent;
expect(validatingEvent.metadata?.['coderAgent']).toMatchObject({
kind: 'tool-call-update',
});
expect(validatingEvent.status.message?.parts).toMatchObject([
{
data: {
status: 'validating',
request: { callId: 'test-call-id-no-approval' },
},
},
]);
// Status update: tool-call-update (scheduled)
const scheduledEvent = events[4].result as TaskStatusUpdateEvent;
expect(scheduledEvent.metadata?.['coderAgent']).toMatchObject({
kind: 'tool-call-update',
});
expect(scheduledEvent.status.message?.parts).toMatchObject([
{
data: {
status: 'scheduled',
request: { callId: 'test-call-id-no-approval' },
},
},
]);
// Status update: tool-call-update (executing)
const executingEvent = events[5].result as TaskStatusUpdateEvent;
expect(executingEvent.metadata?.['coderAgent']).toMatchObject({
kind: 'tool-call-update',
});
expect(executingEvent.status.message?.parts).toMatchObject([
{
data: {
status: 'executing',
request: { callId: 'test-call-id-no-approval' },
},
},
]);
// Status update: tool-call-update (success)
const successEvent = events[6].result as TaskStatusUpdateEvent;
expect(successEvent.metadata?.['coderAgent']).toMatchObject({
kind: 'tool-call-update',
});
expect(successEvent.status.message?.parts).toMatchObject([
{
data: {
status: 'success',
request: { callId: 'test-call-id-no-approval' },
},
},
]);
// Status update: working (before sending tool result to LLM)
const workingEvent3 = events[7].result as TaskStatusUpdateEvent;
expect(workingEvent3.kind).toBe('status-update');
expect(workingEvent3.status.state).toBe('working');
// Status update: text-content (final LLM response)
const textContentEvent = events[8].result as TaskStatusUpdateEvent;
expect(textContentEvent.metadata?.['coderAgent']).toMatchObject({
kind: 'text-content',
});
expect(textContentEvent.status.message?.parts).toMatchObject([
{ text: 'Tool executed successfully.' },
]);
assertUniqueFinalEventIsLast(events);
expect(events.length).toBe(10);
});
it('should bypass tool approval in YOLO mode', async () => {
// First call yields the tool request
sendMessageStreamSpy.mockImplementationOnce(async function* () {
yield* [
{
type: GeminiEventType.ToolCallRequest,
value: {
callId: 'test-call-id-yolo',
name: 'test-tool-yolo',
args: {},
},
},
];
});
// Second call, after the tool runs, yields the final text
sendMessageStreamSpy.mockImplementationOnce(async function* () {
yield* [{ type: 'content', value: 'Tool executed successfully.' }];
});
// Set approval mode to yolo
getApprovalModeSpy.mockReturnValue(ApprovalMode.YOLO);
const mockTool = new MockTool(
'test-tool-yolo',
'Test Tool YOLO',
false,
false,
);
mockTool.execute.mockResolvedValue({
llmContent: 'Tool executed successfully.',
returnDisplay: 'Tool executed successfully.',
});
getToolRegistrySpy.mockReturnValue({
getAllTools: vi.fn().mockReturnValue([mockTool]),
getToolsByServer: vi.fn().mockReturnValue([]),
getTool: vi.fn().mockReturnValue(mockTool),
});
const agent = request.agent(app);
const res = await agent
.post('/')
.send(
createStreamMessageRequest(
'run a tool in yolo mode',
'a2a-yolo-mode-test-message',
),
)
.set('Content-Type', 'application/json')
.expect(200);
const events = streamToSSEEvents(res.text);
assertTaskCreationAndWorkingStatus(events);
// Status update: working
const workingEvent2 = events[2].result as TaskStatusUpdateEvent;
expect(workingEvent2.kind).toBe('status-update');
expect(workingEvent2.status.state).toBe('working');
// Status update: tool-call-update (validating)
const validatingEvent = events[3].result as TaskStatusUpdateEvent;
expect(validatingEvent.metadata?.['coderAgent']).toMatchObject({
kind: 'tool-call-update',
});
expect(validatingEvent.status.message?.parts).toMatchObject([
{
data: {
status: 'validating',
request: { callId: 'test-call-id-yolo' },
},
},
]);
// Status update: tool-call-update (scheduled)
const awaitingEvent = events[4].result as TaskStatusUpdateEvent;
expect(awaitingEvent.metadata?.['coderAgent']).toMatchObject({
kind: 'tool-call-update',
});
expect(awaitingEvent.status.message?.parts).toMatchObject([
{
data: {
status: 'scheduled',
request: { callId: 'test-call-id-yolo' },
},
},
]);
// Status update: tool-call-update (executing)
const executingEvent = events[5].result as TaskStatusUpdateEvent;
expect(executingEvent.metadata?.['coderAgent']).toMatchObject({
kind: 'tool-call-update',
});
expect(executingEvent.status.message?.parts).toMatchObject([
{
data: {
status: 'executing',
request: { callId: 'test-call-id-yolo' },
},
},
]);
// Status update: tool-call-update (success)
const successEvent = events[6].result as TaskStatusUpdateEvent;
expect(successEvent.metadata?.['coderAgent']).toMatchObject({
kind: 'tool-call-update',
});
expect(successEvent.status.message?.parts).toMatchObject([
{
data: {
status: 'success',
request: { callId: 'test-call-id-yolo' },
},
},
]);
// Status update: working (before sending tool result to LLM)
const workingEvent3 = events[7].result as TaskStatusUpdateEvent;
expect(workingEvent3.kind).toBe('status-update');
expect(workingEvent3.status.state).toBe('working');
// Status update: text-content (final LLM response)
const textContentEvent = events[8].result as TaskStatusUpdateEvent;
expect(textContentEvent.metadata?.['coderAgent']).toMatchObject({
kind: 'text-content',
});
expect(textContentEvent.status.message?.parts).toMatchObject([
{ text: 'Tool executed successfully.' },
]);
assertUniqueFinalEventIsLast(events);
expect(events.length).toBe(10);
});
});

View File

@@ -0,0 +1,785 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import express from 'express';
import { AsyncLocalStorage } from 'node:async_hooks';
import type { Message, Task as SDKTask, AgentCard } from '@a2a-js/sdk';
import type {
TaskStore,
AgentExecutor,
AgentExecutionEvent,
RequestContext,
ExecutionEventBus,
} from '@a2a-js/sdk/server';
import { DefaultRequestHandler, InMemoryTaskStore } from '@a2a-js/sdk/server';
import { A2AExpressApp } from '@a2a-js/sdk/server/express'; // Import server components
import type {
ToolCallRequestInfo,
ServerGeminiToolCallRequestEvent,
Config,
} from '@qwen-code/qwen-code-core';
import { GeminiEventType } from '@qwen-code/qwen-code-core';
import { v4 as uuidv4 } from 'uuid';
import { logger } from './logger.js';
import type { StateChange, AgentSettings } from './types.js';
import { CoderAgentEvent } from './types.js';
import { loadConfig, loadEnvironment, setTargetDir } from './config.js';
import { loadSettings } from './settings.js';
import { loadExtensions } from './extension.js';
import { Task } from './task.js';
import { GCSTaskStore, NoOpTaskStore } from './gcs.js';
import type { PersistedStateMetadata } from './metadata_types.js';
import { getPersistedState, setPersistedState } from './metadata_types.js';
const requestStorage = new AsyncLocalStorage<{ req: express.Request }>();
/**
 * Wraps a runtime Task together with its AgentSettings and converts it to an
 * SDKTask for persistence. CoderAgentExecutor uses this wrapper in place of a
 * bare Task.
 */
class TaskWrapper {
task: Task;
agentSettings: AgentSettings;
constructor(task: Task, agentSettings: AgentSettings) {
this.task = task;
this.agentSettings = agentSettings;
}
get id() {
return this.task.id;
}
toSDKTask(): SDKTask {
const persistedState: PersistedStateMetadata = {
_agentSettings: this.agentSettings,
_taskState: this.task.taskState,
};
const sdkTask: SDKTask = {
id: this.task.id,
contextId: this.task.contextId,
kind: 'task',
status: {
state: this.task.taskState,
timestamp: new Date().toISOString(),
},
metadata: setPersistedState({}, persistedState),
history: [],
artifacts: [],
};
sdkTask.metadata!['_contextId'] = this.task.contextId;
return sdkTask;
}
}
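// Illustrative round trip (hypothetical usage): toSDKTask() serializes the
// runtime state into SDKTask.metadata, and reconstruct() below reads it back:
//
//   const sdkTask = wrapper.toSDKTask();
//   await taskStore.save(sdkTask);
//   const state = getPersistedState(sdkTask.metadata!); // { _agentSettings, _taskState }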
const coderAgentCard: AgentCard = {
name: 'Gemini SDLC Agent',
description:
'An agent that generates code based on natural language instructions and streams file outputs.',
url: 'http://localhost:41242/',
provider: {
organization: 'Google',
url: 'https://google.com',
},
protocolVersion: '0.3.0',
version: '0.0.2', // Incremented version
capabilities: {
streaming: true,
pushNotifications: false,
stateTransitionHistory: true,
},
securitySchemes: undefined,
security: undefined,
defaultInputModes: ['text'],
defaultOutputModes: ['text'],
skills: [
{
id: 'code_generation',
name: 'Code Generation',
description:
'Generates code snippets or complete files based on user requests, streaming the results.',
tags: ['code', 'development', 'programming'],
examples: [
'Write a python function to calculate fibonacci numbers.',
'Create an HTML file with a basic button that alerts "Hello!" when clicked.',
],
inputModes: ['text'],
outputModes: ['text'],
},
],
supportsAuthenticatedExtendedCard: false,
};
/**
* CoderAgentExecutor implements the agent's core logic for code generation.
*/
class CoderAgentExecutor implements AgentExecutor {
private tasks: Map<string, TaskWrapper> = new Map();
// Track tasks with an active execution loop.
private executingTasks = new Set<string>();
constructor(private taskStore?: TaskStore) {}
private async getConfig(
agentSettings: AgentSettings,
taskId: string,
): Promise<Config> {
const workspaceRoot = setTargetDir(agentSettings);
loadEnvironment(); // Will override any global env with workspace envs
const settings = loadSettings(workspaceRoot);
const extensions = loadExtensions(workspaceRoot);
return await loadConfig(settings, extensions, taskId);
}
/**
* Reconstructs TaskWrapper from SDKTask.
*/
async reconstruct(
sdkTask: SDKTask,
eventBus?: ExecutionEventBus,
): Promise<TaskWrapper> {
const metadata = sdkTask.metadata || {};
const persistedState = getPersistedState(metadata);
if (!persistedState) {
throw new Error(
`Cannot reconstruct task ${sdkTask.id}: missing persisted state in metadata.`,
);
}
const agentSettings = persistedState._agentSettings;
const config = await this.getConfig(agentSettings, sdkTask.id);
const contextId =
(metadata['_contextId'] as string) || (sdkTask.contextId as string);
const runtimeTask = await Task.create(
sdkTask.id,
contextId,
config,
eventBus,
);
runtimeTask.taskState = persistedState._taskState;
await runtimeTask.geminiClient.initialize(
runtimeTask.config.getContentGeneratorConfig(),
);
const wrapper = new TaskWrapper(runtimeTask, agentSettings);
this.tasks.set(sdkTask.id, wrapper);
logger.info(`Task ${sdkTask.id} reconstructed from store.`);
return wrapper;
}
async createTask(
taskId: string,
contextId: string,
agentSettingsInput?: AgentSettings,
eventBus?: ExecutionEventBus,
): Promise<TaskWrapper> {
const agentSettings = agentSettingsInput || ({} as AgentSettings);
const config = await this.getConfig(agentSettings, taskId);
const runtimeTask = await Task.create(taskId, contextId, config, eventBus);
await runtimeTask.geminiClient.initialize(
runtimeTask.config.getContentGeneratorConfig(),
);
const wrapper = new TaskWrapper(runtimeTask, agentSettings);
this.tasks.set(taskId, wrapper);
logger.info(`New task ${taskId} created.`);
return wrapper;
}
getTask(taskId: string): TaskWrapper | undefined {
return this.tasks.get(taskId);
}
getAllTasks(): TaskWrapper[] {
return Array.from(this.tasks.values());
}
cancelTask = async (
taskId: string,
eventBus: ExecutionEventBus,
): Promise<void> => {
logger.info(
`[CoderAgentExecutor] Received cancel request for task ${taskId}`,
);
const wrapper = this.tasks.get(taskId);
if (!wrapper) {
logger.warn(
`[CoderAgentExecutor] Task ${taskId} not found for cancellation.`,
);
eventBus.publish({
kind: 'status-update',
taskId,
contextId: uuidv4(),
status: {
state: 'failed',
message: {
kind: 'message',
role: 'agent',
parts: [{ kind: 'text', text: `Task ${taskId} not found.` }],
messageId: uuidv4(),
taskId,
},
},
final: true,
});
return;
}
const { task } = wrapper;
if (task.taskState === 'canceled' || task.taskState === 'failed') {
logger.info(
`[CoderAgentExecutor] Task ${taskId} is already in a final state: ${task.taskState}. No action needed for cancellation.`,
);
eventBus.publish({
kind: 'status-update',
taskId,
contextId: task.contextId,
status: {
state: task.taskState,
message: {
kind: 'message',
role: 'agent',
parts: [
{
kind: 'text',
text: `Task ${taskId} is already ${task.taskState}.`,
},
],
messageId: uuidv4(),
taskId,
},
},
final: true,
});
return;
}
try {
logger.info(
`[CoderAgentExecutor] Initiating cancellation for task ${taskId}.`,
);
task.cancelPendingTools('Task canceled by user request.');
const stateChange: StateChange = {
kind: CoderAgentEvent.StateChangeEvent,
};
task.setTaskStateAndPublishUpdate(
'canceled',
stateChange,
'Task canceled by user request.',
undefined,
true,
);
logger.info(
`[CoderAgentExecutor] Task ${taskId} cancellation processed. Saving state.`,
);
await this.taskStore?.save(wrapper.toSDKTask());
logger.info(`[CoderAgentExecutor] Task ${taskId} state CANCELED saved.`);
} catch (error) {
const errorMessage =
error instanceof Error ? error.message : 'Unknown error';
logger.error(
`[CoderAgentExecutor] Error during task cancellation for ${taskId}: ${errorMessage}`,
error,
);
eventBus.publish({
kind: 'status-update',
taskId,
contextId: task.contextId,
status: {
state: 'failed',
message: {
kind: 'message',
role: 'agent',
parts: [
{
kind: 'text',
text: `Failed to process cancellation for task ${taskId}: ${errorMessage}`,
},
],
messageId: uuidv4(),
taskId,
},
},
final: true,
});
}
};
async execute(
requestContext: RequestContext,
eventBus: ExecutionEventBus,
): Promise<void> {
const userMessage = requestContext.userMessage as Message;
const sdkTask = requestContext.task as SDKTask | undefined;
const taskId = sdkTask?.id || userMessage.taskId || uuidv4();
const contextId =
userMessage.contextId ||
sdkTask?.contextId ||
sdkTask?.metadata?.['_contextId'] ||
uuidv4();
logger.info(
`[CoderAgentExecutor] Executing for taskId: ${taskId}, contextId: ${contextId}`,
);
logger.info(
`[CoderAgentExecutor] userMessage: ${JSON.stringify(userMessage)}`,
);
eventBus.on('event', (event: AgentExecutionEvent) =>
logger.info('[EventBus event]: ', event),
);
const store = requestStorage.getStore();
if (!store) {
logger.error(
'[CoderAgentExecutor] Could not get request from async local storage. Cancellation on socket close will not be handled for this request.',
);
}
const abortController = new AbortController();
const abortSignal = abortController.signal;
if (store) {
// Grab the raw socket from the request object
const socket = store.req.socket;
const onClientEnd = () => {
logger.info(
`[CoderAgentExecutor] Client socket closed for task ${taskId}. Cancelling execution.`,
);
if (!abortController.signal.aborted) {
abortController.abort();
}
// Clean up the listener to prevent memory leaks
socket.removeListener('end', onClientEnd);
};
// Listen on the socket's 'end' event (remote closed the connection)
socket.on('end', onClientEnd);
// It's also good practice to remove the listener if the task completes successfully
abortSignal.addEventListener('abort', () => {
socket.removeListener('end', onClientEnd);
});
logger.info(
`[CoderAgentExecutor] Socket close handler set up for task ${taskId}.`,
);
}
let wrapper: TaskWrapper | undefined = this.tasks.get(taskId);
if (wrapper) {
wrapper.task.eventBus = eventBus;
logger.info(`[CoderAgentExecutor] Task ${taskId} found in memory cache.`);
} else if (sdkTask) {
logger.info(
`[CoderAgentExecutor] Task ${taskId} found in TaskStore. Reconstructing...`,
);
try {
wrapper = await this.reconstruct(sdkTask, eventBus);
} catch (e) {
logger.error(
`[CoderAgentExecutor] Failed to hydrate task ${taskId}:`,
e,
);
const stateChange: StateChange = {
kind: CoderAgentEvent.StateChangeEvent,
};
eventBus.publish({
kind: 'status-update',
taskId,
contextId: sdkTask.contextId,
status: {
state: 'failed',
message: {
kind: 'message',
role: 'agent',
parts: [
{
kind: 'text',
text: 'Internal error: Task state lost or corrupted.',
},
],
messageId: uuidv4(),
taskId,
contextId: sdkTask.contextId,
} as Message,
},
final: true,
metadata: { coderAgent: stateChange },
});
return;
}
} else {
logger.info(`[CoderAgentExecutor] Creating new task ${taskId}.`);
const agentSettings = userMessage.metadata?.[
'coderAgent'
] as AgentSettings;
wrapper = await this.createTask(
taskId,
contextId as string,
agentSettings,
eventBus,
);
const newTaskSDK = wrapper.toSDKTask();
eventBus.publish({
...newTaskSDK,
kind: 'task',
status: { state: 'submitted', timestamp: new Date().toISOString() },
history: [userMessage],
});
try {
await this.taskStore?.save(newTaskSDK);
logger.info(`[CoderAgentExecutor] New task ${taskId} saved to store.`);
} catch (saveError) {
logger.error(
`[CoderAgentExecutor] Failed to save new task ${taskId} to store:`,
saveError,
);
}
}
if (!wrapper) {
logger.error(
`[CoderAgentExecutor] Task ${taskId} is unexpectedly undefined after load/create.`,
);
return;
}
const currentTask = wrapper.task;
if (['canceled', 'failed', 'completed'].includes(currentTask.taskState)) {
logger.warn(
`[CoderAgentExecutor] Attempted to execute task ${taskId} which is already in state ${currentTask.taskState}. Ignoring.`,
);
return;
}
if (this.executingTasks.has(taskId)) {
logger.info(
`[CoderAgentExecutor] Task ${taskId} has a pending execution. Processing message and yielding.`,
);
currentTask.eventBus = eventBus;
for await (const _ of currentTask.acceptUserMessage(
requestContext,
abortController.signal,
)) {
logger.info(
`[CoderAgentExecutor] Processing user message ${userMessage.messageId} in secondary execution loop for task ${taskId}.`,
);
}
// End this execution; the original/source execution loop will resume.
return;
}
logger.info(
`[CoderAgentExecutor] Starting main execution for message ${userMessage.messageId} for task ${taskId}.`,
);
this.executingTasks.add(taskId);
try {
let agentTurnActive = true;
logger.info(`[CoderAgentExecutor] Task ${taskId}: Processing user turn.`);
let agentEvents = currentTask.acceptUserMessage(
requestContext,
abortSignal,
);
while (agentTurnActive) {
logger.info(
`[CoderAgentExecutor] Task ${taskId}: Processing agent turn (LLM stream).`,
);
const toolCallRequests: ToolCallRequestInfo[] = [];
for await (const event of agentEvents) {
if (abortSignal.aborted) {
logger.warn(
`[CoderAgentExecutor] Task ${taskId}: Abort signal received during agent event processing.`,
);
throw new Error('Execution aborted');
}
if (event.type === GeminiEventType.ToolCallRequest) {
toolCallRequests.push(
(event as ServerGeminiToolCallRequestEvent).value,
);
continue;
}
await currentTask.acceptAgentMessage(event);
}
if (abortSignal.aborted) throw new Error('Execution aborted');
if (toolCallRequests.length > 0) {
logger.info(
`[CoderAgentExecutor] Task ${taskId}: Found ${toolCallRequests.length} tool call requests. Scheduling as a batch.`,
);
await currentTask.scheduleToolCalls(toolCallRequests, abortSignal);
}
logger.info(
`[CoderAgentExecutor] Task ${taskId}: Waiting for pending tools if any.`,
);
await currentTask.waitForPendingTools();
logger.info(
`[CoderAgentExecutor] Task ${taskId}: All pending tools completed or none were pending.`,
);
if (abortSignal.aborted) throw new Error('Execution aborted');
const completedTools = currentTask.getAndClearCompletedTools();
if (completedTools.length > 0) {
// If all completed tool calls were canceled, manually add them to history and set state to input-required, final:true
if (completedTools.every((tool) => tool.status === 'cancelled')) {
logger.info(
`[CoderAgentExecutor] Task ${taskId}: All tool calls were cancelled. Updating history and ending agent turn.`,
);
currentTask.addToolResponsesToHistory(completedTools);
agentTurnActive = false;
const stateChange: StateChange = {
kind: CoderAgentEvent.StateChangeEvent,
};
currentTask.setTaskStateAndPublishUpdate(
'input-required',
stateChange,
undefined,
undefined,
true,
);
} else {
logger.info(
`[CoderAgentExecutor] Task ${taskId}: Found ${completedTools.length} completed tool calls. Sending results back to LLM.`,
);
agentEvents = currentTask.sendCompletedToolsToLlm(
completedTools,
abortSignal,
);
// Continue the loop to process the LLM response to the tool results.
}
} else {
logger.info(
`[CoderAgentExecutor] Task ${taskId}: No more tool calls to process. Ending agent turn.`,
);
agentTurnActive = false;
}
}
logger.info(
`[CoderAgentExecutor] Task ${taskId}: Agent turn finished, setting to input-required.`,
);
const stateChange: StateChange = {
kind: CoderAgentEvent.StateChangeEvent,
};
currentTask.setTaskStateAndPublishUpdate(
'input-required',
stateChange,
undefined,
undefined,
true,
);
} catch (error) {
if (abortSignal.aborted) {
logger.warn(`[CoderAgentExecutor] Task ${taskId} execution aborted.`);
currentTask.cancelPendingTools('Execution aborted');
if (
currentTask.taskState !== 'canceled' &&
currentTask.taskState !== 'failed'
) {
currentTask.setTaskStateAndPublishUpdate(
'input-required',
{ kind: CoderAgentEvent.StateChangeEvent },
'Execution aborted by client.',
undefined,
true,
);
}
} else {
const errorMessage =
error instanceof Error ? error.message : 'Agent execution error';
logger.error(
`[CoderAgentExecutor] Error executing agent for task ${taskId}:`,
error,
);
currentTask.cancelPendingTools(errorMessage);
if (currentTask.taskState !== 'failed') {
const stateChange: StateChange = {
kind: CoderAgentEvent.StateChangeEvent,
};
currentTask.setTaskStateAndPublishUpdate(
'failed',
stateChange,
errorMessage,
undefined,
true,
);
}
}
} finally {
this.executingTasks.delete(taskId);
logger.info(
`[CoderAgentExecutor] Saving final state for task ${taskId}.`,
);
try {
await this.taskStore?.save(wrapper.toSDKTask());
logger.info(`[CoderAgentExecutor] Task ${taskId} state saved.`);
} catch (saveError) {
logger.error(
`[CoderAgentExecutor] Failed to save task ${taskId} state in finally block:`,
saveError,
);
}
}
}
}
export function updateCoderAgentCardUrl(port: number) {
coderAgentCard.url = `http://localhost:${port}/`;
}
export async function main() {
try {
const expressApp = await createApp();
const port = process.env['CODER_AGENT_PORT'] || 0;
const server = expressApp.listen(port, () => {
const address = server.address();
let actualPort;
if (process.env['CODER_AGENT_PORT']) {
actualPort = process.env['CODER_AGENT_PORT'];
} else if (address && typeof address !== 'string') {
actualPort = address.port;
} else {
throw new Error('[CoreAgent] Could not find port number.');
}
updateCoderAgentCardUrl(Number(actualPort));
logger.info(
`[CoreAgent] Agent Server started on http://localhost:${actualPort}`,
);
logger.info(
`[CoreAgent] Agent Card: http://localhost:${actualPort}/.well-known/agent-card.json`,
);
logger.info('[CoreAgent] Press Ctrl+C to stop the server');
});
} catch (error) {
logger.error('[CoreAgent] Error during startup:', error);
process.exit(1);
}
}
export async function createApp() {
try {
// loadEnvironment() is called within getConfig now
const bucketName = process.env['GCS_BUCKET_NAME'];
let taskStoreForExecutor: TaskStore;
let taskStoreForHandler: TaskStore;
if (bucketName) {
logger.info(`Using GCSTaskStore with bucket: ${bucketName}`);
const gcsTaskStore = new GCSTaskStore(bucketName);
taskStoreForExecutor = gcsTaskStore;
taskStoreForHandler = new NoOpTaskStore(gcsTaskStore);
} else {
logger.info('Using InMemoryTaskStore');
const inMemoryTaskStore = new InMemoryTaskStore();
taskStoreForExecutor = inMemoryTaskStore;
taskStoreForHandler = inMemoryTaskStore;
}
const agentExecutor = new CoderAgentExecutor(taskStoreForExecutor);
const requestHandler = new DefaultRequestHandler(
coderAgentCard,
taskStoreForHandler,
agentExecutor,
);
let expressApp = express();
expressApp.use((req, res, next) => {
requestStorage.run({ req }, next);
});
const appBuilder = new A2AExpressApp(requestHandler);
expressApp = appBuilder.setupRoutes(expressApp, '');
expressApp.use(express.json());
expressApp.post('/tasks', async (req, res) => {
try {
const taskId = uuidv4();
const agentSettings = req.body.agentSettings as
| AgentSettings
| undefined;
const contextId = req.body.contextId || uuidv4();
const wrapper = await agentExecutor.createTask(
taskId,
contextId,
agentSettings,
);
await taskStoreForExecutor.save(wrapper.toSDKTask());
res.status(201).json(wrapper.id);
} catch (error) {
logger.error('[CoreAgent] Error creating task:', error);
const errorMessage =
error instanceof Error
? error.message
: 'Unknown error creating task';
res.status(500).send({ error: errorMessage });
}
});
expressApp.get('/tasks/metadata', async (req, res) => {
// This endpoint is only meaningful if the task store is in-memory.
if (!(taskStoreForExecutor instanceof InMemoryTaskStore)) {
res.status(501).send({
error:
'Listing all task metadata is only supported when using InMemoryTaskStore.',
});
return; // Without this, the handler would fall through and respond twice.
}
try {
const wrappers = agentExecutor.getAllTasks();
if (wrappers && wrappers.length > 0) {
const tasksMetadata = await Promise.all(
wrappers.map((wrapper) => wrapper.task.getMetadata()),
);
res.status(200).json(tasksMetadata);
} else {
res.status(204).send();
}
} catch (error) {
logger.error('[CoreAgent] Error getting all task metadata:', error);
const errorMessage =
error instanceof Error
? error.message
: 'Unknown error getting task metadata';
res.status(500).send({ error: errorMessage });
}
});
expressApp.get('/tasks/:taskId/metadata', async (req, res) => {
const taskId = req.params.taskId;
let wrapper = agentExecutor.getTask(taskId);
if (!wrapper) {
const sdkTask = await taskStoreForExecutor.load(taskId);
if (sdkTask) {
wrapper = await agentExecutor.reconstruct(sdkTask);
}
}
if (!wrapper) {
res.status(404).send({ error: 'Task not found' });
return;
}
res.json({ metadata: await wrapper.task.getMetadata() });
});
return expressApp;
} catch (error) {
logger.error('[CoreAgent] Error during startup:', error);
process.exit(1);
}
}
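
A hedged sketch of exercising the HTTP endpoints registered above, assuming a server already listening on port 41242; the `agentSettings` shape mirrors the endpoint tests later in this diff:

```ts
const base = 'http://localhost:41242'; // assumed port

// POST /tasks responds 201 with the new task id as a JSON string.
const created = await fetch(`${base}/tasks`, {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify({
    contextId: 'demo-context',
    agentSettings: { kind: 'agent-settings', workspacePath: '/tmp/demo' },
  }),
});
const taskId: string = await created.json();

// GET /tasks/:taskId/metadata responds with { metadata: {...} }.
const meta = await fetch(`${base}/tasks/${taskId}/metadata`);
console.log((await meta.json()).metadata);

// The agent card is served at the well-known location.
const card = await fetch(`${base}/.well-known/agent-card.json`);
console.log((await card.json()).name); // 'Gemini SDLC Agent'
```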

View File

@@ -0,0 +1,203 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import * as fs from 'node:fs';
import * as path from 'node:path';
import { homedir } from 'node:os';
import * as dotenv from 'dotenv';
import type { TelemetryTarget } from '@qwen-code/qwen-code-core';
import {
AuthType,
Config,
type ConfigParameters,
FileDiscoveryService,
ApprovalMode,
loadServerHierarchicalMemory,
GEMINI_CONFIG_DIR,
DEFAULT_GEMINI_EMBEDDING_MODEL,
DEFAULT_GEMINI_MODEL,
} from '@qwen-code/qwen-code-core';
import { logger } from './logger.js';
import type { Settings } from './settings.js';
import type { Extension } from './extension.js';
import { type AgentSettings, CoderAgentEvent } from './types.js';
export async function loadConfig(
settings: Settings,
extensions: Extension[],
taskId: string,
): Promise<Config> {
const mcpServers = mergeMcpServers(settings, extensions);
const workspaceDir = process.cwd();
const adcFilePath = process.env['GOOGLE_APPLICATION_CREDENTIALS'];
const configParams: ConfigParameters = {
sessionId: taskId,
model: DEFAULT_GEMINI_MODEL,
embeddingModel: DEFAULT_GEMINI_EMBEDDING_MODEL,
sandbox: undefined, // Sandbox might not be relevant for a server-side agent
targetDir: workspaceDir, // Or a specific directory the agent operates on
debugMode: process.env['DEBUG'] === 'true' || false,
question: '', // Not used in server mode directly like CLI
fullContext: false, // Server might have different context needs
coreTools: settings.coreTools || undefined,
excludeTools: settings.excludeTools || undefined,
showMemoryUsage: settings.showMemoryUsage || false,
approvalMode:
process.env['GEMINI_YOLO_MODE'] === 'true'
? ApprovalMode.YOLO
: ApprovalMode.DEFAULT,
mcpServers,
cwd: workspaceDir,
telemetry: {
enabled: settings.telemetry?.enabled,
target: settings.telemetry?.target as TelemetryTarget,
otlpEndpoint:
process.env['OTEL_EXPORTER_OTLP_ENDPOINT'] ??
settings.telemetry?.otlpEndpoint,
logPrompts: settings.telemetry?.logPrompts,
},
// Git-aware file filtering settings
fileFiltering: {
respectGitIgnore: settings.fileFiltering?.respectGitIgnore,
enableRecursiveFileSearch:
settings.fileFiltering?.enableRecursiveFileSearch,
},
ideMode: false,
};
const fileService = new FileDiscoveryService(workspaceDir);
const extensionContextFilePaths = extensions.flatMap((e) => e.contextFiles);
const { memoryContent, fileCount } = await loadServerHierarchicalMemory(
workspaceDir,
[workspaceDir],
false,
fileService,
extensionContextFilePaths,
);
configParams.userMemory = memoryContent;
configParams.geminiMdFileCount = fileCount;
const config = new Config({
...configParams,
});
// Needed to initialize ToolRegistry, and git checkpointing if enabled
await config.initialize();
if (process.env['USE_CCPA']) {
logger.info('[Config] Using CCPA Auth');
// path.resolve() never throws for a defined string, so check that the
// credentials file actually exists instead.
if (adcFilePath && !fs.existsSync(adcFilePath)) {
logger.error(
`[Config] USE_CCPA env var is set but the GOOGLE_APPLICATION_CREDENTIALS file path ${adcFilePath} does not exist.`,
);
}
await config.refreshAuth(AuthType.LOGIN_WITH_GOOGLE);
logger.info(
`[Config] GOOGLE_CLOUD_PROJECT: ${process.env['GOOGLE_CLOUD_PROJECT']}`,
);
} else if (process.env['GEMINI_API_KEY']) {
logger.info('[Config] Using Gemini API Key');
await config.refreshAuth(AuthType.USE_GEMINI);
} else {
logger.error(
`[Config] Unable to set GeneratorConfig. Please provide a GEMINI_API_KEY or set USE_CCPA.`,
);
}
return config;
}
export function mergeMcpServers(settings: Settings, extensions: Extension[]) {
const mcpServers = { ...(settings.mcpServers || {}) };
for (const extension of extensions) {
Object.entries(extension.config.mcpServers || {}).forEach(
([key, server]) => {
if (mcpServers[key]) {
console.warn(
`Skipping extension MCP config for server with key "${key}" as it already exists.`,
);
return;
}
mcpServers[key] = server;
},
);
}
return mcpServers;
}
export function setTargetDir(agentSettings: AgentSettings | undefined): string {
const originalCWD = process.cwd();
const targetDir =
process.env['CODER_AGENT_WORKSPACE_PATH'] ??
(agentSettings?.kind === CoderAgentEvent.StateAgentSettingsEvent
? agentSettings.workspacePath
: undefined);
if (!targetDir) {
return originalCWD;
}
logger.info(
`[CoderAgentExecutor] Overriding workspace path to: ${targetDir}`,
);
try {
const resolvedPath = path.resolve(targetDir);
process.chdir(resolvedPath);
return resolvedPath;
} catch (e) {
logger.error(
`[CoderAgentExecutor] Error resolving workspace path: ${e}, returning original os.cwd()`,
);
return originalCWD;
}
}
export function loadEnvironment(): void {
const envFilePath = findEnvFile(process.cwd());
if (envFilePath) {
dotenv.config({ path: envFilePath, override: true });
}
}
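// Search order implemented by findEnvFile below: walk from startDir up toward
// the filesystem root, preferring <dir>/<GEMINI_CONFIG_DIR>/.env over
// <dir>/.env at each level; if nothing is found, fall back to the same pair
// under the home directory.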
function findEnvFile(startDir: string): string | null {
let currentDir = path.resolve(startDir);
while (true) {
// prefer gemini-specific .env under GEMINI_DIR
const geminiEnvPath = path.join(currentDir, GEMINI_CONFIG_DIR, '.env');
if (fs.existsSync(geminiEnvPath)) {
return geminiEnvPath;
}
const envPath = path.join(currentDir, '.env');
if (fs.existsSync(envPath)) {
return envPath;
}
const parentDir = path.dirname(currentDir);
if (parentDir === currentDir || !parentDir) {
// check .env under home as fallback, again preferring gemini-specific .env
const homeGeminiEnvPath = path.join(
homedir(),
GEMINI_CONFIG_DIR,
'.env',
);
if (fs.existsSync(homeGeminiEnvPath)) {
return homeGeminiEnvPath;
}
const homeEnvPath = path.join(homedir(), '.env');
if (fs.existsSync(homeEnvPath)) {
return homeEnvPath;
}
return null;
}
currentDir = parentDir;
}
}

View File

@@ -0,0 +1,146 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { describe, it, expect, beforeAll, afterAll, vi } from 'vitest';
import request from 'supertest';
import type express from 'express';
import { createApp, updateCoderAgentCardUrl } from './agent.js';
import * as fs from 'node:fs';
import * as path from 'node:path';
import * as os from 'node:os';
import type { Server } from 'node:http';
import type { TaskMetadata } from './types.js';
import type { AddressInfo } from 'node:net';
// Mock the logger to avoid polluting test output.
// Comment this mock out to help debug failing tests.
vi.mock('./logger.js', () => ({
logger: { info: vi.fn(), warn: vi.fn(), error: vi.fn() },
}));
// Mock Task.create to avoid its complex setup
vi.mock('./task.js', () => {
class MockTask {
id: string;
contextId: string;
taskState = 'submitted';
config = {
getContentGeneratorConfig: vi
.fn()
.mockReturnValue({ model: 'gemini-pro' }),
};
geminiClient = {
initialize: vi.fn().mockResolvedValue(undefined),
};
constructor(id: string, contextId: string) {
this.id = id;
this.contextId = contextId;
}
static create = vi
.fn()
.mockImplementation((id, contextId) =>
Promise.resolve(new MockTask(id, contextId)),
);
getMetadata = vi.fn().mockImplementation(async () => ({
id: this.id,
contextId: this.contextId,
taskState: this.taskState,
model: 'gemini-pro',
mcpServers: [],
availableTools: [],
}));
}
return { Task: MockTask };
});
describe('Agent Server Endpoints', () => {
let app: express.Express;
let server: Server;
let testWorkspace: string;
const createTask = (contextId: string) =>
request(app)
.post('/tasks')
.send({
contextId,
agentSettings: {
kind: 'agent-settings',
workspacePath: testWorkspace,
},
})
.set('Content-Type', 'application/json');
beforeAll(async () => {
// Create a unique temporary directory for the workspace to avoid conflicts
testWorkspace = fs.mkdtempSync(
path.join(os.tmpdir(), 'gemini-agent-test-'),
);
app = await createApp();
await new Promise<void>((resolve) => {
server = app.listen(0, () => {
const port = (server.address() as AddressInfo).port;
updateCoderAgentCardUrl(port);
resolve();
});
});
});
afterAll(
() =>
new Promise<void>((resolve, reject) => {
server.close((err) => {
if (err) return reject(err);
try {
fs.rmSync(testWorkspace, { recursive: true, force: true });
} catch (e) {
console.warn(`Could not remove temp dir '${testWorkspace}':`, e);
}
resolve();
});
}),
);
it('should create a new task via POST /tasks', async () => {
const response = await createTask('test-context');
expect(response.status).toBe(201);
expect(response.body).toBeTypeOf('string'); // Should return the task ID
}, 7000);
it('should get metadata for a specific task via GET /tasks/:taskId/metadata', async () => {
const createResponse = await createTask('test-context-2');
const taskId = createResponse.body;
const response = await request(app).get(`/tasks/${taskId}/metadata`);
expect(response.status).toBe(200);
expect(response.body.metadata.id).toBe(taskId);
}, 6000);
it('should get metadata for all tasks via GET /tasks/metadata', async () => {
const createResponse = await createTask('test-context-3');
const taskId = createResponse.body;
const response = await request(app).get('/tasks/metadata');
expect(response.status).toBe(200);
expect(Array.isArray(response.body)).toBe(true);
expect(response.body.length).toBeGreaterThan(0);
const taskMetadata = response.body.find(
(m: TaskMetadata) => m.id === taskId,
);
expect(taskMetadata).toBeDefined();
});
it('should return 404 for a non-existent task', async () => {
const response = await request(app).get('/tasks/fake-task/metadata');
expect(response.status).toBe(404);
});
it('should return agent metadata via GET /.well-known/agent-card.json', async () => {
const response = await request(app).get('/.well-known/agent-card.json');
const port = (server.address() as AddressInfo).port;
expect(response.status).toBe(200);
expect(response.body.name).toBe('Gemini SDLC Agent');
expect(response.body.url).toBe(`http://localhost:${port}/`);
});
});

View File

@@ -0,0 +1,118 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
// Copied exactly from packages/cli/src/config/extension.ts, last PR #1026
import type { MCPServerConfig } from '@qwen-code/qwen-code-core';
import * as fs from 'node:fs';
import * as path from 'node:path';
import * as os from 'node:os';
import { logger } from './logger.js';
export const EXTENSIONS_DIRECTORY_NAME = path.join('.gemini', 'extensions');
export const EXTENSIONS_CONFIG_FILENAME = 'gemini-extension.json';
export interface Extension {
config: ExtensionConfig;
contextFiles: string[];
}
export interface ExtensionConfig {
name: string;
version: string;
mcpServers?: Record<string, MCPServerConfig>;
contextFileName?: string | string[];
}
export function loadExtensions(workspaceDir: string): Extension[] {
const allExtensions = [
...loadExtensionsFromDir(workspaceDir),
...loadExtensionsFromDir(os.homedir()),
];
const uniqueExtensions: Extension[] = [];
const seenNames = new Set<string>();
for (const extension of allExtensions) {
if (!seenNames.has(extension.config.name)) {
logger.info(
`Loading extension: ${extension.config.name} (version: ${extension.config.version})`,
);
uniqueExtensions.push(extension);
seenNames.add(extension.config.name);
}
}
return uniqueExtensions;
}
function loadExtensionsFromDir(dir: string): Extension[] {
const extensionsDir = path.join(dir, EXTENSIONS_DIRECTORY_NAME);
if (!fs.existsSync(extensionsDir)) {
return [];
}
const extensions: Extension[] = [];
for (const subdir of fs.readdirSync(extensionsDir)) {
const extensionDir = path.join(extensionsDir, subdir);
const extension = loadExtension(extensionDir);
if (extension != null) {
extensions.push(extension);
}
}
return extensions;
}
function loadExtension(extensionDir: string): Extension | null {
if (!fs.statSync(extensionDir).isDirectory()) {
logger.error(
`Warning: unexpected file ${extensionDir} in extensions directory.`,
);
return null;
}
const configFilePath = path.join(extensionDir, EXTENSIONS_CONFIG_FILENAME);
if (!fs.existsSync(configFilePath)) {
logger.error(
`Warning: extension directory ${extensionDir} does not contain a config file ${configFilePath}.`,
);
return null;
}
try {
const configContent = fs.readFileSync(configFilePath, 'utf-8');
const config = JSON.parse(configContent) as ExtensionConfig;
if (!config.name || !config.version) {
logger.error(
`Invalid extension config in ${configFilePath}: missing name or version.`,
);
return null;
}
const contextFiles = getContextFileNames(config)
.map((contextFileName) => path.join(extensionDir, contextFileName))
.filter((contextFilePath) => fs.existsSync(contextFilePath));
return {
config,
contextFiles,
};
} catch (e) {
logger.error(
`Warning: error parsing extension config in ${configFilePath}: ${e}`,
);
return null;
}
}
function getContextFileNames(config: ExtensionConfig): string[] {
if (!config.contextFileName) {
return ['GEMINI.md'];
} else if (!Array.isArray(config.contextFileName)) {
return [config.contextFileName];
}
return config.contextFileName;
}
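// Example gemini-extension.json (illustrative; per ExtensionConfig above,
// only name and version are required; the mcpServers entry shape here is an
// assumption):
//
//   {
//     "name": "my-extension",
//     "version": "1.0.0",
//     "contextFileName": "GEMINI.md",
//     "mcpServers": { "local": { "command": "node", "args": ["server.js"] } }
//   }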

View File

@@ -0,0 +1,340 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { Storage } from '@google-cloud/storage';
import * as fse from 'fs-extra';
import { promises as fsPromises, createReadStream } from 'node:fs';
import * as tar from 'tar';
import { gzipSync, gunzipSync } from 'node:zlib';
import { v4 as uuidv4 } from 'uuid';
import type { Task as SDKTask } from '@a2a-js/sdk';
import type { TaskStore } from '@a2a-js/sdk/server';
import type { Mocked, MockedClass, Mock } from 'vitest';
import { describe, it, expect, beforeEach, vi } from 'vitest';
import { GCSTaskStore, NoOpTaskStore } from './gcs.js';
import { logger } from './logger.js';
import * as configModule from './config.js';
import * as metadataModule from './metadata_types.js';
// Mock dependencies
vi.mock('@google-cloud/storage');
vi.mock('fs-extra', () => ({
pathExists: vi.fn(),
readdir: vi.fn(),
remove: vi.fn(),
ensureDir: vi.fn(),
}));
vi.mock('node:fs', async () => {
const actual = await vi.importActual<typeof import('node:fs')>('node:fs');
return {
...actual,
promises: {
...actual.promises,
readdir: vi.fn(),
},
createReadStream: vi.fn(),
};
});
vi.mock('tar');
vi.mock('zlib');
vi.mock('uuid');
vi.mock('./logger', () => ({
logger: {
info: vi.fn(),
warn: vi.fn(),
error: vi.fn(),
debug: vi.fn(),
},
}));
vi.mock('./config');
vi.mock('./metadata_types');
vi.mock('node:stream/promises', () => ({
pipeline: vi.fn(),
}));
const mockStorage = Storage as MockedClass<typeof Storage>;
const mockFse = fse as Mocked<typeof fse>;
const mockCreateReadStream = createReadStream as Mock;
const mockTar = tar as Mocked<typeof tar>;
const mockGzipSync = gzipSync as Mock;
const mockGunzipSync = gunzipSync as Mock;
const mockUuidv4 = uuidv4 as Mock;
const mockSetTargetDir = configModule.setTargetDir as Mock;
const mockGetPersistedState = metadataModule.getPersistedState as Mock;
const METADATA_KEY = metadataModule.METADATA_KEY || '__persistedState';
type MockWriteStream = {
on: Mock<
(event: string, cb: (error?: Error | null) => void) => MockWriteStream
>;
destroy: Mock<() => void>;
destroyed: boolean;
};
type MockFile = {
save: Mock<(data: Buffer | string) => Promise<void>>;
download: Mock<() => Promise<[Buffer]>>;
exists: Mock<() => Promise<[boolean]>>;
createWriteStream: Mock<() => MockWriteStream>;
};
type MockBucket = {
exists: Mock<() => Promise<[boolean]>>;
file: Mock<(path: string) => MockFile>;
name: string;
};
type MockStorageInstance = {
bucket: Mock<(name: string) => MockBucket>;
getBuckets: Mock<() => Promise<[Array<{ name: string }>]>>;
createBucket: Mock<(name: string) => Promise<[MockBucket]>>;
};
describe('GCSTaskStore', () => {
let bucketName: string;
let mockBucket: MockBucket;
let mockFile: MockFile;
let mockWriteStream: MockWriteStream;
let mockStorageInstance: MockStorageInstance;
beforeEach(() => {
vi.clearAllMocks();
bucketName = 'test-bucket';
mockWriteStream = {
on: vi.fn((event, cb) => {
if (event === 'finish') setTimeout(cb, 0); // Simulate async finish
return mockWriteStream;
}),
destroy: vi.fn(),
destroyed: false,
};
mockFile = {
save: vi.fn().mockResolvedValue(undefined),
download: vi.fn().mockResolvedValue([Buffer.from('')]),
exists: vi.fn().mockResolvedValue([true]),
createWriteStream: vi.fn().mockReturnValue(mockWriteStream),
};
mockBucket = {
exists: vi.fn().mockResolvedValue([true]),
file: vi.fn().mockReturnValue(mockFile),
name: bucketName,
};
mockStorageInstance = {
bucket: vi.fn().mockReturnValue(mockBucket),
getBuckets: vi.fn().mockResolvedValue([[{ name: bucketName }]]),
createBucket: vi.fn().mockResolvedValue([mockBucket]),
};
mockStorage.mockReturnValue(mockStorageInstance as unknown as Storage);
mockUuidv4.mockReturnValue('test-uuid');
mockSetTargetDir.mockReturnValue('/tmp/workdir');
mockGetPersistedState.mockReturnValue({
_agentSettings: {},
_taskState: 'submitted',
});
(fse.pathExists as Mock).mockResolvedValue(true);
(fsPromises.readdir as Mock).mockResolvedValue(['file1.txt']);
mockTar.c.mockResolvedValue(undefined);
mockTar.x.mockResolvedValue(undefined);
mockFse.remove.mockResolvedValue(undefined);
mockFse.ensureDir.mockResolvedValue(undefined);
mockGzipSync.mockReturnValue(Buffer.from('compressed'));
mockGunzipSync.mockReturnValue(Buffer.from('{}'));
mockCreateReadStream.mockReturnValue({ on: vi.fn(), pipe: vi.fn() });
});
describe('Constructor & Initialization', () => {
it('should initialize and check bucket existence', async () => {
const store = new GCSTaskStore(bucketName);
await store['ensureBucketInitialized']();
expect(mockStorage).toHaveBeenCalledTimes(1);
expect(mockStorageInstance.getBuckets).toHaveBeenCalled();
expect(logger.info).toHaveBeenCalledWith(
expect.stringContaining('Bucket test-bucket exists'),
);
});
it('should create bucket if it does not exist', async () => {
mockStorageInstance.getBuckets.mockResolvedValue([[]]);
const store = new GCSTaskStore(bucketName);
await store['ensureBucketInitialized']();
expect(mockStorageInstance.createBucket).toHaveBeenCalledWith(bucketName);
expect(logger.info).toHaveBeenCalledWith(
expect.stringContaining('Bucket test-bucket created successfully'),
);
});
it('should throw if bucket creation fails', async () => {
mockStorageInstance.getBuckets.mockResolvedValue([[]]);
mockStorageInstance.createBucket.mockRejectedValue(
new Error('Create failed'),
);
const store = new GCSTaskStore(bucketName);
await expect(store['ensureBucketInitialized']()).rejects.toThrow(
'Failed to create GCS bucket test-bucket: Error: Create failed',
);
});
});
describe('save', () => {
const mockTask: SDKTask = {
id: 'task1',
contextId: 'ctx1',
kind: 'task',
status: { state: 'working' },
metadata: {},
};
it('should save metadata and workspace', async () => {
const store = new GCSTaskStore(bucketName);
await store.save(mockTask);
expect(mockFile.save).toHaveBeenCalledTimes(1);
expect(mockTar.c).toHaveBeenCalledTimes(1);
expect(mockCreateReadStream).toHaveBeenCalledTimes(1);
expect(mockFse.remove).toHaveBeenCalledTimes(1);
expect(logger.info).toHaveBeenCalledWith(
expect.stringContaining('metadata saved to GCS'),
);
expect(logger.info).toHaveBeenCalledWith(
expect.stringContaining('workspace saved to GCS'),
);
});
it('should handle tar creation failure', async () => {
mockFse.pathExists.mockImplementation(
async (path) =>
!path.toString().includes('task-task1-workspace-test-uuid.tar.gz'),
);
const store = new GCSTaskStore(bucketName);
await expect(store.save(mockTask)).rejects.toThrow(
'tar.c command failed to create',
);
});
});
describe('load', () => {
it('should load task metadata and workspace', async () => {
mockGunzipSync.mockReturnValue(
Buffer.from(
JSON.stringify({
[METADATA_KEY]: { _agentSettings: {}, _taskState: 'submitted' },
_contextId: 'ctx1',
}),
),
);
mockFile.download.mockResolvedValue([Buffer.from('compressed metadata')]);
mockFile.download.mockResolvedValueOnce([
Buffer.from('compressed metadata'),
]);
mockBucket.file = vi.fn((path) => {
const newMockFile = { ...mockFile };
if (path.includes('metadata')) {
newMockFile.download = vi
.fn()
.mockResolvedValue([Buffer.from('compressed metadata')]);
newMockFile.exists = vi.fn().mockResolvedValue([true]);
} else {
newMockFile.download = vi
.fn()
.mockResolvedValue([Buffer.from('compressed workspace')]);
newMockFile.exists = vi.fn().mockResolvedValue([true]);
}
return newMockFile;
});
const store = new GCSTaskStore(bucketName);
const task = await store.load('task1');
expect(task).toBeDefined();
expect(task?.id).toBe('task1');
expect(mockBucket.file).toHaveBeenCalledWith(
'tasks/task1/metadata.tar.gz',
);
expect(mockBucket.file).toHaveBeenCalledWith(
'tasks/task1/workspace.tar.gz',
);
expect(mockTar.x).toHaveBeenCalledTimes(1);
expect(mockFse.remove).toHaveBeenCalledTimes(1);
});
it('should return undefined if metadata not found', async () => {
mockFile.exists.mockResolvedValue([false]);
const store = new GCSTaskStore(bucketName);
const task = await store.load('task1');
expect(task).toBeUndefined();
expect(mockBucket.file).toHaveBeenCalledWith(
'tasks/task1/metadata.tar.gz',
);
});
it('should load metadata even if workspace not found', async () => {
mockGunzipSync.mockReturnValue(
Buffer.from(
JSON.stringify({
[METADATA_KEY]: { _agentSettings: {}, _taskState: 'submitted' },
_contextId: 'ctx1',
}),
),
);
mockBucket.file = vi.fn((path) => {
const newMockFile = { ...mockFile };
if (path.includes('workspace.tar.gz')) {
newMockFile.exists = vi.fn().mockResolvedValue([false]);
} else {
newMockFile.exists = vi.fn().mockResolvedValue([true]);
newMockFile.download = vi
.fn()
.mockResolvedValue([Buffer.from('compressed metadata')]);
}
return newMockFile;
});
const store = new GCSTaskStore(bucketName);
const task = await store.load('task1');
expect(task).toBeDefined();
expect(mockTar.x).not.toHaveBeenCalled();
expect(logger.info).toHaveBeenCalledWith(
expect.stringContaining('workspace archive not found'),
);
});
});
});
describe('NoOpTaskStore', () => {
let realStore: TaskStore;
let noOpStore: NoOpTaskStore;
beforeEach(() => {
// Create a mock of the real store to delegate to
realStore = {
save: vi.fn(),
load: vi.fn().mockResolvedValue({ id: 'task-123' } as SDKTask),
};
noOpStore = new NoOpTaskStore(realStore);
});
it("should not call the real store's save method", async () => {
const mockTask: SDKTask = { id: 'test-task' } as SDKTask;
await noOpStore.save(mockTask);
expect(realStore.save).not.toHaveBeenCalled();
});
it('should delegate the load method to the real store', async () => {
const taskId = 'task-123';
const result = await noOpStore.load(taskId);
expect(realStore.load).toHaveBeenCalledWith(taskId);
expect(result).toBeDefined();
expect(result?.id).toBe(taskId);
});
});

View File

@@ -0,0 +1,308 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { Storage } from '@google-cloud/storage';
import { gzipSync, gunzipSync } from 'node:zlib';
import * as tar from 'tar';
import * as fse from 'fs-extra';
import { promises as fsPromises, createReadStream } from 'node:fs';
import { tmpdir } from 'node:os';
import { join } from 'node:path';
import type { Task as SDKTask } from '@a2a-js/sdk';
import type { TaskStore } from '@a2a-js/sdk/server';
import { logger } from './logger.js';
import { setTargetDir } from './config.js';
import {
getPersistedState,
type PersistedTaskMetadata,
} from './metadata_types.js';
import { v4 as uuidv4 } from 'uuid';
type ObjectType = 'metadata' | 'workspace';
const getTmpArchiveFilename = (taskId: string): string =>
`task-${taskId}-workspace-${uuidv4()}.tar.gz`;
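// e.g. getTmpArchiveFilename('task1') -> 'task-task1-workspace-<uuid>.tar.gz';
// the random suffix keeps concurrent saves of the same task from colliding in tmpdir.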
export class GCSTaskStore implements TaskStore {
private storage: Storage;
private bucketName: string;
private bucketInitialized: Promise<void>;
constructor(bucketName: string) {
if (!bucketName) {
throw new Error('GCS bucket name is required.');
}
this.storage = new Storage();
this.bucketName = bucketName;
logger.info(`GCSTaskStore initializing with bucket: ${this.bucketName}`);
// Prerequisites: the user or service account must have the Storage Admin IAM
// role, and the bucket name must be globally unique.
this.bucketInitialized = this.initializeBucket();
}
private async initializeBucket(): Promise<void> {
try {
const [buckets] = await this.storage.getBuckets();
const exists = buckets.some((bucket) => bucket.name === this.bucketName);
if (!exists) {
logger.info(
`Bucket ${this.bucketName} not found. Attempting to create it...`,
);
try {
await this.storage.createBucket(this.bucketName);
logger.info(`Bucket ${this.bucketName} created successfully.`);
} catch (createError) {
logger.error(
`Failed to create bucket ${this.bucketName}: ${createError}`,
);
throw new Error(
`Failed to create GCS bucket ${this.bucketName}: ${createError}`,
);
}
} else {
logger.info(`Bucket ${this.bucketName} exists.`);
}
} catch (error) {
logger.error(
`Error during bucket initialization for ${this.bucketName}: ${error}`,
);
throw new Error(
`Failed to initialize GCS bucket ${this.bucketName}: ${error}`,
);
}
}
private async ensureBucketInitialized(): Promise<void> {
await this.bucketInitialized;
}
private getObjectPath(taskId: string, type: ObjectType): string {
return `tasks/${taskId}/${type}.tar.gz`;
}
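// e.g. getObjectPath('task1', 'metadata') -> 'tasks/task1/metadata.tar.gz'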
async save(task: SDKTask): Promise<void> {
await this.ensureBucketInitialized();
const taskId = task.id;
const persistedState = getPersistedState(
task.metadata as PersistedTaskMetadata,
);
if (!persistedState) {
throw new Error(`Task ${taskId} is missing persisted state in metadata.`);
}
const workDir = process.cwd();
const metadataObjectPath = this.getObjectPath(taskId, 'metadata');
const workspaceObjectPath = this.getObjectPath(taskId, 'workspace');
const dataToStore = task.metadata;
try {
const jsonString = JSON.stringify(dataToStore);
const compressedMetadata = gzipSync(Buffer.from(jsonString));
const metadataFile = this.storage
.bucket(this.bucketName)
.file(metadataObjectPath);
await metadataFile.save(compressedMetadata, {
contentType: 'application/gzip',
});
logger.info(
`Task ${taskId} metadata saved to GCS: gs://${this.bucketName}/${metadataObjectPath}`,
);
if (await fse.pathExists(workDir)) {
const entries = await fsPromises.readdir(workDir);
if (entries.length > 0) {
const tmpArchiveFile = join(tmpdir(), getTmpArchiveFilename(taskId));
try {
await tar.c(
{
gzip: true,
file: tmpArchiveFile,
cwd: workDir,
portable: true,
},
entries,
);
if (!(await fse.pathExists(tmpArchiveFile))) {
throw new Error(
`tar.c command failed to create ${tmpArchiveFile}`,
);
}
const workspaceFile = this.storage
.bucket(this.bucketName)
.file(workspaceObjectPath);
const sourceStream = createReadStream(tmpArchiveFile);
const destStream = workspaceFile.createWriteStream({
contentType: 'application/gzip',
resumable: true,
});
await new Promise<void>((resolve, reject) => {
sourceStream.on('error', (err) => {
logger.error(
`Error in source stream for ${tmpArchiveFile}:`,
err,
);
// Attempt to close destStream if source fails
if (!destStream.destroyed) {
destStream.destroy(err);
}
reject(err);
});
destStream.on('error', (err) => {
logger.error(
`Error in GCS dest stream for ${workspaceObjectPath}:`,
err,
);
reject(err);
});
destStream.on('finish', () => {
logger.info(
`GCS destStream finished for ${workspaceObjectPath}`,
);
resolve();
});
logger.info(
`Piping ${tmpArchiveFile} to GCS object ${workspaceObjectPath}`,
);
sourceStream.pipe(destStream);
});
logger.info(
`Task ${taskId} workspace saved to GCS: gs://${this.bucketName}/${workspaceObjectPath}`,
);
} catch (error) {
logger.error(
`Error during workspace save process for ${taskId}:`,
error,
);
throw error;
} finally {
logger.info(`Cleaning up temporary file: ${tmpArchiveFile}`);
try {
if (await fse.pathExists(tmpArchiveFile)) {
await fse.remove(tmpArchiveFile);
logger.info(
`Successfully removed temporary file: ${tmpArchiveFile}`,
);
} else {
logger.warn(
`Temporary file not found for cleanup: ${tmpArchiveFile}`,
);
}
} catch (removeError) {
logger.error(
`Error removing temporary file ${tmpArchiveFile}:`,
removeError,
);
}
}
} else {
logger.info(
`Workspace directory ${workDir} is empty, skipping workspace save for task ${taskId}.`,
);
}
} else {
logger.info(
`Workspace directory ${workDir} not found, skipping workspace save for task ${taskId}.`,
);
}
} catch (error) {
logger.error(`Failed to save task ${taskId} to GCS:`, error);
throw error;
}
}
async load(taskId: string): Promise<SDKTask | undefined> {
await this.ensureBucketInitialized();
const metadataObjectPath = this.getObjectPath(taskId, 'metadata');
const workspaceObjectPath = this.getObjectPath(taskId, 'workspace');
try {
const metadataFile = this.storage
.bucket(this.bucketName)
.file(metadataObjectPath);
const [metadataExists] = await metadataFile.exists();
if (!metadataExists) {
logger.info(`Task ${taskId} metadata not found in GCS.`);
return undefined;
}
const [compressedMetadata] = await metadataFile.download();
const jsonData = gunzipSync(compressedMetadata).toString();
const loadedMetadata = JSON.parse(jsonData);
logger.info(`Task ${taskId} metadata loaded from GCS.`);
const persistedState = getPersistedState(loadedMetadata);
if (!persistedState) {
throw new Error(
`Loaded metadata for task ${taskId} is missing internal persisted state.`,
);
}
const agentSettings = persistedState._agentSettings;
const workDir = setTargetDir(agentSettings);
await fse.ensureDir(workDir);
const workspaceFile = this.storage
.bucket(this.bucketName)
.file(workspaceObjectPath);
const [workspaceExists] = await workspaceFile.exists();
if (workspaceExists) {
const tmpArchiveFile = join(tmpdir(), getTmpArchiveFilename(taskId));
try {
await workspaceFile.download({ destination: tmpArchiveFile });
await tar.x({ file: tmpArchiveFile, cwd: workDir });
logger.info(
`Task ${taskId} workspace restored from GCS to ${workDir}`,
);
} finally {
if (await fse.pathExists(tmpArchiveFile)) {
await fse.remove(tmpArchiveFile);
}
}
} else {
logger.info(`Task ${taskId} workspace archive not found in GCS.`);
}
return {
id: taskId,
contextId: loadedMetadata._contextId || uuidv4(),
kind: 'task',
status: {
state: persistedState._taskState,
timestamp: new Date().toISOString(),
},
metadata: loadedMetadata,
history: [],
artifacts: [],
};
} catch (error) {
logger.error(`Failed to load task ${taskId} from GCS:`, error);
throw error;
}
}
}
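// Typical usage (illustrative sketch; bucket name is an example):
//   const store = new GCSTaskStore('my-agent-tasks');
//   await store.save(task);         // writes tasks/<id>/metadata.tar.gz and workspace.tar.gz
//   const loaded = await store.load(task.id);  // undefined if metadata is missing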
export class NoOpTaskStore implements TaskStore {
constructor(private realStore: TaskStore) {}
async save(task: SDKTask): Promise<void> {
logger.info(`[NoOpTaskStore] save called for task ${task.id} - IGNORED`);
return Promise.resolve();
}
async load(taskId: string): Promise<SDKTask | undefined> {
logger.info(
`[NoOpTaskStore] load called for task ${taskId}, delegating to real store.`,
);
return this.realStore.load(taskId);
}
}

View File

@@ -0,0 +1,8 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
export * from './agent.js';
export * from './types.js';

View File

@@ -0,0 +1,28 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import winston from 'winston';
const logger = winston.createLogger({
level: 'info',
format: winston.format.combine(
// First, add a timestamp to the log info object
winston.format.timestamp({
format: 'YYYY-MM-DD HH:mm:ss.SSS A', // Custom timestamp format
}),
// Here we define the custom output format
winston.format.printf((info) => {
const { level, timestamp, message, ...rest } = info;
return (
`[${level.toUpperCase()}] ${timestamp} -- ${message}` +
`${Object.keys(rest).length > 0 ? `\n${JSON.stringify(rest, null, 2)}` : ''}`
); // Only print ...rest if present
}),
),
transports: [new winston.transports.Console()],
});
export { logger };
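// Example output for logger.info('Task started', { taskId: 'task1' }) (illustrative):
//   [INFO] 2025-01-01 12:00:00.000 PM -- Task started
//   {
//     "taskId": "task1"
//   }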

View File

@@ -0,0 +1,33 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import type { AgentSettings } from './types.js';
import type { TaskState } from '@a2a-js/sdk';
export interface PersistedStateMetadata {
_agentSettings: AgentSettings;
_taskState: TaskState;
}
export type PersistedTaskMetadata = { [k: string]: unknown };
export const METADATA_KEY = '__persistedState';
export function getPersistedState(
metadata: PersistedTaskMetadata,
): PersistedStateMetadata | undefined {
return metadata?.[METADATA_KEY] as PersistedStateMetadata | undefined;
}
export function setPersistedState(
metadata: PersistedTaskMetadata,
state: PersistedStateMetadata,
): PersistedTaskMetadata {
return {
...metadata,
[METADATA_KEY]: state,
};
}
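// Illustrative round-trip (where `settings` is some AgentSettings value):
//   const meta = setPersistedState({ foo: 1 }, { _agentSettings: settings, _taskState: 'submitted' });
//   getPersistedState(meta); // -> { _agentSettings: settings, _taskState: 'submitted' }
// The wrapper stores agent-internal state under METADATA_KEY without clobbering other metadata keys.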

View File

@@ -0,0 +1,33 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import * as url from 'node:url';
import * as path from 'node:path';
import { logger } from './logger.js';
import { main } from './agent.js';
// Check if the module is the main script being run. path.resolve() creates a
// canonical, absolute path, which avoids cross-platform issues.
const isMainModule =
path.resolve(process.argv[1]) ===
path.resolve(url.fileURLToPath(import.meta.url));
process.on('uncaughtException', (error) => {
logger.error('Unhandled exception:', error);
process.exit(1);
});
if (
import.meta.url.startsWith('file:') &&
isMainModule &&
process.env['NODE_ENV'] !== 'test'
) {
main().catch((error) => {
logger.error('[CoreAgent] Unhandled error in main:', error);
process.exit(1);
});
}

View File

@@ -0,0 +1,154 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import * as fs from 'node:fs';
import * as path from 'node:path';
import { homedir } from 'node:os';
import type { MCPServerConfig } from '@qwen-code/qwen-code-core';
import {
getErrorMessage,
type TelemetrySettings,
} from '@qwen-code/qwen-code-core';
import stripJsonComments from 'strip-json-comments';
export const SETTINGS_DIRECTORY_NAME = '.gemini';
export const USER_SETTINGS_DIR = path.join(homedir(), SETTINGS_DIRECTORY_NAME);
export const USER_SETTINGS_PATH = path.join(USER_SETTINGS_DIR, 'settings.json');
// Reconcile with https://github.com/google-gemini/gemini-cli/blob/b09bc6656080d4d12e1d06734aae2ec33af5c1ed/packages/cli/src/config/settings.ts#L53
export interface Settings {
mcpServers?: Record<string, MCPServerConfig>;
coreTools?: string[];
excludeTools?: string[];
telemetry?: TelemetrySettings;
showMemoryUsage?: boolean;
checkpointing?: CheckpointingSettings;
// Git-aware file filtering settings
fileFiltering?: {
respectGitIgnore?: boolean;
enableRecursiveFileSearch?: boolean;
};
}
export interface SettingsError {
message: string;
path: string;
}
export interface CheckpointingSettings {
enabled?: boolean;
}
/**
* Loads settings from user and workspace directories.
* Project settings override user settings.
*
* How it differs from gemini-cli/cli: returns the already-merged settings rather
* than `LoadedSettings` (unnecessary, since we are not modifying the user's
* settings.json).
*/
export function loadSettings(workspaceDir: string): Settings {
let userSettings: Settings = {};
let workspaceSettings: Settings = {};
const settingsErrors: SettingsError[] = [];
// Load user settings
try {
if (fs.existsSync(USER_SETTINGS_PATH)) {
const userContent = fs.readFileSync(USER_SETTINGS_PATH, 'utf-8');
const parsedUserSettings = JSON.parse(
stripJsonComments(userContent),
) as Settings;
userSettings = resolveEnvVarsInObject(parsedUserSettings);
}
} catch (error: unknown) {
settingsErrors.push({
message: getErrorMessage(error),
path: USER_SETTINGS_PATH,
});
}
const workspaceSettingsPath = path.join(
workspaceDir,
SETTINGS_DIRECTORY_NAME,
'settings.json',
);
// Load workspace settings
try {
if (fs.existsSync(workspaceSettingsPath)) {
const projectContent = fs.readFileSync(workspaceSettingsPath, 'utf-8');
const parsedWorkspaceSettings = JSON.parse(
stripJsonComments(projectContent),
) as Settings;
workspaceSettings = resolveEnvVarsInObject(parsedWorkspaceSettings);
}
} catch (error: unknown) {
settingsErrors.push({
message: getErrorMessage(error),
path: workspaceSettingsPath,
});
}
if (settingsErrors.length > 0) {
console.error('Errors loading settings:');
for (const error of settingsErrors) {
console.error(` Path: ${error.path}`);
console.error(` Message: ${error.message}`);
}
}
// If there are overlapping keys, the values of workspaceSettings will
// override values from userSettings
return {
...userSettings,
...workspaceSettings,
};
}
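// resolveEnvVarsInString expands $VAR_NAME and ${VAR_NAME} references, e.g. (illustrative):
//   resolveEnvVarsInString('Bearer $API_TOKEN') -> 'Bearer <value of API_TOKEN>'
// References to unset variables are left untouched.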
function resolveEnvVarsInString(value: string): string {
const envVarRegex = /\$(?:(\w+)|{([^}]+)})/g; // Find $VAR_NAME or ${VAR_NAME}
return value.replace(envVarRegex, (match, varName1, varName2) => {
const varName = varName1 || varName2;
if (process && process.env && typeof process.env[varName] === 'string') {
return process.env[varName]!;
}
return match;
});
}
function resolveEnvVarsInObject<T>(obj: T): T {
if (
obj === null ||
obj === undefined ||
typeof obj === 'boolean' ||
typeof obj === 'number'
) {
return obj;
}
if (typeof obj === 'string') {
return resolveEnvVarsInString(obj) as unknown as T;
}
if (Array.isArray(obj)) {
return obj.map((item) => resolveEnvVarsInObject(item)) as unknown as T;
}
if (typeof obj === 'object') {
const newObj = { ...obj } as T;
for (const key in newObj) {
if (Object.prototype.hasOwnProperty.call(newObj, key)) {
newObj[key] = resolveEnvVarsInObject(newObj[key]);
}
}
return newObj;
}
return obj;
}

View File

@@ -0,0 +1,930 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import {
CoreToolScheduler,
GeminiClient,
GeminiEventType,
ToolConfirmationOutcome,
ApprovalMode,
getAllMCPServerStatuses,
MCPServerStatus,
isNodeError,
parseAndFormatApiError,
} from '@qwen-code/qwen-code-core';
import type {
ToolConfirmationPayload,
CompletedToolCall,
ToolCall,
ToolCallRequestInfo,
ServerGeminiErrorEvent,
ServerGeminiStreamEvent,
ToolCallConfirmationDetails,
Config,
UserTierId,
} from '@qwen-code/qwen-code-core';
import type { RequestContext } from '@a2a-js/sdk/server';
import { type ExecutionEventBus } from '@a2a-js/sdk/server';
import type {
TaskStatusUpdateEvent,
TaskArtifactUpdateEvent,
TaskState,
Message,
Part,
Artifact,
} from '@a2a-js/sdk';
import { v4 as uuidv4 } from 'uuid';
import { logger } from './logger.js';
import * as fs from 'node:fs';
import { CoderAgentEvent } from './types.js';
import type {
CoderAgentMessage,
StateChange,
ToolCallUpdate,
TextContent,
TaskMetadata,
Thought,
ThoughtSummary,
} from './types.js';
import type { PartUnion, Part as genAiPart } from '@google/genai';
export class Task {
id: string;
contextId: string;
scheduler: CoreToolScheduler;
config: Config;
geminiClient: GeminiClient;
pendingToolConfirmationDetails: Map<string, ToolCallConfirmationDetails>;
taskState: TaskState;
eventBus?: ExecutionEventBus;
completedToolCalls: CompletedToolCall[];
skipFinalTrueAfterInlineEdit = false;
// For tool waiting logic
private pendingToolCalls: Map<string, string> = new Map(); // toolCallId -> status
private toolCompletionPromise?: Promise<void>;
private toolCompletionNotifier?: {
resolve: () => void;
reject: (reason?: Error) => void;
};
private constructor(
id: string,
contextId: string,
config: Config,
eventBus?: ExecutionEventBus,
) {
this.id = id;
this.contextId = contextId;
this.config = config;
this.scheduler = this.createScheduler();
this.geminiClient = new GeminiClient(this.config);
this.pendingToolConfirmationDetails = new Map();
this.taskState = 'submitted';
this.eventBus = eventBus;
this.completedToolCalls = [];
this._resetToolCompletionPromise();
this.config.setFlashFallbackHandler(
async (currentModel: string, fallbackModel: string): Promise<boolean> => {
config.setModel(fallbackModel); // gemini-cli-core sets to DEFAULT_GEMINI_FLASH_MODEL
// Switch model for future use but return false to stop current retry
return false;
},
);
}
static async create(
id: string,
contextId: string,
config: Config,
eventBus?: ExecutionEventBus,
): Promise<Task> {
return new Task(id, contextId, config, eventBus);
}
// Note: `getAllMCPServerStatuses` retrieves the status of all MCP servers for the entire
// process. This is not scoped to the individual task but reflects the global connection
// state managed within the @gemini-cli/core module.
async getMetadata(): Promise<TaskMetadata> {
const toolRegistry = await this.config.getToolRegistry();
const mcpServers = this.config.getMcpServers() || {};
const serverStatuses = getAllMCPServerStatuses();
const servers = Object.keys(mcpServers).map((serverName) => ({
name: serverName,
status: serverStatuses.get(serverName) || MCPServerStatus.DISCONNECTED,
tools: toolRegistry.getToolsByServer(serverName).map((tool) => ({
name: tool.name,
description: tool.description,
parameterSchema: tool.schema.parameters,
})),
}));
const availableTools = toolRegistry.getAllTools().map((tool) => ({
name: tool.name,
description: tool.description,
parameterSchema: tool.schema.parameters,
}));
const metadata: TaskMetadata = {
id: this.id,
contextId: this.contextId,
taskState: this.taskState,
model: this.config.getContentGeneratorConfig().model,
mcpServers: servers,
availableTools,
};
return metadata;
}
private _resetToolCompletionPromise(): void {
this.toolCompletionPromise = new Promise((resolve, reject) => {
this.toolCompletionNotifier = { resolve, reject };
});
// If there are no pending calls when reset, resolve immediately.
if (this.pendingToolCalls.size === 0 && this.toolCompletionNotifier) {
this.toolCompletionNotifier.resolve();
}
}
private _registerToolCall(toolCallId: string, status: string): void {
const wasEmpty = this.pendingToolCalls.size === 0;
this.pendingToolCalls.set(toolCallId, status);
if (wasEmpty) {
this._resetToolCompletionPromise();
}
logger.info(
`[Task] Registered tool call: ${toolCallId}. Pending: ${this.pendingToolCalls.size}`,
);
}
private _resolveToolCall(toolCallId: string): void {
if (this.pendingToolCalls.has(toolCallId)) {
this.pendingToolCalls.delete(toolCallId);
logger.info(
`[Task] Resolved tool call: ${toolCallId}. Pending: ${this.pendingToolCalls.size}`,
);
if (this.pendingToolCalls.size === 0 && this.toolCompletionNotifier) {
this.toolCompletionNotifier.resolve();
}
}
}
async waitForPendingTools(): Promise<void> {
if (this.pendingToolCalls.size === 0) {
return Promise.resolve();
}
logger.info(
`[Task] Waiting for ${this.pendingToolCalls.size} pending tool(s)...`,
);
return this.toolCompletionPromise;
}
cancelPendingTools(reason: string): void {
if (this.pendingToolCalls.size > 0) {
logger.info(
`[Task] Cancelling all ${this.pendingToolCalls.size} pending tool calls. Reason: ${reason}`,
);
}
if (this.toolCompletionNotifier) {
this.toolCompletionNotifier.reject(new Error(reason));
}
this.pendingToolCalls.clear();
// Reset the promise for any future operations, ensuring it's in a clean state.
this._resetToolCompletionPromise();
}
private _createTextMessage(
text: string,
role: 'agent' | 'user' = 'agent',
): Message {
return {
kind: 'message',
role,
parts: [{ kind: 'text', text }],
messageId: uuidv4(),
taskId: this.id,
contextId: this.contextId,
};
}
private _createStatusUpdateEvent(
stateToReport: TaskState,
coderAgentMessage: CoderAgentMessage,
message?: Message,
final = false,
timestamp?: string,
metadataError?: string,
): TaskStatusUpdateEvent {
const metadata: {
coderAgent: CoderAgentMessage;
model: string;
userTier?: UserTierId;
error?: string;
} = {
coderAgent: coderAgentMessage,
model: this.config.getModel(),
userTier: this.geminiClient.getUserTier(),
};
if (metadataError) {
metadata.error = metadataError;
}
return {
kind: 'status-update',
taskId: this.id,
contextId: this.contextId,
status: {
state: stateToReport,
message, // Shorthand property
timestamp: timestamp || new Date().toISOString(),
},
final,
metadata,
};
}
setTaskStateAndPublishUpdate(
newState: TaskState,
coderAgentMessage: CoderAgentMessage,
messageText?: string,
messageParts?: Part[], // For more complex messages
final = false,
metadataError?: string,
): void {
this.taskState = newState;
let message: Message | undefined;
if (messageText) {
message = this._createTextMessage(messageText);
} else if (messageParts) {
message = {
kind: 'message',
role: 'agent',
parts: messageParts,
messageId: uuidv4(),
taskId: this.id,
contextId: this.contextId,
};
}
const event = this._createStatusUpdateEvent(
this.taskState,
coderAgentMessage,
message,
final,
undefined,
metadataError,
);
this.eventBus?.publish(event);
}
private _schedulerOutputUpdate(
toolCallId: string,
outputChunk: string,
): void {
logger.info(
'[Task] Scheduler output update for tool call ' +
toolCallId +
': ' +
outputChunk,
);
const artifact: Artifact = {
artifactId: `tool-${toolCallId}-output`,
parts: [
{
kind: 'text',
text: outputChunk,
} as Part,
],
};
const artifactEvent: TaskArtifactUpdateEvent = {
kind: 'artifact-update',
taskId: this.id,
contextId: this.contextId,
artifact,
append: true,
lastChunk: false,
};
this.eventBus?.publish(artifactEvent);
}
private async _schedulerAllToolCallsComplete(
completedToolCalls: CompletedToolCall[],
): Promise<void> {
logger.info(
'[Task] All tool calls completed by scheduler (batch):',
completedToolCalls.map((tc) => tc.request.callId),
);
this.completedToolCalls.push(...completedToolCalls);
completedToolCalls.forEach((tc) => {
this._resolveToolCall(tc.request.callId);
});
}
private _schedulerToolCallsUpdate(toolCalls: ToolCall[]): void {
logger.info(
'[Task] Scheduler tool calls updated:',
toolCalls.map((tc) => `${tc.request.callId} (${tc.status})`),
);
// Update state and send continuous, non-final updates
toolCalls.forEach((tc) => {
const previousStatus = this.pendingToolCalls.get(tc.request.callId);
const hasChanged = previousStatus !== tc.status;
// Resolve tool call if it has reached a terminal state
if (['success', 'error', 'cancelled'].includes(tc.status)) {
this._resolveToolCall(tc.request.callId);
} else {
// This will update the map
this._registerToolCall(tc.request.callId, tc.status);
}
if (tc.status === 'awaiting_approval' && tc.confirmationDetails) {
this.pendingToolConfirmationDetails.set(
tc.request.callId,
tc.confirmationDetails,
);
}
// Only send an update if the status has actually changed.
if (hasChanged) {
const message = this.toolStatusMessage(tc, this.id, this.contextId);
const coderAgentMessage: CoderAgentMessage =
tc.status === 'awaiting_approval'
? { kind: CoderAgentEvent.ToolCallConfirmationEvent }
: { kind: CoderAgentEvent.ToolCallUpdateEvent };
const event = this._createStatusUpdateEvent(
this.taskState,
coderAgentMessage,
message,
false, // Always false for these continuous updates
);
this.eventBus?.publish(event);
}
});
if (this.config.getApprovalMode() === ApprovalMode.YOLO) {
logger.info('[Task] YOLO mode enabled. Auto-approving all tool calls.');
toolCalls.forEach((tc: ToolCall) => {
if (tc.status === 'awaiting_approval' && tc.confirmationDetails) {
tc.confirmationDetails.onConfirm(ToolConfirmationOutcome.ProceedOnce);
this.pendingToolConfirmationDetails.delete(tc.request.callId);
}
});
return;
}
const allPendingStatuses = Array.from(this.pendingToolCalls.values());
const isAwaitingApproval = allPendingStatuses.some(
(status) => status === 'awaiting_approval',
);
const allPendingAreStable = allPendingStatuses.every(
(status) =>
status === 'awaiting_approval' ||
status === 'success' ||
status === 'error' ||
status === 'cancelled',
);
// Send a final update only when:
// 1. At least one pending tool call is awaiting_approval, and
// 2. All pending tool calls are in a stable state (i.e. not validating or executing), and
// 3. We are not right after an inline edit: the edited tool call reports
//    awaiting_approval and THEN scheduled, so in that case we wait for the next update.
if (
isAwaitingApproval &&
allPendingAreStable &&
!this.skipFinalTrueAfterInlineEdit
) {
this.skipFinalTrueAfterInlineEdit = false;
// We don't need to send another message, just a final status update.
this.setTaskStateAndPublishUpdate(
'input-required',
{ kind: CoderAgentEvent.StateChangeEvent },
undefined,
undefined,
/*final*/ true,
);
}
}
private createScheduler(): CoreToolScheduler {
const scheduler = new CoreToolScheduler({
outputUpdateHandler: this._schedulerOutputUpdate.bind(this),
onAllToolCallsComplete: this._schedulerAllToolCallsComplete.bind(this),
onToolCallsUpdate: this._schedulerToolCallsUpdate.bind(this),
getPreferredEditor: () => 'vscode',
config: this.config,
onEditorClose: () => {},
});
return scheduler;
}
private toolStatusMessage(
tc: ToolCall,
taskId: string,
contextId: string,
): Message {
const messageParts: Part[] = [];
// Create a serializable version of the ToolCall (pick only the necessary
// properties and avoid methods that cause circular-reference errors)
const serializableToolCall: { [key: string]: unknown } = {
request: tc.request,
status: tc.status,
};
// For WaitingToolCall type
if ('confirmationDetails' in tc) {
serializableToolCall['confirmationDetails'] = tc.confirmationDetails;
}
if (tc.tool) {
serializableToolCall['tool'] = {
name: tc.tool.name,
displayName: tc.tool.displayName,
description: tc.tool.description,
kind: tc.tool.kind,
isOutputMarkdown: tc.tool.isOutputMarkdown,
canUpdateOutput: tc.tool.canUpdateOutput,
schema: tc.tool.schema,
parameterSchema: tc.tool.parameterSchema,
};
}
messageParts.push({
kind: 'data',
data: serializableToolCall as ToolCall,
} as Part);
return {
kind: 'message',
role: 'agent',
parts: messageParts,
messageId: uuidv4(),
taskId,
contextId,
};
}
private async getProposedContent(
file_path: string,
old_string: string,
new_string: string,
): Promise<string> {
try {
const currentContent = fs.readFileSync(file_path, 'utf8');
return this._applyReplacement(
currentContent,
old_string,
new_string,
old_string === '' && currentContent === '',
);
} catch (err) {
if (!isNodeError(err) || err.code !== 'ENOENT') throw err;
return '';
}
}
private _applyReplacement(
currentContent: string | null,
oldString: string,
newString: string,
isNewFile: boolean,
): string {
if (isNewFile) {
return newString;
}
if (currentContent === null) {
// Should not happen if not a new file, but defensively return empty or newString if oldString is also empty
return oldString === '' ? newString : '';
}
// If oldString is empty and it's not a new file, do not modify the content.
if (oldString === '' && !isNewFile) {
return currentContent;
}
return currentContent.replaceAll(oldString, newString);
}
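// Illustrative behavior of _applyReplacement:
//   _applyReplacement('a b a', 'a', 'c', false) -> 'c b c'   (replaces all occurrences)
//   _applyReplacement(null, '', 'new', true)    -> 'new'     (new-file case)
//   _applyReplacement('keep', '', 'x', false)   -> 'keep'    (empty old_string is a no-op)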
async scheduleToolCalls(
requests: ToolCallRequestInfo[],
abortSignal: AbortSignal,
): Promise<void> {
if (requests.length === 0) {
return;
}
for (const request of requests) {
if (
!request.args['newContent'] &&
request.name === 'replace' &&
request.args &&
request.args['file_path'] &&
request.args['old_string'] &&
request.args['new_string']
) {
request.args['newContent'] = await this.getProposedContent(
request.args['file_path'] as string,
request.args['old_string'] as string,
request.args['new_string'] as string,
);
}
}
logger.info(`[Task] Scheduling batch of ${requests.length} tool calls.`);
const stateChange: StateChange = {
kind: CoderAgentEvent.StateChangeEvent,
};
this.setTaskStateAndPublishUpdate('working', stateChange);
await this.scheduler.schedule(requests, abortSignal);
}
async acceptAgentMessage(event: ServerGeminiStreamEvent): Promise<void> {
const stateChange: StateChange = {
kind: CoderAgentEvent.StateChangeEvent,
};
switch (event.type) {
case GeminiEventType.Content:
logger.info('[Task] Sending agent message content...');
this._sendTextContent(event.value);
break;
case GeminiEventType.ToolCallRequest:
// This is now handled by the agent loop, which collects all requests
// and calls scheduleToolCalls once.
logger.warn(
'[Task] A single tool call request was passed to acceptAgentMessage. This should be handled in a batch by the agent. Ignoring.',
);
break;
case GeminiEventType.ToolCallResponse:
// This event type from ServerGeminiStreamEvent might be for when LLM *generates* a tool response part.
// The actual execution result comes via user message.
logger.info(
'[Task] Received tool call response from LLM (part of generation):',
event.value,
);
break;
case GeminiEventType.ToolCallConfirmation:
// This is when LLM requests confirmation, not when user provides it.
logger.info(
'[Task] Received tool call confirmation request from LLM:',
event.value.request.callId,
);
this.pendingToolConfirmationDetails.set(
event.value.request.callId,
event.value.details,
);
// This will be handled by the scheduler and _schedulerToolCallsUpdate will set InputRequired if needed.
// No direct state change here, scheduler drives it.
break;
case GeminiEventType.UserCancelled:
logger.info('[Task] Received user cancelled event from LLM stream.');
this.cancelPendingTools('User cancelled via LLM stream event');
this.setTaskStateAndPublishUpdate(
'input-required',
stateChange,
'Task cancelled by user',
undefined,
true,
);
break;
case GeminiEventType.Thought:
logger.info('[Task] Sending agent thought...');
this._sendThought(event.value);
break;
case GeminiEventType.ChatCompressed:
break;
case GeminiEventType.Finished:
logger.info(`[Task ${this.id}] Agent finished its turn.`);
break;
case GeminiEventType.Error:
default: {
// Block scope for lexical declaration
const errorEvent = event as ServerGeminiErrorEvent; // Type assertion
const errorMessage =
errorEvent.value?.error.message ?? 'Unknown error from LLM stream';
logger.error(
'[Task] Received error event from LLM stream:',
errorMessage,
);
let errMessage = 'Unknown error from LLM stream';
if (errorEvent.value) {
errMessage = parseAndFormatApiError(errorEvent.value);
}
this.cancelPendingTools(`LLM stream error: ${errorMessage}`);
this.setTaskStateAndPublishUpdate(
this.taskState,
stateChange,
`Agent error: ${errorMessage}`,
undefined,
false,
errMessage,
);
break;
}
}
}
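// A tool confirmation arrives as a 'data' Part shaped like (illustrative):
//   { kind: 'data', data: { callId: 'call-123', outcome: 'proceed_once',
//     newContent: '<full proposed file content, edit tool only>' } }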
private async _handleToolConfirmationPart(part: Part): Promise<boolean> {
if (
part.kind !== 'data' ||
!part.data ||
typeof part.data['callId'] !== 'string' ||
typeof part.data['outcome'] !== 'string'
) {
return false;
}
const callId = part.data['callId'] as string;
const outcomeString = part.data['outcome'] as string;
let confirmationOutcome: ToolConfirmationOutcome | undefined;
if (outcomeString === 'proceed_once') {
confirmationOutcome = ToolConfirmationOutcome.ProceedOnce;
} else if (outcomeString === 'cancel') {
confirmationOutcome = ToolConfirmationOutcome.Cancel;
} else if (outcomeString === 'proceed_always') {
confirmationOutcome = ToolConfirmationOutcome.ProceedAlways;
} else if (outcomeString === 'proceed_always_server') {
confirmationOutcome = ToolConfirmationOutcome.ProceedAlwaysServer;
} else if (outcomeString === 'proceed_always_tool') {
confirmationOutcome = ToolConfirmationOutcome.ProceedAlwaysTool;
} else if (outcomeString === 'modify_with_editor') {
confirmationOutcome = ToolConfirmationOutcome.ModifyWithEditor;
} else {
logger.warn(
`[Task] Unknown tool confirmation outcome: "${outcomeString}" for callId: ${callId}`,
);
return false;
}
const confirmationDetails = this.pendingToolConfirmationDetails.get(callId);
if (!confirmationDetails) {
logger.warn(
`[Task] Received tool confirmation for unknown or already processed callId: ${callId}`,
);
return false;
}
logger.info(
`[Task] Handling tool confirmation for callId: ${callId} with outcome: ${outcomeString}`,
);
try {
// Temporarily unset GCP environment variables so they do not leak into
// tool calls.
const gcpProject = process.env['GOOGLE_CLOUD_PROJECT'];
const gcpCreds = process.env['GOOGLE_APPLICATION_CREDENTIALS'];
try {
delete process.env['GOOGLE_CLOUD_PROJECT'];
delete process.env['GOOGLE_APPLICATION_CREDENTIALS'];
// This will trigger the scheduler to continue or cancel the specific tool.
// The scheduler's onToolCallsUpdate will then reflect the new state (e.g., executing or cancelled).
// For an `edit` tool call, pass the updated payload if present
if (confirmationDetails.type === 'edit') {
const payload = part.data['newContent']
? ({
newContent: part.data['newContent'] as string,
} as ToolConfirmationPayload)
: undefined;
this.skipFinalTrueAfterInlineEdit = !!payload;
await confirmationDetails.onConfirm(confirmationOutcome, payload);
} else {
await confirmationDetails.onConfirm(confirmationOutcome);
}
} finally {
if (gcpProject) {
process.env['GOOGLE_CLOUD_PROJECT'] = gcpProject;
}
if (gcpCreds) {
process.env['GOOGLE_APPLICATION_CREDENTIALS'] = gcpCreds;
}
}
// Do not delete if modifying: a subsequent tool confirmation for the same
// callId will arrive with ProceedOnce/Cancel/etc.
// Note: a direct `!== ToolConfirmationOutcome.ModifyWithEditor` comparison does not work here!
if (confirmationOutcome !== 'modify_with_editor') {
this.pendingToolConfirmationDetails.delete(callId);
}
// If outcome is Cancel, scheduler should update status to 'cancelled', which then resolves the tool.
// If ProceedOnce, scheduler updates to 'executing', then eventually 'success'/'error', which resolves.
return true;
} catch (error) {
logger.error(
`[Task] Error during tool confirmation for callId ${callId}:`,
error,
);
// If confirming fails, we should probably mark this tool as failed
this._resolveToolCall(callId); // Resolve it as it won't proceed.
const errorMessageText =
error instanceof Error
? error.message
: `Error processing tool confirmation for ${callId}`;
const message = this._createTextMessage(errorMessageText);
const toolCallUpdate: ToolCallUpdate = {
kind: CoderAgentEvent.ToolCallUpdateEvent,
};
const event = this._createStatusUpdateEvent(
this.taskState,
toolCallUpdate,
message,
false,
);
this.eventBus?.publish(event);
return false;
}
}
getAndClearCompletedTools(): CompletedToolCall[] {
const tools = [...this.completedToolCalls];
this.completedToolCalls = [];
return tools;
}
addToolResponsesToHistory(completedTools: CompletedToolCall[]): void {
logger.info(
`[Task] Adding ${completedTools.length} tool responses to history without generating a new response.`,
);
const responsesToAdd = completedTools.flatMap(
(toolCall) => toolCall.response.responseParts,
);
for (const response of responsesToAdd) {
let parts: genAiPart[];
if (Array.isArray(response)) {
parts = response;
} else if (typeof response === 'string') {
parts = [{ text: response }];
} else {
parts = [response];
}
this.geminiClient.addHistory({
role: 'user',
parts,
});
}
}
async *sendCompletedToolsToLlm(
completedToolCalls: CompletedToolCall[],
aborted: AbortSignal,
): AsyncGenerator<ServerGeminiStreamEvent> {
if (completedToolCalls.length === 0) {
yield* (async function* () {})(); // Yield nothing
return;
}
const llmParts: PartUnion[] = [];
logger.info(
`[Task] Feeding ${completedToolCalls.length} tool responses to LLM.`,
);
for (const completedToolCall of completedToolCalls) {
logger.info(
`[Task] Adding tool response for "${completedToolCall.request.name}" (callId: ${completedToolCall.request.callId}) to LLM input.`,
);
const responseParts = completedToolCall.response.responseParts;
if (Array.isArray(responseParts)) {
llmParts.push(...responseParts);
} else {
llmParts.push(responseParts);
}
}
logger.info('[Task] Sending new parts to agent.');
const stateChange: StateChange = {
kind: CoderAgentEvent.StateChangeEvent,
};
// Set task state to working as we are about to call LLM
this.setTaskStateAndPublishUpdate('working', stateChange);
// TODO: Determine what it means to have a prompt ID here, then add one.
yield* this.geminiClient.sendMessageStream(
llmParts,
aborted,
/*prompt_id*/ '',
);
}
async *acceptUserMessage(
requestContext: RequestContext,
aborted: AbortSignal,
): AsyncGenerator<ServerGeminiStreamEvent> {
const userMessage = requestContext.userMessage;
const llmParts: PartUnion[] = [];
let anyConfirmationHandled = false;
let hasContentForLlm = false;
for (const part of userMessage.parts) {
const confirmationHandled = await this._handleToolConfirmationPart(part);
if (confirmationHandled) {
anyConfirmationHandled = true;
// If a confirmation was handled, the scheduler will now run the tool (or cancel it).
// We don't send anything to the LLM for this part.
// The subsequent tool execution will eventually lead to resolveToolCall.
continue;
}
if (part.kind === 'text') {
llmParts.push({ text: part.text });
hasContentForLlm = true;
}
}
if (hasContentForLlm) {
logger.info('[Task] Sending new parts to LLM.');
const stateChange: StateChange = {
kind: CoderAgentEvent.StateChangeEvent,
};
// Set task state to working as we are about to call LLM
this.setTaskStateAndPublishUpdate('working', stateChange);
// TODO: Determine what it means to have a prompt ID here, then add one.
yield* this.geminiClient.sendMessageStream(
llmParts,
aborted,
/*prompt_id*/ '',
);
} else if (anyConfirmationHandled) {
logger.info(
'[Task] User message only contained tool confirmations. Scheduler is active. No new input for LLM this turn.',
);
// Ensure task state reflects that scheduler might be working due to confirmation.
// If scheduler is active, it will emit its own status updates.
// If all pending tools were just confirmed, waitForPendingTools will handle the wait.
// If some tools are still pending approval, scheduler would have set InputRequired.
// If not, and no new text, we are just waiting.
if (
this.pendingToolCalls.size > 0 &&
this.taskState !== 'input-required'
) {
const stateChange: StateChange = {
kind: CoderAgentEvent.StateChangeEvent,
};
this.setTaskStateAndPublishUpdate('working', stateChange); // Reflect potential background activity
}
yield* (async function* () {})(); // Yield nothing
} else {
logger.info(
'[Task] No relevant parts in user message for LLM interaction or tool confirmation.',
);
// If there's no new text and no confirmations, and no pending tools,
// it implies we might need to signal input required if nothing else is happening.
// However, the agent.ts will make this determination after waitForPendingTools.
yield* (async function* () {})(); // Yield nothing
}
}
_sendTextContent(content: string): void {
if (content === '') {
return;
}
logger.info('[Task] Sending text content to event bus.');
const message = this._createTextMessage(content);
const textContent: TextContent = {
kind: CoderAgentEvent.TextContentEvent,
};
this.eventBus?.publish(
this._createStatusUpdateEvent(
this.taskState,
textContent,
message,
false,
),
);
}
_sendThought(content: ThoughtSummary): void {
if (!content.subject && !content.description) {
return;
}
logger.info('[Task] Sending thought to event bus.');
const message: Message = {
kind: 'message',
role: 'agent',
parts: [
{
kind: 'data',
data: content,
} as Part,
],
messageId: uuidv4(),
taskId: this.id,
contextId: this.contextId,
};
const thought: Thought = {
kind: CoderAgentEvent.ThoughtEvent,
};
this.eventBus?.publish(
this._createStatusUpdateEvent(this.taskState, thought, message, false),
);
}
}

View File

@@ -0,0 +1,180 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import type {
Task as SDKTask,
TaskStatusUpdateEvent,
SendStreamingMessageSuccessResponse,
} from '@a2a-js/sdk';
import {
BaseDeclarativeTool,
BaseToolInvocation,
Kind,
} from '@qwen-code/qwen-code-core';
import type {
ToolCallConfirmationDetails,
ToolResult,
ToolInvocation,
} from '@qwen-code/qwen-code-core';
import { expect, vi } from 'vitest';
export const mockOnUserConfirmForToolConfirmation = vi.fn();
export class MockToolInvocation extends BaseToolInvocation<object, ToolResult> {
constructor(
private readonly tool: MockTool,
params: object,
) {
super(params);
}
getDescription(): string {
return JSON.stringify(this.params);
}
override shouldConfirmExecute(
abortSignal: AbortSignal,
): Promise<ToolCallConfirmationDetails | false> {
return this.tool.shouldConfirmExecute(this.params, abortSignal);
}
execute(
signal: AbortSignal,
updateOutput?: (output: string) => void,
terminalColumns?: number,
terminalRows?: number,
): Promise<ToolResult> {
return this.tool.execute(
this.params,
signal,
updateOutput,
terminalColumns,
terminalRows,
);
}
}
// TODO: dedup with gemini-cli, add shouldConfirmExecute() support in core
export class MockTool extends BaseDeclarativeTool<object, ToolResult> {
constructor(
name: string,
displayName: string,
canUpdateOutput = false,
isOutputMarkdown = false,
shouldConfirmExecute?: () => Promise<ToolCallConfirmationDetails | false>,
) {
super(
name,
displayName,
'A mock tool for testing',
Kind.Other,
{},
isOutputMarkdown,
canUpdateOutput,
);
if (shouldConfirmExecute) {
this.shouldConfirmExecute.mockImplementation(shouldConfirmExecute);
} else {
// Default to no confirmation needed
this.shouldConfirmExecute.mockResolvedValue(false);
}
}
execute = vi.fn();
shouldConfirmExecute = vi.fn();
protected createInvocation(
params: object,
): ToolInvocation<object, ToolResult> {
return new MockToolInvocation(this, params);
}
}
export function createStreamMessageRequest(
text: string,
messageId: string,
taskId?: string,
) {
const request: {
jsonrpc: string;
id: string;
method: string;
params: {
message: {
kind: string;
role: string;
parts: [{ kind: string; text: string }];
messageId: string;
};
metadata: {
coderAgent: {
kind: string;
workspacePath: string;
};
};
taskId?: string;
};
} = {
jsonrpc: '2.0',
id: '1',
method: 'message/stream',
params: {
message: {
kind: 'message',
role: 'user',
parts: [{ kind: 'text', text }],
messageId,
},
metadata: {
coderAgent: {
kind: 'agent-settings',
workspacePath: '/tmp',
},
},
},
};
if (taskId) {
request.params.taskId = taskId;
}
return request;
}
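// Example (illustrative):
//   createStreamMessageRequest('hello', 'msg-1', 'task-1')
//   -> a JSON-RPC 'message/stream' request whose params.taskId is 'task-1';
//      omit taskId to let the server create a new task.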
export function assertUniqueFinalEventIsLast(
events: SendStreamingMessageSuccessResponse[],
) {
// Final event is input-required & final
const finalEvent = events[events.length - 1].result as TaskStatusUpdateEvent;
expect(finalEvent.metadata?.['coderAgent']).toMatchObject({
kind: 'state-change',
});
expect(finalEvent.status?.state).toBe('input-required');
expect(finalEvent.final).toBe(true);
// There is only one final event and it's the last one
expect(
events.filter((e) => (e.result as TaskStatusUpdateEvent).final).length,
).toBe(1);
expect(
events.findIndex((e) => (e.result as TaskStatusUpdateEvent).final),
).toBe(events.length - 1);
}
export function assertTaskCreationAndWorkingStatus(
events: SendStreamingMessageSuccessResponse[],
) {
// Initial task creation event
const taskEvent = events[0].result as SDKTask;
expect(taskEvent.kind).toBe('task');
expect(taskEvent.status.state).toBe('submitted');
// Status update: working
const workingEvent = events[1].result as TaskStatusUpdateEvent;
expect(workingEvent.kind).toBe('status-update');
expect(workingEvent.status.state).toBe('working');
}

View File

@@ -0,0 +1,104 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import type {
MCPServerStatus,
ToolConfirmationOutcome,
} from '@qwen-code/qwen-code-core';
import type { TaskState } from '@a2a-js/sdk';
// Interfaces and enums for the CoderAgent protocol.
export enum CoderAgentEvent {
/**
* An event requesting one or more tool call confirmations.
*/
ToolCallConfirmationEvent = 'tool-call-confirmation',
/**
* An event updating on the status of one or more tool calls.
*/
ToolCallUpdateEvent = 'tool-call-update',
/**
* An event providing text updates on the task.
*/
TextContentEvent = 'text-content',
/**
* An event that indicates a change in the task's execution state.
*/
StateChangeEvent = 'state-change',
/**
* A user-sent event that initiates the agent.
*/
StateAgentSettingsEvent = 'agent-settings',
/**
* An event that contains a thought from the agent.
*/
ThoughtEvent = 'thought',
}
export interface AgentSettings {
kind: CoderAgentEvent.StateAgentSettingsEvent;
workspacePath: string;
}
export interface ToolCallConfirmation {
kind: CoderAgentEvent.ToolCallConfirmationEvent;
}
export interface ToolCallUpdate {
kind: CoderAgentEvent.ToolCallUpdateEvent;
}
export interface TextContent {
kind: CoderAgentEvent.TextContentEvent;
}
export interface StateChange {
kind: CoderAgentEvent.StateChangeEvent;
}
export interface Thought {
kind: CoderAgentEvent.ThoughtEvent;
}
export type ThoughtSummary = {
subject: string;
description: string;
};
export interface ToolConfirmationResponse {
outcome: ToolConfirmationOutcome;
callId: string;
}
export type CoderAgentMessage =
| AgentSettings
| ToolCallConfirmation
| ToolCallUpdate
| TextContent
| StateChange
| Thought;
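// On the wire, the CoderAgentMessage rides in the event metadata, e.g. (illustrative):
//   metadata: { coderAgent: { kind: 'state-change' }, model: '<model-name>' }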
export interface TaskMetadata {
id: string;
contextId: string;
taskState: TaskState;
model: string;
mcpServers: Array<{
name: string;
status: MCPServerStatus;
tools: Array<{
name: string;
description: string;
parameterSchema: unknown;
}>;
}>;
availableTools: Array<{
name: string;
description: string;
parameterSchema: unknown;
}>;
}

View File

@@ -0,0 +1,11 @@
{
"extends": "../../tsconfig.json",
"compilerOptions": {
"outDir": "dist",
"lib": ["DOM", "DOM.Iterable", "ES2021"],
"composite": true,
"types": ["node", "vitest/globals"]
},
"include": ["index.ts", "src/**/*.ts", "src/**/*.json"],
"exclude": ["node_modules", "dist"]
}

View File

@@ -0,0 +1,26 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { defineConfig } from 'vitest/config';
export default defineConfig({
test: {
reporters: [['default'], ['junit', { outputFile: 'junit.xml' }]],
passWithNoTests: true,
coverage: {
provider: 'v8',
reportsDirectory: './coverage',
reporter: [
['text', { file: 'full-text-summary.txt' }],
'html',
'json',
'lcov',
'cobertura',
['json-summary', { outputFile: 'coverage-summary.json' }],
],
},
},
});

View File

@@ -8,9 +8,18 @@
import './src/gemini.js';
import { main } from './src/gemini.js';
import { FatalError } from '@qwen-code/qwen-code-core';
// --- Global Entry Point ---
main().catch((error) => {
if (error instanceof FatalError) {
let errorMessage = error.message;
if (!process.env['NO_COLOR']) {
errorMessage = `\x1b[31m${errorMessage}\x1b[0m`;
}
console.error(errorMessage);
process.exit(error.exitCode);
}
console.error('An unexpected critical error occurred:');
if (error instanceof Error) {
console.error(error.stack);

View File

@@ -36,20 +36,21 @@
"command-exists": "^1.2.9",
"diff": "^7.0.0",
"dotenv": "^17.1.0",
"fzf": "^0.5.2",
"glob": "^10.4.1",
"highlight.js": "^11.11.1",
"ink": "^6.1.1",
"ink-big-text": "^2.0.0",
"ink": "^6.2.3",
"ink-gradient": "^3.0.0",
"ink-link": "^4.1.0",
"ink-select-input": "^6.2.0",
"ink-spinner": "^5.0.0",
"lodash-es": "^4.17.21",
"lowlight": "^3.3.0",
"mime-types": "^3.0.1",
"open": "^10.1.2",
"qrcode-terminal": "^0.12.0",
"react": "^19.1.0",
"read-package-up": "^11.0.0",
"simple-git": "^3.28.0",
"shell-quote": "^1.8.3",
"string-width": "^7.1.0",
"strip-ansi": "^7.1.0",
@@ -61,10 +62,12 @@
},
"devDependencies": {
"@babel/runtime": "^7.27.6",
"@google/gemini-cli-test-utils": "file:../test-utils",
"@testing-library/react": "^16.3.0",
"@types/command-exists": "^1.2.3",
"@types/diff": "^7.0.2",
"@types/dotenv": "^6.1.1",
"@types/lodash-es": "^4.17.12",
"@types/node": "^20.11.24",
"@types/react": "^19.1.8",
"@types/react-dom": "^19.1.6",

View File

@@ -0,0 +1,32 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import type { CommandModule } from 'yargs';
import { installCommand } from './extensions/install.js';
import { uninstallCommand } from './extensions/uninstall.js';
import { listCommand } from './extensions/list.js';
import { updateCommand } from './extensions/update.js';
import { disableCommand } from './extensions/disable.js';
import { enableCommand } from './extensions/enable.js';
export const extensionsCommand: CommandModule = {
command: 'extensions <command>',
describe: 'Manage Gemini CLI extensions.',
builder: (yargs) =>
yargs
.command(installCommand)
.command(uninstallCommand)
.command(listCommand)
.command(updateCommand)
.command(disableCommand)
.command(enableCommand)
.demandCommand(1, 'You need at least one command before continuing.')
.version(false),
handler: () => {
// This handler is not called when a subcommand is provided.
// Yargs will show the help menu.
},
};

View File

@@ -0,0 +1,51 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { type CommandModule } from 'yargs';
import { disableExtension } from '../../config/extension.js';
import { SettingScope } from '../../config/settings.js';
import { getErrorMessage } from '../../utils/errors.js';
interface DisableArgs {
name: string;
scope: SettingScope;
}
export async function handleDisable(args: DisableArgs) {
try {
disableExtension(args.name, args.scope);
console.log(
`Extension "${args.name}" successfully disabled for scope "${args.scope}".`,
);
} catch (error) {
console.error(getErrorMessage(error));
process.exit(1);
}
}
export const disableCommand: CommandModule = {
command: 'disable [--scope] <name>',
describe: 'Disables an extension.',
builder: (yargs) =>
yargs
.positional('name', {
describe: 'The name of the extension to disable.',
type: 'string',
})
.option('scope', {
describe: 'The scope to disable the extension in.',
type: 'string',
default: SettingScope.User,
choices: [SettingScope.User, SettingScope.Workspace],
})
.check((_argv) => true),
handler: async (argv) => {
await handleDisable({
name: argv['name'] as string,
scope: argv['scope'] as SettingScope,
});
},
};

View File

@@ -0,0 +1,59 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { type CommandModule } from 'yargs';
import { FatalConfigError, getErrorMessage } from '@qwen-code/qwen-code-core';
import { enableExtension } from '../../config/extension.js';
import { SettingScope } from '../../config/settings.js';
interface EnableArgs {
name: string;
scope?: SettingScope;
}
export async function handleEnable(args: EnableArgs) {
try {
const scopes = args.scope
? [args.scope]
: [SettingScope.User, SettingScope.Workspace];
enableExtension(args.name, scopes);
if (args.scope) {
console.log(
`Extension "${args.name}" successfully enabled for scope "${args.scope}".`,
);
} else {
console.log(
`Extension "${args.name}" successfully enabled in all scopes.`,
);
}
} catch (error) {
throw new FatalConfigError(getErrorMessage(error));
}
}
export const enableCommand: CommandModule = {
command: 'enable [--scope] <name>',
describe: 'Enables an extension.',
builder: (yargs) =>
yargs
.positional('name', {
describe: 'The name of the extension to enable.',
type: 'string',
})
.option('scope', {
describe:
'The scope to enable the extension in. If not set, the extension is enabled in all scopes.',
type: 'string',
choices: [SettingScope.User, SettingScope.Workspace],
})
.check((_argv) => true),
handler: async (argv) => {
await handleEnable({
name: argv['name'] as string,
scope: argv['scope'] as SettingScope,
});
},
};

View File

@@ -0,0 +1,31 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { describe, it, expect } from 'vitest';
import { installCommand } from './install.js';
import yargs from 'yargs';
describe('extensions install command', () => {
it('should fail if no source is provided', () => {
const validationParser = yargs([])
.locale('en')
.command(installCommand)
.fail(false);
expect(() => validationParser.parse('install')).toThrow(
'Either a git URL --source or a --path must be provided.',
);
});
it('should fail if both git source and local path are provided', () => {
const validationParser = yargs([])
.locale('en')
.command(installCommand)
.fail(false);
expect(() =>
validationParser.parse('install --source some-url --path /some/path'),
).toThrow('Arguments source and path are mutually exclusive');
});
});

View File

@@ -0,0 +1,64 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import type { CommandModule } from 'yargs';
import {
installExtension,
type ExtensionInstallMetadata,
} from '../../config/extension.js';
import { getErrorMessage } from '../../utils/errors.js';
interface InstallArgs {
source?: string;
path?: string;
}
export async function handleInstall(args: InstallArgs) {
try {
const installMetadata: ExtensionInstallMetadata = {
source: (args.source || args.path) as string,
type: args.source ? 'git' : 'local',
};
const extensionName = await installExtension(installMetadata);
console.log(
`Extension "${extensionName}" installed successfully and enabled.`,
);
} catch (error) {
console.error(getErrorMessage(error));
process.exit(1);
}
}
export const installCommand: CommandModule = {
command: 'install [--source | --path]',
describe: 'Installs an extension from a git repository or a local path.',
builder: (yargs) =>
yargs
.option('source', {
describe: 'The git URL of the extension to install.',
type: 'string',
})
.option('path', {
describe: 'Path to a local extension directory.',
type: 'string',
})
.conflicts('source', 'path')
.check((argv) => {
if (!argv.source && !argv.path) {
throw new Error(
'Either a git URL --source or a --path must be provided.',
);
}
return true;
}),
handler: async (argv) => {
await handleInstall({
source: argv['source'] as string | undefined,
path: argv['path'] as string | undefined,
});
},
};
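// Usage (illustrative; assumes the CLI binary is `gemini`):
//   gemini extensions install --source https://github.com/org/ext.git
//   gemini extensions install --path ./my-local-extension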

View File

@@ -0,0 +1,36 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import type { CommandModule } from 'yargs';
import { loadUserExtensions, toOutputString } from '../../config/extension.js';
import { getErrorMessage } from '../../utils/errors.js';
export async function handleList() {
try {
const extensions = loadUserExtensions();
if (extensions.length === 0) {
console.log('No extensions installed.');
return;
}
console.log(
extensions
.map((extension): string => toOutputString(extension))
.join('\n\n'),
);
} catch (error) {
console.error(getErrorMessage(error));
process.exit(1);
}
}
export const listCommand: CommandModule = {
command: 'list',
describe: 'Lists installed extensions.',
builder: (yargs) => yargs,
handler: async () => {
await handleList();
},
};

View File

@@ -0,0 +1,21 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { describe, it, expect } from 'vitest';
import { uninstallCommand } from './uninstall.js';
import yargs from 'yargs';
describe('extensions uninstall command', () => {
it('should fail if no source is provided', () => {
const validationParser = yargs([])
.locale('en')
.command(uninstallCommand)
.fail(false);
expect(() => validationParser.parse('uninstall')).toThrow(
'Not enough non-option arguments: got 0, need at least 1',
);
});
});

View File

@@ -0,0 +1,47 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import type { CommandModule } from 'yargs';
import { uninstallExtension } from '../../config/extension.js';
import { getErrorMessage } from '../../utils/errors.js';
interface UninstallArgs {
name: string;
}
export async function handleUninstall(args: UninstallArgs) {
try {
await uninstallExtension(args.name);
console.log(`Extension "${args.name}" successfully uninstalled.`);
} catch (error) {
console.error(getErrorMessage(error));
process.exit(1);
}
}
export const uninstallCommand: CommandModule = {
command: 'uninstall <name>',
describe: 'Uninstalls an extension.',
builder: (yargs) =>
yargs
.positional('name', {
describe: 'The name of the extension to uninstall.',
type: 'string',
})
.check((argv) => {
if (!argv.name) {
throw new Error(
'Please include the name of the extension to uninstall as a positional argument.',
);
}
return true;
}),
handler: async (argv) => {
await handleUninstall({
name: argv['name'] as string,
});
},
};

View File

@@ -0,0 +1,47 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import type { CommandModule } from 'yargs';
import { updateExtension } from '../../config/extension.js';
import { getErrorMessage } from '../../utils/errors.js';
interface UpdateArgs {
name: string;
}
export async function handleUpdate(args: UpdateArgs) {
try {
// TODO(chrstnb): we should list extensions if the requested extension is not installed.
const updatedExtensionInfo = await updateExtension(args.name);
if (!updatedExtensionInfo) {
console.log(`Extension "${args.name}" failed to update.`);
return;
}
console.log(
`Extension "${args.name}" successfully updated: ${updatedExtensionInfo.originalVersion}${updatedExtensionInfo.updatedVersion}.`,
);
} catch (error) {
console.error(getErrorMessage(error));
process.exit(1);
}
}
export const updateCommand: CommandModule = {
command: 'update <name>',
describe: 'Updates an extension.',
builder: (yargs) =>
yargs
.positional('name', {
describe: 'The name of the extension to update.',
type: 'string',
})
.check((_argv) => true),
handler: async (argv) => {
await handleUpdate({
name: argv['name'] as string,
});
},
};

View File

@@ -7,7 +7,7 @@
// File for 'gemini mcp add' command
import type { CommandModule } from 'yargs';
import { loadSettings, SettingScope } from '../../config/settings.js';
import { MCPServerConfig } from '@qwen-code/qwen-code-core';
import type { MCPServerConfig } from '@qwen-code/qwen-code-core';
async function addMcpServer(
name: string,

View File

@@ -11,9 +11,27 @@ import { loadExtensions } from '../../config/extension.js';
import { createTransport } from '@qwen-code/qwen-code-core';
import { Client } from '@modelcontextprotocol/sdk/client/index.js';
vi.mock('../../config/settings.js');
vi.mock('../../config/extension.js');
vi.mock('@qwen-code/qwen-code-core');
vi.mock('../../config/settings.js', () => ({
loadSettings: vi.fn(),
}));
vi.mock('../../config/extension.js', () => ({
loadExtensions: vi.fn(),
}));
vi.mock('@qwen-code/qwen-code-core', () => ({
createTransport: vi.fn(),
MCPServerStatus: {
CONNECTED: 'CONNECTED',
CONNECTING: 'CONNECTING',
DISCONNECTED: 'DISCONNECTED',
},
Storage: vi.fn().mockImplementation((_cwd: string) => ({
getGlobalSettingsPath: () => '/tmp/qwen/settings.json',
getWorkspaceSettingsPath: () => '/tmp/qwen/workspace-settings.json',
getProjectTempDir: () => '/test/home/.qwen/tmp/mocked_hash',
})),
GEMINI_CONFIG_DIR: '.qwen',
getErrorMessage: (e: unknown) => (e instanceof Error ? e.message : String(e)),
}));
vi.mock('@modelcontextprotocol/sdk/client/index.js');
const mockedLoadSettings = loadSettings as vi.Mock;

View File

@@ -7,11 +7,8 @@
// File for 'gemini mcp list' command
import type { CommandModule } from 'yargs';
import { loadSettings } from '../../config/settings.js';
import {
MCPServerConfig,
MCPServerStatus,
createTransport,
} from '@qwen-code/qwen-code-core';
import type { MCPServerConfig } from '@qwen-code/qwen-code-core';
import { MCPServerStatus, createTransport } from '@qwen-code/qwen-code-core';
import { Client } from '@modelcontextprotocol/sdk/client/index.js';
import { loadExtensions } from '../../config/extension.js';

View File

@@ -5,14 +5,14 @@
*/
import { describe, it, expect, beforeEach, vi, afterEach } from 'vitest';
import * as fs from 'fs';
import * as path from 'path';
import { tmpdir } from 'os';
import {
Config,
import * as fs from 'node:fs';
import * as path from 'node:path';
import { tmpdir } from 'node:os';
import type {
ConfigParameters,
ContentGeneratorConfig,
} from '@qwen-code/qwen-code-core';
import { Config } from '@qwen-code/qwen-code-core';
import { http, HttpResponse } from 'msw';
import { setupServer } from 'msw/node';
@@ -282,7 +282,7 @@ describe('Configuration Integration Tests', () => {
'test',
];
const argv = await parseArguments();
const argv = await parseArguments({} as Settings);
// Verify that the argument was parsed correctly
expect(argv.approvalMode).toBe('auto_edit');
@@ -306,7 +306,7 @@ describe('Configuration Integration Tests', () => {
'test',
];
const argv = await parseArguments();
const argv = await parseArguments({} as Settings);
expect(argv.approvalMode).toBe('yolo');
expect(argv.prompt).toBe('test');
@@ -329,7 +329,7 @@ describe('Configuration Integration Tests', () => {
'test',
];
const argv = await parseArguments();
const argv = await parseArguments({} as Settings);
expect(argv.approvalMode).toBe('default');
expect(argv.prompt).toBe('test');
@@ -345,7 +345,7 @@ describe('Configuration Integration Tests', () => {
try {
process.argv = ['node', 'script.js', '--yolo', '-p', 'test'];
const argv = await parseArguments();
const argv = await parseArguments({} as Settings);
expect(argv.yolo).toBe(true);
expect(argv.approvalMode).toBeUndefined(); // Should NOT be set when using --yolo
@@ -362,7 +362,7 @@ describe('Configuration Integration Tests', () => {
process.argv = ['node', 'script.js', '--approval-mode', 'invalid_mode'];
// Should throw during argument parsing due to yargs validation
await expect(parseArguments()).rejects.toThrow();
await expect(parseArguments({} as Settings)).rejects.toThrow();
} finally {
process.argv = originalArgv;
}
@@ -381,7 +381,7 @@ describe('Configuration Integration Tests', () => {
];
// Should throw during argument parsing due to conflict validation
await expect(parseArguments()).rejects.toThrow();
await expect(parseArguments({} as Settings)).rejects.toThrow();
} finally {
process.argv = originalArgv;
}
@@ -394,7 +394,7 @@ describe('Configuration Integration Tests', () => {
// Test that no approval mode arguments defaults to no flags set
process.argv = ['node', 'script.js', '-p', 'test'];
const argv = await parseArguments();
const argv = await parseArguments({} as Settings);
expect(argv.approvalMode).toBeUndefined();
expect(argv.yolo).toBe(false);

File diff suppressed because it is too large

packages/cli/src/config/config.ts (197 changed lines, Normal file → Executable file)
View File

@@ -4,37 +4,41 @@
* SPDX-License-Identifier: Apache-2.0
*/
import * as fs from 'fs';
import * as path from 'path';
import { homedir } from 'node:os';
import yargs from 'yargs/yargs';
import { hideBin } from 'yargs/helpers';
import process from 'node:process';
import { mcpCommand } from '../commands/mcp.js';
import type {
ConfigParameters,
FileFilteringOptions,
MCPServerConfig,
TelemetryTarget,
} from '@qwen-code/qwen-code-core';
import {
ApprovalMode,
Config,
DEFAULT_GEMINI_EMBEDDING_MODEL,
DEFAULT_GEMINI_MODEL,
DEFAULT_MEMORY_FILE_FILTERING_OPTIONS,
EditTool,
FileDiscoveryService,
getCurrentGeminiMdFilename,
loadServerHierarchicalMemory,
setGeminiMdFilename as setServerGeminiMdFilename,
getCurrentGeminiMdFilename,
ApprovalMode,
DEFAULT_GEMINI_MODEL,
DEFAULT_GEMINI_EMBEDDING_MODEL,
DEFAULT_MEMORY_FILE_FILTERING_OPTIONS,
FileDiscoveryService,
TelemetryTarget,
FileFilteringOptions,
ShellTool,
EditTool,
WriteFileTool,
MCPServerConfig,
ConfigParameters,
} from '@qwen-code/qwen-code-core';
import { Settings } from './settings.js';
import * as fs from 'node:fs';
import { homedir } from 'node:os';
import * as path from 'node:path';
import process from 'node:process';
import { hideBin } from 'yargs/helpers';
import yargs from 'yargs/yargs';
import { extensionsCommand } from '../commands/extensions.js';
import { mcpCommand } from '../commands/mcp.js';
import type { Settings } from './settings.js';
import { Extension, annotateActiveExtensions } from './extension.js';
import { getCliVersion } from '../utils/version.js';
import { loadSandboxConfig } from './sandboxConfig.js';
import { resolvePath } from '../utils/resolvePath.js';
import { getCliVersion } from '../utils/version.js';
import type { Extension } from './extension.js';
import { annotateActiveExtensions } from './extension.js';
import { loadSandboxConfig } from './sandboxConfig.js';
import { isWorkspaceTrusted } from './trustedFolders.js';
@@ -56,9 +60,7 @@ export interface CliArgs {
prompt: string | undefined;
promptInteractive: string | undefined;
allFiles: boolean | undefined;
all_files: boolean | undefined;
showMemoryUsage: boolean | undefined;
show_memory_usage: boolean | undefined;
yolo: boolean | undefined;
approvalMode: string | undefined;
telemetry: boolean | undefined;
@@ -69,6 +71,7 @@ export interface CliArgs {
telemetryLogPrompts: boolean | undefined;
telemetryOutfile: string | undefined;
allowedMcpServerNames: string[] | undefined;
allowedTools: string[] | undefined;
experimentalAcp: boolean | undefined;
extensions: string[] | undefined;
listExtensions: boolean | undefined;
@@ -78,9 +81,10 @@ export interface CliArgs {
proxy: string | undefined;
includeDirectories: string[] | undefined;
tavilyApiKey: string | undefined;
screenReader: boolean | undefined;
}
export async function parseArguments(): Promise<CliArgs> {
export async function parseArguments(settings: Settings): Promise<CliArgs> {
const yargsInstance = yargs(hideBin(process.argv))
// Set locale to English for consistent output, especially in tests
.locale('en')
@@ -128,29 +132,11 @@ export async function parseArguments(): Promise<CliArgs> {
description: 'Include ALL files in context?',
default: false,
})
.option('all_files', {
type: 'boolean',
description: 'Include ALL files in context?',
default: false,
})
.deprecateOption(
'all_files',
'Use --all-files instead. We will be removing --all_files in the coming weeks.',
)
.option('show-memory-usage', {
type: 'boolean',
description: 'Show memory usage in status bar',
default: false,
})
.option('show_memory_usage', {
type: 'boolean',
description: 'Show memory usage in status bar',
default: false,
})
.deprecateOption(
'show_memory_usage',
'Use --show-memory-usage instead. We will be removing --show_memory_usage in the coming weeks.',
)
.option('yolo', {
alias: 'y',
type: 'boolean',
@@ -210,6 +196,11 @@ export async function parseArguments(): Promise<CliArgs> {
string: true,
description: 'Allowed MCP server names',
})
.option('allowed-tools', {
type: 'array',
string: true,
description: 'Tools that are allowed to run without confirmation',
})
.option('extensions', {
alias: 'e',
type: 'array',
@@ -253,7 +244,11 @@ export async function parseArguments(): Promise<CliArgs> {
type: 'string',
description: 'Tavily API key for web search functionality',
})
.option('screen-reader', {
type: 'boolean',
description: 'Enable screen reader mode for accessibility.',
default: false,
})
.check((argv) => {
if (argv.prompt && argv['promptInteractive']) {
throw new Error(
@@ -269,7 +264,13 @@ export async function parseArguments(): Promise<CliArgs> {
}),
)
// Register MCP subcommands
.command(mcpCommand)
.command(mcpCommand);
if (settings?.experimental?.extensionManagement ?? false) {
yargsInstance.command(extensionsCommand);
}
yargsInstance
.version(await getCliVersion()) // This will enable the --version flag based on package.json
.alias('v', 'version')
.help()
@@ -282,7 +283,10 @@ export async function parseArguments(): Promise<CliArgs> {
// Handle case where MCP subcommands are executed - they should exit the process
// and not return to main CLI logic
if (result._.length > 0 && result._[0] === 'mcp') {
if (
result._.length > 0 &&
(result._[0] === 'mcp' || result._[0] === 'extensions')
) {
// MCP commands handle their own execution and process exit
process.exit(0);
}
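
The new command group is opt-in: `extensionsCommand` is only registered when the experimental setting is on. A sketch of the gate from the caller's side (the cast mirrors how the tests in this diff construct settings):

```ts
// Hypothetical: only users who opt in via settings see 'qwen extensions ...'.
const settings = {
  experimental: { extensionManagement: true },
} as Settings;
const argv = await parseArguments(settings); // extensions subcommands now parse
```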
@@ -329,7 +333,7 @@ export async function loadHierarchicalGeminiMemory(
extensionContextFilePaths,
memoryImportFormat,
fileFilteringOptions,
settings.memoryDiscoveryMaxDirs,
settings.context?.discoveryMaxDirs,
);
}
@@ -346,18 +350,20 @@ export async function loadCliConfig(
(v) => v === 'true' || v === '1',
) ||
false;
const memoryImportFormat = settings.memoryImportFormat || 'tree';
const memoryImportFormat = settings.context?.importFormat || 'tree';
const ideMode = settings.ideMode ?? false;
const ideMode = settings.ide?.enabled ?? false;
const folderTrustFeature = settings.folderTrustFeature ?? false;
const folderTrustSetting = settings.folderTrust ?? true;
const folderTrustFeature =
settings.security?.folderTrust?.featureEnabled ?? false;
const folderTrustSetting = settings.security?.folderTrust?.enabled ?? true;
const folderTrust = folderTrustFeature && folderTrustSetting;
const trustedFolder = isWorkspaceTrusted(settings);
const allExtensions = annotateActiveExtensions(
extensions,
argv.extensions || [],
cwd,
);
const activeExtensions = extensions.filter(
@@ -382,8 +388,8 @@ export async function loadCliConfig(
// TODO(b/343434939): This is a bit of a hack. The contextFileName should ideally be passed
// directly to the Config constructor in core, and have core handle setGeminiMdFilename.
// However, loadHierarchicalGeminiMemory is called *before* createServerConfig.
if (settings.contextFileName) {
setServerGeminiMdFilename(settings.contextFileName);
if (settings.context?.fileName) {
setServerGeminiMdFilename(settings.context.fileName);
} else {
// Reset to default if not provided in settings.
setServerGeminiMdFilename(getCurrentGeminiMdFilename());
@@ -397,17 +403,19 @@ export async function loadCliConfig(
const fileFiltering = {
...DEFAULT_MEMORY_FILE_FILTERING_OPTIONS,
...settings.fileFiltering,
...settings.context?.fileFiltering,
};
const includeDirectories = (settings.includeDirectories || [])
const includeDirectories = (settings.context?.includeDirectories || [])
.map(resolvePath)
.concat((argv.includeDirectories || []).map(resolvePath));
// Call the (now wrapper) loadHierarchicalGeminiMemory which calls the server's version
const { memoryContent, fileCount } = await loadHierarchicalGeminiMemory(
cwd,
settings.loadMemoryFromIncludeDirectories ? includeDirectories : [],
settings.context?.loadMemoryFromIncludeDirectories
? includeDirectories
: [],
debugMode,
fileService,
settings,
@@ -444,6 +452,14 @@ export async function loadCliConfig(
argv.yolo || false ? ApprovalMode.YOLO : ApprovalMode.DEFAULT;
}
// Force approval mode to default if the folder is not trusted.
if (!trustedFolder && approvalMode !== ApprovalMode.DEFAULT) {
logger.warn(
`Approval mode overridden to "default" because the current folder is not trusted.`,
);
approvalMode = ApprovalMode.DEFAULT;
}
const interactive =
!!argv.promptInteractive || (process.stdin.isTTY && question.length === 0);
// In non-interactive mode, exclude tools that require a prompt.
@@ -475,16 +491,16 @@ export async function loadCliConfig(
const blockedMcpServers: Array<{ name: string; extensionName: string }> = [];
if (!argv.allowedMcpServerNames) {
if (settings.allowMCPServers) {
if (settings.mcp?.allowed) {
mcpServers = allowedMcpServers(
mcpServers,
settings.allowMCPServers,
settings.mcp.allowed,
blockedMcpServers,
);
}
if (settings.excludeMCPServers) {
const excludedNames = new Set(settings.excludeMCPServers.filter(Boolean));
if (settings.mcp?.excluded) {
const excludedNames = new Set(settings.mcp.excluded.filter(Boolean));
if (excludedNames.size > 0) {
mcpServers = Object.fromEntries(
Object.entries(mcpServers).filter(([key]) => !excludedNames.has(key)),
@@ -504,6 +520,10 @@ export async function loadCliConfig(
const sandboxConfig = await loadSandboxConfig(settings, argv);
const cliVersion = await getCliVersion();
const screenReader =
argv.screenReader !== undefined
? argv.screenReader
: (settings.ui?.accessibility?.screenReader ?? false);
return new Config({
sessionId,
embeddingModel: DEFAULT_GEMINI_EMBEDDING_MODEL,
@@ -511,25 +531,26 @@ export async function loadCliConfig(
targetDir: cwd,
includeDirectories,
loadMemoryFromIncludeDirectories:
settings.loadMemoryFromIncludeDirectories || false,
settings.context?.loadMemoryFromIncludeDirectories || false,
debugMode,
question,
fullContext: argv.allFiles || argv.all_files || false,
coreTools: settings.coreTools || undefined,
fullContext: argv.allFiles || false,
coreTools: settings.tools?.core || undefined,
allowedTools: argv.allowedTools || settings.tools?.allowed || undefined,
excludeTools,
toolDiscoveryCommand: settings.toolDiscoveryCommand,
toolCallCommand: settings.toolCallCommand,
mcpServerCommand: settings.mcpServerCommand,
toolDiscoveryCommand: settings.tools?.discoveryCommand,
toolCallCommand: settings.tools?.callCommand,
mcpServerCommand: settings.mcp?.serverCommand,
mcpServers,
userMemory: memoryContent,
geminiMdFileCount: fileCount,
approvalMode,
showMemoryUsage:
argv.showMemoryUsage ||
argv.show_memory_usage ||
settings.showMemoryUsage ||
false,
accessibility: settings.accessibility,
argv.showMemoryUsage || settings.ui?.showMemoryUsage || false,
accessibility: {
...settings.ui?.accessibility,
screenReader,
},
telemetry: {
enabled: argv.telemetry ?? settings.telemetry?.enabled,
target: (argv.telemetryTarget ??
@@ -546,15 +567,17 @@ export async function loadCliConfig(
logPrompts: argv.telemetryLogPrompts ?? settings.telemetry?.logPrompts,
outfile: argv.telemetryOutfile ?? settings.telemetry?.outfile,
},
usageStatisticsEnabled: settings.usageStatisticsEnabled ?? true,
usageStatisticsEnabled: settings.privacy?.usageStatisticsEnabled ?? true,
// Git-aware file filtering settings
fileFiltering: {
respectGitIgnore: settings.fileFiltering?.respectGitIgnore,
respectGeminiIgnore: settings.fileFiltering?.respectGeminiIgnore,
respectGitIgnore: settings.context?.fileFiltering?.respectGitIgnore,
respectGeminiIgnore: settings.context?.fileFiltering?.respectGeminiIgnore,
enableRecursiveFileSearch:
settings.fileFiltering?.enableRecursiveFileSearch,
settings.context?.fileFiltering?.enableRecursiveFileSearch,
disableFuzzySearch: settings.context?.fileFiltering?.disableFuzzySearch,
},
checkpointing: argv.checkpointing || settings.checkpointing?.enabled,
checkpointing:
argv.checkpointing || settings.general?.checkpointing?.enabled,
proxy:
argv.proxy ||
process.env['HTTPS_PROXY'] ||
@@ -563,18 +586,16 @@ export async function loadCliConfig(
process.env['http_proxy'],
cwd,
fileDiscoveryService: fileService,
bugCommand: settings.bugCommand,
model: argv.model || settings.model || DEFAULT_GEMINI_MODEL,
bugCommand: settings.advanced?.bugCommand,
model: argv.model || settings.model?.name || DEFAULT_GEMINI_MODEL,
extensionContextFilePaths,
maxSessionTurns: settings.maxSessionTurns ?? -1,
sessionTokenLimit: settings.sessionTokenLimit ?? -1,
maxSessionTurns: settings.model?.maxSessionTurns ?? -1,
experimentalZedIntegration: argv.experimentalAcp || false,
listExtensions: argv.listExtensions || false,
extensions: allExtensions,
blockedMcpServers,
noBrowser: !!process.env['NO_BROWSER'],
summarizeToolOutput: settings.summarizeToolOutput,
ideMode,
enableOpenAILogging:
(typeof argv.openaiLogging === 'undefined'
? settings.enableOpenAILogging
@@ -590,20 +611,24 @@ export async function loadCliConfig(
'SYSTEM_TEMPLATE:{"name":"qwen3_coder","params":{"is_git_repository":{RUNTIME_VARS_IS_GIT_REPO},"sandbox":"{RUNTIME_VARS_SANDBOX}"}}',
},
]) as ConfigParameters['systemPromptMappings'],
authType: settings.selectedAuthType,
authType: settings.security?.auth?.selectedType,
contentGenerator: settings.contentGenerator,
cliVersion,
tavilyApiKey:
argv.tavilyApiKey ||
settings.tavilyApiKey ||
process.env['TAVILY_API_KEY'],
chatCompression: settings.chatCompression,
summarizeToolOutput: settings.model?.summarizeToolOutput,
ideMode,
chatCompression: settings.model?.chatCompression,
folderTrustFeature,
folderTrust,
interactive,
trustedFolder,
shouldUseNodePtyShell: settings.shouldUseNodePtyShell,
skipNextSpeakerCheck: settings.skipNextSpeakerCheck,
useRipgrep: settings.tools?.useRipgrep,
shouldUseNodePtyShell: settings.tools?.usePty,
skipNextSpeakerCheck: settings.model?.skipNextSpeakerCheck,
enablePromptCompletion: settings.general?.enablePromptCompletion ?? false,
});
}
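
The hunks above migrate flat settings keys (`settings.showMemoryUsage`, `settings.coreTools`, ...) to a nested schema. A rough sketch of the nested shape implied by the reads in this file — field names are inferred from usage here, not copied from the authoritative `settingsSchema.ts`, and only touched fields are listed:

```ts
// Partial, inferred sketch of the reorganized Settings shape.
interface NestedSettingsSketch {
  general?: { checkpointing?: { enabled?: boolean }; enablePromptCompletion?: boolean };
  context?: {
    fileName?: string | string[];
    importFormat?: 'tree' | 'flat';
    includeDirectories?: string[];
    loadMemoryFromIncludeDirectories?: boolean;
    discoveryMaxDirs?: number;
    fileFiltering?: { respectGitIgnore?: boolean; disableFuzzySearch?: boolean };
  };
  tools?: { core?: string[]; exclude?: string[]; allowed?: string[]; useRipgrep?: boolean; usePty?: boolean };
  mcp?: { allowed?: string[]; excluded?: string[]; serverCommand?: string };
  model?: { name?: string; maxSessionTurns?: number; skipNextSpeakerCheck?: boolean };
  ui?: { showMemoryUsage?: boolean; accessibility?: { screenReader?: boolean } };
  security?: { auth?: { selectedType?: string }; folderTrust?: { enabled?: boolean; featureEnabled?: boolean } };
  privacy?: { usageStatisticsEnabled?: boolean };
  advanced?: { bugCommand?: unknown };
  experimental?: { extensionManagement?: boolean };
}
```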
@@ -665,7 +690,7 @@ function mergeExcludeTools(
extraExcludes?: string[] | undefined,
): string[] {
const allExcludeTools = new Set([
...(settings.excludeTools || []),
...(settings.tools?.exclude || []),
...(extraExcludes || []),
]);
for (const extension of extensions) {

View File

@@ -5,24 +5,52 @@
*/
import { vi } from 'vitest';
import * as fs from 'fs';
import * as os from 'os';
import * as path from 'path';
import * as fs from 'node:fs';
import * as os from 'node:os';
import * as path from 'node:path';
import {
EXTENSIONS_CONFIG_FILENAME,
EXTENSIONS_DIRECTORY_NAME,
INSTALL_METADATA_FILENAME,
annotateActiveExtensions,
disableExtension,
enableExtension,
installExtension,
loadExtension,
loadExtensions,
performWorkspaceExtensionMigration,
uninstallExtension,
updateExtension,
} from './extension.js';
import {
type GeminiCLIExtension,
type MCPServerConfig,
} from '@qwen-code/qwen-code-core';
import { execSync } from 'node:child_process';
import { SettingScope, loadSettings } from './settings.js';
import { type SimpleGit, simpleGit } from 'simple-git';
vi.mock('simple-git', () => ({
simpleGit: vi.fn(),
}));
vi.mock('os', async (importOriginal) => {
const os = await importOriginal<typeof import('os')>();
const os = await importOriginal<typeof os>();
return {
...os,
homedir: vi.fn(),
};
});
vi.mock('child_process', async (importOriginal) => {
const actual = await importOriginal<typeof import('child_process')>();
return {
...actual,
execSync: vi.fn(),
};
});
const EXTENSIONS_DIRECTORY_NAME = path.join('.qwen', 'extensions');
describe('loadExtensions', () => {
let tempWorkspaceDir: string;
let tempHomeDir: string;
@@ -40,56 +68,7 @@ describe('loadExtensions', () => {
afterEach(() => {
fs.rmSync(tempWorkspaceDir, { recursive: true, force: true });
fs.rmSync(tempHomeDir, { recursive: true, force: true });
});
it('should include extension path in loaded extension', () => {
const workspaceExtensionsDir = path.join(
tempWorkspaceDir,
EXTENSIONS_DIRECTORY_NAME,
);
fs.mkdirSync(workspaceExtensionsDir, { recursive: true });
const extensionDir = path.join(workspaceExtensionsDir, 'test-extension');
fs.mkdirSync(extensionDir, { recursive: true });
const config = {
name: 'test-extension',
version: '1.0.0',
};
fs.writeFileSync(
path.join(extensionDir, EXTENSIONS_CONFIG_FILENAME),
JSON.stringify(config),
);
const extensions = loadExtensions(tempWorkspaceDir);
expect(extensions).toHaveLength(1);
expect(extensions[0].path).toBe(extensionDir);
expect(extensions[0].config.name).toBe('test-extension');
});
it('should include extension path in loaded extension', () => {
const workspaceExtensionsDir = path.join(
tempWorkspaceDir,
EXTENSIONS_DIRECTORY_NAME,
);
fs.mkdirSync(workspaceExtensionsDir, { recursive: true });
const extensionDir = path.join(workspaceExtensionsDir, 'test-extension');
fs.mkdirSync(extensionDir, { recursive: true });
const config = {
name: 'test-extension',
version: '1.0.0',
};
fs.writeFileSync(
path.join(extensionDir, EXTENSIONS_CONFIG_FILENAME),
JSON.stringify(config),
);
const extensions = loadExtensions(tempWorkspaceDir);
expect(extensions).toHaveLength(1);
expect(extensions[0].path).toBe(extensionDir);
expect(extensions[0].config.name).toBe('test-extension');
vi.restoreAllMocks();
});
it('should include extension path in loaded extension', () => {
@@ -159,26 +138,101 @@ describe('loadExtensions', () => {
path.join(workspaceExtensionsDir, 'ext1', 'my-context-file.md'),
]);
});
it('should filter out disabled extensions', () => {
const workspaceExtensionsDir = path.join(
tempWorkspaceDir,
EXTENSIONS_DIRECTORY_NAME,
);
fs.mkdirSync(workspaceExtensionsDir, { recursive: true });
createExtension(workspaceExtensionsDir, 'ext1', '1.0.0');
createExtension(workspaceExtensionsDir, 'ext2', '2.0.0');
const settingsDir = path.join(tempWorkspaceDir, '.qwen');
fs.mkdirSync(settingsDir, { recursive: true });
fs.writeFileSync(
path.join(settingsDir, 'settings.json'),
JSON.stringify({ extensions: { disabled: ['ext1'] } }),
);
const extensions = loadExtensions(tempWorkspaceDir);
const activeExtensions = annotateActiveExtensions(
extensions,
[],
tempWorkspaceDir,
).filter((e) => e.isActive);
expect(activeExtensions).toHaveLength(1);
expect(activeExtensions[0].name).toBe('ext2');
});
it('should hydrate variables', () => {
const workspaceExtensionsDir = path.join(
tempWorkspaceDir,
EXTENSIONS_DIRECTORY_NAME,
);
fs.mkdirSync(workspaceExtensionsDir, { recursive: true });
createExtension(
workspaceExtensionsDir,
'test-extension',
'1.0.0',
false,
undefined,
{
'test-server': {
cwd: '${extensionPath}${/}server',
},
},
);
const extensions = loadExtensions(tempWorkspaceDir);
expect(extensions).toHaveLength(1);
const loadedConfig = extensions[0].config;
const expectedCwd = path.join(
workspaceExtensionsDir,
'test-extension',
'server',
);
expect(loadedConfig.mcpServers?.['test-server'].cwd).toBe(expectedCwd);
});
});
describe('annotateActiveExtensions', () => {
const extensions = [
{ config: { name: 'ext1', version: '1.0.0' }, contextFiles: [] },
{ config: { name: 'ext2', version: '1.0.0' }, contextFiles: [] },
{ config: { name: 'ext3', version: '1.0.0' }, contextFiles: [] },
{
path: '/path/to/ext1',
config: { name: 'ext1', version: '1.0.0' },
contextFiles: [],
},
{
path: '/path/to/ext2',
config: { name: 'ext2', version: '1.0.0' },
contextFiles: [],
},
{
path: '/path/to/ext3',
config: { name: 'ext3', version: '1.0.0' },
contextFiles: [],
},
];
it('should mark all extensions as active if no enabled extensions are provided', () => {
const activeExtensions = annotateActiveExtensions(extensions, []);
const activeExtensions = annotateActiveExtensions(
extensions,
[],
'/path/to/workspace',
);
expect(activeExtensions).toHaveLength(3);
expect(activeExtensions.every((e) => e.isActive)).toBe(true);
});
it('should mark only the enabled extensions as active', () => {
const activeExtensions = annotateActiveExtensions(extensions, [
'ext1',
'ext3',
]);
const activeExtensions = annotateActiveExtensions(
extensions,
['ext1', 'ext3'],
'/path/to/workspace',
);
expect(activeExtensions).toHaveLength(3);
expect(activeExtensions.find((e) => e.name === 'ext1')?.isActive).toBe(
true,
@@ -192,13 +246,21 @@ describe('annotateActiveExtensions', () => {
});
it('should mark all extensions as inactive when "none" is provided', () => {
const activeExtensions = annotateActiveExtensions(extensions, ['none']);
const activeExtensions = annotateActiveExtensions(
extensions,
['none'],
'/path/to/workspace',
);
expect(activeExtensions).toHaveLength(3);
expect(activeExtensions.every((e) => !e.isActive)).toBe(true);
});
it('should handle case-insensitivity', () => {
const activeExtensions = annotateActiveExtensions(extensions, ['EXT1']);
const activeExtensions = annotateActiveExtensions(
extensions,
['EXT1'],
'/path/to/workspace',
);
expect(activeExtensions.find((e) => e.name === 'ext1')?.isActive).toBe(
true,
);
@@ -206,24 +268,258 @@ describe('annotateActiveExtensions', () => {
it('should log an error for unknown extensions', () => {
const consoleSpy = vi.spyOn(console, 'error').mockImplementation(() => {});
annotateActiveExtensions(extensions, ['ext4']);
annotateActiveExtensions(extensions, ['ext4'], '/path/to/workspace');
expect(consoleSpy).toHaveBeenCalledWith('Extension not found: ext4');
consoleSpy.mockRestore();
});
});
describe('installExtension', () => {
let tempHomeDir: string;
let userExtensionsDir: string;
beforeEach(() => {
tempHomeDir = fs.mkdtempSync(
path.join(os.tmpdir(), 'qwen-code-test-home-'),
);
vi.mocked(os.homedir).mockReturnValue(tempHomeDir);
userExtensionsDir = path.join(tempHomeDir, '.qwen', 'extensions');
// Clean up before each test
fs.rmSync(userExtensionsDir, { recursive: true, force: true });
fs.mkdirSync(userExtensionsDir, { recursive: true });
vi.mocked(execSync).mockClear();
});
afterEach(() => {
fs.rmSync(tempHomeDir, { recursive: true, force: true });
});
it('should install an extension from a local path', async () => {
const sourceExtDir = createExtension(
tempHomeDir,
'my-local-extension',
'1.0.0',
);
const targetExtDir = path.join(userExtensionsDir, 'my-local-extension');
const metadataPath = path.join(targetExtDir, INSTALL_METADATA_FILENAME);
await installExtension({ source: sourceExtDir, type: 'local' });
expect(fs.existsSync(targetExtDir)).toBe(true);
expect(fs.existsSync(metadataPath)).toBe(true);
const metadata = JSON.parse(fs.readFileSync(metadataPath, 'utf-8'));
expect(metadata).toEqual({
source: sourceExtDir,
type: 'local',
});
fs.rmSync(targetExtDir, { recursive: true, force: true });
});
it('should throw an error if the extension already exists', async () => {
const sourceExtDir = createExtension(
tempHomeDir,
'my-local-extension',
'1.0.0',
);
await installExtension({ source: sourceExtDir, type: 'local' });
await expect(
installExtension({ source: sourceExtDir, type: 'local' }),
).rejects.toThrow(
'Extension "my-local-extension" is already installed. Please uninstall it first.',
);
});
it('should throw an error and cleanup if gemini-extension.json is missing', async () => {
const sourceExtDir = path.join(tempHomeDir, 'bad-extension');
fs.mkdirSync(sourceExtDir, { recursive: true });
await expect(
installExtension({ source: sourceExtDir, type: 'local' }),
).rejects.toThrow(
`Invalid extension at ${sourceExtDir}. Please make sure it has a valid gemini-extension.json file.`,
);
const targetExtDir = path.join(userExtensionsDir, 'bad-extension');
expect(fs.existsSync(targetExtDir)).toBe(false);
});
it('should install an extension from a git URL', async () => {
const gitUrl = 'https://github.com/google/gemini-extensions.git';
const extensionName = 'gemini-extensions';
const targetExtDir = path.join(userExtensionsDir, extensionName);
const metadataPath = path.join(targetExtDir, INSTALL_METADATA_FILENAME);
const clone = vi.fn().mockImplementation(async (_, destination) => {
fs.mkdirSync(destination, { recursive: true });
fs.writeFileSync(
path.join(destination, EXTENSIONS_CONFIG_FILENAME),
JSON.stringify({ name: extensionName, version: '1.0.0' }),
);
});
const mockedSimpleGit = simpleGit as vi.MockedFunction<typeof simpleGit>;
mockedSimpleGit.mockReturnValue({ clone } as unknown as SimpleGit);
await installExtension({ source: gitUrl, type: 'git' });
expect(fs.existsSync(targetExtDir)).toBe(true);
expect(fs.existsSync(metadataPath)).toBe(true);
const metadata = JSON.parse(fs.readFileSync(metadataPath, 'utf-8'));
expect(metadata).toEqual({
source: gitUrl,
type: 'git',
});
fs.rmSync(targetExtDir, { recursive: true, force: true });
});
});
describe('uninstallExtension', () => {
let tempHomeDir: string;
let userExtensionsDir: string;
beforeEach(() => {
tempHomeDir = fs.mkdtempSync(
path.join(os.tmpdir(), 'qwen-code-test-home-'),
);
vi.mocked(os.homedir).mockReturnValue(tempHomeDir);
userExtensionsDir = path.join(tempHomeDir, '.qwen', 'extensions');
// Clean up before each test
fs.rmSync(userExtensionsDir, { recursive: true, force: true });
fs.mkdirSync(userExtensionsDir, { recursive: true });
vi.mocked(execSync).mockClear();
});
afterEach(() => {
fs.rmSync(tempHomeDir, { recursive: true, force: true });
});
it('should uninstall an extension by name', async () => {
const sourceExtDir = createExtension(
userExtensionsDir,
'my-local-extension',
'1.0.0',
);
await uninstallExtension('my-local-extension');
expect(fs.existsSync(sourceExtDir)).toBe(false);
});
it('should uninstall an extension by name and retain existing extensions', async () => {
const sourceExtDir = createExtension(
userExtensionsDir,
'my-local-extension',
'1.0.0',
);
const otherExtDir = createExtension(
userExtensionsDir,
'other-extension',
'1.0.0',
);
await uninstallExtension('my-local-extension');
expect(fs.existsSync(sourceExtDir)).toBe(false);
expect(loadExtensions(tempHomeDir)).toHaveLength(1);
expect(fs.existsSync(otherExtDir)).toBe(true);
});
it('should throw an error if the extension does not exist', async () => {
await expect(uninstallExtension('nonexistent-extension')).rejects.toThrow(
'Extension "nonexistent-extension" not found.',
);
});
});
describe('performWorkspaceExtensionMigration', () => {
let tempWorkspaceDir: string;
let tempHomeDir: string;
beforeEach(() => {
tempWorkspaceDir = fs.mkdtempSync(
path.join(os.tmpdir(), 'qwen-code-test-workspace-'),
);
tempHomeDir = fs.mkdtempSync(
path.join(os.tmpdir(), 'qwen-code-test-home-'),
);
vi.mocked(os.homedir).mockReturnValue(tempHomeDir);
});
afterEach(() => {
fs.rmSync(tempWorkspaceDir, { recursive: true, force: true });
fs.rmSync(tempHomeDir, { recursive: true, force: true });
vi.restoreAllMocks();
});
it('should install the extensions in the user directory', async () => {
const workspaceExtensionsDir = path.join(
tempWorkspaceDir,
EXTENSIONS_DIRECTORY_NAME,
);
fs.mkdirSync(workspaceExtensionsDir, { recursive: true });
const ext1Path = createExtension(workspaceExtensionsDir, 'ext1', '1.0.0');
const ext2Path = createExtension(workspaceExtensionsDir, 'ext2', '1.0.0');
const extensionsToMigrate = [
loadExtension(ext1Path)!,
loadExtension(ext2Path)!,
];
const failed =
await performWorkspaceExtensionMigration(extensionsToMigrate);
expect(failed).toEqual([]);
const userExtensionsDir = path.join(tempHomeDir, '.qwen', 'extensions');
const userExt1Path = path.join(userExtensionsDir, 'ext1');
const extensions = loadExtensions(tempWorkspaceDir);
expect(extensions).toHaveLength(2);
const metadataPath = path.join(userExt1Path, INSTALL_METADATA_FILENAME);
expect(fs.existsSync(metadataPath)).toBe(true);
const metadata = JSON.parse(fs.readFileSync(metadataPath, 'utf-8'));
expect(metadata).toEqual({
source: ext1Path,
type: 'local',
});
});
it('should return the names of failed installations', async () => {
const workspaceExtensionsDir = path.join(
tempWorkspaceDir,
EXTENSIONS_DIRECTORY_NAME,
);
fs.mkdirSync(workspaceExtensionsDir, { recursive: true });
const ext1Path = createExtension(workspaceExtensionsDir, 'ext1', '1.0.0');
const extensions = [
loadExtension(ext1Path)!,
{
path: '/ext/path/1',
config: { name: 'ext2', version: '1.0.0' },
contextFiles: [],
},
];
const failed = await performWorkspaceExtensionMigration(extensions);
expect(failed).toEqual(['ext2']);
});
});
function createExtension(
extensionsDir: string,
name: string,
version: string,
addContextFile = false,
contextFileName?: string,
): void {
mcpServers?: Record<string, MCPServerConfig>,
): string {
const extDir = path.join(extensionsDir, name);
fs.mkdirSync(extDir);
fs.mkdirSync(extDir, { recursive: true });
fs.writeFileSync(
path.join(extDir, EXTENSIONS_CONFIG_FILENAME),
JSON.stringify({ name, version, contextFileName }),
JSON.stringify({ name, version, contextFileName, mcpServers }),
);
if (addContextFile) {
@@ -233,4 +529,193 @@ function createExtension(
if (contextFileName) {
fs.writeFileSync(path.join(extDir, contextFileName), 'context');
}
return extDir;
}
describe('updateExtension', () => {
let tempHomeDir: string;
let userExtensionsDir: string;
beforeEach(() => {
tempHomeDir = fs.mkdtempSync(
path.join(os.tmpdir(), 'qwen-code-test-home-'),
);
vi.mocked(os.homedir).mockReturnValue(tempHomeDir);
userExtensionsDir = path.join(tempHomeDir, '.qwen', 'extensions');
// Clean up before each test
fs.rmSync(userExtensionsDir, { recursive: true, force: true });
fs.mkdirSync(userExtensionsDir, { recursive: true });
vi.mocked(execSync).mockClear();
});
afterEach(() => {
fs.rmSync(tempHomeDir, { recursive: true, force: true });
});
it('should update a git-installed extension', async () => {
// 1. "Install" an extension
const gitUrl = 'https://github.com/google/gemini-extensions.git';
const extensionName = 'gemini-extensions';
const targetExtDir = path.join(userExtensionsDir, extensionName);
const metadataPath = path.join(targetExtDir, INSTALL_METADATA_FILENAME);
// Create the "installed" extension directory and files
fs.mkdirSync(targetExtDir, { recursive: true });
fs.writeFileSync(
path.join(targetExtDir, EXTENSIONS_CONFIG_FILENAME),
JSON.stringify({ name: extensionName, version: '1.0.0' }),
);
fs.writeFileSync(
metadataPath,
JSON.stringify({ source: gitUrl, type: 'git' }),
);
// 2. Mock the git clone for the update
const clone = vi.fn().mockImplementation(async (_, destination) => {
fs.mkdirSync(destination, { recursive: true });
// This is the "updated" version
fs.writeFileSync(
path.join(destination, EXTENSIONS_CONFIG_FILENAME),
JSON.stringify({ name: extensionName, version: '1.1.0' }),
);
});
const mockedSimpleGit = simpleGit as vi.MockedFunction<typeof simpleGit>;
mockedSimpleGit.mockReturnValue({
clone,
} as unknown as SimpleGit);
// 3. Call updateExtension
const updateInfo = await updateExtension(extensionName);
// 4. Assertions
expect(updateInfo).toEqual({
originalVersion: '1.0.0',
updatedVersion: '1.1.0',
});
// Check that the config file reflects the new version
const updatedConfig = JSON.parse(
fs.readFileSync(
path.join(targetExtDir, EXTENSIONS_CONFIG_FILENAME),
'utf-8',
),
);
expect(updatedConfig.version).toBe('1.1.0');
});
});
describe('disableExtension', () => {
let tempWorkspaceDir: string;
let tempHomeDir: string;
beforeEach(() => {
tempWorkspaceDir = fs.mkdtempSync(
path.join(os.tmpdir(), 'qwen-code-test-workspace-'),
);
tempHomeDir = fs.mkdtempSync(
path.join(os.tmpdir(), 'qwen-code-test-home-'),
);
vi.mocked(os.homedir).mockReturnValue(tempHomeDir);
vi.spyOn(process, 'cwd').mockReturnValue(tempWorkspaceDir);
});
afterEach(() => {
fs.rmSync(tempWorkspaceDir, { recursive: true, force: true });
fs.rmSync(tempHomeDir, { recursive: true, force: true });
});
it('should disable an extension at the user scope', () => {
disableExtension('my-extension', SettingScope.User);
const settings = loadSettings(tempWorkspaceDir);
expect(
settings.forScope(SettingScope.User).settings.extensions?.disabled,
).toEqual(['my-extension']);
});
it('should disable an extension at the workspace scope', () => {
disableExtension('my-extension', SettingScope.Workspace);
const settings = loadSettings(tempWorkspaceDir);
expect(
settings.forScope(SettingScope.Workspace).settings.extensions?.disabled,
).toEqual(['my-extension']);
});
it('should handle disabling the same extension twice', () => {
disableExtension('my-extension', SettingScope.User);
disableExtension('my-extension', SettingScope.User);
const settings = loadSettings(tempWorkspaceDir);
expect(
settings.forScope(SettingScope.User).settings.extensions?.disabled,
).toEqual(['my-extension']);
});
it('should throw an error if you request system scope', () => {
expect(() => disableExtension('my-extension', SettingScope.System)).toThrow(
'System and SystemDefaults scopes are not supported.',
);
});
});
describe('enableExtension', () => {
let tempWorkspaceDir: string;
let tempHomeDir: string;
let userExtensionsDir: string;
beforeEach(() => {
tempWorkspaceDir = fs.mkdtempSync(
path.join(os.tmpdir(), 'qwen-code-test-workspace-'),
);
tempHomeDir = fs.mkdtempSync(
path.join(os.tmpdir(), 'qwen-code-test-home-'),
);
userExtensionsDir = path.join(tempHomeDir, '.qwen', 'extensions');
vi.mocked(os.homedir).mockReturnValue(tempHomeDir);
vi.spyOn(process, 'cwd').mockReturnValue(tempWorkspaceDir);
});
afterEach(() => {
fs.rmSync(tempWorkspaceDir, { recursive: true, force: true });
fs.rmSync(tempHomeDir, { recursive: true, force: true });
fs.rmSync(userExtensionsDir, { recursive: true, force: true });
});
afterAll(() => {
vi.restoreAllMocks();
});
const getActiveExtensions = (): GeminiCLIExtension[] => {
const extensions = loadExtensions(tempWorkspaceDir);
const activeExtensions = annotateActiveExtensions(
extensions,
[],
tempWorkspaceDir,
);
return activeExtensions.filter((e) => e.isActive);
};
it('should enable an extension at the user scope', () => {
createExtension(userExtensionsDir, 'ext1', '1.0.0');
disableExtension('ext1', SettingScope.User);
let activeExtensions = getActiveExtensions();
expect(activeExtensions).toHaveLength(0);
enableExtension('ext1', [SettingScope.User]);
activeExtensions = getActiveExtensions();
expect(activeExtensions).toHaveLength(1);
expect(activeExtensions[0].name).toBe('ext1');
});
it('should enable an extension at the workspace scope', () => {
createExtension(userExtensionsDir, 'ext1', '1.0.0');
disableExtension('ext1', SettingScope.Workspace);
let activeExtensions = getActiveExtensions();
expect(activeExtensions).toHaveLength(0);
enableExtension('ext1', [SettingScope.Workspace]);
activeExtensions = getActiveExtensions();
expect(activeExtensions).toHaveLength(1);
expect(activeExtensions[0].name).toBe('ext1');
});
});

View File

@@ -4,19 +4,29 @@
* SPDX-License-Identifier: Apache-2.0
*/
import { MCPServerConfig, GeminiCLIExtension } from '@qwen-code/qwen-code-core';
import * as fs from 'fs';
import * as path from 'path';
import * as os from 'os';
import type {
MCPServerConfig,
GeminiCLIExtension,
} from '@qwen-code/qwen-code-core';
import { Storage } from '@qwen-code/qwen-code-core';
import * as fs from 'node:fs';
import * as path from 'node:path';
import * as os from 'node:os';
import { simpleGit } from 'simple-git';
import { SettingScope, loadSettings } from '../config/settings.js';
import { getErrorMessage } from '../utils/errors.js';
import { recursivelyHydrateStrings } from './extensions/variables.js';
export const EXTENSIONS_DIRECTORY_NAME = path.join('.qwen', 'extensions');
export const EXTENSIONS_CONFIG_FILENAME = 'qwen-extension.json';
export const EXTENSIONS_CONFIG_FILENAME_OLD = 'gemini-extension.json';
export const INSTALL_METADATA_FILENAME = '.qwen-extension-install.json';
export interface Extension {
path: string;
config: ExtensionConfig;
contextFiles: string[];
installMetadata?: ExtensionInstallMetadata | undefined;
}
export interface ExtensionConfig {
@@ -27,14 +37,103 @@ export interface ExtensionConfig {
excludeTools?: string[];
}
export interface ExtensionInstallMetadata {
source: string;
type: 'git' | 'local';
}
export interface ExtensionUpdateInfo {
originalVersion: string;
updatedVersion: string;
}
export class ExtensionStorage {
private readonly extensionName: string;
constructor(extensionName: string) {
this.extensionName = extensionName;
}
getExtensionDir(): string {
return path.join(
ExtensionStorage.getUserExtensionsDir(),
this.extensionName,
);
}
getConfigPath(): string {
return path.join(this.getExtensionDir(), EXTENSIONS_CONFIG_FILENAME);
}
static getUserExtensionsDir(): string {
const storage = new Storage(os.homedir());
return storage.getExtensionsDir();
}
static async createTmpDir(): Promise<string> {
return await fs.promises.mkdtemp(
path.join(os.tmpdir(), 'gemini-extension'),
);
}
}
export function getWorkspaceExtensions(workspaceDir: string): Extension[] {
return loadExtensionsFromDir(workspaceDir);
}
async function copyExtension(
source: string,
destination: string,
): Promise<void> {
await fs.promises.cp(source, destination, { recursive: true });
}
export async function performWorkspaceExtensionMigration(
extensions: Extension[],
): Promise<string[]> {
const failedInstallNames: string[] = [];
for (const extension of extensions) {
try {
const installMetadata: ExtensionInstallMetadata = {
source: extension.path,
type: 'local',
};
await installExtension(installMetadata);
} catch (_) {
failedInstallNames.push(extension.config.name);
}
}
return failedInstallNames;
}
export function loadExtensions(workspaceDir: string): Extension[] {
const allExtensions = [
...loadExtensionsFromDir(workspaceDir),
...loadExtensionsFromDir(os.homedir()),
];
const settings = loadSettings(workspaceDir).merged;
const disabledExtensions = settings.extensions?.disabled ?? [];
const allExtensions = [...loadUserExtensions()];
if (!settings.experimental?.extensionManagement) {
allExtensions.push(...getWorkspaceExtensions(workspaceDir));
}
const uniqueExtensions = new Map<string, Extension>();
for (const extension of allExtensions) {
if (
!uniqueExtensions.has(extension.config.name) &&
!disabledExtensions.includes(extension.config.name)
) {
uniqueExtensions.set(extension.config.name, extension);
}
}
return Array.from(uniqueExtensions.values());
}
export function loadUserExtensions(): Extension[] {
const userExtensions = loadExtensionsFromDir(os.homedir());
const uniqueExtensions = new Map<string, Extension>();
for (const extension of userExtensions) {
if (!uniqueExtensions.has(extension.config.name)) {
uniqueExtensions.set(extension.config.name, extension);
}
@@ -43,8 +142,9 @@ export function loadExtensions(workspaceDir: string): Extension[] {
return Array.from(uniqueExtensions.values());
}
function loadExtensionsFromDir(dir: string): Extension[] {
const extensionsDir = path.join(dir, EXTENSIONS_DIRECTORY_NAME);
export function loadExtensionsFromDir(dir: string): Extension[] {
const storage = new Storage(dir);
const extensionsDir = storage.getExtensionsDir();
if (!fs.existsSync(extensionsDir)) {
return [];
}
@@ -61,7 +161,7 @@ function loadExtensionsFromDir(dir: string): Extension[] {
return extensions;
}
function loadExtension(extensionDir: string): Extension | null {
export function loadExtension(extensionDir: string): Extension | null {
if (!fs.statSync(extensionDir).isDirectory()) {
console.error(
`Warning: unexpected file ${extensionDir} in extensions directory.`,
@@ -86,7 +186,11 @@ function loadExtension(extensionDir: string): Extension | null {
try {
const configContent = fs.readFileSync(configFilePath, 'utf-8');
const config = JSON.parse(configContent) as ExtensionConfig;
const config = recursivelyHydrateStrings(JSON.parse(configContent), {
extensionPath: extensionDir,
'/': path.sep,
pathSeparator: path.sep,
}) as unknown as ExtensionConfig;
if (!config.name || !config.version) {
console.error(
`Invalid extension config in ${configFilePath}: missing name or version.`,
@@ -102,15 +206,31 @@ function loadExtension(extensionDir: string): Extension | null {
path: extensionDir,
config,
contextFiles,
installMetadata: loadInstallMetadata(extensionDir),
};
} catch (e) {
console.error(
`Warning: error parsing extension config in ${configFilePath}: ${e}`,
`Warning: error parsing extension config in ${configFilePath}: ${getErrorMessage(
e,
)}`,
);
return null;
}
}
function loadInstallMetadata(
extensionDir: string,
): ExtensionInstallMetadata | undefined {
const metadataFilePath = path.join(extensionDir, INSTALL_METADATA_FILENAME);
try {
const configContent = fs.readFileSync(metadataFilePath, 'utf-8');
const metadata = JSON.parse(configContent) as ExtensionInstallMetadata;
return metadata;
} catch (_e) {
return undefined;
}
}
function getContextFileNames(config: ExtensionConfig): string[] {
if (!config.contextFileName) {
return ['QWEN.md'];
@@ -120,17 +240,28 @@ function getContextFileNames(config: ExtensionConfig): string[] {
return config.contextFileName;
}
/**
* Returns an annotated list of extensions. If an extension is listed in enabledExtensionNames, it will be active.
* If enabledExtensionNames is empty, an extension is active unless it is in the list of disabled extensions in settings.
* @param extensions The base list of extensions.
* @param enabledExtensionNames The names of explicitly enabled extensions.
* @param workspaceDir The current workspace directory.
*/
export function annotateActiveExtensions(
extensions: Extension[],
enabledExtensionNames: string[],
workspaceDir: string,
): GeminiCLIExtension[] {
const settings = loadSettings(workspaceDir).merged;
const disabledExtensions = settings.extensions?.disabled ?? [];
const annotatedExtensions: GeminiCLIExtension[] = [];
if (enabledExtensionNames.length === 0) {
return extensions.map((extension) => ({
name: extension.config.name,
version: extension.config.version,
isActive: true,
isActive: !disabledExtensions.includes(extension.config.name),
path: extension.path,
}));
}
@@ -175,3 +306,230 @@ export function annotateActiveExtensions(
return annotatedExtensions;
}
/**
* Clones a Git repository to a specified local path.
* @param gitUrl The Git URL to clone.
* @param destination The destination path to clone the repository to.
*/
async function cloneFromGit(
gitUrl: string,
destination: string,
): Promise<void> {
try {
// TODO(chrstnb): Download the archive instead to avoid unnecessary .git info.
await simpleGit().clone(gitUrl, destination, ['--depth', '1']);
} catch (error) {
throw new Error(`Failed to clone Git repository from ${gitUrl}`, {
cause: error,
});
}
}
export async function installExtension(
installMetadata: ExtensionInstallMetadata,
cwd: string = process.cwd(),
): Promise<string> {
const extensionsDir = ExtensionStorage.getUserExtensionsDir();
await fs.promises.mkdir(extensionsDir, { recursive: true });
// Convert relative paths to absolute paths for the metadata file.
if (
installMetadata.type === 'local' &&
!path.isAbsolute(installMetadata.source)
) {
installMetadata.source = path.resolve(cwd, installMetadata.source);
}
let localSourcePath: string;
let tempDir: string | undefined;
if (installMetadata.type === 'git') {
tempDir = await ExtensionStorage.createTmpDir();
await cloneFromGit(installMetadata.source, tempDir);
localSourcePath = tempDir;
} else {
localSourcePath = installMetadata.source;
}
let newExtensionName: string | undefined;
try {
const newExtension = loadExtension(localSourcePath);
if (!newExtension) {
throw new Error(
`Invalid extension at ${installMetadata.source}. Please make sure it has a valid gemini-extension.json file.`,
);
}
// ~/.qwen/extensions/{ExtensionConfig.name}.
newExtensionName = newExtension.config.name;
const extensionStorage = new ExtensionStorage(newExtensionName);
const destinationPath = extensionStorage.getExtensionDir();
const installedExtensions = loadUserExtensions();
if (
installedExtensions.some(
(installed) => installed.config.name === newExtensionName,
)
) {
throw new Error(
`Extension "${newExtensionName}" is already installed. Please uninstall it first.`,
);
}
await copyExtension(localSourcePath, destinationPath);
const metadataString = JSON.stringify(installMetadata, null, 2);
const metadataPath = path.join(destinationPath, INSTALL_METADATA_FILENAME);
await fs.promises.writeFile(metadataPath, metadataString);
} finally {
if (tempDir) {
await fs.promises.rm(tempDir, { recursive: true, force: true });
}
}
return newExtensionName;
}
export async function uninstallExtension(
extensionName: string,
cwd: string = process.cwd(),
): Promise<void> {
const installedExtensions = loadUserExtensions();
if (
!installedExtensions.some(
(installed) => installed.config.name === extensionName,
)
) {
throw new Error(`Extension "${extensionName}" not found.`);
}
removeFromDisabledExtensions(
extensionName,
[SettingScope.User, SettingScope.Workspace],
cwd,
);
const storage = new ExtensionStorage(extensionName);
return await fs.promises.rm(storage.getExtensionDir(), {
recursive: true,
force: true,
});
}
export function toOutputString(extension: Extension): string {
let output = `${extension.config.name} (${extension.config.version})`;
output += `\n Path: ${extension.path}`;
if (extension.installMetadata) {
output += `\n Source: ${extension.installMetadata.source}`;
}
if (extension.contextFiles.length > 0) {
output += `\n Context files:`;
extension.contextFiles.forEach((contextFile) => {
output += `\n ${contextFile}`;
});
}
if (extension.config.mcpServers) {
output += `\n MCP servers:`;
Object.keys(extension.config.mcpServers).forEach((key) => {
output += `\n ${key}`;
});
}
if (extension.config.excludeTools) {
output += `\n Excluded tools:`;
extension.config.excludeTools.forEach((tool) => {
output += `\n ${tool}`;
});
}
return output;
}
export async function updateExtension(
extensionName: string,
cwd: string = process.cwd(),
): Promise<ExtensionUpdateInfo | undefined> {
const installedExtensions = loadUserExtensions();
const extension = installedExtensions.find(
(installed) => installed.config.name === extensionName,
);
if (!extension) {
throw new Error(
`Extension "${extensionName}" not found. Run gemini extensions list to see available extensions.`,
);
}
if (!extension.installMetadata) {
throw new Error(
`Extension cannot be updated because it is missing the ${INSTALL_METADATA_FILENAME} file. To update manually, uninstall and then reinstall the updated version.`,
);
}
const originalVersion = extension.config.version;
const tempDir = await ExtensionStorage.createTmpDir();
try {
await copyExtension(extension.path, tempDir);
await uninstallExtension(extensionName, cwd);
await installExtension(extension.installMetadata, cwd);
const updatedExtension = loadExtension(extension.path);
if (!updatedExtension) {
throw new Error('Updated extension not found after installation.');
}
const updatedVersion = updatedExtension.config.version;
return {
originalVersion,
updatedVersion,
};
} catch (e) {
console.error(
`Error updating extension, rolling back. ${getErrorMessage(e)}`,
);
await copyExtension(tempDir, extension.path);
throw e;
} finally {
await fs.promises.rm(tempDir, { recursive: true, force: true });
}
}
export function disableExtension(
name: string,
scope: SettingScope,
cwd: string = process.cwd(),
) {
if (scope === SettingScope.System || scope === SettingScope.SystemDefaults) {
throw new Error('System and SystemDefaults scopes are not supported.');
}
const settings = loadSettings(cwd);
const settingsFile = settings.forScope(scope);
const extensionSettings = settingsFile.settings.extensions || {
disabled: [],
};
const disabledExtensions = extensionSettings.disabled || [];
if (!disabledExtensions.includes(name)) {
disabledExtensions.push(name);
extensionSettings.disabled = disabledExtensions;
settings.setValue(scope, 'extensions', extensionSettings);
}
}
export function enableExtension(name: string, scopes: SettingScope[]) {
removeFromDisabledExtensions(name, scopes);
}
/**
* Removes an extension from the list of disabled extensions.
* @param name The name of the extension to remove.
* @param scope The scopes to remove the name from.
*/
function removeFromDisabledExtensions(
name: string,
scopes: SettingScope[],
cwd: string = process.cwd(),
) {
const settings = loadSettings(cwd);
for (const scope of scopes) {
const settingsFile = settings.forScope(scope);
const extensionSettings = settingsFile.settings.extensions || {
disabled: [],
};
const disabledExtensions = extensionSettings.disabled || [];
extensionSettings.disabled = disabledExtensions.filter(
(extension) => extension !== name,
);
settings.setValue(scope, 'extensions', extensionSettings);
}
}
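
A sketch of the lifecycle these exports enable, end to end (the git URL is hypothetical; `updateExtension` re-clones from the source recorded in the install metadata, as implemented above):

```ts
import {
  installExtension,
  updateExtension,
  uninstallExtension,
} from './extension.js';

// Install: clones to a temp dir, validates the config, copies into
// ~/.qwen/extensions/<name>, and writes the install metadata file.
const name = await installExtension({
  source: 'https://github.com/acme/demo-ext.git',
  type: 'git',
});

// Update: uninstall + reinstall from the recorded source, with rollback
// from a temp copy on failure.
const info = await updateExtension(name);
console.log(`${info?.originalVersion} → ${info?.updatedVersion}`);

// Uninstall: removes the directory and clears any disabled-list entries.
await uninstallExtension(name);
```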

View File

@@ -0,0 +1,30 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
export interface VariableDefinition {
type: 'string';
description: string;
default?: string;
required?: boolean;
}
export interface VariableSchema {
[key: string]: VariableDefinition;
}
const PATH_SEPARATOR_DEFINITION = {
type: 'string',
description: 'The path separator.',
} as const;
export const VARIABLE_SCHEMA = {
extensionPath: {
type: 'string',
description: 'The path of the extension in the filesystem.',
},
'/': PATH_SEPARATOR_DEFINITION,
pathSeparator: PATH_SEPARATOR_DEFINITION,
} as const;

View File

@@ -0,0 +1,18 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { expect, describe, it } from 'vitest';
import { hydrateString } from './variables.js';
describe('hydrateString', () => {
it('should replace a single variable', () => {
const context = {
extensionPath: 'path/my-extension',
};
const result = hydrateString('Hello, ${extensionPath}!', context);
expect(result).toBe('Hello, path/my-extension!');
});
});

View File

@@ -0,0 +1,65 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { type VariableSchema, VARIABLE_SCHEMA } from './variableSchema.js';
export type JsonObject = { [key: string]: JsonValue };
export type JsonArray = JsonValue[];
export type JsonValue =
| string
| number
| boolean
| null
| JsonObject
| JsonArray;
export type VariableContext = {
[key in keyof typeof VARIABLE_SCHEMA]?: string;
};
export function validateVariables(
variables: VariableContext,
schema: VariableSchema,
) {
for (const key in schema) {
const definition = schema[key];
if (definition.required && !variables[key as keyof VariableContext]) {
throw new Error(`Missing required variable: ${key}`);
}
}
}
export function hydrateString(str: string, context: VariableContext): string {
validateVariables(context, VARIABLE_SCHEMA);
const regex = /\${(.*?)}/g;
return str.replace(regex, (match, key) =>
context[key as keyof VariableContext] == null
? match
: (context[key as keyof VariableContext] as string),
);
}
export function recursivelyHydrateStrings(
obj: JsonValue,
values: VariableContext,
): JsonValue {
if (typeof obj === 'string') {
return hydrateString(obj, values);
}
if (Array.isArray(obj)) {
return obj.map((item) => recursivelyHydrateStrings(item, values));
}
if (typeof obj === 'object' && obj !== null) {
const newObj: JsonObject = {};
for (const key in obj) {
if (Object.prototype.hasOwnProperty.call(obj, key)) {
newObj[key] = recursivelyHydrateStrings(obj[key], values);
}
}
return newObj;
}
return obj;
}
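A sketch of recursive hydration over a nested extension config (the object shape is illustrative): strings anywhere in the tree are substituted, non-strings pass through untouched, and unmatched placeholders stay verbatim.

const hydrated = recursivelyHydrateStrings(
  {
    name: 'my-extension',
    mcpServers: {
      tool: { command: '${extensionPath}${/}bin${/}server', timeout: 5000 },
    },
  },
  { extensionPath: '/ext', '/': '/', pathSeparator: '/' },
);
// => command becomes '/ext/bin/server'; timeout stays 5000.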

View File

@@ -5,11 +5,8 @@
*/
import { describe, it, expect } from 'vitest';
import {
Command,
KeyBindingConfig,
defaultKeyBindings,
} from './keyBindings.js';
import type { KeyBindingConfig } from './keyBindings.js';
import { Command, defaultKeyBindings } from './keyBindings.js';
describe('keyBindings config', () => {
describe('defaultKeyBindings', () => {

View File

@@ -4,11 +4,12 @@
* SPDX-License-Identifier: Apache-2.0
*/
import { SandboxConfig } from '@qwen-code/qwen-code-core';
import type { SandboxConfig } from '@qwen-code/qwen-code-core';
import { FatalSandboxError } from '@qwen-code/qwen-code-core';
import commandExists from 'command-exists';
import * as os from 'node:os';
import { getPackageJson } from '../utils/package.js';
import { Settings } from './settings.js';
import type { Settings } from './settings.js';
// This is a stripped-down version of the CliArgs interface from config.ts
// to avoid circular dependencies.
@@ -51,21 +52,19 @@ function getSandboxCommand(
if (typeof sandbox === 'string' && sandbox) {
if (!isSandboxCommand(sandbox)) {
console.error(
`ERROR: invalid sandbox command '${sandbox}'. Must be one of ${VALID_SANDBOX_COMMANDS.join(
throw new FatalSandboxError(
`Invalid sandbox command '${sandbox}'. Must be one of ${VALID_SANDBOX_COMMANDS.join(
', ',
)}`,
);
process.exit(1);
}
// confirm that specified command exists
if (commandExists.sync(sandbox)) {
return sandbox;
}
console.error(
`ERROR: missing sandbox command '${sandbox}' (from GEMINI_SANDBOX)`,
throw new FatalSandboxError(
`Missing sandbox command '${sandbox}' (from GEMINI_SANDBOX)`,
);
process.exit(1);
}
// look for seatbelt, docker, or podman, in that order
@@ -80,11 +79,10 @@ function getSandboxCommand(
// throw an error if user requested sandbox but no command was found
if (sandbox === true) {
console.error(
'ERROR: GEMINI_SANDBOX is true but failed to determine command for sandbox; ' +
throw new FatalSandboxError(
'GEMINI_SANDBOX is true but failed to determine command for sandbox; ' +
'install docker or podman or specify command in GEMINI_SANDBOX',
);
process.exit(1);
}
return '';
@@ -94,7 +92,7 @@ export async function loadSandboxConfig(
settings: Settings,
argv: SandboxCliArgs,
): Promise<SandboxConfig | undefined> {
const sandboxOption = argv.sandbox ?? settings.sandbox;
const sandboxOption = argv.sandbox ?? settings.tools?.sandbox;
const command = getSandboxCommand(sandboxOption);
const packageJson = await getPackageJson();

File diff suppressed because it is too large

View File

@@ -4,29 +4,84 @@
* SPDX-License-Identifier: Apache-2.0
*/
import * as fs from 'fs';
import * as path from 'path';
import { homedir, platform } from 'os';
import * as fs from 'node:fs';
import * as path from 'node:path';
import { homedir, platform } from 'node:os';
import * as dotenv from 'dotenv';
import {
GEMINI_CONFIG_DIR as GEMINI_DIR,
getErrorMessage,
Storage,
} from '@qwen-code/qwen-code-core';
import stripJsonComments from 'strip-json-comments';
import { DefaultLight } from '../ui/themes/default-light.js';
import { DefaultDark } from '../ui/themes/default.js';
import { Settings, MemoryImportFormat } from './settingsSchema.js';
import { isWorkspaceTrusted } from './trustedFolders.js';
import type { Settings, MemoryImportFormat } from './settingsSchema.js';
import { mergeWith } from 'lodash-es';
export type { Settings, MemoryImportFormat };
export const SETTINGS_DIRECTORY_NAME = '.qwen';
export const USER_SETTINGS_DIR = path.join(homedir(), SETTINGS_DIRECTORY_NAME);
export const USER_SETTINGS_PATH = path.join(USER_SETTINGS_DIR, 'settings.json');
export const USER_SETTINGS_PATH = Storage.getGlobalSettingsPath();
export const USER_SETTINGS_DIR = path.dirname(USER_SETTINGS_PATH);
export const DEFAULT_EXCLUDED_ENV_VARS = ['DEBUG', 'DEBUG_MODE'];
const MIGRATE_V2_OVERWRITE = false;
const MIGRATION_MAP: Record<string, string> = {
preferredEditor: 'general.preferredEditor',
vimMode: 'general.vimMode',
disableAutoUpdate: 'general.disableAutoUpdate',
disableUpdateNag: 'general.disableUpdateNag',
checkpointing: 'general.checkpointing',
theme: 'ui.theme',
customThemes: 'ui.customThemes',
hideWindowTitle: 'ui.hideWindowTitle',
hideTips: 'ui.hideTips',
hideBanner: 'ui.hideBanner',
hideFooter: 'ui.hideFooter',
showMemoryUsage: 'ui.showMemoryUsage',
showLineNumbers: 'ui.showLineNumbers',
accessibility: 'ui.accessibility',
ideMode: 'ide.enabled',
hasSeenIdeIntegrationNudge: 'ide.hasSeenNudge',
usageStatisticsEnabled: 'privacy.usageStatisticsEnabled',
telemetry: 'telemetry',
model: 'model.name',
maxSessionTurns: 'model.maxSessionTurns',
summarizeToolOutput: 'model.summarizeToolOutput',
chatCompression: 'model.chatCompression',
skipNextSpeakerCheck: 'model.skipNextSpeakerCheck',
contextFileName: 'context.fileName',
memoryImportFormat: 'context.importFormat',
memoryDiscoveryMaxDirs: 'context.discoveryMaxDirs',
includeDirectories: 'context.includeDirectories',
loadMemoryFromIncludeDirectories: 'context.loadFromIncludeDirectories',
fileFiltering: 'context.fileFiltering',
sandbox: 'tools.sandbox',
shouldUseNodePtyShell: 'tools.usePty',
allowedTools: 'tools.allowed',
coreTools: 'tools.core',
excludeTools: 'tools.exclude',
toolDiscoveryCommand: 'tools.discoveryCommand',
toolCallCommand: 'tools.callCommand',
mcpServerCommand: 'mcp.serverCommand',
allowMCPServers: 'mcp.allowed',
excludeMCPServers: 'mcp.excluded',
folderTrustFeature: 'security.folderTrust.featureEnabled',
folderTrust: 'security.folderTrust.enabled',
selectedAuthType: 'security.auth.selectedType',
useExternalAuth: 'security.auth.useExternal',
autoConfigureMaxOldSpaceSize: 'advanced.autoConfigureMemory',
dnsResolutionOrder: 'advanced.dnsResolutionOrder',
excludedProjectEnvVars: 'advanced.excludedEnvVars',
bugCommand: 'advanced.bugCommand',
};
export function getSystemSettingsPath(): string {
if (process.env['GEMINI_CLI_SYSTEM_SETTINGS_PATH']) {
return process.env['GEMINI_CLI_SYSTEM_SETTINGS_PATH'];
if (process.env['QWEN_CODE_SYSTEM_SETTINGS_PATH']) {
return process.env['QWEN_CODE_SYSTEM_SETTINGS_PATH'];
}
if (platform() === 'darwin') {
return '/Library/Application Support/QwenCode/settings.json';
@@ -37,8 +92,14 @@ export function getSystemSettingsPath(): string {
}
}
export function getWorkspaceSettingsPath(workspaceDir: string): string {
return path.join(workspaceDir, SETTINGS_DIRECTORY_NAME, 'settings.json');
export function getSystemDefaultsPath(): string {
if (process.env['QWEN_CODE_SYSTEM_DEFAULTS_PATH']) {
return process.env['QWEN_CODE_SYSTEM_DEFAULTS_PATH'];
}
return path.join(
path.dirname(getSystemSettingsPath()),
'system-defaults.json',
);
}
export type { DnsResolutionOrder } from './settingsSchema.js';
@@ -47,6 +108,7 @@ export enum SettingScope {
User = 'User',
Workspace = 'Workspace',
System = 'System',
SystemDefaults = 'SystemDefaults',
}
export interface CheckpointingSettings {
@@ -59,6 +121,7 @@ export interface SummarizeToolOutputSettings {
export interface AccessibilitySettings {
disableLoadingPhrases?: boolean;
screenReader?: boolean;
}
export interface SettingsError {
@@ -71,38 +134,290 @@ export interface SettingsFile {
path: string;
}
function setNestedProperty(
obj: Record<string, unknown>,
path: string,
value: unknown,
) {
const keys = path.split('.');
const lastKey = keys.pop();
if (!lastKey) return;
let current: Record<string, unknown> = obj;
for (const key of keys) {
if (current[key] === undefined) {
current[key] = {};
}
const next = current[key];
if (typeof next === 'object' && next !== null) {
current = next as Record<string, unknown>;
} else {
// This path is invalid, so we stop.
return;
}
}
current[lastKey] = value;
}
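// A quick sketch of this helper's behavior (illustrative values):
//   const obj: Record<string, unknown> = {};
//   setNestedProperty(obj, 'ui.accessibility.screenReader', true);
//   // obj => { ui: { accessibility: { screenReader: true } } }
//   const clash: Record<string, unknown> = { ui: 'not-an-object' };
//   setNestedProperty(clash, 'ui.theme', 'dark');
//   // 'ui' is not an object, so the path is invalid and clash is unchanged.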
function needsMigration(settings: Record<string, unknown>): boolean {
return !('general' in settings);
}
function migrateSettingsToV2(
flatSettings: Record<string, unknown>,
): Record<string, unknown> | null {
if (!needsMigration(flatSettings)) {
return null;
}
const v2Settings: Record<string, unknown> = {};
const flatKeys = new Set(Object.keys(flatSettings));
for (const [oldKey, newPath] of Object.entries(MIGRATION_MAP)) {
if (flatKeys.has(oldKey)) {
setNestedProperty(v2Settings, newPath, flatSettings[oldKey]);
flatKeys.delete(oldKey);
}
}
// Preserve mcpServers at the top level
if (flatSettings['mcpServers']) {
v2Settings['mcpServers'] = flatSettings['mcpServers'];
flatKeys.delete('mcpServers');
}
// Carry over any unrecognized keys
for (const remainingKey of flatKeys) {
v2Settings[remainingKey] = flatSettings[remainingKey];
}
return v2Settings;
}
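// For a flat v1 file the migration yields the nested v2 shape, preserving
// mcpServers and unrecognized keys (illustrative input/output):
//   migrateSettingsToV2({ theme: 'dark', vimMode: true, someCustomKey: 42 })
//   // => { ui: { theme: 'dark' }, general: { vimMode: true }, someCustomKey: 42 }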
function getNestedProperty(
obj: Record<string, unknown>,
path: string,
): unknown {
const keys = path.split('.');
let current: unknown = obj;
for (const key of keys) {
if (typeof current !== 'object' || current === null || !(key in current)) {
return undefined;
}
current = (current as Record<string, unknown>)[key];
}
return current;
}
const REVERSE_MIGRATION_MAP: Record<string, string> = Object.fromEntries(
Object.entries(MIGRATION_MAP).map(([key, value]) => [value, key]),
);
// Dynamically determine the top-level keys from the V2 settings structure.
const KNOWN_V2_CONTAINERS = new Set(
Object.values(MIGRATION_MAP).map((path) => path.split('.')[0]),
);
export function migrateSettingsToV1(
v2Settings: Record<string, unknown>,
): Record<string, unknown> {
const v1Settings: Record<string, unknown> = {};
const v2Keys = new Set(Object.keys(v2Settings));
for (const [newPath, oldKey] of Object.entries(REVERSE_MIGRATION_MAP)) {
const value = getNestedProperty(v2Settings, newPath);
if (value !== undefined) {
v1Settings[oldKey] = value;
v2Keys.delete(newPath.split('.')[0]);
}
}
// Preserve mcpServers at the top level
if (v2Settings['mcpServers']) {
v1Settings['mcpServers'] = v2Settings['mcpServers'];
v2Keys.delete('mcpServers');
}
// Carry over any unrecognized keys
for (const remainingKey of v2Keys) {
const value = v2Settings[remainingKey];
if (value === undefined) {
continue;
}
// Don't carry over empty objects that were just containers for migrated settings.
if (
KNOWN_V2_CONTAINERS.has(remainingKey) &&
typeof value === 'object' &&
value !== null &&
!Array.isArray(value) &&
Object.keys(value).length === 0
) {
continue;
}
v1Settings[remainingKey] = value;
}
return v1Settings;
}
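// The reverse mapping flattens recognized v2 paths back to v1 keys and drops
// v2 containers left empty by the migration (illustrative input/output):
//   migrateSettingsToV1({ ui: { theme: 'dark' }, general: { vimMode: true } })
//   // => { theme: 'dark', vimMode: true }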
function mergeSettings(
system: Settings,
systemDefaults: Settings,
user: Settings,
workspace: Settings,
isTrusted: boolean,
): Settings {
// folderTrust is not supported at workspace level.
// eslint-disable-next-line @typescript-eslint/no-unused-vars
const { folderTrust, ...workspaceWithoutFolderTrust } = workspace;
const safeWorkspace = isTrusted ? workspace : ({} as Settings);
// folderTrust is not supported at workspace level.
const { security, ...restOfWorkspace } = safeWorkspace;
const safeWorkspaceWithoutFolderTrust = security
? {
...restOfWorkspace,
// eslint-disable-next-line @typescript-eslint/no-unused-vars
security: (({ folderTrust, ...rest }) => rest)(security),
}
: {
...restOfWorkspace,
security: {},
};
// Settings are merged with the following precedence (last one wins for
// single values):
// 1. System Defaults
// 2. User Settings
// 3. Workspace Settings
// 4. System Settings (as overrides)
//
// For properties that are arrays (e.g., includeDirectories), the arrays
// are concatenated. For objects (e.g., customThemes), they are merged.
return {
...systemDefaults,
...user,
...workspaceWithoutFolderTrust,
...safeWorkspaceWithoutFolderTrust,
...system,
customThemes: {
...(user.customThemes || {}),
...(workspace.customThemes || {}),
...(system.customThemes || {}),
general: {
...(systemDefaults.general || {}),
...(user.general || {}),
...(safeWorkspaceWithoutFolderTrust.general || {}),
...(system.general || {}),
},
ui: {
...(systemDefaults.ui || {}),
...(user.ui || {}),
...(safeWorkspaceWithoutFolderTrust.ui || {}),
...(system.ui || {}),
customThemes: {
...(systemDefaults.ui?.customThemes || {}),
...(user.ui?.customThemes || {}),
...(safeWorkspaceWithoutFolderTrust.ui?.customThemes || {}),
...(system.ui?.customThemes || {}),
},
},
ide: {
...(systemDefaults.ide || {}),
...(user.ide || {}),
...(safeWorkspaceWithoutFolderTrust.ide || {}),
...(system.ide || {}),
},
privacy: {
...(systemDefaults.privacy || {}),
...(user.privacy || {}),
...(safeWorkspaceWithoutFolderTrust.privacy || {}),
...(system.privacy || {}),
},
telemetry: {
...(systemDefaults.telemetry || {}),
...(user.telemetry || {}),
...(safeWorkspaceWithoutFolderTrust.telemetry || {}),
...(system.telemetry || {}),
},
security: {
...(systemDefaults.security || {}),
...(user.security || {}),
...(safeWorkspaceWithoutFolderTrust.security || {}),
...(system.security || {}),
},
mcp: {
...(systemDefaults.mcp || {}),
...(user.mcp || {}),
...(safeWorkspaceWithoutFolderTrust.mcp || {}),
...(system.mcp || {}),
},
mcpServers: {
...(systemDefaults.mcpServers || {}),
...(user.mcpServers || {}),
...(workspace.mcpServers || {}),
...(safeWorkspaceWithoutFolderTrust.mcpServers || {}),
...(system.mcpServers || {}),
},
includeDirectories: [
...(system.includeDirectories || []),
...(user.includeDirectories || []),
...(workspace.includeDirectories || []),
],
chatCompression: {
...(system.chatCompression || {}),
...(user.chatCompression || {}),
...(workspace.chatCompression || {}),
tools: {
...(systemDefaults.tools || {}),
...(user.tools || {}),
...(safeWorkspaceWithoutFolderTrust.tools || {}),
...(system.tools || {}),
},
context: {
...(systemDefaults.context || {}),
...(user.context || {}),
...(safeWorkspaceWithoutFolderTrust.context || {}),
...(system.context || {}),
includeDirectories: [
...(systemDefaults.context?.includeDirectories || []),
...(user.context?.includeDirectories || []),
...(safeWorkspaceWithoutFolderTrust.context?.includeDirectories || []),
...(system.context?.includeDirectories || []),
],
},
model: {
...(systemDefaults.model || {}),
...(user.model || {}),
...(safeWorkspaceWithoutFolderTrust.model || {}),
...(system.model || {}),
chatCompression: {
...(systemDefaults.model?.chatCompression || {}),
...(user.model?.chatCompression || {}),
...(safeWorkspaceWithoutFolderTrust.model?.chatCompression || {}),
...(system.model?.chatCompression || {}),
},
},
advanced: {
...(systemDefaults.advanced || {}),
...(user.advanced || {}),
...(safeWorkspaceWithoutFolderTrust.advanced || {}),
...(system.advanced || {}),
excludedEnvVars: [
...new Set([
...(systemDefaults.advanced?.excludedEnvVars || []),
...(user.advanced?.excludedEnvVars || []),
...(safeWorkspaceWithoutFolderTrust.advanced?.excludedEnvVars || []),
...(system.advanced?.excludedEnvVars || []),
]),
],
},
extensions: {
...(systemDefaults.extensions || {}),
...(user.extensions || {}),
...(safeWorkspaceWithoutFolderTrust.extensions || {}),
...(system.extensions || {}),
disabled: [
...new Set([
...(systemDefaults.extensions?.disabled || []),
...(user.extensions?.disabled || []),
...(safeWorkspaceWithoutFolderTrust.extensions?.disabled || []),
...(system.extensions?.disabled || []),
]),
],
workspacesWithMigrationNudge: [
...new Set([
...(systemDefaults.extensions?.workspacesWithMigrationNudge || []),
...(user.extensions?.workspacesWithMigrationNudge || []),
...(safeWorkspaceWithoutFolderTrust.extensions
?.workspacesWithMigrationNudge || []),
...(system.extensions?.workspacesWithMigrationNudge || []),
]),
],
},
};
}
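// A sketch of the resulting precedence (illustrative values): for scalars the
// last spread wins (defaults < user < workspace < system), while list-like
// settings such as context.includeDirectories concatenate across scopes:
//   mergeSettings(
//     { ui: { theme: 'corp' } },                        // system
//     { ui: { theme: 'default-dark' } },                // system defaults
//     { context: { includeDirectories: ['~/notes'] } }, // user
//     { context: { includeDirectories: ['./docs'] } },  // workspace
//     true,                                             // isTrusted
//   )
//   // => ui.theme === 'corp'; context.includeDirectories === ['~/notes', './docs']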
@@ -110,21 +425,30 @@ function mergeSettings(
export class LoadedSettings {
constructor(
system: SettingsFile,
systemDefaults: SettingsFile,
user: SettingsFile,
workspace: SettingsFile,
errors: SettingsError[],
isTrusted: boolean,
migratedInMemoryScopes: Set<SettingScope>,
) {
this.system = system;
this.systemDefaults = systemDefaults;
this.user = user;
this.workspace = workspace;
this.errors = errors;
this.isTrusted = isTrusted;
this.migratedInMemoryScopes = migratedInMemoryScopes;
this._merged = this.computeMergedSettings();
}
readonly system: SettingsFile;
readonly systemDefaults: SettingsFile;
readonly user: SettingsFile;
readonly workspace: SettingsFile;
readonly errors: SettingsError[];
readonly isTrusted: boolean;
readonly migratedInMemoryScopes: Set<SettingScope>;
private _merged: Settings;
@@ -135,8 +459,10 @@ export class LoadedSettings {
private computeMergedSettings(): Settings {
return mergeSettings(
this.system.settings,
this.systemDefaults.settings,
this.user.settings,
this.workspace.settings,
this.isTrusted,
);
}
@@ -148,18 +474,16 @@ export class LoadedSettings {
return this.workspace;
case SettingScope.System:
return this.system;
case SettingScope.SystemDefaults:
return this.systemDefaults;
default:
throw new Error(`Invalid scope: ${scope}`);
}
}
setValue<K extends keyof Settings>(
scope: SettingScope,
key: K,
value: Settings[K],
): void {
setValue(scope: SettingScope, key: string, value: unknown): void {
const settingsFile = this.forScope(scope);
settingsFile.settings[key] = value;
setNestedProperty(settingsFile.settings, key, value);
this._merged = this.computeMergedSettings();
saveSettings(settingsFile);
}
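// Illustrative usage: dotted keys are routed through setNestedProperty, the
// merged view is recomputed, and the scope's file is saved in one call.
//   const settings = loadSettings(process.cwd());
//   settings.setValue(SettingScope.User, 'general.vimMode', true);
//   // settings.merged.general?.vimMode === true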
@@ -269,7 +593,9 @@ export function loadEnvironment(settings?: Settings): void {
// If no settings provided, try to load workspace settings for exclusions
let resolvedSettings = settings;
if (!resolvedSettings) {
const workspaceSettingsPath = getWorkspaceSettingsPath(process.cwd());
const workspaceSettingsPath = new Storage(
process.cwd(),
).getWorkspaceSettingsPath();
try {
if (fs.existsSync(workspaceSettingsPath)) {
const workspaceContent = fs.readFileSync(
@@ -294,7 +620,8 @@ export function loadEnvironment(settings?: Settings): void {
const parsedEnv = dotenv.parse(envFileContent);
const excludedVars =
resolvedSettings?.excludedProjectEnvVars || DEFAULT_EXCLUDED_ENV_VARS;
resolvedSettings?.advanced?.excludedEnvVars ||
DEFAULT_EXCLUDED_ENV_VARS;
const isProjectEnvFile = !envFilePath.includes(GEMINI_DIR);
for (const key in parsedEnv) {
@@ -322,10 +649,13 @@ export function loadEnvironment(settings?: Settings): void {
*/
export function loadSettings(workspaceDir: string): LoadedSettings {
let systemSettings: Settings = {};
let systemDefaultSettings: Settings = {};
let userSettings: Settings = {};
let workspaceSettings: Settings = {};
const settingsErrors: SettingsError[] = [];
const systemSettingsPath = getSystemSettingsPath();
const systemDefaultsPath = getSystemDefaultsPath();
const migratedInMemoryScopes = new Set<SettingScope>();
// Resolve paths to their canonical representation to handle symlinks
const resolvedWorkspaceDir = path.resolve(workspaceDir);
@@ -342,70 +672,102 @@ export function loadSettings(workspaceDir: string): LoadedSettings {
// We expect homedir to always exist and be resolvable.
const realHomeDir = fs.realpathSync(resolvedHomeDir);
const workspaceSettingsPath = getWorkspaceSettingsPath(workspaceDir);
const workspaceSettingsPath = new Storage(
workspaceDir,
).getWorkspaceSettingsPath();
// Load system settings
try {
if (fs.existsSync(systemSettingsPath)) {
const systemContent = fs.readFileSync(systemSettingsPath, 'utf-8');
systemSettings = JSON.parse(stripJsonComments(systemContent)) as Settings;
}
} catch (error: unknown) {
settingsErrors.push({
message: getErrorMessage(error),
path: systemSettingsPath,
});
}
// Load user settings
try {
if (fs.existsSync(USER_SETTINGS_PATH)) {
const userContent = fs.readFileSync(USER_SETTINGS_PATH, 'utf-8');
userSettings = JSON.parse(stripJsonComments(userContent)) as Settings;
// Support legacy theme names
if (userSettings.theme && userSettings.theme === 'VS') {
userSettings.theme = DefaultLight.name;
} else if (userSettings.theme && userSettings.theme === 'VS2015') {
userSettings.theme = DefaultDark.name;
}
}
} catch (error: unknown) {
settingsErrors.push({
message: getErrorMessage(error),
path: USER_SETTINGS_PATH,
});
}
if (realWorkspaceDir !== realHomeDir) {
// Load workspace settings
const loadAndMigrate = (filePath: string, scope: SettingScope): Settings => {
try {
if (fs.existsSync(workspaceSettingsPath)) {
const projectContent = fs.readFileSync(workspaceSettingsPath, 'utf-8');
workspaceSettings = JSON.parse(
stripJsonComments(projectContent),
) as Settings;
if (workspaceSettings.theme && workspaceSettings.theme === 'VS') {
workspaceSettings.theme = DefaultLight.name;
} else if (
workspaceSettings.theme &&
workspaceSettings.theme === 'VS2015'
if (fs.existsSync(filePath)) {
const content = fs.readFileSync(filePath, 'utf-8');
const rawSettings: unknown = JSON.parse(stripJsonComments(content));
if (
typeof rawSettings !== 'object' ||
rawSettings === null ||
Array.isArray(rawSettings)
) {
workspaceSettings.theme = DefaultDark.name;
settingsErrors.push({
message: 'Settings file is not a valid JSON object.',
path: filePath,
});
return {};
}
let settingsObject = rawSettings as Record<string, unknown>;
if (needsMigration(settingsObject)) {
const migratedSettings = migrateSettingsToV2(settingsObject);
if (migratedSettings) {
if (MIGRATE_V2_OVERWRITE) {
try {
fs.renameSync(filePath, `${filePath}.orig`);
fs.writeFileSync(
filePath,
JSON.stringify(migratedSettings, null, 2),
'utf-8',
);
} catch (e) {
console.error(
`Error migrating settings file on disk: ${getErrorMessage(
e,
)}`,
);
}
} else {
migratedInMemoryScopes.add(scope);
}
settingsObject = migratedSettings;
}
}
return settingsObject as Settings;
}
} catch (error: unknown) {
settingsErrors.push({
message: getErrorMessage(error),
path: workspaceSettingsPath,
path: filePath,
});
}
return {};
};
systemSettings = loadAndMigrate(systemSettingsPath, SettingScope.System);
systemDefaultSettings = loadAndMigrate(
systemDefaultsPath,
SettingScope.SystemDefaults,
);
userSettings = loadAndMigrate(USER_SETTINGS_PATH, SettingScope.User);
if (realWorkspaceDir !== realHomeDir) {
workspaceSettings = loadAndMigrate(
workspaceSettingsPath,
SettingScope.Workspace,
);
}
// Support legacy theme names
if (userSettings.ui?.theme === 'VS') {
userSettings.ui.theme = DefaultLight.name;
} else if (userSettings.ui?.theme === 'VS2015') {
userSettings.ui.theme = DefaultDark.name;
}
if (workspaceSettings.ui?.theme === 'VS') {
workspaceSettings.ui.theme = DefaultLight.name;
} else if (workspaceSettings.ui?.theme === 'VS2015') {
workspaceSettings.ui.theme = DefaultDark.name;
}
// For the initial trust check, we can only use user and system settings.
const initialTrustCheckSettings = mergeWith({}, systemSettings, userSettings);
const isTrusted =
isWorkspaceTrusted(initialTrustCheckSettings as Settings) ?? true;
// Create a temporary merged settings object to pass to loadEnvironment.
const tempMergedSettings = mergeSettings(
systemSettings,
systemDefaultSettings,
userSettings,
workspaceSettings,
isTrusted,
);
// loadEnvironment depends on settings so we have to create a temp version of
@@ -423,6 +785,10 @@ export function loadSettings(workspaceDir: string): LoadedSettings {
path: systemSettingsPath,
settings: systemSettings,
},
{
path: systemDefaultsPath,
settings: systemDefaultSettings,
},
{
path: USER_SETTINGS_PATH,
settings: userSettings,
@@ -432,21 +798,10 @@ export function loadSettings(workspaceDir: string): LoadedSettings {
settings: workspaceSettings,
},
settingsErrors,
isTrusted,
migratedInMemoryScopes,
);
// Validate chatCompression settings
const chatCompression = loadedSettings.merged.chatCompression;
const threshold = chatCompression?.contextPercentageThreshold;
if (
threshold != null &&
(typeof threshold !== 'number' || threshold < 0 || threshold > 1)
) {
console.warn(
`Invalid value for chatCompression.contextPercentageThreshold: "${threshold}". Please use a value between 0 and 1. Using default compression settings.`,
);
delete loadedSettings.merged.chatCompression;
}
return loadedSettings;
}
@@ -458,9 +813,16 @@ export function saveSettings(settingsFile: SettingsFile): void {
fs.mkdirSync(dirPath, { recursive: true });
}
let settingsToSave = settingsFile.settings;
if (!MIGRATE_V2_OVERWRITE) {
settingsToSave = migrateSettingsToV1(
settingsToSave as Record<string, unknown>,
) as Settings;
}
fs.writeFileSync(
settingsFile.path,
JSON.stringify(settingsFile.settings, null, 2),
JSON.stringify(settingsToSave, null, 2),
'utf-8',
);
} catch (error) {

View File

@@ -5,53 +5,25 @@
*/
import { describe, it, expect } from 'vitest';
import { SETTINGS_SCHEMA, Settings } from './settingsSchema.js';
import type { Settings } from './settingsSchema.js';
import { SETTINGS_SCHEMA } from './settingsSchema.js';
describe('SettingsSchema', () => {
describe('SETTINGS_SCHEMA', () => {
it('should contain all expected top-level settings', () => {
const expectedSettings = [
'theme',
'customThemes',
'showMemoryUsage',
'usageStatisticsEnabled',
'autoConfigureMaxOldSpaceSize',
'preferredEditor',
'maxSessionTurns',
'memoryImportFormat',
'memoryDiscoveryMaxDirs',
'contextFileName',
'vimMode',
'ideMode',
'accessibility',
'checkpointing',
'fileFiltering',
'disableAutoUpdate',
'hideWindowTitle',
'hideTips',
'hideBanner',
'selectedAuthType',
'useExternalAuth',
'sandbox',
'coreTools',
'excludeTools',
'toolDiscoveryCommand',
'toolCallCommand',
'mcpServerCommand',
'mcpServers',
'allowMCPServers',
'excludeMCPServers',
'general',
'ui',
'ide',
'privacy',
'telemetry',
'bugCommand',
'summarizeToolOutput',
'dnsResolutionOrder',
'excludedProjectEnvVars',
'disableUpdateNag',
'includeDirectories',
'loadMemoryFromIncludeDirectories',
'model',
'hasSeenIdeIntegrationNudge',
'folderTrustFeature',
'context',
'tools',
'mcp',
'security',
'advanced',
];
expectedSettings.forEach((setting) => {
@@ -77,9 +49,16 @@ describe('SettingsSchema', () => {
it('should have correct nested setting structure', () => {
const nestedSettings = [
'accessibility',
'checkpointing',
'fileFiltering',
'general',
'ui',
'ide',
'privacy',
'model',
'context',
'tools',
'mcp',
'security',
'advanced',
];
nestedSettings.forEach((setting) => {
@@ -96,29 +75,36 @@ describe('SettingsSchema', () => {
it('should have accessibility nested properties', () => {
expect(
SETTINGS_SCHEMA.accessibility.properties?.disableLoadingPhrases,
SETTINGS_SCHEMA.ui?.properties?.accessibility?.properties,
).toBeDefined();
expect(
SETTINGS_SCHEMA.accessibility.properties?.disableLoadingPhrases.type,
SETTINGS_SCHEMA.ui?.properties?.accessibility.properties
?.disableLoadingPhrases.type,
).toBe('boolean');
});
it('should have checkpointing nested properties', () => {
expect(SETTINGS_SCHEMA.checkpointing.properties?.enabled).toBeDefined();
expect(SETTINGS_SCHEMA.checkpointing.properties?.enabled.type).toBe(
'boolean',
);
expect(
SETTINGS_SCHEMA.general?.properties?.checkpointing.properties?.enabled,
).toBeDefined();
expect(
SETTINGS_SCHEMA.general?.properties?.checkpointing.properties?.enabled
.type,
).toBe('boolean');
});
it('should have fileFiltering nested properties', () => {
expect(
SETTINGS_SCHEMA.fileFiltering.properties?.respectGitIgnore,
SETTINGS_SCHEMA.context.properties.fileFiltering.properties
?.respectGitIgnore,
).toBeDefined();
expect(
SETTINGS_SCHEMA.fileFiltering.properties?.respectGeminiIgnore,
SETTINGS_SCHEMA.context.properties.fileFiltering.properties
?.respectGeminiIgnore,
).toBeDefined();
expect(
SETTINGS_SCHEMA.fileFiltering.properties?.enableRecursiveFileSearch,
SETTINGS_SCHEMA.context.properties.fileFiltering.properties
?.enableRecursiveFileSearch,
).toBeDefined();
});
@@ -147,11 +133,6 @@ describe('SettingsSchema', () => {
expect(categories.size).toBeGreaterThan(0);
expect(categories).toContain('General');
expect(categories).toContain('UI');
expect(categories).toContain('Mode');
expect(categories).toContain('Updates');
expect(categories).toContain('Accessibility');
expect(categories).toContain('Checkpointing');
expect(categories).toContain('File Filtering');
expect(categories).toContain('Advanced');
});
@@ -180,73 +161,148 @@ describe('SettingsSchema', () => {
it('should have showInDialog property configured', () => {
// Check that user-facing settings are marked for dialog display
expect(SETTINGS_SCHEMA.showMemoryUsage.showInDialog).toBe(true);
expect(SETTINGS_SCHEMA.vimMode.showInDialog).toBe(true);
expect(SETTINGS_SCHEMA.ideMode.showInDialog).toBe(true);
expect(SETTINGS_SCHEMA.disableAutoUpdate.showInDialog).toBe(true);
expect(SETTINGS_SCHEMA.hideWindowTitle.showInDialog).toBe(true);
expect(SETTINGS_SCHEMA.hideTips.showInDialog).toBe(true);
expect(SETTINGS_SCHEMA.hideBanner.showInDialog).toBe(true);
expect(SETTINGS_SCHEMA.usageStatisticsEnabled.showInDialog).toBe(false);
expect(SETTINGS_SCHEMA.ui.properties.showMemoryUsage.showInDialog).toBe(
true,
);
expect(SETTINGS_SCHEMA.general.properties.vimMode.showInDialog).toBe(
true,
);
expect(SETTINGS_SCHEMA.ide.properties.enabled.showInDialog).toBe(true);
expect(
SETTINGS_SCHEMA.general.properties.disableAutoUpdate.showInDialog,
).toBe(true);
expect(SETTINGS_SCHEMA.ui.properties.hideWindowTitle.showInDialog).toBe(
true,
);
expect(SETTINGS_SCHEMA.ui.properties.hideTips.showInDialog).toBe(true);
expect(SETTINGS_SCHEMA.ui.properties.hideBanner.showInDialog).toBe(true);
expect(
SETTINGS_SCHEMA.privacy.properties.usageStatisticsEnabled.showInDialog,
).toBe(false);
// Check that advanced settings are hidden from dialog
expect(SETTINGS_SCHEMA.selectedAuthType.showInDialog).toBe(false);
expect(SETTINGS_SCHEMA.coreTools.showInDialog).toBe(false);
expect(SETTINGS_SCHEMA.security.properties.auth.showInDialog).toBe(false);
expect(SETTINGS_SCHEMA.tools.properties.core.showInDialog).toBe(false);
expect(SETTINGS_SCHEMA.mcpServers.showInDialog).toBe(false);
expect(SETTINGS_SCHEMA.telemetry.showInDialog).toBe(false);
// Check that some settings are appropriately hidden
expect(SETTINGS_SCHEMA.theme.showInDialog).toBe(false); // Changed to false
expect(SETTINGS_SCHEMA.customThemes.showInDialog).toBe(false); // Managed via theme editor
expect(SETTINGS_SCHEMA.checkpointing.showInDialog).toBe(false); // Experimental feature
expect(SETTINGS_SCHEMA.accessibility.showInDialog).toBe(false); // Changed to false
expect(SETTINGS_SCHEMA.fileFiltering.showInDialog).toBe(false); // Changed to false
expect(SETTINGS_SCHEMA.preferredEditor.showInDialog).toBe(false); // Changed to false
expect(SETTINGS_SCHEMA.autoConfigureMaxOldSpaceSize.showInDialog).toBe(
true,
);
expect(SETTINGS_SCHEMA.ui.properties.theme.showInDialog).toBe(false); // Changed to false
expect(SETTINGS_SCHEMA.ui.properties.customThemes.showInDialog).toBe(
false,
); // Managed via theme editor
expect(
SETTINGS_SCHEMA.general.properties.checkpointing.showInDialog,
).toBe(false); // Experimental feature
expect(SETTINGS_SCHEMA.ui.properties.accessibility.showInDialog).toBe(
false,
); // Changed to false
expect(
SETTINGS_SCHEMA.context.properties.fileFiltering.showInDialog,
).toBe(false); // Changed to false
expect(
SETTINGS_SCHEMA.general.properties.preferredEditor.showInDialog,
).toBe(false); // Changed to false
expect(
SETTINGS_SCHEMA.advanced.properties.autoConfigureMemory.showInDialog,
).toBe(false);
});
it('should infer Settings type correctly', () => {
// This test ensures that the Settings type is properly inferred from the schema
const settings: Settings = {
theme: 'dark',
includeDirectories: ['/path/to/dir'],
loadMemoryFromIncludeDirectories: true,
ui: {
theme: 'dark',
},
context: {
includeDirectories: ['/path/to/dir'],
loadMemoryFromIncludeDirectories: true,
},
};
// TypeScript should not complain about these properties
expect(settings.theme).toBe('dark');
expect(settings.includeDirectories).toEqual(['/path/to/dir']);
expect(settings.loadMemoryFromIncludeDirectories).toBe(true);
expect(settings.ui?.theme).toBe('dark');
expect(settings.context?.includeDirectories).toEqual(['/path/to/dir']);
expect(settings.context?.loadMemoryFromIncludeDirectories).toBe(true);
});
it('should have includeDirectories setting in schema', () => {
expect(SETTINGS_SCHEMA.includeDirectories).toBeDefined();
expect(SETTINGS_SCHEMA.includeDirectories.type).toBe('array');
expect(SETTINGS_SCHEMA.includeDirectories.category).toBe('General');
expect(SETTINGS_SCHEMA.includeDirectories.default).toEqual([]);
expect(
SETTINGS_SCHEMA.context?.properties.includeDirectories,
).toBeDefined();
expect(SETTINGS_SCHEMA.context?.properties.includeDirectories.type).toBe(
'array',
);
expect(
SETTINGS_SCHEMA.context?.properties.includeDirectories.category,
).toBe('Context');
expect(
SETTINGS_SCHEMA.context?.properties.includeDirectories.default,
).toEqual([]);
});
it('should have loadMemoryFromIncludeDirectories setting in schema', () => {
expect(SETTINGS_SCHEMA.loadMemoryFromIncludeDirectories).toBeDefined();
expect(SETTINGS_SCHEMA.loadMemoryFromIncludeDirectories.type).toBe(
'boolean',
);
expect(SETTINGS_SCHEMA.loadMemoryFromIncludeDirectories.category).toBe(
'General',
);
expect(SETTINGS_SCHEMA.loadMemoryFromIncludeDirectories.default).toBe(
false,
);
expect(
SETTINGS_SCHEMA.context?.properties.loadMemoryFromIncludeDirectories,
).toBeDefined();
expect(
SETTINGS_SCHEMA.context?.properties.loadMemoryFromIncludeDirectories
.type,
).toBe('boolean');
expect(
SETTINGS_SCHEMA.context?.properties.loadMemoryFromIncludeDirectories
.category,
).toBe('Context');
expect(
SETTINGS_SCHEMA.context?.properties.loadMemoryFromIncludeDirectories
.default,
).toBe(false);
});
it('should have folderTrustFeature setting in schema', () => {
expect(SETTINGS_SCHEMA.folderTrustFeature).toBeDefined();
expect(SETTINGS_SCHEMA.folderTrustFeature.type).toBe('boolean');
expect(SETTINGS_SCHEMA.folderTrustFeature.category).toBe('General');
expect(SETTINGS_SCHEMA.folderTrustFeature.default).toBe(false);
expect(SETTINGS_SCHEMA.folderTrustFeature.showInDialog).toBe(true);
expect(
SETTINGS_SCHEMA.security.properties.folderTrust.properties.enabled,
).toBeDefined();
expect(
SETTINGS_SCHEMA.security.properties.folderTrust.properties.enabled.type,
).toBe('boolean');
expect(
SETTINGS_SCHEMA.security.properties.folderTrust.properties.enabled
.category,
).toBe('Security');
expect(
SETTINGS_SCHEMA.security.properties.folderTrust.properties.enabled
.default,
).toBe(false);
expect(
SETTINGS_SCHEMA.security.properties.folderTrust.properties.enabled
.showInDialog,
).toBe(true);
});
it('should have debugKeystrokeLogging setting in schema', () => {
expect(
SETTINGS_SCHEMA.general.properties.debugKeystrokeLogging,
).toBeDefined();
expect(
SETTINGS_SCHEMA.general.properties.debugKeystrokeLogging.type,
).toBe('boolean');
expect(
SETTINGS_SCHEMA.general.properties.debugKeystrokeLogging.category,
).toBe('General');
expect(
SETTINGS_SCHEMA.general.properties.debugKeystrokeLogging.default,
).toBe(false);
expect(
SETTINGS_SCHEMA.general.properties.debugKeystrokeLogging
.requiresRestart,
).toBe(false);
expect(
SETTINGS_SCHEMA.general.properties.debugKeystrokeLogging.showInDialog,
).toBe(true);
expect(
SETTINGS_SCHEMA.general.properties.debugKeystrokeLogging.description,
).toBe('Enable debug logging of keystrokes to the console.');
});
});
});

File diff suppressed because it is too large

View File

@@ -5,7 +5,7 @@
*/
// Mock 'os' first.
import * as osActual from 'os';
import * as osActual from 'node:os';
vi.mock('os', async (importOriginal) => {
const actualOs = await importOriginal<typeof osActual>();
return {
@@ -25,9 +25,9 @@ import {
type Mocked,
type Mock,
} from 'vitest';
import * as fs from 'fs';
import * as fs from 'node:fs';
import stripJsonComments from 'strip-json-comments';
import * as path from 'path';
import * as path from 'node:path';
import {
loadTrustedFolders,
@@ -35,7 +35,7 @@ import {
TrustLevel,
isWorkspaceTrusted,
} from './trustedFolders.js';
import { Settings } from './settings.js';
import type { Settings } from './settings.js';
vi.mock('fs', async (importOriginal) => {
const actualFs = await importOriginal<typeof fs>();
@@ -132,8 +132,12 @@ describe('isWorkspaceTrusted', () => {
let mockCwd: string;
const mockRules: Record<string, TrustLevel> = {};
const mockSettings: Settings = {
folderTrustFeature: true,
folderTrust: true,
security: {
folderTrust: {
featureEnabled: true,
enabled: true,
},
},
};
beforeEach(() => {

View File

@@ -4,11 +4,11 @@
* SPDX-License-Identifier: Apache-2.0
*/
import * as fs from 'fs';
import * as path from 'path';
import { homedir } from 'os';
import * as fs from 'node:fs';
import * as path from 'node:path';
import { homedir } from 'node:os';
import { getErrorMessage, isWithinRoot } from '@qwen-code/qwen-code-core';
import { Settings } from './settings.js';
import type { Settings } from './settings.js';
import stripJsonComments from 'strip-json-comments';
export const TRUSTED_FOLDERS_FILENAME = 'trustedFolders.json';
@@ -111,8 +111,9 @@ export function saveTrustedFolders(
}
export function isWorkspaceTrusted(settings: Settings): boolean | undefined {
const folderTrustFeature = settings.folderTrustFeature ?? false;
const folderTrustSetting = settings.folderTrust ?? true;
const folderTrustFeature =
settings.security?.folderTrust?.featureEnabled ?? false;
const folderTrustSetting = settings.security?.folderTrust?.enabled ?? true;
const folderTrustEnabled = folderTrustFeature && folderTrustSetting;
if (!folderTrustEnabled) {

View File

@@ -4,19 +4,18 @@
* SPDX-License-Identifier: Apache-2.0
*/
import stripAnsi from 'strip-ansi';
import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
import {
main,
setupUnhandledRejectionHandler,
validateDnsResolutionOrder,
startInteractiveUI,
} from './gemini.js';
import {
LoadedSettings,
SettingsFile,
loadSettings,
} from './config/settings.js';
import type { SettingsFile } from './config/settings.js';
import { LoadedSettings, loadSettings } from './config/settings.js';
import { appEvents, AppEvent } from './utils/events.js';
import type { Config } from '@qwen-code/qwen-code-core';
import { FatalConfigError } from '@qwen-code/qwen-code-core';
// Custom error to identify mock process.exit calls
class MockProcessExitError extends Error {
@@ -76,7 +75,6 @@ vi.mock('./utils/sandbox.js', () => ({
}));
describe('gemini.tsx main function', () => {
let consoleErrorSpy: ReturnType<typeof vi.spyOn>;
let loadSettingsMock: ReturnType<typeof vi.mocked<typeof loadSettings>>;
let originalEnvGeminiSandbox: string | undefined;
let originalEnvSandbox: string | undefined;
@@ -98,7 +96,6 @@ describe('gemini.tsx main function', () => {
delete process.env['GEMINI_SANDBOX'];
delete process.env['SANDBOX'];
consoleErrorSpy = vi.spyOn(console, 'error').mockImplementation(() => {});
initialUnhandledRejectionListeners =
process.listeners('unhandledRejection');
});
@@ -127,7 +124,7 @@ describe('gemini.tsx main function', () => {
vi.restoreAllMocks();
});
it('should call process.exit(1) if settings have errors', async () => {
it('should throw FatalConfigError if settings have errors', async () => {
const settingsError = {
message: 'Test settings error',
path: '/test/settings.json',
@@ -144,37 +141,23 @@ describe('gemini.tsx main function', () => {
path: '/system/settings.json',
settings: {},
};
const systemDefaultsFile: SettingsFile = {
path: '/system/system-defaults.json',
settings: {},
};
const mockLoadedSettings = new LoadedSettings(
systemSettingsFile,
systemDefaultsFile,
userSettingsFile,
workspaceSettingsFile,
[settingsError],
true,
new Set(),
);
loadSettingsMock.mockReturnValue(mockLoadedSettings);
try {
await main();
// If main completes without throwing, the test should fail because process.exit was expected
expect.fail('main function did not exit as expected');
} catch (error) {
expect(error).toBeInstanceOf(MockProcessExitError);
if (error instanceof MockProcessExitError) {
expect(error.code).toBe(1);
}
}
// Verify console.error was called with the error message
expect(consoleErrorSpy).toHaveBeenCalledTimes(2);
expect(stripAnsi(String(consoleErrorSpy.mock.calls[0][0]))).toBe(
'Error in /test/settings.json: Test settings error',
);
expect(stripAnsi(String(consoleErrorSpy.mock.calls[1][0]))).toBe(
'Please fix /test/settings.json and try again.',
);
// Verify process.exit was called.
expect(processExitSpy).toHaveBeenCalledWith(1);
await expect(main()).rejects.toThrow(FatalConfigError);
});
it('should log unhandled promise rejections and open debug console on first error', async () => {
@@ -250,3 +233,100 @@ describe('validateDnsResolutionOrder', () => {
);
});
});
describe('startInteractiveUI', () => {
// Mock dependencies
const mockConfig = {
getProjectRoot: () => '/root',
getScreenReader: () => false,
} as Config;
const mockSettings = {
merged: {
ui: {
hideWindowTitle: false,
},
},
} as LoadedSettings;
const mockStartupWarnings = ['warning1'];
const mockWorkspaceRoot = '/root';
vi.mock('./utils/version.js', () => ({
getCliVersion: vi.fn(() => Promise.resolve('1.0.0')),
}));
vi.mock('./ui/utils/kittyProtocolDetector.js', () => ({
detectAndEnableKittyProtocol: vi.fn(() => Promise.resolve()),
}));
vi.mock('./ui/utils/updateCheck.js', () => ({
checkForUpdates: vi.fn(() => Promise.resolve(null)),
}));
vi.mock('./utils/cleanup.js', () => ({
cleanupCheckpoints: vi.fn(() => Promise.resolve()),
registerCleanup: vi.fn(),
}));
vi.mock('ink', () => ({
render: vi.fn().mockReturnValue({ unmount: vi.fn() }),
}));
beforeEach(() => {
vi.clearAllMocks();
});
it('should render the UI with proper React context and exitOnCtrlC disabled', async () => {
const { render } = await import('ink');
const renderSpy = vi.mocked(render);
await startInteractiveUI(
mockConfig,
mockSettings,
mockStartupWarnings,
mockWorkspaceRoot,
);
// Verify render was called with correct options
expect(renderSpy).toHaveBeenCalledTimes(1);
const [reactElement, options] = renderSpy.mock.calls[0];
// Verify render options
expect(options).toEqual({
exitOnCtrlC: false,
isScreenReaderEnabled: false,
});
// Verify React element structure is valid (but don't deep dive into JSX internals)
expect(reactElement).toBeDefined();
});
it('should perform all startup tasks in correct order', async () => {
const { getCliVersion } = await import('./utils/version.js');
const { detectAndEnableKittyProtocol } = await import(
'./ui/utils/kittyProtocolDetector.js'
);
const { checkForUpdates } = await import('./ui/utils/updateCheck.js');
const { registerCleanup } = await import('./utils/cleanup.js');
await startInteractiveUI(
mockConfig,
mockSettings,
mockStartupWarnings,
mockWorkspaceRoot,
);
// Verify all startup tasks were called
expect(getCliVersion).toHaveBeenCalledTimes(1);
expect(detectAndEnableKittyProtocol).toHaveBeenCalledTimes(1);
expect(registerCleanup).toHaveBeenCalledTimes(1);
// Verify cleanup handler is registered with unmount function
const cleanupFn = vi.mocked(registerCleanup).mock.calls[0][0];
expect(typeof cleanupFn).toBe('function');
// checkForUpdates should be called asynchronously (not waited for)
// We need a small delay to let it execute
await new Promise((resolve) => setTimeout(resolve, 0));
expect(checkForUpdates).toHaveBeenCalledTimes(1);
});
});

View File

@@ -4,49 +4,47 @@
* SPDX-License-Identifier: Apache-2.0
*/
import React from 'react';
import { render } from 'ink';
import { AppWrapper } from './ui/App.js';
import { loadCliConfig, parseArguments } from './config/config.js';
import { readStdin } from './utils/readStdin.js';
import { basename } from 'node:path';
import v8 from 'node:v8';
import os from 'node:os';
import dns from 'node:dns';
import { spawn } from 'node:child_process';
import { start_sandbox } from './utils/sandbox.js';
import type { Config } from '@qwen-code/qwen-code-core';
import {
DnsResolutionOrder,
LoadedSettings,
loadSettings,
SettingScope,
} from './config/settings.js';
import { themeManager } from './ui/themes/theme-manager.js';
import { getStartupWarnings } from './utils/startupWarnings.js';
import { getUserStartupWarnings } from './utils/userStartupWarnings.js';
import { ConsolePatcher } from './ui/utils/ConsolePatcher.js';
import { runNonInteractive } from './nonInteractiveCli.js';
import { loadExtensions } from './config/extension.js';
import { cleanupCheckpoints, registerCleanup } from './utils/cleanup.js';
import { getCliVersion } from './utils/version.js';
import {
Config,
sessionId,
logUserPrompt,
AuthType,
FatalConfigError,
getOauthClient,
logIdeConnection,
IdeConnectionEvent,
IdeConnectionType,
logIdeConnection,
logUserPrompt,
sessionId,
} from '@qwen-code/qwen-code-core';
import { render } from 'ink';
import { spawn } from 'node:child_process';
import dns from 'node:dns';
import os from 'node:os';
import { basename } from 'node:path';
import v8 from 'node:v8';
import React from 'react';
import { validateAuthMethod } from './config/auth.js';
import { loadCliConfig, parseArguments } from './config/config.js';
import { loadExtensions } from './config/extension.js';
import type { DnsResolutionOrder, LoadedSettings } from './config/settings.js';
import { loadSettings, SettingScope } from './config/settings.js';
import { runNonInteractive } from './nonInteractiveCli.js';
import { AppWrapper } from './ui/App.js';
import { setMaxSizedBoxDebugging } from './ui/components/shared/MaxSizedBox.js';
import { validateNonInteractiveAuth } from './validateNonInterActiveAuth.js';
import { SettingsContext } from './ui/contexts/SettingsContext.js';
import { themeManager } from './ui/themes/theme-manager.js';
import { ConsolePatcher } from './ui/utils/ConsolePatcher.js';
import { detectAndEnableKittyProtocol } from './ui/utils/kittyProtocolDetector.js';
import { checkForUpdates } from './ui/utils/updateCheck.js';
import { cleanupCheckpoints, registerCleanup } from './utils/cleanup.js';
import { AppEvent, appEvents } from './utils/events.js';
import { handleAutoUpdate } from './utils/handleAutoUpdate.js';
import { appEvents, AppEvent } from './utils/events.js';
import { SettingsContext } from './ui/contexts/SettingsContext.js';
import { readStdin } from './utils/readStdin.js';
import { start_sandbox } from './utils/sandbox.js';
import { getStartupWarnings } from './utils/startupWarnings.js';
import { getUserStartupWarnings } from './utils/userStartupWarnings.js';
import { getCliVersion } from './utils/version.js';
import { validateNonInteractiveAuth } from './validateNonInterActiveAuth.js';
import { runZedIntegration } from './zed-integration/zedIntegration.js';
export function validateDnsResolutionOrder(
order: string | undefined,
@@ -108,7 +106,6 @@ async function relaunchWithAdditionalArgs(additionalArgs: string[]) {
await new Promise((resolve) => child.on('close', resolve));
process.exit(0);
}
import { runZedIntegration } from './zed-integration/zedIntegration.js';
export function setupUnhandledRejectionHandler() {
let unhandledRejectionOccurred = false;
@@ -132,6 +129,44 @@ ${reason.stack}`
});
}
export async function startInteractiveUI(
config: Config,
settings: LoadedSettings,
startupWarnings: string[],
workspaceRoot: string,
) {
const version = await getCliVersion();
// Detect and enable Kitty keyboard protocol once at startup
await detectAndEnableKittyProtocol();
setWindowTitle(basename(workspaceRoot), settings);
const instance = render(
<React.StrictMode>
<SettingsContext.Provider value={settings}>
<AppWrapper
config={config}
settings={settings}
startupWarnings={startupWarnings}
version={version}
/>
</SettingsContext.Provider>
</React.StrictMode>,
{ exitOnCtrlC: false, isScreenReaderEnabled: config.getScreenReader() },
);
checkForUpdates()
.then((info) => {
handleAutoUpdate(info, settings, config.getProjectRoot());
})
.catch((err) => {
// Silently ignore update check errors.
if (config.getDebugMode()) {
console.error('Update check failed:', err);
}
});
registerCleanup(() => instance.unmount());
}
export async function main() {
setupUnhandledRejectionHandler();
const workspaceRoot = process.cwd();
@@ -139,18 +174,15 @@ export async function main() {
await cleanupCheckpoints();
if (settings.errors.length > 0) {
for (const error of settings.errors) {
let errorMessage = `Error in ${error.path}: ${error.message}`;
if (!process.env['NO_COLOR']) {
errorMessage = `\x1b[31m${errorMessage}\x1b[0m`;
}
console.error(errorMessage);
console.error(`Please fix ${error.path} and try again.`);
}
process.exit(1);
const errorMessages = settings.errors.map(
(error) => `Error in ${error.path}: ${error.message}`,
);
throw new FatalConfigError(
`${errorMessages.join('\n')}\nPlease fix the configuration file(s) and try again.`,
);
}
const argv = await parseArguments();
const argv = await parseArguments(settings.merged);
const extensions = loadExtensions(workspaceRoot);
const config = await loadCliConfig(
settings.merged,
@@ -167,7 +199,7 @@ export async function main() {
registerCleanup(consolePatcher.cleanup);
dns.setDefaultResultOrder(
validateDnsResolutionOrder(settings.merged.dnsResolutionOrder),
validateDnsResolutionOrder(settings.merged.advanced?.dnsResolutionOrder),
);
if (argv.promptInteractive && !process.stdin.isTTY) {
@@ -186,7 +218,7 @@ export async function main() {
}
// Set a default auth type if one isn't set.
if (!settings.merged.selectedAuthType) {
if (!settings.merged.security?.auth?.selectedType) {
if (process.env['CLOUD_SHELL'] === 'true') {
settings.setValue(
SettingScope.User,
@@ -195,6 +227,14 @@ export async function main() {
);
}
}
// Empty key causes issues with the GoogleGenAI package.
if (process.env['GEMINI_API_KEY']?.trim() === '') {
delete process.env['GEMINI_API_KEY'];
}
if (process.env['GOOGLE_API_KEY']?.trim() === '') {
delete process.env['GOOGLE_API_KEY'];
}
setMaxSizedBoxDebugging(config.getDebugMode());
@@ -206,40 +246,72 @@ export async function main() {
}
// Load custom themes from settings
themeManager.loadCustomThemes(settings.merged.customThemes);
themeManager.loadCustomThemes(settings.merged.ui?.customThemes);
if (settings.merged.theme) {
if (!themeManager.setActiveTheme(settings.merged.theme)) {
if (settings.merged.ui?.theme) {
if (!themeManager.setActiveTheme(settings.merged.ui?.theme)) {
// If the theme is not found during initial load, log a warning and continue.
// The useThemeCommand hook in App.tsx will handle opening the dialog.
console.warn(`Warning: Theme "${settings.merged.theme}" not found.`);
console.warn(`Warning: Theme "${settings.merged.ui?.theme}" not found.`);
}
}
// hop into sandbox if we are outside and sandboxing is enabled
if (!process.env['SANDBOX']) {
const memoryArgs = settings.merged.autoConfigureMaxOldSpaceSize
const memoryArgs = settings.merged.advanced?.autoConfigureMemory
? getNodeMemoryArgs(config)
: [];
const sandboxConfig = config.getSandbox();
if (sandboxConfig) {
if (
settings.merged.selectedAuthType &&
!settings.merged.useExternalAuth
settings.merged.security?.auth?.selectedType &&
!settings.merged.security?.auth?.useExternal
) {
// Validate authentication here because the sandbox will interfere with the Oauth2 web redirect.
try {
const err = validateAuthMethod(settings.merged.selectedAuthType);
const err = validateAuthMethod(
settings.merged.security.auth.selectedType,
);
if (err) {
throw new Error(err);
}
await config.refreshAuth(settings.merged.selectedAuthType);
await config.refreshAuth(settings.merged.security.auth.selectedType);
} catch (err) {
console.error('Error authenticating:', err);
process.exit(1);
}
}
await start_sandbox(sandboxConfig, memoryArgs, config);
let stdinData = '';
if (!process.stdin.isTTY) {
stdinData = await readStdin();
}
// This function is a copy of the one in sandbox.ts, kept here to
// decouple sandbox.ts from the CLI's argument structure.
const injectStdinIntoArgs = (
args: string[],
stdinData?: string,
): string[] => {
const finalArgs = [...args];
if (stdinData) {
const promptIndex = finalArgs.findIndex(
(arg) => arg === '--prompt' || arg === '-p',
);
if (promptIndex > -1 && finalArgs.length > promptIndex + 1) {
// If there's a prompt argument, prepend stdin to it
finalArgs[promptIndex + 1] =
`${stdinData}\n\n${finalArgs[promptIndex + 1]}`;
} else {
// If there's no prompt argument, add stdin as the prompt
finalArgs.push('--prompt', stdinData);
}
}
return finalArgs;
};
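// Illustrative behavior: with stdin 'context data' and args
//   ['node', 'qwen', '--prompt', 'summarize']
// the result is ['node', 'qwen', '--prompt', 'context data\n\nsummarize'];
// without a --prompt/-p flag, stdin is appended as ['--prompt', 'context data'].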
const sandboxArgs = injectStdinIntoArgs(process.argv, stdinData);
await start_sandbox(sandboxConfig, memoryArgs, config, sandboxArgs);
process.exit(0);
} else {
// Not in a sandbox and not entering one, so relaunch with additional
@@ -252,11 +324,12 @@ export async function main() {
}
if (
settings.merged.selectedAuthType === AuthType.LOGIN_WITH_GOOGLE &&
settings.merged.security?.auth?.selectedType ===
AuthType.LOGIN_WITH_GOOGLE &&
config.isBrowserLaunchSuppressed()
) {
// Do oauth before app renders to make copying the link possible.
await getOauthClient(settings.merged.selectedAuthType, config);
await getOauthClient(settings.merged.security.auth.selectedType, config);
}
if (config.getExperimentalZedIntegration()) {
@@ -271,36 +344,7 @@ export async function main() {
// Render UI, passing necessary config values. Check that there is no command line question.
if (config.isInteractive()) {
const version = await getCliVersion();
// Detect and enable Kitty keyboard protocol once at startup
await detectAndEnableKittyProtocol();
setWindowTitle(basename(workspaceRoot), settings);
const instance = render(
<React.StrictMode>
<SettingsContext.Provider value={settings}>
<AppWrapper
config={config}
settings={settings}
startupWarnings={startupWarnings}
version={version}
/>
</SettingsContext.Provider>
</React.StrictMode>,
{ exitOnCtrlC: false },
);
checkForUpdates()
.then((info) => {
handleAutoUpdate(info, settings, config.getProjectRoot());
})
.catch((err) => {
// Silently ignore update check errors.
if (config.getDebugMode()) {
console.error('Update check failed:', err);
}
});
registerCleanup(() => instance.unmount());
await startInteractiveUI(config, settings, startupWarnings, workspaceRoot);
return;
}
// If not a TTY, read from stdin
@@ -312,7 +356,9 @@ export async function main() {
}
}
if (!input) {
console.error('No input provided via stdin.');
console.error(
`No input provided via stdin. Input can be provided by piping data into gemini or using the --prompt option.`,
);
process.exit(1);
}
@@ -327,17 +373,21 @@ export async function main() {
});
const nonInteractiveConfig = await validateNonInteractiveAuth(
settings.merged.selectedAuthType,
settings.merged.useExternalAuth,
settings.merged.security?.auth?.selectedType,
settings.merged.security?.auth?.useExternal,
config,
);
if (config.getDebugMode()) {
console.log('Session ID: %s', sessionId);
}
await runNonInteractive(nonInteractiveConfig, input, prompt_id);
process.exit(0);
}
function setWindowTitle(title: string, settings: LoadedSettings) {
if (!settings.merged.hideWindowTitle) {
if (!settings.merged.ui?.hideWindowTitle) {
const windowTitle = (process.env['CLI_TITLE'] || `Qwen - ${title}`).replace(
// eslint-disable-next-line no-control-regex
/[\x00-\x1F\x7F]/g,

View File

@@ -5,19 +5,20 @@
*/
import {
Config,
type Config,
type ToolRegistry,
executeToolCall,
ToolRegistry,
ToolErrorType,
shutdownTelemetry,
GeminiEventType,
ServerGeminiStreamEvent,
type ServerGeminiStreamEvent,
} from '@qwen-code/qwen-code-core';
import { Part } from '@google/genai';
import { type Part } from '@google/genai';
import { runNonInteractive } from './nonInteractiveCli.js';
import { vi } from 'vitest';
// Mock core modules
vi.mock('./ui/hooks/atCommandProcessor.js');
vi.mock('@qwen-code/qwen-code-core', async (importOriginal) => {
const original =
await importOriginal<typeof import('@qwen-code/qwen-code-core')>();
@@ -35,20 +36,16 @@ describe('runNonInteractive', () => {
let mockCoreExecuteToolCall: vi.Mock;
let mockShutdownTelemetry: vi.Mock;
let consoleErrorSpy: vi.SpyInstance;
let processExitSpy: vi.SpyInstance;
let processStdoutSpy: vi.SpyInstance;
let mockGeminiClient: {
sendMessageStream: vi.Mock;
};
beforeEach(() => {
beforeEach(async () => {
mockCoreExecuteToolCall = vi.mocked(executeToolCall);
mockShutdownTelemetry = vi.mocked(shutdownTelemetry);
consoleErrorSpy = vi.spyOn(console, 'error').mockImplementation(() => {});
processExitSpy = vi
.spyOn(process, 'exit')
.mockImplementation((() => {}) as (code?: number) => never);
processStdoutSpy = vi
.spyOn(process.stdout, 'write')
.mockImplementation(() => true);
@@ -72,6 +69,14 @@ describe('runNonInteractive', () => {
getContentGeneratorConfig: vi.fn().mockReturnValue({}),
getDebugMode: vi.fn().mockReturnValue(false),
} as unknown as Config;
const { handleAtCommand } = await import(
'./ui/hooks/atCommandProcessor.js'
);
vi.mocked(handleAtCommand).mockImplementation(async ({ query }) => ({
processedQuery: [{ text: query }],
shouldProceed: true,
}));
});
afterEach(() => {
@@ -163,14 +168,16 @@ describe('runNonInteractive', () => {
mockCoreExecuteToolCall.mockResolvedValue({
error: new Error('Execution failed'),
errorType: ToolErrorType.EXECUTION_FAILED,
responseParts: {
functionResponse: {
name: 'errorTool',
response: {
output: 'Error: Execution failed',
responseParts: [
{
functionResponse: {
name: 'errorTool',
response: {
output: 'Error: Execution failed',
},
},
},
},
],
resultDisplay: 'Execution failed',
});
const finalResponse: ServerGeminiStreamEvent[] = [
@@ -189,7 +196,6 @@ describe('runNonInteractive', () => {
expect(consoleErrorSpy).toHaveBeenCalledWith(
'Error executing tool errorTool: Execution failed',
);
expect(processExitSpy).not.toHaveBeenCalled();
expect(mockGeminiClient.sendMessageStream).toHaveBeenCalledTimes(2);
expect(mockGeminiClient.sendMessageStream).toHaveBeenNthCalledWith(
2,
@@ -215,12 +221,9 @@ describe('runNonInteractive', () => {
throw apiError;
});
await runNonInteractive(mockConfig, 'Initial fail', 'prompt-id-4');
expect(consoleErrorSpy).toHaveBeenCalledWith(
'[API Error: API connection failed]',
);
expect(processExitSpy).toHaveBeenCalledWith(1);
await expect(
runNonInteractive(mockConfig, 'Initial fail', 'prompt-id-4'),
).rejects.toThrow(apiError);
});
it('should not exit if a tool is not found, and should send error back to model', async () => {
@@ -259,7 +262,6 @@ describe('runNonInteractive', () => {
expect(consoleErrorSpy).toHaveBeenCalledWith(
'Error executing tool nonexistentTool: Tool "nonexistentTool" not found in registry.',
);
expect(processExitSpy).not.toHaveBeenCalled();
expect(mockGeminiClient.sendMessageStream).toHaveBeenCalledTimes(2);
expect(processStdoutSpy).toHaveBeenCalledWith(
"Sorry, I can't find that tool.",
@@ -268,9 +270,54 @@ describe('runNonInteractive', () => {
it('should exit when max session turns are exceeded', async () => {
vi.mocked(mockConfig.getMaxSessionTurns).mockReturnValue(0);
await runNonInteractive(mockConfig, 'Trigger loop', 'prompt-id-6');
expect(consoleErrorSpy).toHaveBeenCalledWith(
'\n Reached max session turns for this session. Increase the number of turns by specifying maxSessionTurns in settings.json.',
await expect(
runNonInteractive(mockConfig, 'Trigger loop', 'prompt-id-6'),
).rejects.toThrow(
'Reached max session turns for this session. Increase the number of turns by specifying maxSessionTurns in settings.json.',
);
});
it('should preprocess @include commands before sending to the model', async () => {
// 1. Mock the imported atCommandProcessor
const { handleAtCommand } = await import(
'./ui/hooks/atCommandProcessor.js'
);
const mockHandleAtCommand = vi.mocked(handleAtCommand);
// 2. Define the raw input and the expected processed output
const rawInput = 'Summarize @file.txt';
const processedParts: Part[] = [
{ text: 'Summarize @file.txt' },
{ text: '\n--- Content from referenced files ---\n' },
{ text: 'This is the content of the file.' },
{ text: '\n--- End of content ---' },
];
// 3. Setup the mock to return the processed parts
mockHandleAtCommand.mockResolvedValue({
processedQuery: processedParts,
shouldProceed: true,
});
// Mock a simple stream response from the Gemini client
const events: ServerGeminiStreamEvent[] = [
{ type: GeminiEventType.Content, value: 'Summary complete.' },
];
mockGeminiClient.sendMessageStream.mockReturnValue(
createStreamFromEvents(events),
);
// 4. Run the non-interactive mode with the raw input
await runNonInteractive(mockConfig, rawInput, 'prompt-id-7');
// 5. Assert that sendMessageStream was called with the PROCESSED parts, not the raw input
expect(mockGeminiClient.sendMessageStream).toHaveBeenCalledWith(
processedParts,
expect.any(AbortSignal),
'prompt-id-7',
);
// 6. Assert the final output is correct
expect(processStdoutSpy).toHaveBeenCalledWith('Summary complete.');
});
});
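These tests swap `process.exit` spies for rejection assertions. The general vitest pattern, as a standalone sketch (the names here are illustrative, not from this suite):

```ts
import { describe, expect, it } from 'vitest';

// Stand-in for a runner that now throws instead of calling process.exit.
async function failingRun(): Promise<void> {
  throw new Error('Reached max session turns for this session.');
}

describe('rejection assertions', () => {
  it('awaits the rejects matcher', async () => {
    // The await matters: without it, a failing assertion rejects outside
    // the test body and surfaces as an unhandled rejection instead.
    await expect(failingRun()).rejects.toThrow('Reached max session turns');
  });
});
```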
View File
@@ -4,18 +4,20 @@
* SPDX-License-Identifier: Apache-2.0
*/
import type { Config, ToolCallRequestInfo } from '@qwen-code/qwen-code-core';
import {
Config,
ToolCallRequestInfo,
executeToolCall,
shutdownTelemetry,
isTelemetrySdkInitialized,
GeminiEventType,
parseAndFormatApiError,
FatalInputError,
FatalTurnLimitedError,
} from '@qwen-code/qwen-code-core';
import { Content, Part, FunctionCall } from '@google/genai';
import type { Content, Part } from '@google/genai';
import { ConsolePatcher } from './ui/utils/ConsolePatcher.js';
import { handleAtCommand } from './ui/hooks/atCommandProcessor.js';
export async function runNonInteractive(
config: Config,
@@ -40,9 +42,28 @@ export async function runNonInteractive(
const geminiClient = config.getGeminiClient();
const abortController = new AbortController();
const { processedQuery, shouldProceed } = await handleAtCommand({
query: input,
config,
addItem: (_item, _timestamp) => 0,
onDebugMessage: () => {},
messageId: Date.now(),
signal: abortController.signal,
});
if (!shouldProceed || !processedQuery) {
// An error occurred during @include processing (e.g., file not found).
// The error message is already logged by handleAtCommand.
throw new FatalInputError(
'Exiting due to an error processing the @ command.',
);
}
let currentMessages: Content[] = [
{ role: 'user', parts: [{ text: input }] },
{ role: 'user', parts: processedQuery as Part[] },
];
let turnCount = 0;
while (true) {
turnCount++;
@@ -50,12 +71,11 @@ export async function runNonInteractive(
config.getMaxSessionTurns() >= 0 &&
turnCount > config.getMaxSessionTurns()
) {
console.error(
'\n Reached max session turns for this session. Increase the number of turns by specifying maxSessionTurns in settings.json.',
throw new FatalTurnLimitedError(
'Reached max session turns for this session. Increase the number of turns by specifying maxSessionTurns in settings.json.',
);
return;
}
const functionCalls: FunctionCall[] = [];
const toolCallRequests: ToolCallRequestInfo[] = [];
const responseStream = geminiClient.sendMessageStream(
currentMessages[0]?.parts || [],
@@ -72,29 +92,13 @@ export async function runNonInteractive(
if (event.type === GeminiEventType.Content) {
process.stdout.write(event.value);
} else if (event.type === GeminiEventType.ToolCallRequest) {
const toolCallRequest = event.value;
const fc: FunctionCall = {
name: toolCallRequest.name,
args: toolCallRequest.args,
id: toolCallRequest.callId,
};
functionCalls.push(fc);
toolCallRequests.push(event.value);
}
}
if (functionCalls.length > 0) {
if (toolCallRequests.length > 0) {
const toolResponseParts: Part[] = [];
for (const fc of functionCalls) {
const callId = fc.id ?? `${fc.name}-${Date.now()}`;
const requestInfo: ToolCallRequestInfo = {
callId,
name: fc.name as string,
args: (fc.args ?? {}) as Record<string, unknown>,
isClientInitiated: false,
prompt_id,
};
for (const requestInfo of toolCallRequests) {
const toolResponse = await executeToolCall(
config,
requestInfo,
@@ -103,21 +107,12 @@ export async function runNonInteractive(
if (toolResponse.error) {
console.error(
`Error executing tool ${fc.name}: ${toolResponse.resultDisplay || toolResponse.error.message}`,
`Error executing tool ${requestInfo.name}: ${toolResponse.resultDisplay || toolResponse.error.message}`,
);
}
if (toolResponse.responseParts) {
const parts = Array.isArray(toolResponse.responseParts)
? toolResponse.responseParts
: [toolResponse.responseParts];
for (const part of parts) {
if (typeof part === 'string') {
toolResponseParts.push({ text: part });
} else if (part) {
toolResponseParts.push(part);
}
}
toolResponseParts.push(...toolResponse.responseParts);
}
}
currentMessages = [{ role: 'user', parts: toolResponseParts }];
@@ -133,7 +128,7 @@ export async function runNonInteractive(
config.getContentGeneratorConfig()?.authType,
),
);
process.exit(1);
throw error;
} finally {
consolePatcher.cleanup();
if (isTelemetrySdkInitialized()) {
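With `process.exit(1)` removed from the runner, mapping fatal errors to exit codes becomes the entry point's job. A minimal sketch of such a top-level handler, assuming the `Fatal*` classes are plain `Error` subclasses (their real shape, e.g. a per-class exit code, is not shown in this diff):

```ts
import {
  FatalInputError,
  FatalTurnLimitedError,
} from '@qwen-code/qwen-code-core';

// Illustrative wrapper: the process exits in exactly one place, and
// the library code below it only ever throws.
async function runAndExit(run: () => Promise<void>): Promise<never> {
  try {
    await run();
    process.exit(0);
  } catch (error) {
    if (
      error instanceof FatalInputError ||
      error instanceof FatalTurnLimitedError
    ) {
      console.error(error.message);
      process.exit(1); // assumed mapping; real code may use per-error codes
    }
    throw error; // unexpected errors keep their stack trace
  }
}
```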
View File
@@ -22,7 +22,7 @@ vi.mock('../ui/commands/restoreCommand.js', () => ({
import { describe, it, expect, vi, beforeEach, type Mock } from 'vitest';
import { BuiltinCommandLoader } from './BuiltinCommandLoader.js';
import { Config } from '@qwen-code/qwen-code-core';
import type { Config } from '@qwen-code/qwen-code-core';
import { CommandKind } from '../ui/commands/types.js';
import { ideCommand } from '../ui/commands/ideCommand.js';
View File
@@ -4,9 +4,9 @@
* SPDX-License-Identifier: Apache-2.0
*/
import { ICommandLoader } from './types.js';
import { SlashCommand } from '../ui/commands/types.js';
import { Config } from '@qwen-code/qwen-code-core';
import type { ICommandLoader } from './types.js';
import type { SlashCommand } from '../ui/commands/types.js';
import type { Config } from '@qwen-code/qwen-code-core';
import { aboutCommand } from '../ui/commands/aboutCommand.js';
import { authCommand } from '../ui/commands/authCommand.js';
import { bugCommand } from '../ui/commands/bugCommand.js';
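Several hunks in this batch convert value imports to type-only imports. The distinction, in two lines of generic TypeScript:

```ts
// Erased at compile time: no runtime require/import is emitted, which
// avoids pulling in module side effects or creating runtime cycles.
import type { Config } from '@qwen-code/qwen-code-core';

// Kept at runtime, because the binding is used as a value.
import { executeToolCall } from '@qwen-code/qwen-code-core';
```

Under `verbatimModuleSyntax` (or the older `isolatedModules` setup), the explicit `type` keyword also lets single-file transpilers drop the import without whole-program type information.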
View File
@@ -4,8 +4,8 @@
* SPDX-License-Identifier: Apache-2.0
*/
import { SlashCommand } from '../ui/commands/types.js';
import { ICommandLoader } from './types.js';
import type { SlashCommand } from '../ui/commands/types.js';
import type { ICommandLoader } from './types.js';
/**
* Orchestrates the discovery and loading of all slash commands for the CLI.
View File
@@ -5,11 +5,8 @@
*/
import * as path from 'node:path';
import {
Config,
getProjectCommandsDir,
getUserCommandsDir,
} from '@qwen-code/qwen-code-core';
import type { Config } from '@qwen-code/qwen-code-core';
import { Storage } from '@qwen-code/qwen-code-core';
import mock from 'mock-fs';
import { FileCommandLoader } from './FileCommandLoader.js';
import { assert, vi } from 'vitest';
@@ -17,15 +14,23 @@ import { createMockCommandContext } from '../test-utils/mockCommandContext.js';
import {
SHELL_INJECTION_TRIGGER,
SHORTHAND_ARGS_PLACEHOLDER,
type PromptPipelineContent,
} from './prompt-processors/types.js';
import {
ConfirmationRequiredError,
ShellProcessor,
} from './prompt-processors/shellProcessor.js';
import { DefaultArgumentProcessor } from './prompt-processors/argumentProcessor.js';
import { CommandContext } from '../ui/commands/types.js';
import type { CommandContext } from '../ui/commands/types.js';
import { AtFileProcessor } from './prompt-processors/atFileProcessor.js';
const mockShellProcess = vi.hoisted(() => vi.fn());
const mockAtFileProcess = vi.hoisted(() => vi.fn());
vi.mock('./prompt-processors/atFileProcessor.js', () => ({
AtFileProcessor: vi.fn().mockImplementation(() => ({
process: mockAtFileProcess,
})),
}));
vi.mock('./prompt-processors/shellProcessor.js', () => ({
ShellProcessor: vi.fn().mockImplementation(() => ({
process: mockShellProcess,
@@ -57,6 +62,7 @@ vi.mock('@qwen-code/qwen-code-core', async (importOriginal) => {
await importOriginal<typeof import('@qwen-code/qwen-code-core')>();
return {
...original,
Storage: original.Storage,
isCommandAllowed: vi.fn(),
ShellExecutionService: {
execute: vi.fn(),
@@ -70,15 +76,28 @@ describe('FileCommandLoader', () => {
beforeEach(() => {
vi.clearAllMocks();
mockShellProcess.mockImplementation(
(prompt: string, context: CommandContext) => {
(prompt: PromptPipelineContent, context: CommandContext) => {
const userArgsRaw = context?.invocation?.args || '';
const processedPrompt = prompt.replaceAll(
// This is a simplified mock. A real implementation would need to iterate
// through all parts and process only the text parts.
const firstTextPart = prompt.find(
(p) => typeof p === 'string' || 'text' in p,
);
let textContent = '';
if (typeof firstTextPart === 'string') {
textContent = firstTextPart;
} else if (firstTextPart && 'text' in firstTextPart) {
textContent = firstTextPart.text ?? '';
}
const processedText = textContent.replaceAll(
SHORTHAND_ARGS_PLACEHOLDER,
userArgsRaw,
);
return Promise.resolve(processedPrompt);
return Promise.resolve([{ text: processedText }]);
},
);
mockAtFileProcess.mockImplementation(async (prompt: string) => prompt);
});
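The shell mock above rewrites only the first text part, as its own comment concedes. A fuller helper would map the transform over every textual part; a sketch, with the part shape inferred from this mock rather than from the real `PromptPipelineContent` definition:

```ts
type PartLike = string | { text?: string; [key: string]: unknown };

// Apply a string transform to each textual part; pass other part kinds
// (function responses, inline data, ...) through untouched.
function mapTextParts(
  parts: PartLike[],
  transform: (text: string) => string,
): PartLike[] {
  return parts.map((part) => {
    if (typeof part === 'string') {
      return { text: transform(part) };
    }
    if (typeof part.text === 'string') {
      return { ...part, text: transform(part.text) };
    }
    return part;
  });
}
```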
afterEach(() => {
@@ -86,7 +105,7 @@ describe('FileCommandLoader', () => {
});
it('loads a single command from a file', async () => {
const userCommandsDir = getUserCommandsDir();
const userCommandsDir = Storage.getUserCommandsDir();
mock({
[userCommandsDir]: {
'test.toml': 'prompt = "This is a test prompt"',
@@ -112,7 +131,7 @@ describe('FileCommandLoader', () => {
'',
);
if (result?.type === 'submit_prompt') {
expect(result.content).toBe('This is a test prompt');
expect(result.content).toEqual([{ text: 'This is a test prompt' }]);
} else {
assert.fail('Incorrect action type');
}
@@ -127,7 +146,7 @@ describe('FileCommandLoader', () => {
itif(process.platform !== 'win32')(
'loads commands from a symlinked directory',
async () => {
const userCommandsDir = getUserCommandsDir();
const userCommandsDir = Storage.getUserCommandsDir();
const realCommandsDir = '/real/commands';
mock({
[realCommandsDir]: {
@@ -152,7 +171,7 @@ describe('FileCommandLoader', () => {
itif(process.platform !== 'win32')(
'loads commands from a symlinked subdirectory',
async () => {
const userCommandsDir = getUserCommandsDir();
const userCommandsDir = Storage.getUserCommandsDir();
const realNamespacedDir = '/real/namespaced-commands';
mock({
[userCommandsDir]: {
@@ -176,7 +195,7 @@ describe('FileCommandLoader', () => {
);
it('loads multiple commands', async () => {
const userCommandsDir = getUserCommandsDir();
const userCommandsDir = Storage.getUserCommandsDir();
mock({
[userCommandsDir]: {
'test1.toml': 'prompt = "Prompt 1"',
@@ -191,7 +210,7 @@ describe('FileCommandLoader', () => {
});
it('creates deeply nested namespaces correctly', async () => {
const userCommandsDir = getUserCommandsDir();
const userCommandsDir = Storage.getUserCommandsDir();
mock({
[userCommandsDir]: {
@@ -213,7 +232,7 @@ describe('FileCommandLoader', () => {
});
it('creates namespaces from nested directories', async () => {
const userCommandsDir = getUserCommandsDir();
const userCommandsDir = Storage.getUserCommandsDir();
mock({
[userCommandsDir]: {
git: {
@@ -232,8 +251,10 @@ describe('FileCommandLoader', () => {
});
it('returns both user and project commands in order', async () => {
const userCommandsDir = getUserCommandsDir();
const projectCommandsDir = getProjectCommandsDir(process.cwd());
const userCommandsDir = Storage.getUserCommandsDir();
const projectCommandsDir = new Storage(
process.cwd(),
).getProjectCommandsDir();
mock({
[userCommandsDir]: {
'test.toml': 'prompt = "User prompt"',
@@ -262,7 +283,7 @@ describe('FileCommandLoader', () => {
'',
);
if (userResult?.type === 'submit_prompt') {
expect(userResult.content).toBe('User prompt');
expect(userResult.content).toEqual([{ text: 'User prompt' }]);
} else {
assert.fail('Incorrect action type for user command');
}
@@ -277,14 +298,14 @@ describe('FileCommandLoader', () => {
'',
);
if (projectResult?.type === 'submit_prompt') {
expect(projectResult.content).toBe('Project prompt');
expect(projectResult.content).toEqual([{ text: 'Project prompt' }]);
} else {
assert.fail('Incorrect action type for project command');
}
});
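These call-site changes imply the path helpers moved from free functions onto a `Storage` class: user-level directories via a static method, project-level directories via an instance rooted at a working directory. A reconstruction of that shape from usage alone — the `.qwen` segment and field names are guesses, not the package's internals:

```ts
import * as os from 'node:os';
import * as path from 'node:path';

// Shape reconstructed from the call sites in this diff; the directory
// layout below is an assumption for illustration only.
class Storage {
  constructor(private readonly targetDir: string) {}

  static getUserCommandsDir(): string {
    return path.join(os.homedir(), '.qwen', 'commands');
  }

  getProjectCommandsDir(): string {
    return path.join(this.targetDir, '.qwen', 'commands');
  }
}
```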
it('ignores files with TOML syntax errors', async () => {
const userCommandsDir = getUserCommandsDir();
const userCommandsDir = Storage.getUserCommandsDir();
mock({
[userCommandsDir]: {
'invalid.toml': 'this is not valid toml',
@@ -300,7 +321,7 @@ describe('FileCommandLoader', () => {
});
it('ignores files that are semantically invalid (missing prompt)', async () => {
const userCommandsDir = getUserCommandsDir();
const userCommandsDir = Storage.getUserCommandsDir();
mock({
[userCommandsDir]: {
'no_prompt.toml': 'description = "This file is missing a prompt"',
@@ -316,7 +337,7 @@ describe('FileCommandLoader', () => {
});
it('handles filename edge cases correctly', async () => {
const userCommandsDir = getUserCommandsDir();
const userCommandsDir = Storage.getUserCommandsDir();
mock({
[userCommandsDir]: {
'test.v1.toml': 'prompt = "Test prompt"',
@@ -338,7 +359,7 @@ describe('FileCommandLoader', () => {
});
it('uses a default description if not provided', async () => {
const userCommandsDir = getUserCommandsDir();
const userCommandsDir = Storage.getUserCommandsDir();
mock({
[userCommandsDir]: {
'test.toml': 'prompt = "Test prompt"',
@@ -353,7 +374,7 @@ describe('FileCommandLoader', () => {
});
it('uses the provided description', async () => {
const userCommandsDir = getUserCommandsDir();
const userCommandsDir = Storage.getUserCommandsDir();
mock({
[userCommandsDir]: {
'test.toml': 'prompt = "Test prompt"\ndescription = "My test command"',
@@ -368,7 +389,7 @@ describe('FileCommandLoader', () => {
});
it('should sanitize colons in filenames to prevent namespace conflicts', async () => {
const userCommandsDir = getUserCommandsDir();
const userCommandsDir = Storage.getUserCommandsDir();
mock({
[userCommandsDir]: {
'legacy:command.toml': 'prompt = "This is a legacy command"',
@@ -388,7 +409,7 @@ describe('FileCommandLoader', () => {
describe('Processor Instantiation Logic', () => {
it('instantiates only DefaultArgumentProcessor if no {{args}} or !{} are present', async () => {
const userCommandsDir = getUserCommandsDir();
const userCommandsDir = Storage.getUserCommandsDir();
mock({
[userCommandsDir]: {
'simple.toml': `prompt = "Just a regular prompt"`,
@@ -403,7 +424,7 @@ describe('FileCommandLoader', () => {
});
it('instantiates only ShellProcessor if {{args}} is present (but not !{})', async () => {
const userCommandsDir = getUserCommandsDir();
const userCommandsDir = Storage.getUserCommandsDir();
mock({
[userCommandsDir]: {
'args.toml': `prompt = "Prompt with {{args}}"`,
@@ -418,7 +439,7 @@ describe('FileCommandLoader', () => {
});
it('instantiates ShellProcessor and DefaultArgumentProcessor if !{} is present (but not {{args}})', async () => {
const userCommandsDir = getUserCommandsDir();
const userCommandsDir = Storage.getUserCommandsDir();
mock({
[userCommandsDir]: {
'shell.toml': `prompt = "Prompt with !{cmd}"`,
@@ -433,7 +454,7 @@ describe('FileCommandLoader', () => {
});
it('instantiates only ShellProcessor if both {{args}} and !{} are present', async () => {
const userCommandsDir = getUserCommandsDir();
const userCommandsDir = Storage.getUserCommandsDir();
mock({
[userCommandsDir]: {
'both.toml': `prompt = "Prompt with {{args}} and !{cmd}"`,
@@ -446,12 +467,62 @@ describe('FileCommandLoader', () => {
expect(ShellProcessor).toHaveBeenCalledTimes(1);
expect(DefaultArgumentProcessor).not.toHaveBeenCalled();
});
it('instantiates AtFileProcessor and DefaultArgumentProcessor if @{} is present', async () => {
const userCommandsDir = Storage.getUserCommandsDir();
mock({
[userCommandsDir]: {
'at-file.toml': `prompt = "Context: @{./my-file.txt}"`,
},
});
const loader = new FileCommandLoader(null as unknown as Config);
await loader.loadCommands(signal);
expect(AtFileProcessor).toHaveBeenCalledTimes(1);
expect(ShellProcessor).not.toHaveBeenCalled();
expect(DefaultArgumentProcessor).toHaveBeenCalledTimes(1);
});
it('instantiates ShellProcessor and AtFileProcessor if !{} and @{} are present', async () => {
const userCommandsDir = Storage.getUserCommandsDir();
mock({
[userCommandsDir]: {
'shell-and-at.toml': `prompt = "Run !{cmd} with @{file.txt}"`,
},
});
const loader = new FileCommandLoader(null as unknown as Config);
await loader.loadCommands(signal);
expect(ShellProcessor).toHaveBeenCalledTimes(1);
expect(AtFileProcessor).toHaveBeenCalledTimes(1);
expect(DefaultArgumentProcessor).toHaveBeenCalledTimes(1); // because no {{args}}
});
it('instantiates only ShellProcessor and AtFileProcessor if {{args}} and @{} are present', async () => {
const userCommandsDir = Storage.getUserCommandsDir();
mock({
[userCommandsDir]: {
'args-and-at.toml': `prompt = "Run {{args}} with @{file.txt}"`,
},
});
const loader = new FileCommandLoader(null as unknown as Config);
await loader.loadCommands(signal);
expect(ShellProcessor).toHaveBeenCalledTimes(1);
expect(AtFileProcessor).toHaveBeenCalledTimes(1);
expect(DefaultArgumentProcessor).not.toHaveBeenCalled();
});
});
describe('Extension Command Loading', () => {
it('loads commands from active extensions', async () => {
const userCommandsDir = getUserCommandsDir();
const projectCommandsDir = getProjectCommandsDir(process.cwd());
const userCommandsDir = Storage.getUserCommandsDir();
const projectCommandsDir = new Storage(
process.cwd(),
).getProjectCommandsDir();
const extensionDir = path.join(
process.cwd(),
'.gemini/extensions/test-ext',
@@ -499,8 +570,10 @@ describe('FileCommandLoader', () => {
});
it('extension commands have extensionName metadata for conflict resolution', async () => {
const userCommandsDir = getUserCommandsDir();
const projectCommandsDir = getProjectCommandsDir(process.cwd());
const userCommandsDir = Storage.getUserCommandsDir();
const projectCommandsDir = new Storage(
process.cwd(),
).getProjectCommandsDir();
const extensionDir = path.join(
process.cwd(),
'.gemini/extensions/test-ext',
@@ -555,7 +628,7 @@ describe('FileCommandLoader', () => {
);
expect(result0?.type).toBe('submit_prompt');
if (result0?.type === 'submit_prompt') {
expect(result0.content).toBe('User deploy command');
expect(result0.content).toEqual([{ text: 'User deploy command' }]);
}
expect(commands[1].name).toBe('deploy');
@@ -572,7 +645,7 @@ describe('FileCommandLoader', () => {
);
expect(result1?.type).toBe('submit_prompt');
if (result1?.type === 'submit_prompt') {
expect(result1.content).toBe('Project deploy command');
expect(result1.content).toEqual([{ text: 'Project deploy command' }]);
}
expect(commands[2].name).toBe('deploy');
@@ -590,7 +663,7 @@ describe('FileCommandLoader', () => {
);
expect(result2?.type).toBe('submit_prompt');
if (result2?.type === 'submit_prompt') {
expect(result2.content).toBe('Extension deploy command');
expect(result2.content).toEqual([{ text: 'Extension deploy command' }]);
}
});
@@ -733,7 +806,9 @@ describe('FileCommandLoader', () => {
'',
);
if (result?.type === 'submit_prompt') {
expect(result.content).toBe('Nested command from extension a');
expect(result.content).toEqual([
{ text: 'Nested command from extension a' },
]);
} else {
assert.fail('Incorrect action type');
}
@@ -742,7 +817,7 @@ describe('FileCommandLoader', () => {
describe('Argument Handling Integration (via ShellProcessor)', () => {
it('correctly processes a command with {{args}}', async () => {
const userCommandsDir = getUserCommandsDir();
const userCommandsDir = Storage.getUserCommandsDir();
mock({
[userCommandsDir]: {
'shorthand.toml':
@@ -767,14 +842,16 @@ describe('FileCommandLoader', () => {
);
expect(result?.type).toBe('submit_prompt');
if (result?.type === 'submit_prompt') {
expect(result.content).toBe('The user wants to: do something cool');
expect(result.content).toEqual([
{ text: 'The user wants to: do something cool' },
]);
}
});
});
describe('Default Argument Processor Integration', () => {
it('correctly processes a command without {{args}}', async () => {
const userCommandsDir = getUserCommandsDir();
const userCommandsDir = Storage.getUserCommandsDir();
mock({
[userCommandsDir]: {
'model_led.toml':
@@ -801,14 +878,14 @@ describe('FileCommandLoader', () => {
if (result?.type === 'submit_prompt') {
const expectedContent =
'This is the instruction.\n\n/model_led 1.2.0 added "a feature"';
expect(result.content).toBe(expectedContent);
expect(result.content).toEqual([{ text: expectedContent }]);
}
});
});
describe('Shell Processor Integration', () => {
it('instantiates ShellProcessor if {{args}} is present (even without shell trigger)', async () => {
const userCommandsDir = getUserCommandsDir();
const userCommandsDir = Storage.getUserCommandsDir();
mock({
[userCommandsDir]: {
'args_only.toml': `prompt = "Hello {{args}}"`,
@@ -821,7 +898,7 @@ describe('FileCommandLoader', () => {
expect(ShellProcessor).toHaveBeenCalledWith('args_only');
});
it('instantiates ShellProcessor if the trigger is present', async () => {
const userCommandsDir = getUserCommandsDir();
const userCommandsDir = Storage.getUserCommandsDir();
mock({
[userCommandsDir]: {
'shell.toml': `prompt = "Run this: ${SHELL_INJECTION_TRIGGER}echo hello}"`,
@@ -835,7 +912,7 @@ describe('FileCommandLoader', () => {
});
it('does not instantiate ShellProcessor if no triggers ({{args}} or !{}) are present', async () => {
const userCommandsDir = getUserCommandsDir();
const userCommandsDir = Storage.getUserCommandsDir();
mock({
[userCommandsDir]: {
'regular.toml': `prompt = "Just a regular prompt"`,
@@ -849,13 +926,13 @@ describe('FileCommandLoader', () => {
});
it('returns a "submit_prompt" action if shell processing succeeds', async () => {
const userCommandsDir = getUserCommandsDir();
const userCommandsDir = Storage.getUserCommandsDir();
mock({
[userCommandsDir]: {
'shell.toml': `prompt = "Run !{echo 'hello'}"`,
},
});
mockShellProcess.mockResolvedValue('Run hello');
mockShellProcess.mockResolvedValue([{ text: 'Run hello' }]);
const loader = new FileCommandLoader(null as unknown as Config);
const commands = await loader.loadCommands(signal);
@@ -871,12 +948,12 @@ describe('FileCommandLoader', () => {
expect(result?.type).toBe('submit_prompt');
if (result?.type === 'submit_prompt') {
expect(result.content).toBe('Run hello');
expect(result.content).toEqual([{ text: 'Run hello' }]);
}
});
it('returns a "confirm_shell_commands" action if shell processing requires it', async () => {
const userCommandsDir = getUserCommandsDir();
const userCommandsDir = Storage.getUserCommandsDir();
const rawInvocation = '/shell rm -rf /';
mock({
[userCommandsDir]: {
@@ -910,7 +987,7 @@ describe('FileCommandLoader', () => {
});
it('re-throws other errors from the processor', async () => {
const userCommandsDir = getUserCommandsDir();
const userCommandsDir = Storage.getUserCommandsDir();
mock({
[userCommandsDir]: {
'shell.toml': `prompt = "Run !{something}"`,
@@ -934,23 +1011,36 @@ describe('FileCommandLoader', () => {
),
).rejects.toThrow('Something else went wrong');
});
it('assembles the processor pipeline in the correct order (Shell -> Default)', async () => {
const userCommandsDir = getUserCommandsDir();
it('assembles the processor pipeline in the correct order (AtFile -> Shell -> Default)', async () => {
const userCommandsDir = Storage.getUserCommandsDir();
mock({
[userCommandsDir]: {
// This prompt uses !{} but NOT {{args}}, so both processors should be active.
// This prompt uses !{}, @{}, but NOT {{args}}, so all processors should be active.
'pipeline.toml': `
prompt = "Shell says: ${SHELL_INJECTION_TRIGGER}echo foo}."
prompt = "Shell says: !{echo foo}. File says: @{./bar.txt}"
`,
},
'./bar.txt': 'bar content',
});
const defaultProcessMock = vi
.fn()
.mockImplementation((p) => Promise.resolve(`${p}-default-processed`));
.mockImplementation((p: PromptPipelineContent) =>
Promise.resolve([
{ text: `${(p[0] as { text: string }).text}-default-processed` },
]),
);
mockShellProcess.mockImplementation((p) =>
Promise.resolve(`${p}-shell-processed`),
mockShellProcess.mockImplementation((p: PromptPipelineContent) =>
Promise.resolve([
{ text: `${(p[0] as { text: string }).text}-shell-processed` },
]),
);
mockAtFileProcess.mockImplementation((p: PromptPipelineContent) =>
Promise.resolve([
{ text: `${(p[0] as { text: string }).text}-at-file-processed` },
]),
);
vi.mocked(DefaultArgumentProcessor).mockImplementation(
@@ -968,35 +1058,115 @@ describe('FileCommandLoader', () => {
const result = await command!.action!(
createMockCommandContext({
invocation: {
raw: '/pipeline bar',
raw: '/pipeline baz',
name: 'pipeline',
args: 'bar',
args: 'baz',
},
}),
'bar',
'baz',
);
expect(mockAtFileProcess.mock.invocationCallOrder[0]).toBeLessThan(
mockShellProcess.mock.invocationCallOrder[0],
);
expect(mockShellProcess.mock.invocationCallOrder[0]).toBeLessThan(
defaultProcessMock.mock.invocationCallOrder[0],
);
// Verify the flow of the prompt through the processors
// 1. Shell processor runs first
expect(mockShellProcess).toHaveBeenCalledWith(
expect.stringContaining(SHELL_INJECTION_TRIGGER),
// 1. AtFile processor runs first
expect(mockAtFileProcess).toHaveBeenCalledWith(
[{ text: expect.stringContaining('@{./bar.txt}') }],
expect.any(Object),
);
// 2. Default processor runs second
// 2. Shell processor runs second
expect(mockShellProcess).toHaveBeenCalledWith(
[{ text: expect.stringContaining('-at-file-processed') }],
expect.any(Object),
);
// 3. Default processor runs third
expect(defaultProcessMock).toHaveBeenCalledWith(
expect.stringContaining('-shell-processed'),
[{ text: expect.stringContaining('-shell-processed') }],
expect.any(Object),
);
if (result?.type === 'submit_prompt') {
expect(result.content).toContain('-shell-processed-default-processed');
const contentAsArray = Array.isArray(result.content)
? result.content
: [result.content];
expect(contentAsArray.length).toBeGreaterThan(0);
const firstPart = contentAsArray[0];
if (typeof firstPart === 'object' && firstPart && 'text' in firstPart) {
expect(firstPart.text).toContain(
'-at-file-processed-shell-processed-default-processed',
);
} else {
assert.fail(
'First part of content is not a text part or is a string',
);
}
} else {
assert.fail('Incorrect action type');
}
});
});
describe('@-file Processor Integration', () => {
it('correctly processes a command with @{file}', async () => {
const userCommandsDir = Storage.getUserCommandsDir();
mock({
[userCommandsDir]: {
'at-file.toml':
'prompt = "Context from file: @{./test.txt}"\ndescription = "@-file test"',
},
'./test.txt': 'file content',
});
mockAtFileProcess.mockImplementation(
async (prompt: PromptPipelineContent) => {
// A simplified mock of AtFileProcessor's behavior
const textContent = (prompt[0] as { text: string }).text;
if (textContent.includes('@{./test.txt}')) {
return [
{
text: textContent.replace('@{./test.txt}', 'file content'),
},
];
}
return prompt;
},
);
// Prevent default processor from interfering
vi.mocked(DefaultArgumentProcessor).mockImplementation(
() =>
({
process: (p: PromptPipelineContent) => Promise.resolve(p),
}) as unknown as DefaultArgumentProcessor,
);
const loader = new FileCommandLoader(null as unknown as Config);
const commands = await loader.loadCommands(signal);
const command = commands.find((c) => c.name === 'at-file');
expect(command).toBeDefined();
const result = await command!.action?.(
createMockCommandContext({
invocation: {
raw: '/at-file',
name: 'at-file',
args: '',
},
}),
'',
);
expect(result?.type).toBe('submit_prompt');
if (result?.type === 'submit_prompt') {
expect(result.content).toEqual([
{ text: 'Context from file: file content' },
]);
}
});
});
});
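The ordering assertions above pin the pipeline to AtFile -> Shell -> Default. The pipeline itself is just a sequential fold over processors; a minimal sketch under the same inferred part shape (the interface and type names here are assumptions):

```ts
type PromptPipelineContent = Array<string | { text?: string }>;

interface PromptProcessor {
  process(
    prompt: PromptPipelineContent,
    context: unknown,
  ): Promise<PromptPipelineContent>;
}

// Feed each processor the previous processor's output, in order.
async function runPipeline(
  processors: PromptProcessor[],
  initial: PromptPipelineContent,
  context: unknown,
): Promise<PromptPipelineContent> {
  let current = initial;
  for (const processor of processors) {
    current = await processor.process(current, context);
  }
  return current;
}
```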
Some files were not shown because too many files have changed in this diff.