diff --git a/.gcp/release-docker.yaml b/.gcp/release-docker.yml similarity index 95% rename from .gcp/release-docker.yaml rename to .gcp/release-docker.yml index 59220b8d..f413da5b 100644 --- a/.gcp/release-docker.yaml +++ b/.gcp/release-docker.yml @@ -22,8 +22,8 @@ steps: id: 'Determine Docker Image Tag' entrypoint: 'bash' args: - - -c - - | + - '-c' + - |- SHELL_TAG_NAME="$TAG_NAME" FINAL_TAG="$SHORT_SHA" # Default to SHA if [[ "$$SHELL_TAG_NAME" == *"-nightly"* ]]; then @@ -44,8 +44,8 @@ steps: id: 'Build sandbox Docker image' entrypoint: 'bash' args: - - -c - - | + - '-c' + - |- export GEMINI_SANDBOX_IMAGE_TAG=$$(cat /workspace/image_tag.txt) echo "Using Docker image tag for build: $$GEMINI_SANDBOX_IMAGE_TAG" npm run build:sandbox -- --output-file /workspace/final_image_uri.txt @@ -57,8 +57,8 @@ steps: id: 'Publish sandbox Docker image' entrypoint: 'bash' args: - - -c - - | + - '-c' + - |- set -e FINAL_IMAGE_URI=$$(cat /workspace/final_image_uri.txt) @@ -68,7 +68,7 @@ steps: - 'GEMINI_SANDBOX=$_CONTAINER_TOOL' options: - defaultLogsBucketBehavior: REGIONAL_USER_OWNED_BUCKET + defaultLogsBucketBehavior: 'REGIONAL_USER_OWNED_BUCKET' dynamicSubstitutions: true substitutions: diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml index dbaad445..0779e966 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.yml +++ b/.github/ISSUE_TEMPLATE/bug_report.yml @@ -2,31 +2,31 @@ name: Bug Report description: Report a bug to help us improve Qwen Code labels: ['kind/bug', 'status/need-triage'] body: - - type: markdown + - type: 'markdown' attributes: - value: | - > [!IMPORTANT] + value: |- + > [!IMPORTANT] > Thanks for taking the time to fill out this bug report! > > Please search **[existing issues](https://github.com/QwenLM/qwen-code/issues)** to see if an issue already exists for the bug you encountered. - - type: textarea - id: problem + - type: 'textarea' + id: 'problem' attributes: - label: What happened? 
- description: A clear and concise description of what the bug is. + label: 'What happened?' + description: 'A clear and concise description of what the bug is.' validations: required: true - - type: textarea - id: expected + - type: 'textarea' + id: 'expected' attributes: - label: What did you expect to happen? + label: 'What did you expect to happen?' validations: required: true - - type: textarea - id: info + - type: 'textarea' + id: 'info' attributes: label: Client information description: Please paste the full text from the `/about` command run from Qwen Code. Also include which platform (macOS, Windows, Linux). @@ -42,14 +42,14 @@ body: validations: required: true - - type: textarea - id: login-info + - type: 'textarea' + id: 'login-info' attributes: label: Login information description: Describe how you are logging in (e.g., API Config). - - type: textarea - id: additional-context + - type: 'textarea' + id: 'additional-context' attributes: - label: Anything else we need to know? - description: Add any other context about the problem here. + label: 'Anything else we need to know?' + description: 'Add any other context about the problem here.' diff --git a/.github/ISSUE_TEMPLATE/feature_request.yml b/.github/ISSUE_TEMPLATE/feature_request.yml index af423290..febece87 100644 --- a/.github/ISSUE_TEMPLATE/feature_request.yml +++ b/.github/ISSUE_TEMPLATE/feature_request.yml @@ -1,33 +1,35 @@ -name: Feature Request -description: Suggest an idea for this project -labels: ['kind/enhancement', 'status/need-triage'] +name: 'Feature Request' +description: 'Suggest an idea for this project' +labels: + - 'kind/enhancement' + - 'status/need-triage' body: - - type: markdown + - type: 'markdown' attributes: - value: | - > [!IMPORTANT] + value: |- + > [!IMPORTANT] > Thanks for taking the time to suggest an enhancement! > > Please search **[existing issues](https://github.com/QwenLM/qwen-code/issues)** to see if a similar feature has already been requested. 
- - type: textarea - id: feature + - type: 'textarea' + id: 'feature' attributes: - label: What would you like to be added? - description: A clear and concise description of the enhancement. + label: 'What would you like to be added?' + description: 'A clear and concise description of the enhancement.' validations: required: true - - type: textarea - id: rationale + - type: 'textarea' + id: 'rationale' attributes: - label: Why is this needed? - description: A clear and concise description of why this enhancement is needed. + label: 'Why is this needed?' + description: 'A clear and concise description of why this enhancement is needed.' validations: required: true - - type: textarea - id: additional-context + - type: 'textarea' + id: 'additional-context' attributes: - label: Additional context - description: Add any other context or screenshots about the feature request here. + label: 'Additional context' + description: 'Add any other context or screenshots about the feature request here.' 
diff --git a/.github/actions/post-coverage-comment/action.yml b/.github/actions/post-coverage-comment/action.yml index 10a4afeb..6862e6be 100644 --- a/.github/actions/post-coverage-comment/action.yml +++ b/.github/actions/post-coverage-comment/action.yml @@ -27,79 +27,88 @@ inputs: runs: using: 'composite' steps: - - name: Prepare Coverage Comment - id: prep_coverage_comment - shell: bash - run: | - cli_json_file="${{ inputs.cli_json_file }}" - core_json_file="${{ inputs.core_json_file }}" - cli_full_text_summary_file="${{ inputs.cli_full_text_summary_file }}" - core_full_text_summary_file="${{ inputs.core_full_text_summary_file }}" - comment_file="coverage-comment.md" - + - name: 'Prepare Coverage Comment' + id: 'prep_coverage_comment' + shell: 'bash' + env: + CLI_JSON_FILE: '${{ inputs.cli_json_file }}' + CORE_JSON_FILE: '${{ inputs.core_json_file }}' + CLI_FULL_TEXT_SUMMARY_FILE: '${{ inputs.cli_full_text_summary_file }}' + CORE_FULL_TEXT_SUMMARY_FILE: '${{ inputs.core_full_text_summary_file }}' + COMMENT_FILE: 'coverage-comment.md' + NODE_VERSION: '${{ inputs.node_version }}' + OS: '${{ inputs.os }}' + run: |- # Extract percentages using jq for the main table - if [ -f "$cli_json_file" ]; then - cli_lines_pct=$(jq -r '.total.lines.pct' "$cli_json_file") - cli_statements_pct=$(jq -r '.total.statements.pct' "$cli_json_file") - cli_functions_pct=$(jq -r '.total.functions.pct' "$cli_json_file") - cli_branches_pct=$(jq -r '.total.branches.pct' "$cli_json_file") + if [ -f "${CLI_JSON_FILE}" ]; then + cli_lines_pct="$(jq -r '.total.lines.pct' "${CLI_JSON_FILE}")" + cli_statements_pct="$(jq -r '.total.statements.pct' "${CLI_JSON_FILE}")" + cli_functions_pct="$(jq -r '.total.functions.pct' "${CLI_JSON_FILE}")" + cli_branches_pct="$(jq -r '.total.branches.pct' "${CLI_JSON_FILE}")" else - cli_lines_pct="N/A"; cli_statements_pct="N/A"; cli_functions_pct="N/A"; cli_branches_pct="N/A" - echo "CLI coverage-summary.json not found at: $cli_json_file" >&2 # Error to stderr + 
cli_lines_pct="N/A" + cli_statements_pct="N/A" + cli_functions_pct="N/A" + cli_branches_pct="N/A" + echo "CLI coverage-summary.json not found at: ${CLI_JSON_FILE}" >&2 # Error to stderr fi - if [ -f "$core_json_file" ]; then - core_lines_pct=$(jq -r '.total.lines.pct' "$core_json_file") - core_statements_pct=$(jq -r '.total.statements.pct' "$core_json_file") - core_functions_pct=$(jq -r '.total.functions.pct' "$core_json_file") - core_branches_pct=$(jq -r '.total.branches.pct' "$core_json_file") + if [ -f "${CORE_JSON_FILE}" ]; then + core_lines_pct="$(jq -r '.total.lines.pct' "${CORE_JSON_FILE}")" + core_statements_pct="$(jq -r '.total.statements.pct' "${CORE_JSON_FILE}")" + core_functions_pct="$(jq -r '.total.functions.pct' "${CORE_JSON_FILE}")" + core_branches_pct="$(jq -r '.total.branches.pct' "${CORE_JSON_FILE}")" else - core_lines_pct="N/A"; core_statements_pct="N/A"; core_functions_pct="N/A"; core_branches_pct="N/A" - echo "Core coverage-summary.json not found at: $core_json_file" >&2 # Error to stderr + core_lines_pct="N/A" + core_statements_pct="N/A" + core_functions_pct="N/A" + core_branches_pct="N/A" + echo "Core coverage-summary.json not found at: ${CORE_JSON_FILE}" >&2 # Error to stderr fi - echo "## Code Coverage Summary" > "$comment_file" - echo "" >> "$comment_file" - echo "| Package | Lines | Statements | Functions | Branches |" >> "$comment_file" - echo "|---|---|---|---|---|" >> "$comment_file" - echo "| CLI | ${cli_lines_pct}% | ${cli_statements_pct}% | ${cli_functions_pct}% | ${cli_branches_pct}% |" >> "$comment_file" - echo "| Core | ${core_lines_pct}% | ${core_statements_pct}% | ${core_functions_pct}% | ${core_branches_pct}% |" >> "$comment_file" - echo "" >> "$comment_file" + echo "## Code Coverage Summary" > "${COMMENT_FILE}" + echo "" >> "${COMMENT_FILE}" + echo "| Package | Lines | Statements | Functions | Branches |" >> "${COMMENT_FILE}" + echo "|---|---|---|---|---|" >> "${COMMENT_FILE}" + echo "| CLI | ${cli_lines_pct}% | 
${cli_statements_pct}% | ${cli_functions_pct}% | ${cli_branches_pct}% |" >> "${COMMENT_FILE}" + echo "| Core | ${core_lines_pct}% | ${core_statements_pct}% | ${core_functions_pct}% | ${core_branches_pct}% |" >> "${COMMENT_FILE}" + echo "" >> "${COMMENT_FILE}" # CLI Package - Collapsible Section (with full text summary from file) - echo "
" >> "$comment_file" - echo "CLI Package - Full Text Report" >> "$comment_file" - echo "" >> "$comment_file" - echo '```text' >> "$comment_file" - if [ -f "$cli_full_text_summary_file" ]; then - cat "$cli_full_text_summary_file" >> "$comment_file" + echo "
" >> "${COMMENT_FILE}" + echo "CLI Package - Full Text Report" >> "${COMMENT_FILE}" + echo "" >> "${COMMENT_FILE}" + echo '```text' >> "${COMMENT_FILE}" + if [ -f "${CLI_FULL_TEXT_SUMMARY_FILE}" ]; then + cat "${CLI_FULL_TEXT_SUMMARY_FILE}" >> "${COMMENT_FILE}" else - echo "CLI full-text-summary.txt not found at: $cli_full_text_summary_file" >> "$comment_file" + echo "CLI full-text-summary.txt not found at: ${CLI_FULL_TEXT_SUMMARY_FILE}" >> "${COMMENT_FILE}" fi - echo '```' >> "$comment_file" - echo "
" >> "$comment_file" - echo "" >> "$comment_file" + echo '```' >> "${COMMENT_FILE}" + echo "
" >> "${COMMENT_FILE}" + echo "" >> "${COMMENT_FILE}" # Core Package - Collapsible Section (with full text summary from file) - echo "
" >> "$comment_file" - echo "Core Package - Full Text Report" >> "$comment_file" - echo "" >> "$comment_file" - echo '```text' >> "$comment_file" - if [ -f "$core_full_text_summary_file" ]; then - cat "$core_full_text_summary_file" >> "$comment_file" + echo "
" >> "${COMMENT_FILE}" + echo "Core Package - Full Text Report" >> "${COMMENT_FILE}" + echo "" >> "${COMMENT_FILE}" + echo '```text' >> "${COMMENT_FILE}" + if [ -f "${CORE_FULL_TEXT_SUMMARY_FILE}" ]; then + cat "${CORE_FULL_TEXT_SUMMARY_FILE}" >> "${COMMENT_FILE}" else - echo "Core full-text-summary.txt not found at: $core_full_text_summary_file" >> "$comment_file" + echo "Core full-text-summary.txt not found at: ${CORE_FULL_TEXT_SUMMARY_FILE}" >> "${COMMENT_FILE}" fi - echo '```' >> "$comment_file" - echo "
" >> "$comment_file" - echo "" >> "$comment_file" + echo '```' >> "${COMMENT_FILE}" + echo "
" >> "${COMMENT_FILE}" + echo "" >> "${COMMENT_FILE}" - echo "_For detailed HTML reports, please see the 'coverage-reports-${{ inputs.node_version }}-${{ inputs.os }}' artifact from the main CI run._" >> "$comment_file" + echo "_For detailed HTML reports, please see the 'coverage-reports-${NODE_VERSION}-${OS}' artifact from the main CI run._" >> "${COMMENT_FILE}" - - name: Post Coverage Comment - uses: thollander/actions-comment-pull-request@v3 - if: always() + - name: 'Post Coverage Comment' + uses: 'thollander/actions-comment-pull-request@65f9e5c9a1f2cd378bd74b2e057c9736982a8e74' # ratchet:thollander/actions-comment-pull-request@v3 + if: |- + ${{ always() }} with: - file-path: coverage-comment.md # Use the generated file directly - comment-tag: code-coverage-summary - github-token: ${{ inputs.github_token }} + file-path: 'coverage-comment.md' # Use the generated file directly + comment-tag: 'code-coverage-summary' + github-token: '${{ inputs.github_token }}' diff --git a/.github/scripts/pr-triage.sh b/.github/scripts/pr-triage.sh index 6b60432b..985b3ffc 100755 --- a/.github/scripts/pr-triage.sh +++ b/.github/scripts/pr-triage.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash set -euo pipefail # Initialize a comma-separated string to hold PR numbers that need a comment @@ -6,13 +6,23 @@ PRS_NEEDING_COMMENT="" # Function to process a single PR process_pr() { + if [[ -z "${GITHUB_REPOSITORY:-}" ]]; then + echo "‼️ Missing \$GITHUB_REPOSITORY - this must be run from GitHub Actions" + return 1 + fi + + if [[ -z "${GITHUB_OUTPUT:-}" ]]; then + echo "‼️ Missing \$GITHUB_OUTPUT - this must be run from GitHub Actions" + return 1 + fi + local PR_NUMBER=$1 - echo "πŸ”„ Processing PR #$PR_NUMBER" + echo "πŸ”„ Processing PR #${PR_NUMBER}" # Get PR body with error handling local PR_BODY - if ! PR_BODY=$(gh pr view "$PR_NUMBER" --repo "$GITHUB_REPOSITORY" --json body -q .body 2>/dev/null); then - echo " ⚠️ Could not fetch PR #$PR_NUMBER details" + if ! 
PR_BODY=$(gh pr view "${PR_NUMBER}" --repo "${GITHUB_REPOSITORY}" --json body -q .body 2>/dev/null); then + echo " ⚠️ Could not fetch PR #${PR_NUMBER} details" return 1 fi @@ -20,67 +30,67 @@ process_pr() { local ISSUE_NUMBER="" # Pattern 1: Direct reference like #123 - if [ -z "$ISSUE_NUMBER" ]; then - ISSUE_NUMBER=$(echo "$PR_BODY" | grep -oE '#[0-9]+' | head -1 | sed 's/#//' 2>/dev/null || echo "") + if [[ -z "${ISSUE_NUMBER}" ]]; then + ISSUE_NUMBER=$(echo "${PR_BODY}" | grep -oE '#[0-9]+' | head -1 | sed 's/#//' 2>/dev/null || echo "") fi # Pattern 2: Closes/Fixes/Resolves patterns (case-insensitive) - if [ -z "$ISSUE_NUMBER" ]; then - ISSUE_NUMBER=$(echo "$PR_BODY" | grep -iE '(closes?|fixes?|resolves?) #[0-9]+' | grep -oE '#[0-9]+' | head -1 | sed 's/#//' 2>/dev/null || echo "") + if [[ -z "${ISSUE_NUMBER}" ]]; then + ISSUE_NUMBER=$(echo "${PR_BODY}" | grep -iE '(closes?|fixes?|resolves?) #[0-9]+' | grep -oE '#[0-9]+' | head -1 | sed 's/#//' 2>/dev/null || echo "") fi - if [ -z "$ISSUE_NUMBER" ]; then - echo "⚠️ No linked issue found for PR #$PR_NUMBER, adding status/need-issue label" - if ! gh pr edit "$PR_NUMBER" --repo "$GITHUB_REPOSITORY" --add-label "status/need-issue" 2>/dev/null; then + if [[ -z "${ISSUE_NUMBER}" ]]; then + echo "⚠️ No linked issue found for PR #${PR_NUMBER}, adding status/need-issue label" + if ! 
gh pr edit "${PR_NUMBER}" --repo "${GITHUB_REPOSITORY}" --add-label "status/need-issue" 2>/dev/null; then echo " ⚠️ Failed to add label (may already exist or have permission issues)" fi # Add PR number to the list - if [ -z "$PRS_NEEDING_COMMENT" ]; then - PRS_NEEDING_COMMENT="$PR_NUMBER" + if [[ -z "${PRS_NEEDING_COMMENT}" ]]; then + PRS_NEEDING_COMMENT="${PR_NUMBER}" else - PRS_NEEDING_COMMENT="$PRS_NEEDING_COMMENT,$PR_NUMBER" + PRS_NEEDING_COMMENT="${PRS_NEEDING_COMMENT},${PR_NUMBER}" fi - echo "needs_comment=true" >> $GITHUB_OUTPUT + echo "needs_comment=true" >> "${GITHUB_OUTPUT}" else - echo "πŸ”— Found linked issue #$ISSUE_NUMBER" + echo "πŸ”— Found linked issue #${ISSUE_NUMBER}" # Remove status/need-issue label if present - if ! gh pr edit "$PR_NUMBER" --repo "$GITHUB_REPOSITORY" --remove-label "status/need-issue" 2>/dev/null; then + if ! gh pr edit "${PR_NUMBER}" --repo "${GITHUB_REPOSITORY}" --remove-label "status/need-issue" 2>/dev/null; then echo " status/need-issue label not present or could not be removed" fi # Get issue labels - echo "πŸ“₯ Fetching labels from issue #$ISSUE_NUMBER" + echo "πŸ“₯ Fetching labels from issue #${ISSUE_NUMBER}" local ISSUE_LABELS="" - if ! ISSUE_LABELS=$(gh issue view "$ISSUE_NUMBER" --repo "$GITHUB_REPOSITORY" --json labels -q '.labels[].name' 2>/dev/null | tr '\n' ',' | sed 's/,$//' || echo ""); then - echo " ⚠️ Could not fetch issue #$ISSUE_NUMBER (may not exist or be in different repo)" + if ! ISSUE_LABELS=$(gh issue view "${ISSUE_NUMBER}" --repo "${GITHUB_REPOSITORY}" --json labels -q '.labels[].name' 2>/dev/null | tr '\n' ',' | sed 's/,$//' || echo ""); then + echo " ⚠️ Could not fetch issue #${ISSUE_NUMBER} (may not exist or be in different repo)" ISSUE_LABELS="" fi # Get PR labels - echo "πŸ“₯ Fetching labels from PR #$PR_NUMBER" + echo "πŸ“₯ Fetching labels from PR #${PR_NUMBER}" local PR_LABELS="" - if ! 
PR_LABELS=$(gh pr view "$PR_NUMBER" --repo "$GITHUB_REPOSITORY" --json labels -q '.labels[].name' 2>/dev/null | tr '\n' ',' | sed 's/,$//' || echo ""); then + if ! PR_LABELS=$(gh pr view "${PR_NUMBER}" --repo "${GITHUB_REPOSITORY}" --json labels -q '.labels[].name' 2>/dev/null | tr '\n' ',' | sed 's/,$//' || echo ""); then echo " ⚠️ Could not fetch PR labels" PR_LABELS="" fi - echo " Issue labels: $ISSUE_LABELS" - echo " PR labels: $PR_LABELS" + echo " Issue labels: ${ISSUE_LABELS}" + echo " PR labels: ${PR_LABELS}" # Convert comma-separated strings to arrays local ISSUE_LABEL_ARRAY PR_LABEL_ARRAY - IFS=',' read -ra ISSUE_LABEL_ARRAY <<< "$ISSUE_LABELS" - IFS=',' read -ra PR_LABEL_ARRAY <<< "$PR_LABELS" + IFS=',' read -ra ISSUE_LABEL_ARRAY <<< "${ISSUE_LABELS}" + IFS=',' read -ra PR_LABEL_ARRAY <<< "${PR_LABELS}" # Find labels to add (on issue but not on PR) local LABELS_TO_ADD="" for label in "${ISSUE_LABEL_ARRAY[@]}"; do - if [ -n "$label" ] && [[ ! " ${PR_LABEL_ARRAY[*]} " =~ " ${label} " ]]; then - if [ -z "$LABELS_TO_ADD" ]; then - LABELS_TO_ADD="$label" + if [[ -n "${label}" ]] && [[ " ${PR_LABEL_ARRAY[*]} " != *" ${label} "* ]]; then + if [[ -z "${LABELS_TO_ADD}" ]]; then + LABELS_TO_ADD="${label}" else - LABELS_TO_ADD="$LABELS_TO_ADD,$label" + LABELS_TO_ADD="${LABELS_TO_ADD},${label}" fi fi done @@ -88,65 +98,65 @@ process_pr() { # Find labels to remove (on PR but not on issue) local LABELS_TO_REMOVE="" for label in "${PR_LABEL_ARRAY[@]}"; do - if [ -n "$label" ] && [[ ! 
" ${ISSUE_LABEL_ARRAY[*]} " =~ " ${label} " ]]; then + if [[ -n "${label}" ]] && [[ " ${ISSUE_LABEL_ARRAY[*]} " != *" ${label} "* ]]; then # Don't remove status/need-issue since we already handled it - if [ "$label" != "status/need-issue" ]; then - if [ -z "$LABELS_TO_REMOVE" ]; then - LABELS_TO_REMOVE="$label" + if [[ "${label}" != "status/need-issue" ]]; then + if [[ -z "${LABELS_TO_REMOVE}" ]]; then + LABELS_TO_REMOVE="${label}" else - LABELS_TO_REMOVE="$LABELS_TO_REMOVE,$label" + LABELS_TO_REMOVE="${LABELS_TO_REMOVE},${label}" fi fi fi done # Apply label changes - if [ -n "$LABELS_TO_ADD" ]; then - echo "βž• Adding labels: $LABELS_TO_ADD" - if ! gh pr edit "$PR_NUMBER" --repo "$GITHUB_REPOSITORY" --add-label "$LABELS_TO_ADD" 2>/dev/null; then + if [[ -n "${LABELS_TO_ADD}" ]]; then + echo "βž• Adding labels: ${LABELS_TO_ADD}" + if ! gh pr edit "${PR_NUMBER}" --repo "${GITHUB_REPOSITORY}" --add-label "${LABELS_TO_ADD}" 2>/dev/null; then echo " ⚠️ Failed to add some labels" fi fi - if [ -n "$LABELS_TO_REMOVE" ]; then - echo "βž– Removing labels: $LABELS_TO_REMOVE" - if ! gh pr edit "$PR_NUMBER" --repo "$GITHUB_REPOSITORY" --remove-label "$LABELS_TO_REMOVE" 2>/dev/null; then + if [[ -n "${LABELS_TO_REMOVE}" ]]; then + echo "βž– Removing labels: ${LABELS_TO_REMOVE}" + if ! gh pr edit "${PR_NUMBER}" --repo "${GITHUB_REPOSITORY}" --remove-label "${LABELS_TO_REMOVE}" 2>/dev/null; then echo " ⚠️ Failed to remove some labels" fi fi - if [ -z "$LABELS_TO_ADD" ] && [ -z "$LABELS_TO_REMOVE" ]; then + if [[ -z "${LABELS_TO_ADD}" ]] && [[ -z "${LABELS_TO_REMOVE}" ]]; then echo "βœ… Labels already synchronized" fi - echo "needs_comment=false" >> $GITHUB_OUTPUT + echo "needs_comment=false" >> "${GITHUB_OUTPUT}" fi } # If PR_NUMBER is set, process only that PR -if [ -n "${PR_NUMBER:-}" ]; then - if ! process_pr "$PR_NUMBER"; then - echo "❌ Failed to process PR #$PR_NUMBER" +if [[ -n "${PR_NUMBER:-}" ]]; then + if ! 
process_pr "${PR_NUMBER}"; then + echo "❌ Failed to process PR #${PR_NUMBER}" exit 1 fi else # Otherwise, get all open PRs and process them # The script logic will determine which ones need issue linking or label sync echo "πŸ“₯ Getting all open pull requests..." - if ! PR_NUMBERS=$(gh pr list --repo "$GITHUB_REPOSITORY" --state open --limit 1000 --json number -q '.[].number' 2>/dev/null); then + if ! PR_NUMBERS=$(gh pr list --repo "${GITHUB_REPOSITORY}" --state open --limit 1000 --json number -q '.[].number' 2>/dev/null); then echo "❌ Failed to fetch PR list" exit 1 fi - - if [ -z "$PR_NUMBERS" ]; then + + if [[ -z "${PR_NUMBERS}" ]]; then echo "βœ… No open PRs found" else # Count the number of PRs - PR_COUNT=$(echo "$PR_NUMBERS" | wc -w | tr -d ' ') - echo "πŸ“Š Found $PR_COUNT open PRs to process" - - for pr_number in $PR_NUMBERS; do - if ! process_pr "$pr_number"; then - echo "⚠️ Failed to process PR #$pr_number, continuing with next PR..." + PR_COUNT=$(echo "${PR_NUMBERS}" | wc -w | tr -d ' ') + echo "πŸ“Š Found ${PR_COUNT} open PRs to process" + + for pr_number in ${PR_NUMBERS}; do + if ! process_pr "${pr_number}"; then + echo "⚠️ Failed to process PR #${pr_number}, continuing with next PR..." 
continue fi done @@ -154,10 +164,10 @@ else fi # Ensure output is always set, even if empty -if [ -z "$PRS_NEEDING_COMMENT" ]; then - echo "prs_needing_comment=[]" >> $GITHUB_OUTPUT +if [[ -z "${PRS_NEEDING_COMMENT}" ]]; then + echo "prs_needing_comment=[]" >> "${GITHUB_OUTPUT}" else - echo "prs_needing_comment=[$PRS_NEEDING_COMMENT]" >> $GITHUB_OUTPUT + echo "prs_needing_comment=[${PRS_NEEDING_COMMENT}]" >> "${GITHUB_OUTPUT}" fi -echo "βœ… PR triage completed" \ No newline at end of file +echo "βœ… PR triage completed" diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 99b13777..efbea4c8 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -4,158 +4,368 @@ name: Qwen Code CI on: push: - branches: [main, release] + branches: + - 'main' + - 'release' pull_request: - branches: [main, release] + branches: + - 'main' + - 'release' merge_group: -jobs: - lint: - name: Lint - runs-on: ubuntu-latest - permissions: - contents: read # For checkout - steps: - - name: Checkout repository - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 +concurrency: + group: '${{ github.workflow }}-${{ github.head_ref || github.ref }}' + cancel-in-progress: |- + ${{ github.ref != 'refs/heads/main' && !startsWith(github.ref, 'refs/heads/release/') }} - - name: Set up Node.js - uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # v4 +permissions: + checks: 'write' + contents: 'read' + statuses: 'write' + +defaults: + run: + shell: 'bash' + +env: + ACTIONLINT_VERSION: '1.7.7' + SHELLCHECK_VERSION: '0.11.0' + YAMLLINT_VERSION: '1.35.1' + +jobs: + # + # Lint: GitHub Actions + # + lint_github_actions: + name: 'Lint (GitHub Actions)' + runs-on: 'ubuntu-latest' + steps: + - name: 'Checkout' + uses: 'actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8' # ratchet:actions/checkout@v5 + with: + fetch-depth: 1 + + - name: 'Install shellcheck' # Actionlint uses shellcheck + run: |- + mkdir -p "${RUNNER_TEMP}/shellcheck" + curl 
-sSLo "${RUNNER_TEMP}/.shellcheck.txz" "https://github.com/koalaman/shellcheck/releases/download/v${SHELLCHECK_VERSION}/shellcheck-v${SHELLCHECK_VERSION}.linux.x86_64.tar.xz" + tar -xf "${RUNNER_TEMP}/.shellcheck.txz" -C "${RUNNER_TEMP}/shellcheck" --strip-components=1 + echo "${RUNNER_TEMP}/shellcheck" >> "${GITHUB_PATH}" + + - name: 'Install actionlint' + run: |- + mkdir -p "${RUNNER_TEMP}/actionlint" + curl -sSLo "${RUNNER_TEMP}/.actionlint.tgz" "https://github.com/rhysd/actionlint/releases/download/v${ACTIONLINT_VERSION}/actionlint_${ACTIONLINT_VERSION}_linux_amd64.tar.gz" + tar -xzf "${RUNNER_TEMP}/.actionlint.tgz" -C "${RUNNER_TEMP}/actionlint" + echo "${RUNNER_TEMP}/actionlint" >> "${GITHUB_PATH}" + + # For actionlint, we specifically ignore shellcheck rules that are + # annoying or unhelpful. See the shellcheck action for a description. + - name: 'Run actionlint' + run: |- + actionlint \ + -color \ + -format '{{range $err := .}}::error file={{$err.Filepath}},line={{$err.Line}},col={{$err.Column}}::{{$err.Filepath}}@{{$err.Line}} {{$err.Message}}%0A```%0A{{replace $err.Snippet "\\n" "%0A"}}%0A```\n{{end}}' \ + -ignore 'SC2002:' \ + -ignore 'SC2016:' \ + -ignore 'SC2129:' \ + -ignore 'label ".+" is unknown' + + - name: 'Run ratchet' + uses: 'sethvargo/ratchet@8b4ca256dbed184350608a3023620f267f0a5253' # ratchet:sethvargo/ratchet@v0.11.4 + with: + files: |- + .github/workflows/*.yml + .github/actions/**/*.yml + + # + # Lint: Javascript + # + lint_javascript: + name: 'Lint (Javascript)' + runs-on: 'ubuntu-latest' + steps: + - name: 'Checkout' + uses: 'actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8' # ratchet:actions/checkout@v5 + with: + fetch-depth: 1 + + - name: 'Set up Node.js' + uses: 'actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020' # ratchet:actions/setup-node@v4.4.0 with: node-version-file: '.nvmrc' cache: 'npm' - - name: Install dependencies - run: npm ci + - name: 'Install dependencies' + run: |- + npm ci - - name: Run formatter 
check - run: | + - name: 'Run formatter check' + run: |- npm run format git diff --exit-code - - name: Run linter - run: npm run lint:ci + - name: 'Run linter' + run: |- + npm run lint:ci - - name: Run linter on integration tests - run: npx eslint integration-tests --max-warnings 0 + - name: 'Run linter on integration tests' + run: |- + npx eslint integration-tests --max-warnings 0 - - name: Run formatter on integration tests - run: | + - name: 'Run formatter on integration tests' + run: |- npx prettier --check integration-tests git diff --exit-code - - name: Build project - run: npm run build + - name: 'Build project' + run: |- + npm run build - - name: Run type check - run: npm run typecheck + - name: 'Run type check' + run: |- + npm run typecheck - test: - name: Test - runs-on: ${{ matrix.os }} - needs: lint - permissions: - contents: read - checks: write - pull-requests: write - strategy: - matrix: - os: [ubuntu-latest, windows-latest, macos-latest] - node-version: [20.x, 22.x, 24.x] + # + # Lint: Shell + # + lint_shell: + name: 'Lint (Shell)' + runs-on: 'ubuntu-latest' steps: - - name: Checkout repository - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 - - - name: Set up Node.js ${{ matrix.node-version }} - uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # v4 + - name: 'Checkout' + uses: 'actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8' # ratchet:actions/checkout@v5 with: - node-version: ${{ matrix.node-version }} + fetch-depth: 1 + + - name: 'Install shellcheck' + run: |- + mkdir -p "${RUNNER_TEMP}/shellcheck" + curl -sSLo "${RUNNER_TEMP}/.shellcheck.txz" "https://github.com/koalaman/shellcheck/releases/download/v${SHELLCHECK_VERSION}/shellcheck-v${SHELLCHECK_VERSION}.linux.x86_64.tar.xz" + tar -xf "${RUNNER_TEMP}/.shellcheck.txz" -C "${RUNNER_TEMP}/shellcheck" --strip-components=1 + echo "${RUNNER_TEMP}/shellcheck" >> "${GITHUB_PATH}" + + - name: 'Install shellcheck problem matcher' + run: |- + cat > 
"${RUNNER_TEMP}/shellcheck/problem-matcher-lint-shell.json" <<"EOF" + { + "problemMatcher": [ + { + "owner": "lint_shell", + "pattern": [ + { + "regexp": "^(.*):(\\\\d+):(\\\\d+):\\\\s+(?:fatal\\\\s+)?(warning|error):\\\\s+(.*)$", + "file": 1, + "line": 2, + "column": 3, + "severity": 4, + "message": 5 + } + ] + } + ] + } + EOF + echo "::add-matcher::${RUNNER_TEMP}/shellcheck/problem-matcher-lint-shell.json" + + # Note that only warning and error severity show up in the github files + # page. So we replace 'style' and 'note' with 'warning' to make it show + # up. + # + # We also try and find all bash scripts even if they don't have an + # explicit extension. + # + # We explicitly ignore the following rules: + # + # - SC2002: This rule suggests using "cmd < file" instead of "cat | cmd". + # While < is more efficient, pipes are much more readable and expected. + # + # - SC2129: This rule suggests grouping multiple writes to a file in + # braces like "{ cmd1; cmd2; } >> file". This is unexpected and less + # readable. + # + # - SC2310: This is an optional warning that only appears with "set -e" + # and when a command is used as a conditional. 
+ - name: 'Run shellcheck' + run: |- + git ls-files | grep -E '^([^.]+|.*\.(sh|zsh|bash))$' | xargs file --mime-type \ + | grep "text/x-shellscript" | awk '{ print substr($1, 1, length($1)-1) }' \ + | xargs shellcheck \ + --check-sourced \ + --enable=all \ + --exclude=SC2002,SC2129,SC2310 \ + --severity=style \ + --format=gcc \ + --color=never | sed -e 's/note:/warning:/g' -e 's/style:/warning:/g' + + # + # Lint: YAML + # + lint_yaml: + name: 'Lint (YAML)' + runs-on: 'ubuntu-latest' + steps: + - name: 'Checkout' + uses: 'actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8' # ratchet:actions/checkout@v5 + with: + fetch-depth: 1 + + - name: 'Setup Python' + uses: 'actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065' # ratchet:actions/setup-python@v5 + with: + python-version: '3' + + - name: 'Install yamllint' + run: |- + pip install --user "yamllint==${YAMLLINT_VERSION}" + + - name: 'Run yamllint' + run: |- + git ls-files | grep -E '\.(yaml|yml)' | xargs yamllint --format github + + # + # Lint: All + # + # This is a virtual job that other jobs depend on to wait for all linters to + # finish. It's also used to ensure linting happens on CI via required + # workflows. + lint: + name: 'Lint' + needs: + - 'lint_github_actions' + - 'lint_javascript' + - 'lint_shell' + - 'lint_yaml' + runs-on: 'ubuntu-latest' + steps: + - run: |- + echo 'All linters finished!' 
+ + # + # Test: Node + # + test: + name: 'Test' + runs-on: '${{ matrix.os }}' + needs: + - 'lint' + permissions: + contents: 'read' + checks: 'write' + pull-requests: 'write' + strategy: + fail-fast: false # So we can see all test failures + matrix: + os: + - 'macos-latest' + - 'ubuntu-latest' + - 'windows-latest' + node-version: + - '20.x' + - '22.x' + - '24.x' + steps: + - name: 'Checkout' + uses: 'actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8' # ratchet:actions/checkout@v5 + + - name: 'Set up Node.js ${{ matrix.node-version }}' + uses: 'actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020' # ratchet:actions/setup-node@v4 + with: + node-version: '${{ matrix.node-version }}' cache: 'npm' - - name: Build project - run: npm run build + - name: 'Build project' + run: |- + npm run build - - name: Install dependencies for testing - run: npm ci # Install fresh dependencies using the downloaded package-lock.json + - name: 'Install dependencies for testing' + run: |- + npm ci - - name: Run tests and generate reports - run: npm run test:ci + - name: 'Run tests and generate reports' env: NO_COLOR: true + run: 'npm run test:ci' - - name: Publish Test Report (for non-forks) - if: always() && (github.event.pull_request.head.repo.full_name == github.repository) - uses: dorny/test-reporter@dc3a92680fcc15842eef52e8c4606ea7ce6bd3f3 # v2 + - name: 'Publish Test Report (for non-forks)' + if: |- + ${{ always() && (github.event.pull_request.head.repo.full_name == github.repository) }} + uses: 'dorny/test-reporter@dc3a92680fcc15842eef52e8c4606ea7ce6bd3f3' # ratchet:dorny/test-reporter@v2 with: - name: Test Results (Node ${{ matrix.node-version }}) - path: packages/*/junit.xml - reporter: java-junit + name: 'Test Results (Node ${{ matrix.node-version }})' + path: 'packages/*/junit.xml' + reporter: 'java-junit' fail-on-error: 'false' - - name: Upload Test Results Artifact (for forks) - if: always() && (github.event_name == 'pull_request' && 
github.event.pull_request.head.repo.full_name != github.repository) - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4 + - name: 'Upload Test Results Artifact (for forks)' + if: |- + ${{ always() && (github.event_name == 'pull_request' && github.event.pull_request.head.repo.full_name != github.repository) }} + uses: 'actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02' # ratchet:actions/upload-artifact@v4 with: - name: test-results-fork-${{ matrix.node-version }}-${{ matrix.os }} - path: packages/*/junit.xml + name: 'test-results-fork-${{ matrix.node-version }}-${{ matrix.os }}' + path: 'packages/*/junit.xml' - - name: Upload coverage reports - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4 - if: always() + - name: 'Upload coverage reports' + if: |- + ${{ always() }} + uses: 'actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02' # ratchet:actions/upload-artifact@v4 with: - name: coverage-reports-${{ matrix.node-version }}-${{ matrix.os }} - path: packages/*/coverage + name: 'coverage-reports-${{ matrix.node-version }}-${{ matrix.os }}' + path: 'packages/*/coverage' post_coverage_comment: - name: Post Coverage Comment - runs-on: ubuntu-latest - needs: test - if: always() && github.event_name == 'pull_request' && (github.event.pull_request.head.repo.full_name == github.repository) + name: 'Post Coverage Comment' + runs-on: 'ubuntu-latest' + needs: 'test' + if: |- + ${{ always() && github.event_name == 'pull_request' && (github.event.pull_request.head.repo.full_name == github.repository) }} continue-on-error: true permissions: - contents: read # For checkout - pull-requests: write # For commenting + contents: 'read' # For checkout + pull-requests: 'write' # For commenting strategy: matrix: # Reduce noise by only posting the comment once - os: [ubuntu-latest] - node-version: [22.x] + os: + - 'ubuntu-latest' + node-version: + - '22.x' steps: - - name: Checkout repository - uses: 
actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 + - name: 'Checkout' + uses: 'actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8' # ratchet:actions/checkout@v5 - - name: Download coverage reports artifact - uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4 + - name: 'Download coverage reports artifact' + uses: 'actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0' # ratchet:actions/download-artifact@v5 with: - name: coverage-reports-${{ matrix.node-version }}-${{ matrix.os }} - path: coverage_artifact # Download to a specific directory + name: 'coverage-reports-${{ matrix.node-version }}-${{ matrix.os }}' + path: 'coverage_artifact' # Download to a specific directory - - name: Post Coverage Comment using Composite Action - uses: ./.github/actions/post-coverage-comment # Path to the composite action directory + - name: 'Post Coverage Comment using Composite Action' + uses: './.github/actions/post-coverage-comment' # Path to the composite action directory with: - cli_json_file: coverage_artifact/cli/coverage/coverage-summary.json - core_json_file: coverage_artifact/core/coverage/coverage-summary.json - cli_full_text_summary_file: coverage_artifact/cli/coverage/full-text-summary.txt - core_full_text_summary_file: coverage_artifact/core/coverage/full-text-summary.txt - node_version: ${{ matrix.node-version }} - os: ${{ matrix.os }} - github_token: ${{ secrets.GITHUB_TOKEN }} + cli_json_file: 'coverage_artifact/cli/coverage/coverage-summary.json' + core_json_file: 'coverage_artifact/core/coverage/coverage-summary.json' + cli_full_text_summary_file: 'coverage_artifact/cli/coverage/full-text-summary.txt' + core_full_text_summary_file: 'coverage_artifact/core/coverage/full-text-summary.txt' + node_version: '${{ matrix.node-version }}' + os: '${{ matrix.os }}' + github_token: '${{ secrets.GITHUB_TOKEN }}' codeql: - name: CodeQL - runs-on: ubuntu-latest + name: 'CodeQL' + runs-on: 'ubuntu-latest' permissions: - 
actions: read - contents: read - security-events: write + actions: 'read' + contents: 'read' + security-events: 'write' steps: - - name: Checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 + - name: 'Checkout' + uses: 'actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8' # ratchet:actions/checkout@v5 - - name: Initialize CodeQL - uses: github/codeql-action/init@181d5eefc20863364f96762470ba6f862bdef56b # v3 + - name: 'Initialize CodeQL' + uses: 'github/codeql-action/init@df559355d593797519d70b90fc8edd5db049e7a2' # ratchet:github/codeql-action/init@v3 with: - languages: javascript + languages: 'javascript' - - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@181d5eefc20863364f96762470ba6f862bdef56b # v3 + - name: 'Perform CodeQL Analysis' + uses: 'github/codeql-action/analyze@df559355d593797519d70b90fc8edd5db049e7a2' # ratchet:github/codeql-action/analyze@v3 diff --git a/.github/workflows/community-report.yml b/.github/workflows/community-report.yml index 28aa2cba..59fd427f 100644 --- a/.github/workflows/community-report.yml +++ b/.github/workflows/community-report.yml @@ -1,4 +1,4 @@ -name: Generate Weekly Community Report πŸ“Š +name: 'Generate Weekly Community Report πŸ“Š' on: schedule: @@ -12,56 +12,57 @@ on: jobs: generate-report: - name: Generate Report πŸ“ - if: ${{ github.repository == 'google-gemini/gemini-cli' }} - runs-on: ubuntu-latest + name: 'Generate Report πŸ“' + if: |- + ${{ github.repository == 'google-gemini/gemini-cli' }} + runs-on: 'ubuntu-latest' permissions: - issues: write - pull-requests: read - discussions: read - contents: read - id-token: write + issues: 'write' + pull-requests: 'read' + discussions: 'read' + contents: 'read' + id-token: 'write' steps: - - name: Generate GitHub App Token πŸ”‘ - id: generate_token - uses: actions/create-github-app-token@df432ceedc7162793a195dd1713ff69aefc7379e # v2 + - name: 'Generate GitHub App Token πŸ”‘' + id: 'generate_token' + uses: 
'actions/create-github-app-token@a8d616148505b5069dccd32f177bb87d7f39123b' # ratchet:actions/create-github-app-token@v2 with: - app-id: ${{ secrets.APP_ID }} - private-key: ${{ secrets.PRIVATE_KEY }} + app-id: '${{ secrets.APP_ID }}' + private-key: '${{ secrets.PRIVATE_KEY }}' - - name: Generate Report πŸ“œ - id: report + - name: 'Generate Report πŸ“œ' + id: 'report' env: - GH_TOKEN: ${{ steps.generate_token.outputs.token }} - REPO: ${{ github.repository }} - DAYS: ${{ github.event.inputs.days || '7' }} - run: | + GH_TOKEN: '${{ steps.generate_token.outputs.token }}' + REPO: '${{ github.repository }}' + DAYS: '${{ github.event.inputs.days || 7 }}' + run: |- set -e - START_DATE=$(date -u -d "$DAYS days ago" +'%Y-%m-%d') - END_DATE=$(date -u +'%Y-%m-%d') - echo "⏳ Generating report for contributions from $START_DATE to $END_DATE..." + START_DATE="$(date -u -d "$DAYS days ago" +'%Y-%m-%d')" + END_DATE="$(date -u +'%Y-%m-%d')" + echo "⏳ Generating report for contributions from ${START_DATE} to ${END_DATE}..." declare -A author_is_googler check_googler_status() { - local author=$1 - if [[ "$author" == *"[bot]" ]]; then - author_is_googler[$author]=1 + local author="$1" + if [[ "${author}" == *"[bot]" ]]; then + author_is_googler[${author}]=1 return 1 fi - if [[ -v "author_is_googler[$author]" ]]; then - return ${author_is_googler[$author]} + if [[ -v "author_is_googler[${author}]" ]]; then + return "${author_is_googler[${author}]}" fi - if gh api "orgs/googlers/members/$author" --silent 2>/dev/null; then - echo "πŸ§‘β€πŸ’» $author is a Googler." - author_is_googler[$author]=0 + if gh api "orgs/googlers/members/${author}" --silent 2>/dev/null; then + echo "πŸ§‘β€πŸ’» ${author} is a Googler." + author_is_googler[${author}]=0 else - echo "🌍 $author is a community contributor." - author_is_googler[$author]=1 + echo "🌍 ${author} is a community contributor." 
+ author_is_googler[${author}]=1 fi - return ${author_is_googler[$author]} + return "${author_is_googler[${author}]}" } googler_issues=0 @@ -70,27 +71,27 @@ jobs: non_googler_prs=0 echo "πŸ”Ž Fetching issues and pull requests..." - ITEMS_JSON=$(gh search issues --repo "$REPO" "created:>$START_DATE" --json author,isPullRequest --limit 1000) + ITEMS_JSON="$(gh search issues --repo "${REPO}" "created:>${START_DATE}" --json author,isPullRequest --limit 1000)" for row in $(echo "${ITEMS_JSON}" | jq -r '.[] | @base64'); do _jq() { - echo ${row} | base64 --decode | jq -r ${1} + echo "${row}" | base64 --decode | jq -r "${1}" } - author=$(_jq '.author.login') - is_pr=$(_jq '.isPullRequest') + author="$(_jq '.author.login')" + is_pr="$(_jq '.isPullRequest')" - if [[ -z "$author" || "$author" == "null" ]]; then + if [[ -z "${author}" || "${author}" == "null" ]]; then continue fi - if check_googler_status "$author"; then - if [[ "$is_pr" == "true" ]]; then + if check_googler_status "${author}"; then + if [[ "${is_pr}" == "true" ]]; then ((googler_prs++)) else ((googler_issues++)) fi else - if [[ "$is_pr" == "true" ]]; then + if [[ "${is_pr}" == "true" ]]; then ((non_googler_prs++)) else ((non_googler_issues++)) @@ -114,19 +115,19 @@ jobs: } } }''' - DISCUSSIONS_JSON=$(gh api graphql -f q="repo:$REPO created:>$START_DATE" -f query="$DISCUSSION_QUERY") + DISCUSSIONS_JSON="$(gh api graphql -f q="repo:${REPO} created:>${START_DATE}" -f query="${DISCUSSION_QUERY}")" for row in $(echo "${DISCUSSIONS_JSON}" | jq -r '.data.search.nodes[] | @base64'); do _jq() { - echo ${row} | base64 --decode | jq -r ${1} + echo "${row}" | base64 --decode | jq -r "${1}" } - author=$(_jq '.author.login') + author="$(_jq '.author.login')" - if [[ -z "$author" || "$author" == "null" ]]; then + if [[ -z "${author}" || "${author}" == "null" ]]; then continue fi - if check_googler_status "$author"; then + if check_googler_status "${author}"; then ((googler_discussions++)) else ((non_googler_discussions++)) 
@@ -134,7 +135,6 @@ jobs: done echo "✍️ Generating report content..." - REPORT_TITLE="Community Contribution Report: $START_DATE to $END_DATE" TOTAL_ISSUES=$((googler_issues + non_googler_issues)) TOTAL_PRS=$((googler_prs + non_googler_prs)) TOTAL_DISCUSSIONS=$((googler_discussions + non_googler_discussions)) @@ -142,7 +142,7 @@ jobs: REPORT_BODY=$(cat <> $GITHUB_OUTPUT - echo "$REPORT_BODY" >> $GITHUB_OUTPUT - echo "EOF" >> $GITHUB_OUTPUT + echo "report_body<> "${GITHUB_OUTPUT}" + echo "${REPORT_BODY}" >> "${GITHUB_OUTPUT}" + echo "EOF" >> "${GITHUB_OUTPUT}" echo "πŸ“Š Community Contribution Report:" - echo "$REPORT_BODY" + echo "${REPORT_BODY}" - - name: πŸ€– Get Insights from Report - if: steps.report.outputs.report_body != '' - uses: google-gemini/gemini-cli-action@df3f890f003d28c60a2a09d2c29e0126e4d1e2ff + - name: 'πŸ€– Get Insights from Report' + if: |- + ${{ steps.report.outputs.report_body != '' }} + uses: 'google-github-actions/run-gemini-cli@06123c6a203eb7a964ce3be7c48479cc66059f23' # ratchet:google-github-actions/run-gemini-cli@v0 env: - GITHUB_TOKEN: ${{ steps.generate_token.outputs.token }} + GITHUB_TOKEN: '${{ steps.generate_token.outputs.token }}' + REPOSITORY: '${{ github.repository }}' with: - version: 0.1.8-rc.0 - GEMINI_API_KEY: ${{ secrets.GEMINI_API_KEY }} - OTLP_GCP_WIF_PROVIDER: ${{ secrets.OTLP_GCP_WIF_PROVIDER }} - OTLP_GOOGLE_CLOUD_PROJECT: ${{ secrets.OTLP_GOOGLE_CLOUD_PROJECT }} - settings_json: | + gcp_workload_identity_provider: '${{ vars.GCP_WIF_PROVIDER }}' + gcp_project_id: '${{ vars.GOOGLE_CLOUD_PROJECT }}' + gcp_location: '${{ vars.GOOGLE_CLOUD_LOCATION }}' + gcp_service_account: '${{ vars.SERVICE_ACCOUNT_EMAIL }}' + gemini_api_key: '${{ secrets.GEMINI_API_KEY }}' + use_vertex_ai: '${{ vars.GOOGLE_GENAI_USE_VERTEXAI }}' + use_gemini_code_assist: '${{ vars.GOOGLE_GENAI_USE_GCA }}' + settings: |- { "coreTools": [ "run_shell_command(gh issue list)", @@ -180,7 +185,7 @@ jobs: "run_shell_command(gh search prs)" ] } - prompt: | + 
prompt: |- You are a helpful assistant that analyzes community contribution reports. Based on the following report, please provide a brief summary and highlight any interesting trends or potential areas for improvement. diff --git a/.github/workflows/docs-page-action.yml b/.github/workflows/docs-page-action.yml new file mode 100644 index 00000000..2d485278 --- /dev/null +++ b/.github/workflows/docs-page-action.yml @@ -0,0 +1,50 @@ +name: 'Deploy GitHub Pages' + +on: + push: + tags: 'v*' + workflow_dispatch: + +permissions: + contents: 'read' + pages: 'write' + id-token: 'write' + +# Allow only one concurrent deployment, skipping runs queued between the run +# in-progress and latest queued. However, do NOT cancel in-progress runs as we +# want to allow these production deployments to complete. +concurrency: + group: '${{ github.workflow }}' + cancel-in-progress: false + +jobs: + build: + if: |- + ${{ !contains(github.ref_name, 'nightly') }} + runs-on: 'ubuntu-latest' + steps: + - name: 'Checkout' + uses: 'actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8' # ratchet:actions/checkout@v5 + + - name: 'Setup Pages' + uses: 'actions/configure-pages@983d7736d9b0ae728b81ab479565c72886d7745b' # ratchet:actions/configure-pages@v5 + + - name: 'Build with Jekyll' + uses: 'actions/jekyll-build-pages@44a6e6beabd48582f863aeeb6cb2151cc1716697' # ratchet:actions/jekyll-build-pages@v1 + with: + source: './' + destination: './_site' + + - name: 'Upload artifact' + uses: 'actions/upload-pages-artifact@56afc609e74202658d3ffba0e8f6dda462b719fa' # ratchet:actions/upload-pages-artifact@v3 + + deploy: + environment: + name: 'github-pages' + url: '${{ steps.deployment.outputs.page_url }}' + runs-on: 'ubuntu-latest' + needs: 'build' + steps: + - name: 'Deploy to GitHub Pages' + id: 'deployment' + uses: 'actions/deploy-pages@d6db90164ac5ed86f2b6aed7e0febac5b3c0c03e' # ratchet:actions/deploy-pages@v4 diff --git a/.github/workflows/e2e.yml b/.github/workflows/e2e.yml index 
a27c7a5c..2aa3f860 100644 --- a/.github/workflows/e2e.yml +++ b/.github/workflows/e2e.yml @@ -1,75 +1,89 @@ -# .github/workflows/e2e.yml - -name: E2E Tests +name: 'E2E Tests' on: push: - branches: [main] + branches: + - 'main' merge_group: jobs: e2e-test-linux: - name: E2E Test (Linux) - ${{ matrix.sandbox }} - runs-on: ubuntu-latest + name: 'E2E Test (Linux) - ${{ matrix.sandbox }}' + runs-on: 'ubuntu-latest' strategy: matrix: - sandbox: [sandbox:none, sandbox:docker] - node-version: [20.x, 22.x, 24.x] + sandbox: + - 'sandbox:none' + - 'sandbox:docker' + node-version: + - '20.x' + - '22.x' + - '24.x' steps: - - name: Checkout repository - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 + - name: 'Checkout' + uses: 'actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8' # ratchet:actions/checkout@v5 - - name: Set up Node.js ${{ matrix.node-version }} - uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # v4 + - name: 'Set up Node.js ${{ matrix.node-version }}' + uses: 'actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020' # ratchet:actions/setup-node@v4 with: - node-version: ${{ matrix.node-version }} + node-version: '${{ matrix.node-version }}' cache: 'npm' - - name: Install dependencies - run: npm ci + - name: 'Install dependencies' + run: |- + npm ci - - name: Build project - run: npm run build + - name: 'Build project' + run: |- + npm run build - - name: Set up Docker - if: matrix.sandbox == 'sandbox:docker' - uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3 + - name: 'Set up Docker' + if: |- + ${{ matrix.sandbox == 'sandbox:docker' }} + uses: 'docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435' # ratchet:docker/setup-buildx-action@v3 - - name: Set up Podman - if: matrix.sandbox == 'sandbox:podman' - uses: redhat-actions/podman-login@4934294ad0449894bcd1e9f191899d7292469603 # v1 + - name: 'Set up Podman' + if: |- + ${{ matrix.sandbox == 'sandbox:podman' }} + uses: 
'redhat-actions/podman-login@4934294ad0449894bcd1e9f191899d7292469603' # ratchet:redhat-actions/podman-login@v1 with: - registry: docker.io - username: ${{ secrets.DOCKERHUB_USERNAME }} - password: ${{ secrets.DOCKERHUB_TOKEN }} + registry: 'docker.io' + username: '${{ secrets.DOCKERHUB_USERNAME }}' + password: '${{ secrets.DOCKERHUB_TOKEN }}' - - name: Run E2E tests + - name: 'Run E2E tests' env: - OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} + OPENAI_API_KEY: '${{ secrets.OPENAI_API_KEY }}' OPENAI_BASE_URL: ${{ secrets.OPENAI_BASE_URL }} OPENAI_MODEL: ${{ secrets.OPENAI_MODEL }} - run: npm run test:integration:${{ matrix.sandbox }} -- --verbose --keep-output + KEEP_OUTPUT: 'true' + SANDBOX: '${{ matrix.sandbox }}' + VERBOSE: 'true' + run: |- + npm run "test:integration:${SANDBOX}" e2e-test-macos: - name: E2E Test - macOS - runs-on: macos-latest + name: 'E2E Test - macOS' + runs-on: 'macos-latest' steps: - - name: Checkout repository - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 + - name: 'Checkout' + uses: 'actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8' # ratchet:actions/checkout@v5 - - name: Set up Node.js - uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # v4 + - name: 'Set up Node.js' + uses: 'actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020' # ratchet:actions/setup-node@v4 with: - node-version: 20.x + node-version-file: '.nvmrc' cache: 'npm' - - name: Install dependencies - run: npm ci + - name: 'Install dependencies' + run: |- + npm ci - - name: Build project - run: npm run build + - name: 'Build project' + run: |- + npm run build - - name: Run E2E tests + - name: 'Run E2E tests' env: OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} OPENAI_BASE_URL: ${{ secrets.OPENAI_BASE_URL }} diff --git a/.github/workflows/gemini-automated-issue-triage.yml b/.github/workflows/gemini-automated-issue-triage.yml index b9343033..48404294 100644 --- a/.github/workflows/gemini-automated-issue-triage.yml +++ 
b/.github/workflows/gemini-automated-issue-triage.yml @@ -71,13 +71,13 @@ jobs: ## Steps - 1. Run: `gh label list --repo ${{ github.repository }} --limit 100` to get all available labels. + 1. Run: `gh label list --repo "${REPOSITORY}" --limit 100` to get all available labels. 2. Review the issue title and body provided in the environment variables: "${ISSUE_TITLE}" and "${ISSUE_BODY}". 3. Ignore any existing priorities or tags on the issue. Just report your findings. - 4. Select the most relevant labels from the existing labels, focusing on kind/*, area/*, sub-area/* and priority/*. For area/* and kind/* limit yourself to only the single most applicable label in each case. - 6. Apply the selected labels to this issue using: `gh issue edit ${{ github.event.issue.number }} --repo ${{ github.repository }} --add-label "label1,label2"`. + 4. Select the most relevant labels from the existing labels, focusing on kind/*, area/*, sub-area/* and priority/*. For area/* and kind/* limit yourself to only the single most applicable label in each case. + 6. Apply the selected labels to this issue using: `gh issue edit "${ISSUE_NUMBER}" --repo "${REPOSITORY}" --add-label "label1,label2"`. 7. For each issue please check if CLI version is present, this is usually in the output of the /about command and will look like 0.1.5 for anything more than 6 versions older than the most recent should add the status/need-retesting label. - 8. If you see that the issue doesn’t look like it has sufficient information recommend the status/need-information label. + 8. If you see that the issue doesn't look like it has sufficient information recommend the status/need-information label. 9. Use Area definitions mentioned below to help you narrow down issues. ## Guidelines @@ -89,7 +89,7 @@ jobs: - Apply only one kind/ label. - Apply all applicable sub-area/* and priority/* labels based on the issue content. It's ok to have multiple of these. 
- Once you categorize the issue if it needs information bump down the priority by 1 eg.. a p0 would become a p1 a p1 would become a p2. P2 and P3 can stay as is in this scenario. - Categorization Guidelines: + Categorization Guidelines: P0: Critical / Blocker - A P0 bug is a catastrophic failure that demands immediate attention. It represents a complete showstopper for a significant portion of users or for the development process itself. Impact: @@ -127,16 +127,16 @@ jobs: - If users are talking about issues where the model gets downgraded from pro to flash then i want you to categorize that as a performance issue - This product is designed to use different models eg.. using pro, downgrading to flash etc. when users report that they dont expect the model to change those would be categorized as feature requests. Definition of Areas - area/ux: + area/ux: - Issues concerning user-facing elements like command usability, interactive features, help docs, and perceived performance. - - I am seeing my screen flicker when using Gemini CLI - - I am seeing the output malformed - - Theme changes aren't taking effect + - I am seeing my screen flicker when using Gemini CLI + - I am seeing the output malformed + - Theme changes aren't taking effect - My keyboard inputs arent' being recognzied - area/platform: + area/platform: - Issues related to installation, packaging, OS compatibility (Windows, macOS, Linux), and the underlying CLI framework. area/background: Issues related to long-running background tasks, daemons, and autonomous or proactive agent features. - area/models: + area/models: - i am not getting a response that is reasonable or expected. this can include things like - I am calling a tool and the tool is not performing as expected. 
- i am expecting a tool to be called and it is not getting called , @@ -150,10 +150,10 @@ jobs: - Memory compression - unexpected responses, - poor quality of generated code - area/tools: - - These are primarily issues related to Model Context Protocol - - These are issues that mention MCP support - - feature requests asking for support for new tools. + area/tools: + - These are primarily issues related to Model Context Protocol + - These are issues that mention MCP support + - feature requests asking for support for new tools. area/core: Issues with fundamental components like command parsing, configuration management, session state, and the main API client logic. Introducing multi-modality area/contribution: Issues related to improving the developer contribution experience, such as CI/CD pipelines, build scripts, and test automation infrastructure. area/authentication: Issues related to user identity, login flows, API key handling, credential storage, and access token management, unable to sign in selecting wrong authentication path etc.. @@ -169,19 +169,22 @@ jobs: - name: 'Post Issue Triage Failure Comment' if: |- ${{ failure() && steps.gemini_issue_triage.outcome == 'failure' }} - uses: 'actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea' + uses: 'actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea' # ratchet:actions/github-script@v7 + env: + REPOSITORY: '${{ github.repository }}' + RUN_URL: '${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}' with: github-token: '${{ steps.generate_token.outputs.token }}' script: |- github.rest.issues.createComment({ - owner: '${{ github.repository }}'.split('/')[0], - repo: '${{ github.repository }}'.split('/')[1], + owner: process.env.REPOSITORY.split('/')[0], + repo: process.env.REPOSITORY.split('/')[1], issue_number: '${{ github.event.issue.number }}', - body: 'There is a problem with the Gemini CLI issue triaging. 
Please check the [action logs](${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}) for details.' + body: `There is a problem with the Gemini CLI issue triaging. Please check the [action logs](${process.env.RUN_URL}) for details.` }) deduplicate-issues: - if: > + if: |- github.repository == 'google-gemini/gemini-cli' && vars.TRIAGE_DEDUPLICATE_ISSUES != '' && (github.event_name == 'issues' || @@ -195,25 +198,25 @@ jobs: timeout-minutes: 20 runs-on: 'ubuntu-latest' steps: - - name: 'Checkout repository' - uses: 'actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683' + - name: 'Checkout' + uses: 'actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8' # ratchet:actions/checkout@v5 - name: 'Generate GitHub App Token' id: 'generate_token' - uses: 'actions/create-github-app-token@df432ceedc7162793a195dd1713ff69aefc7379e' + uses: 'actions/create-github-app-token@a8d616148505b5069dccd32f177bb87d7f39123b' # ratchet:actions/create-github-app-token@v2 with: app-id: '${{ secrets.APP_ID }}' private-key: '${{ secrets.PRIVATE_KEY }}' - - name: Log in to GitHub Container Registry - uses: docker/login-action@v3 + - name: 'Log in to GitHub Container Registry' + uses: 'docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1' # ratchet:docker/login-action@v3 with: - registry: ghcr.io - username: ${{ github.actor }} - password: ${{ secrets.GITHUB_TOKEN }} + registry: 'ghcr.io' + username: '${{ github.actor }}' + password: '${{ secrets.GITHUB_TOKEN }}' - name: 'Run Gemini Issue Deduplication' - uses: 'google-github-actions/run-gemini-cli@20351b5ea2b4179431f1ae8918a246a0808f8747' + uses: 'google-github-actions/run-gemini-cli@06123c6a203eb7a964ce3be7c48479cc66059f23' # ratchet:google-github-actions/run-gemini-cli@v0 id: 'gemini_issue_deduplication' env: GITHUB_TOKEN: '${{ steps.generate_token.outputs.token }}' diff --git a/.github/workflows/gemini-scheduled-issue-triage.yml b/.github/workflows/gemini-scheduled-issue-triage.yml index 
ac698dd7..d6780d8d 100644 --- a/.github/workflows/gemini-scheduled-issue-triage.yml +++ b/.github/workflows/gemini-scheduled-issue-triage.yml @@ -85,28 +85,28 @@ jobs: ## Steps - 1. Run: `gh label list --repo ${{ github.repository }} --limit 100` to get all available labels. + 1. Run: `gh label list --repo "${REPOSITORY}" --limit 100` to get all available labels. 2. Check environment variable for issues to triage: $ISSUES_TO_TRIAGE (JSON array of issues) 3. Review the issue title, body and any comments provided in the environment variables. 4. Ignore any existing priorities or tags on the issue. - 5. Select the most relevant labels from the existing labels, focusing on kind/*, area/*, sub-area/* and priority/*. - 6. Get the list of labels already on the issue using `gh issue view ISSUE_NUMBER --repo ${{ github.repository }} --json labels -t '{{range .labels}}{{.name}}{{"\n"}}{{end}}' - 7. For area/* and kind/* limit yourself to only the single most applicable label in each case. + 5. Select the most relevant labels from the existing labels, focusing on kind/*, area/*, sub-area/* and priority/*. + 6. Get the list of labels already on the issue using `gh issue view ISSUE_NUMBER --repo "${REPOSITORY}" --json labels -t '{{range .labels}}{{.name}}{{"\n"}}{{end}}' + 7. For area/* and kind/* limit yourself to only the single most applicable label in each case. 8. Give me a single short paragraph about why you are selecting each label in the process. use the format Issue ID: , Title, Label applied:, Label removed, ovearll explanation 9. 
Parse the JSON array from step 2 and for EACH INDIVIDUAL issue, apply appropriate labels using separate commands: - - `gh issue edit ISSUE_NUMBER --repo ${{ github.repository }} --add-label "label1"` - - `gh issue edit ISSUE_NUMBER --repo ${{ github.repository }} --add-label "label2"` + - `gh issue edit ISSUE_NUMBER --repo "${REPOSITORY}" --add-label "label1"` + - `gh issue edit ISSUE_NUMBER --repo "${REPOSITORY}" --add-label "label2"` - Continue for each label separately - IMPORTANT: Label each issue individually, one command per issue, one label at a time if needed. - - Make sure after you apply labels there is only one area/* and one kind/* label per issue. - - To do this look for labels found in step 6 that no longer apply remove them one at a time using - - `gh issue edit ISSUE_NUMBER --repo ${{ github.repository }} --remove-label "label-name1"` - - `gh issue edit ISSUE_NUMBER --repo ${{ github.repository }} --remove-label "label-name2"` + - Make sure after you apply labels there is only one area/* and one kind/* label per issue. + - To do this look for labels found in step 6 that no longer apply remove them one at a time using + - `gh issue edit ISSUE_NUMBER --repo "${REPOSITORY}" --remove-label "label-name1"` + - `gh issue edit ISSUE_NUMBER --repo "${REPOSITORY}" --remove-label "label-name2"` - IMPORTANT: Remove each label one at a time, one command per issue if needed. - 10. For each issue please check if CLI version is present, this is usually in the output of the /about command and will look like 0.1.5 + 10. For each issue please check if CLI version is present, this is usually in the output of the /about command and will look like 0.1.5 - Anything more than 6 versions older than the most recent should add the status/need-retesting label - 11. 
If you see that the issue doesn’t look like it has sufficient information recommend the status/need-information label - - After applying appropriate labels to an issue, remove the "status/need-triage" label if present: `gh issue edit ISSUE_NUMBER --repo ${{ github.repository }} --remove-label "status/need-triage"` + 11. If you see that the issue doesn't look like it has sufficient information recommend the status/need-information label + - After applying appropriate labels to an issue, remove the "status/need-triage" label if present: `gh issue edit ISSUE_NUMBER --repo "${REPOSITORY}" --remove-label "status/need-triage"` - Execute one `gh issue edit` command per issue, wait for success before proceeding to the next Process each issue sequentially and confirm each labeling operation before moving to the next issue. @@ -117,22 +117,22 @@ jobs: - Do not remove labels titled help wanted or good first issue. - Triage only the current issue. - Apply only one area/ label - - Apply only one kind/ label (Do not apply kind/duplicate or kind/parent-issue) + - Apply only one kind/ label (Do not apply kind/duplicate or kind/parent-issue) - Apply all applicable sub-area/* and priority/* labels based on the issue content. It's ok to have multiple of these. - Once you categorize the issue if it needs information bump down the priority by 1 eg.. a p0 would become a p1 a p1 would become a p2. P2 and P3 can stay as is in this scenario. - Categorization Guidelines: + Categorization Guidelines: P0: Critical / Blocker - A P0 bug is a catastrophic failure that demands immediate attention. It represents a complete showstopper for a significant portion of users or for the development process itself. Impact: - Blocks development or testing for the entire team. - Major security vulnerability that could compromise user data or system integrity. - Causes data loss or corruption with no workaround. 
- - Crashes the application or makes a core feature completely unusable for all or most users in a production environment. Will it cause severe quality degration? + - Crashes the application or makes a core feature completely unusable for all or most users in a production environment. Will it cause severe quality degradation? - Is it preventing contributors from contributing to the repository or is it a release blocker? Qualifier: Is the main function of the software broken? Example: The gemini auth login command fails with an unrecoverable error, preventing any user from authenticating and using the rest of the CLI. P1: High - - A P1 bug is a serious issue that significantly degrades the user experience or impacts a core feature. While not a complete blocker, it's a major problem that needs a fast resolution. + - A P1 bug is a serious issue that significantly degrades the user experience or impacts a core feature. While not a complete blocker, it's a major problem that needs a fast resolution. - Feature requests are almost never P1. Impact: - A core feature is broken or behaving incorrectly for a large number of users or large number of use cases. @@ -156,21 +156,21 @@ jobs: - An edge-case bug that is very difficult to reproduce and affects a tiny fraction of users. Qualifier: Is it a "nice-to-fix" issue? Example: Spelling mistakes etc. - Additional Context: + Additional Context: - If users are talking about issues where the model gets downgraded from pro to flash then i want you to categorize that as a performance issue - - This product is designed to use different models eg.. using pro, downgrading to flash etc. + - This product is designed to use different models eg.. using pro, downgrading to flash etc. - When users report that they dont expect the model to change those would be categorized as feature requests. 
Definition of Areas - area/ux: + area/ux: - Issues concerning user-facing elements like command usability, interactive features, help docs, and perceived performance. - - I am seeing my screen flicker when using Gemini CLI - - I am seeing the output malformed - - Theme changes aren't taking effect + - I am seeing my screen flicker when using Gemini CLI + - I am seeing the output malformed + - Theme changes aren't taking effect - My keyboard inputs arent' being recognzied - area/platform: + area/platform: - Issues related to installation, packaging, OS compatibility (Windows, macOS, Linux), and the underlying CLI framework. area/background: Issues related to long-running background tasks, daemons, and autonomous or proactive agent features. - area/models: + area/models: - i am not getting a response that is reasonable or expected. this can include things like - I am calling a tool and the tool is not performing as expected. - i am expecting a tool to be called and it is not getting called , @@ -184,21 +184,21 @@ jobs: - Memory compression - unexpected responses, - poor quality of generated code - area/tools: - - These are primarily issues related to Model Context Protocol - - These are issues that mention MCP support - - feature requests asking for support for new tools. - area/core: + area/tools: + - These are primarily issues related to Model Context Protocol + - These are issues that mention MCP support + - feature requests asking for support for new tools. + area/core: - Issues with fundamental components like command parsing, configuration management, session state, and the main API client logic. Introducing multi-modality - area/contribution: + area/contribution: - Issues related to improving the developer contribution experience, such as CI/CD pipelines, build scripts, and test automation infrastructure. 
- area/authentication: + area/authentication: - Issues related to user identity, login flows, API key handling, credential storage, and access token management, unable to sign in selecting wrong authentication path etc.. - area/security-privacy: + area/security-privacy: - Issues concerning vulnerability patching, dependency security, data sanitization, privacy controls, and preventing unauthorized data access. - area/extensibility: + area/extensibility: - Issues related to the plugin system, extension APIs, or making the CLI's functionality available in other applications, github actions, ide support etc.. - area/performance: + area/performance: - Issues focused on model performance - Issues with running out of capacity, - 429 errors etc.. diff --git a/.github/workflows/gemini-scheduled-pr-triage.yml b/.github/workflows/gemini-scheduled-pr-triage.yml index 640a62a2..ab0c3a30 100644 --- a/.github/workflows/gemini-scheduled-pr-triage.yml +++ b/.github/workflows/gemini-scheduled-pr-triage.yml @@ -3,23 +3,23 @@ name: Qwen Scheduled PR Triage πŸš€ on: schedule: - cron: '*/15 * * * *' # Runs every 15 minutes - workflow_dispatch: {} + workflow_dispatch: jobs: audit-prs: timeout-minutes: 15 if: ${{ github.repository == 'QwenLM/qwen-code' }} permissions: - contents: read - id-token: write - issues: write - pull-requests: write - runs-on: ubuntu-latest + contents: 'read' + id-token: 'write' + issues: 'write' + pull-requests: 'write' + runs-on: 'ubuntu-latest' outputs: - prs_needing_comment: ${{ steps.run_triage.outputs.prs_needing_comment }} + prs_needing_comment: '${{ steps.run_triage.outputs.prs_needing_comment }}' steps: - - name: Checkout repository - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 + - name: 'Checkout' + uses: 'actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8' # ratchet:actions/checkout@v5 - name: Run PR Triage Script id: run_triage diff --git a/.github/workflows/no-response.yml b/.github/workflows/no-response.yml index 
3d3d8e7e..abaad9db 100644 --- a/.github/workflows/no-response.yml +++ b/.github/workflows/no-response.yml @@ -1,32 +1,33 @@ -name: No Response +name: 'No Response' # Run as a daily cron at 1:45 AM on: schedule: - cron: '45 1 * * *' - workflow_dispatch: {} + workflow_dispatch: jobs: no-response: - runs-on: ubuntu-latest - if: ${{ github.repository == 'google-gemini/gemini-cli' }} + runs-on: 'ubuntu-latest' + if: |- + ${{ github.repository == 'google-gemini/gemini-cli' }} permissions: - issues: write - pull-requests: write + issues: 'write' + pull-requests: 'write' concurrency: - group: ${{ github.workflow }}-no-response + group: '${{ github.workflow }}-no-response' cancel-in-progress: true steps: - - uses: actions/stale@5bef64f19d7facfb25b37b414482c7164d639639 + - uses: 'actions/stale@5bef64f19d7facfb25b37b414482c7164d639639' # ratchet:actions/stale@v9 with: - repo-token: ${{ secrets.GITHUB_TOKEN }} + repo-token: '${{ secrets.GITHUB_TOKEN }}' days-before-stale: -1 days-before-close: 14 stale-issue-label: 'status/need-information' - close-issue-message: > + close-issue-message: >- This issue was marked as needing more information and has not received a response in 14 days. Closing it for now. If you still face this problem, feel free to reopen with more details. Thank you! stale-pr-label: 'status/need-information' - close-pr-message: > + close-pr-message: >- This pull request was marked as needing more information and has had no updates in 14 days. Closing it for now. You are welcome to reopen with the required info. Thanks for contributing! diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index c78daf9c..ac025b9f 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -1,4 +1,4 @@ -name: Release +name: 'Release' on: schedule: @@ -9,73 +9,78 @@ on: version: description: 'The version to release (e.g., v0.1.11). Required for manual patch releases.' 
required: false # Not required for scheduled runs - type: string + type: 'string' ref: description: 'The branch or ref (full git sha) to release from.' required: true - type: string + type: 'string' default: 'main' dry_run: description: 'Run a dry-run of the release process; no branches, npm packages or GitHub releases will be created.' required: true - type: boolean + type: 'boolean' default: true create_nightly_release: description: 'Auto apply the nightly release tag, input version is ignored.' required: false - type: boolean + type: 'boolean' default: false force_skip_tests: description: 'Select to skip the "Run Tests" step in testing. Prod releases should run tests' required: false - type: boolean + type: 'boolean' default: false jobs: release: - runs-on: ubuntu-latest + runs-on: 'ubuntu-latest' environment: name: production-release url: ${{ github.server_url }}/${{ github.repository }}/releases/tag/${{ steps.version.outputs.RELEASE_TAG }} if: github.repository == 'QwenLM/qwen-code' permissions: - contents: write - packages: write - id-token: write - issues: write # For creating issues on failure + contents: 'write' + packages: 'write' + id-token: 'write' + issues: 'write' # For creating issues on failure outputs: - RELEASE_TAG: ${{ steps.version.outputs.RELEASE_TAG }} + RELEASE_TAG: '${{ steps.version.outputs.RELEASE_TAG }}' steps: - - name: Checkout code - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 + - name: 'Checkout' + uses: 'actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8' # ratchet:actions/checkout@v5 with: - ref: ${{ github.sha }} + ref: '${{ github.sha }}' fetch-depth: 0 - - name: Set booleans for simplified logic - id: vars - run: | + - name: 'Set booleans for simplified logic' + env: + CREATE_NIGHTLY_RELEASE: '${{ github.event.inputs.create_nightly_release }}' + EVENT_NAME: '${{ github.event_name }}' + DRY_RUN_INPUT: '${{ github.event.inputs.dry_run }}' + id: 'vars' + run: |- is_nightly="false" - if [[ "${{ 
github.event_name }}" == "schedule" || "${{ github.event.inputs.create_nightly_release }}" == "true" ]]; then + if [[ "${EVENT_NAME}" == "schedule" || "${CREATE_NIGHTLY_RELEASE}" == "true" ]]; then is_nightly="true" fi - echo "is_nightly=${is_nightly}" >> $GITHUB_OUTPUT + echo "is_nightly=${is_nightly}" >> "${GITHUB_OUTPUT}" is_dry_run="false" - if [[ "${{ github.event.inputs.dry_run }}" == "true" ]]; then + if [[ "${DRY_RUN_INPUT}" == "true" ]]; then is_dry_run="true" fi - echo "is_dry_run=${is_dry_run}" >> $GITHUB_OUTPUT + echo "is_dry_run=${is_dry_run}" >> "${GITHUB_OUTPUT}" - - name: Setup Node.js - uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # v4 + - name: 'Setup Node.js' + uses: 'actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020' # ratchet:actions/setup-node@v4 with: - node-version: '20' + node-version-file: '.nvmrc' cache: 'npm' - - name: Install Dependencies - run: npm ci + - name: 'Install Dependencies' + run: |- + npm ci - name: Get the version id: version @@ -109,35 +114,43 @@ jobs: git config user.name "github-actions[bot]" git config user.email "github-actions[bot]@users.noreply.github.com" - - name: Create and switch to a release branch - id: release_branch - run: | - BRANCH_NAME="release/${{ steps.version.outputs.RELEASE_TAG }}" - git switch -c $BRANCH_NAME - echo "BRANCH_NAME=${BRANCH_NAME}" >> $GITHUB_OUTPUT + - name: 'Create and switch to a release branch' + id: 'release_branch' + env: + RELEASE_TAG: '${{ steps.version.outputs.RELEASE_TAG }}' + run: |- + BRANCH_NAME="release/${RELEASE_TAG}" + git switch -c "${BRANCH_NAME}" + echo "BRANCH_NAME=${BRANCH_NAME}" >> "${GITHUB_OUTPUT}" - - name: Update package versions - run: | - npm run release:version ${{ steps.version.outputs.RELEASE_VERSION }} + - name: 'Update package versions' + env: + RELEASE_VERSION: '${{ steps.version.outputs.RELEASE_VERSION }}' + run: |- + npm run release:version "${RELEASE_VERSION}" - - name: Commit and Conditionally Push package versions - 
run: | + - name: 'Commit and Conditionally Push package versions' + env: + BRANCH_NAME: '${{ steps.release_branch.outputs.BRANCH_NAME }}' + IS_DRY_RUN: '${{ steps.vars.outputs.is_dry_run }}' + RELEASE_TAG: '${{ steps.version.outputs.RELEASE_TAG }}' + run: |- git add package.json package-lock.json packages/*/package.json - git commit -m "chore(release): ${{ steps.version.outputs.RELEASE_TAG }}" - if [[ "${{ steps.vars.outputs.is_dry_run }}" == "false" ]]; then + git commit -m "chore(release): ${RELEASE_TAG}" + if [[ "${IS_DRY_RUN}" == "false" ]]; then echo "Pushing release branch to remote..." - git push --set-upstream origin ${{ steps.release_branch.outputs.BRANCH_NAME }} --follow-tags + git push --set-upstream origin "${BRANCH_NAME}" --follow-tags else echo "Dry run enabled. Skipping push." fi - - name: Build and Prepare Packages - run: | + - name: 'Build and Prepare Packages' + run: |- npm run build:packages npm run prepare:package - - name: Configure npm for publishing - uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # v4 + - name: 'Configure npm for publishing' + uses: 'actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020' # ratchet:actions/setup-node@v4 with: node-version: '20' registry-url: 'https://registry.npmjs.org' @@ -178,12 +191,15 @@ jobs: # Execute the release command eval $RELEASE_CMD - - name: Create Issue on Failure - if: failure() - run: | - gh issue create \ - --title "Release Failed for ${{ steps.version.outputs.RELEASE_TAG || 'N/A' }} on $(date +'%Y-%m-%d')" \ - --body "The release workflow failed. 
See the full run for details: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}" \ - --label "kind/bug,release-failure" + - name: 'Create Issue on Failure' + if: |- + ${{ failure() }} env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + GITHUB_TOKEN: '${{ secrets.GITHUB_TOKEN }}' + RELEASE_TAG: '${{ steps.version.outputs.RELEASE_TAG }} || "N/A"' + DETAILS_URL: '${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}' + run: |- + gh issue create \ + --title "Release Failed for ${RELEASE_TAG} on $(date +'%Y-%m-%d')" \ + --body "The release workflow failed. See the full run for details: ${DETAILS_URL}" \ + --label "kind/bug,release-failure" diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml index 914e9d57..87354b57 100644 --- a/.github/workflows/stale.yml +++ b/.github/workflows/stale.yml @@ -1,38 +1,39 @@ -name: Mark stale issues and pull requests +name: 'Mark stale issues and pull requests' # Run as a daily cron at 1:30 AM on: schedule: - cron: '30 1 * * *' - workflow_dispatch: {} + workflow_dispatch: jobs: stale: - runs-on: ubuntu-latest - if: ${{ github.repository == 'google-gemini/gemini-cli' }} + runs-on: 'ubuntu-latest' + if: |- + ${{ github.repository == 'google-gemini/gemini-cli' }} permissions: - issues: write - pull-requests: write + issues: 'write' + pull-requests: 'write' concurrency: - group: ${{ github.workflow }}-stale + group: '${{ github.workflow }}-stale' cancel-in-progress: true steps: - - uses: actions/stale@5bef64f19d7facfb25b37b414482c7164d639639 + - uses: 'actions/stale@5bef64f19d7facfb25b37b414482c7164d639639' # ratchet:actions/stale@v9 with: - repo-token: ${{ secrets.GITHUB_TOKEN }} - stale-issue-message: > + repo-token: '${{ secrets.GITHUB_TOKEN }}' + stale-issue-message: >- This issue has been automatically marked as stale due to 60 days of inactivity. It will be closed in 14 days if no further activity occurs. 
- stale-pr-message: > + stale-pr-message: >- This pull request has been automatically marked as stale due to 60 days of inactivity. It will be closed in 14 days if no further activity occurs. - close-issue-message: > + close-issue-message: >- This issue has been closed due to 14 additional days of inactivity after being marked as stale. If you believe this is still relevant, feel free to comment or reopen the issue. Thank you! - close-pr-message: > + close-pr-message: >- This pull request has been closed due to 14 additional days of inactivity after being marked as stale. If this is still relevant, you are welcome to reopen or leave a comment. Thanks for contributing! days-before-stale: 60 days-before-close: 14 - exempt-issue-labels: pinned,security - exempt-pr-labels: pinned,security + exempt-issue-labels: 'pinned,security' + exempt-pr-labels: 'pinned,security' diff --git a/.vscode/launch.json b/.vscode/launch.json index 496e7233..72d16ce1 100644 --- a/.vscode/launch.json +++ b/.vscode/launch.json @@ -7,22 +7,9 @@ { "type": "node", "request": "launch", - "name": "Launch CLI", + "name": "Build & Launch CLI", "runtimeExecutable": "npm", - "runtimeArgs": ["run", "start"], - "skipFiles": ["/**"], - "cwd": "${workspaceFolder}", - "console": "integratedTerminal", - "env": { - "GEMINI_SANDBOX": "false" - } - }, - { - "type": "node", - "request": "launch", - "name": "Launch E2E", - "program": "${workspaceFolder}/integration-tests/run-tests.js", - "args": ["--verbose", "--keep-output", "list_directory"], + "runtimeArgs": ["run", "build-and-start"], "skipFiles": ["/**"], "cwd": "${workspaceFolder}", "console": "integratedTerminal", diff --git a/.yamllint.yml b/.yamllint.yml new file mode 100644 index 00000000..b4612e07 --- /dev/null +++ b/.yamllint.yml @@ -0,0 +1,88 @@ +rules: + anchors: + forbid-duplicated-anchors: true + forbid-undeclared-aliases: true + forbid-unused-anchors: true + + braces: + forbid: 'non-empty' + min-spaces-inside-empty: 0 + max-spaces-inside-empty: 0 
+ + brackets: + min-spaces-inside: 0 + max-spaces-inside: 0 + min-spaces-inside-empty: 0 + max-spaces-inside-empty: 0 + + colons: + max-spaces-before: 0 + max-spaces-after: 1 + + commas: + max-spaces-before: 0 + min-spaces-after: 1 + max-spaces-after: 1 + + comments: + require-starting-space: true + ignore-shebangs: true + min-spaces-from-content: 1 + + comments-indentation: 'disable' + + document-end: + present: false + + document-start: + present: false + + empty-lines: + max: 2 + max-start: 0 + max-end: 1 + + empty-values: + forbid-in-block-mappings: false + forbid-in-flow-mappings: true + + float-values: + forbid-inf: false + forbid-nan: false + forbid-scientific-notation: false + require-numeral-before-decimal: false + + hyphens: + max-spaces-after: 1 + + indentation: + spaces: 2 + indent-sequences: true + check-multi-line-strings: false + + key-duplicates: {} + + new-line-at-end-of-file: {} + + new-lines: + type: 'unix' + + octal-values: + forbid-implicit-octal: true + forbid-explicit-octal: false + + quoted-strings: + quote-type: 'single' + required: true + allow-quoted-quotes: true + + trailing-spaces: {} + + truthy: + allowed-values: ['true', 'false', 'on'] # GitHub Actions uses "on" + check-keys: true + +ignore: + - 'thirdparty/' + - 'third_party/' + - 'vendor/' diff --git a/QWEN.md b/QWEN.md index 74185b4b..82f69c8a 100644 --- a/QWEN.md +++ b/QWEN.md @@ -97,17 +97,17 @@ TypeScript's power lies in its ability to provide static type checking, catching - **Preferring `unknown` over `any`**: When you absolutely cannot determine the type of a value at compile time, and you're tempted to reach for any, consider using unknown instead. unknown is a type-safe counterpart to any. While a variable of type unknown can hold any value, you must perform type narrowing (e.g., using typeof or instanceof checks, or a type assertion) before you can perform any operations on it. This forces you to handle the unknown type explicitly, preventing accidental runtime errors. 
- ``` + ```ts function processValue(value: unknown) { - if (typeof value === 'string') { - // value is now safely a string - console.log(value.toUpperCase()); - } else if (typeof value === 'number') { - // value is now safely a number - console.log(value * 2); - } - // Without narrowing, you cannot access properties or methods on 'value' - // console.log(value.someProperty); // Error: Object is of type 'unknown'. + if (typeof value === 'string') { + // value is now safely a string + console.log(value.toUpperCase()); + } else if (typeof value === 'number') { + // value is now safely a number + console.log(value * 2); + } + // Without narrowing, you cannot access properties or methods on 'value' + // console.log(value.someProperty); // Error: Object is of type 'unknown'. } ``` @@ -115,6 +115,14 @@ TypeScript's power lies in its ability to provide static type checking, catching - **Bypassing Type Checking**: Like `any`, type assertions bypass TypeScript's safety checks. If your assertion is incorrect, you introduce a runtime error that TypeScript would not have warned you about. - **Code Smell in Testing**: A common scenario where `any` or type assertions might be tempting is when trying to test "private" implementation details (e.g., spying on or stubbing an unexported function within a module). This is a strong indication of a "code smell" in your testing strategy and potentially your code structure. Instead of trying to force access to private internals, consider whether those internal details should be refactored into a separate module with a well-defined public API. This makes them inherently testable without compromising encapsulation. +### Type narrowing `switch` clauses + +Use the `checkExhaustive` helper in the default clause of a switch statement. +This will ensure that all of the possible options within the value or +enumeration are used. 
+ +This helper method can be found in `packages/cli/src/utils/checks.ts` + ### Embracing JavaScript's Array Operators To further enhance code cleanliness and promote safe functional programming practices, leverage JavaScript's rich set of array operators as much as possible. Methods like `.map()`, `.filter()`, `.reduce()`, `.slice()`, `.sort()`, and others are incredibly powerful for transforming and manipulating data collections in an immutable and declarative way. diff --git a/docs/cli/configuration.md b/docs/cli/configuration.md index 18932ba8..5e8c9002 100644 --- a/docs/cli/configuration.md +++ b/docs/cli/configuration.md @@ -432,6 +432,13 @@ Arguments passed directly when running the CLI can override other configurations - Displays the current memory usage. - **`--yolo`**: - Enables YOLO mode, which automatically approves all tool calls. +- **`--approval-mode `**: + - Sets the approval mode for tool calls. Available modes: + - `default`: Prompt for approval on each tool call (default behavior) + - `auto_edit`: Automatically approve edit tools (replace, write_file) while prompting for others + - `yolo`: Automatically approve all tool calls (equivalent to `--yolo`) + - Cannot be used together with `--yolo`. Use `--approval-mode=yolo` instead of `--yolo` for the new unified approach. + - Example: `gemini --approval-mode auto_edit` - **`--telemetry`**: - Enables [telemetry](../telemetry.md). - **`--telemetry-target`**: @@ -532,7 +539,7 @@ Sandboxing is disabled by default, but you can enable it in a few ways: - Using `--sandbox` or `-s` flag. - Setting `GEMINI_SANDBOX` environment variable. -- Sandbox is enabled in `--yolo` mode by default. +- Sandbox is enabled when using `--yolo` or `--approval-mode=yolo` by default. By default, it uses a pre-built `qwen-code-sandbox` Docker image. 
diff --git a/docs/ide-integration.md b/docs/ide-integration.md new file mode 100644 index 00000000..a0bd4976 --- /dev/null +++ b/docs/ide-integration.md @@ -0,0 +1,141 @@ +# IDE Integration + +Gemini CLI can integrate with your IDE to provide a more seamless and context-aware experience. This integration allows the CLI to understand your workspace better and enables powerful features like native in-editor diffing. + +Currently, the only supported IDE is [Visual Studio Code](https://code.visualstudio.com/) and other editors that support VS Code extensions. + +## Features + +- **Workspace Context:** The CLI automatically gains awareness of your workspace to provide more relevant and accurate responses. This context includes: + - The **10 most recently accessed files** in your workspace. + - Your active cursor position. + - Any text you have selected (up to a 16KB limit; longer selections will be truncated). + +- **Native Diffing:** When Gemini suggests code modifications, you can view the changes directly within your IDE's native diff viewer. This allows you to review, edit, and accept or reject the suggested changes seamlessly. + +- **VS Code Commands:** You can access Gemini CLI features directly from the VS Code Command Palette (`Cmd+Shift+P` or `Ctrl+Shift+P`): + - `Gemini CLI: Run`: Starts a new Gemini CLI session in the integrated terminal. + - `Gemini CLI: Accept Diff`: Accepts the changes in the active diff editor. + - `Gemini CLI: Close Diff Editor`: Rejects the changes and closes the active diff editor. + - `Gemini CLI: View Third-Party Notices`: Displays the third-party notices for the extension. + +## Installation and Setup + +There are three ways to set up the IDE integration: + +### 1. Automatic Nudge (Recommended) + +When you run Gemini CLI inside a supported editor, it will automatically detect your environment and prompt you to connect. 
Answering "Yes" will automatically run the necessary setup, which includes installing the companion extension and enabling the connection. + +### 2. Manual Installation from CLI + +If you previously dismissed the prompt or want to install the extension manually, you can run the following command inside Gemini CLI: + +``` +/ide install +``` + +This will find the correct extension for your IDE and install it. + +### 3. Manual Installation from a Marketplace + +You can also install the extension directly from a marketplace. + +- **For Visual Studio Code:** Install from the [VS Code Marketplace](https://marketplace.visualstudio.com/items?itemName=google.gemini-cli-vscode-ide-companion). +- **For VS Code Forks:** To support forks of VS Code, the extension is also published on the [Open VSX Registry](https://open-vsx.org/extension/google/gemini-cli-vscode-ide-companion). Follow your editor's instructions for installing extensions from this registry. + +After any installation method, it's recommended to open a new terminal window to ensure the integration is activated correctly. Once installed, you can use `/ide enable` to connect. + +## Usage + +### Enabling and Disabling + +You can control the IDE integration from within the CLI: + +- To enable the connection to the IDE, run: + ``` + /ide enable + ``` +- To disable the connection, run: + ``` + /ide disable + ``` + +When enabled, Gemini CLI will automatically attempt to connect to the IDE companion extension. + +### Checking the Status + +To check the connection status and see the context the CLI has received from the IDE, run: + +``` +/ide status +``` + +If connected, this command will show the IDE it's connected to and a list of recently opened files it is aware of. + +(Note: The file list is limited to 10 recently accessed files within your workspace and only includes local files on disk.) + +### Working with Diffs + +When you ask Gemini to modify a file, it can open a diff view directly in your editor. 
+ +**To accept a diff**, you can perform any of the following actions: + +- Click the **checkmark icon** in the diff editor's title bar. +- Save the file (e.g., with `Cmd+S` or `Ctrl+S`). +- Open the Command Palette and run **Gemini CLI: Accept Diff**. +- Respond with `yes` in the CLI when prompted. + +**To reject a diff**, you can: + +- Click the **'x' icon** in the diff editor's title bar. +- Close the diff editor tab. +- Open the Command Palette and run **Gemini CLI: Close Diff Editor**. +- Respond with `no` in the CLI when prompted. + +You can also **modify the suggested changes** directly in the diff view before accepting them. + +If you select β€˜Yes, allow always’ in the CLI, changes will no longer show up in the IDE as they will be auto-accepted. + +## Using with Sandboxing + +If you are using Gemini CLI within a sandbox, please be aware of the following: + +- **On macOS:** The IDE integration requires network access to communicate with the IDE companion extension. You must use a Seatbelt profile that allows network access. +- **In a Docker Container:** If you run Gemini CLI inside a Docker (or Podman) container, the IDE integration can still connect to the VS Code extension running on your host machine. The CLI is configured to automatically find the IDE server on `host.docker.internal`. No special configuration is usually required, but you may need to ensure your Docker networking setup allows connections from the container to the host. + +## Troubleshooting + +If you encounter issues with IDE integration, here are some common error messages and how to resolve them. + +### Connection Errors + +- **Message:** `πŸ”΄ Disconnected: Failed to connect to IDE companion extension for [IDE Name]. Please ensure the extension is running and try restarting your terminal. 
To install the extension, run /ide install.` + - **Cause:** Gemini CLI could not find the necessary environment variables (`GEMINI_CLI_IDE_WORKSPACE_PATH` or `GEMINI_CLI_IDE_SERVER_PORT`) to connect to the IDE. This usually means the IDE companion extension is not running or did not initialize correctly. + - **Solution:** + 1. Make sure you have installed the **Gemini CLI Companion** extension in your IDE and that it is enabled. + 2. Open a new terminal window in your IDE to ensure it picks up the correct environment. + +- **Message:** `πŸ”΄ Disconnected: IDE connection error. The connection was lost unexpectedly. Please try reconnecting by running /ide enable` + - **Cause:** The connection to the IDE companion was lost. + - **Solution:** Run `/ide enable` to try and reconnect. If the issue continues, open a new terminal window or restart your IDE. + +### Configuration Errors + +- **Message:** `πŸ”΄ Disconnected: Directory mismatch. Gemini CLI is running in a different location than the open workspace in [IDE Name]. Please run the CLI from the same directory as your project's root folder.` + - **Cause:** The CLI's current working directory is outside the folder or workspace you have open in your IDE. + - **Solution:** `cd` into the same directory that is open in your IDE and restart the CLI. + +- **Message:** `πŸ”΄ Disconnected: To use this feature, please open a single workspace folder in [IDE Name] and try again.` + - **Cause:** You have multiple workspace folders open in your IDE, or no folder is open at all. The IDE integration requires a single root workspace folder to operate correctly. + - **Solution:** Open a single project folder in your IDE and restart the CLI. + +### General Errors + +- **Message:** `IDE integration is not supported in your current environment. To use this feature, run Gemini CLI in one of these supported IDEs: [List of IDEs]` + - **Cause:** You are running Gemini CLI in a terminal or environment that is not a supported IDE. 
+ - **Solution:** Run Gemini CLI from the integrated terminal of a supported IDE, like VS Code. + +- **Message:** `No installer is available for [IDE Name]. Please install the IDE companion manually from its marketplace.` + - **Cause:** You ran `/ide install`, but the CLI does not have an automated installer for your specific IDE. + - **Solution:** Open your IDE's extension marketplace, search for "Gemini CLI Companion", and install it manually. diff --git a/docs/index.md b/docs/index.md index 03710f76..9c405645 100644 --- a/docs/index.md +++ b/docs/index.md @@ -18,6 +18,7 @@ This documentation is organized into the following sections: - **[Configuration](./cli/configuration.md):** Information on configuring the CLI. - **[Checkpointing](./checkpointing.md):** Documentation for the checkpointing feature. - **[Extensions](./extension.md):** How to extend the CLI with new functionality. + - **[IDE Integration](./ide-integration.md):** Connect the CLI to your editor. - **[Telemetry](./telemetry.md):** Overview of telemetry in the CLI. - **Core Details:** Documentation for `packages/core`. - **[Core Introduction](./core/index.md):** Overview of the core component. diff --git a/docs/integration-tests.md b/docs/integration-tests.md index cb611a4c..90e65a9a 100644 --- a/docs/integration-tests.md +++ b/docs/integration-tests.md @@ -67,13 +67,9 @@ The integration test runner provides several options for diagnostics to help tra You can preserve the temporary files created during a test run for inspection. This is useful for debugging issues with file system operations. -To keep the test output, you can either use the `--keep-output` flag or set the `KEEP_OUTPUT` environment variable to `true`. +To keep the test output set the `KEEP_OUTPUT` environment variable to `true`. 
```bash -# Using the flag -npm run test:integration:sandbox:none -- --keep-output - -# Using the environment variable KEEP_OUTPUT=true npm run test:integration:sandbox:none ``` @@ -81,20 +77,20 @@ When output is kept, the test runner will print the path to the unique directory ### Verbose output -For more detailed debugging, the `--verbose` flag streams the real-time output from the `qwen` command to the console. +For more detailed debugging, set the `VERBOSE` environment variable to `true`. ```bash -npm run test:integration:sandbox:none -- --verbose +VERBOSE=true npm run test:integration:sandbox:none ``` -When using `--verbose` and `--keep-output` in the same command, the output is streamed to the console and also saved to a log file within the test's temporary directory. +When using `VERBOSE=true` and `KEEP_OUTPUT=true` in the same command, the output is streamed to the console and also saved to a log file within the test's temporary directory. The verbose output is formatted to clearly identify the source of the logs: ``` ---- TEST: : --- -... output from the qwen command ... ---- END TEST: : --- +--- TEST: : --- +... output from the gemini command ... +--- END TEST: : --- ``` ## Linting and formatting diff --git a/docs/npm.md b/docs/npm.md index f32c50bc..43332193 100644 --- a/docs/npm.md +++ b/docs/npm.md @@ -58,7 +58,7 @@ To install the latest nightly build, use the `@nightly` tag: npm install -g @qwen-code/qwen-code@nightly ``` -We also run a Google cloud build called [release-docker.yml](../.gcp/release-docker.yaml). Which publishes the sandbox docker to match your release. This will also be moved to GH and combined with the main release file once service account permissions are sorted out. +We also run a Google cloud build called [release-docker.yml](../.gcp/release-docker.yml). Which publishes the sandbox docker to match your release. This will also be moved to GH and combined with the main release file once service account permissions are sorted out. 
### After the Release diff --git a/docs/troubleshooting.md b/docs/troubleshooting.md index 067ff0b5..e9252dba 100644 --- a/docs/troubleshooting.md +++ b/docs/troubleshooting.md @@ -19,6 +19,11 @@ This guide provides solutions to common issues and debugging tips, including top [Google AI Studio](http://aistudio.google.com/app/apikey), which also includes a separate free tier. +- **Error: `UNABLE_TO_GET_ISSUER_CERT_LOCALLY` or `unable to get local issuer certificate`** + - **Cause:** You may be on a corporate network with a firewall that intercepts and inspects SSL/TLS traffic. This often requires a custom root CA certificate to be trusted by Node.js. + - **Solution:** Set the `NODE_EXTRA_CA_CERTS` environment variable to the absolute path of your corporate root CA certificate file. + - Example: `export NODE_EXTRA_CA_CERTS=/path/to/your/corporate-ca.crt` + ## Frequently asked questions (FAQs) - **Q: How do I update Qwen Code to the latest version?** diff --git a/eslint.config.js b/eslint.config.js index e123329e..4c78455b 100644 --- a/eslint.config.js +++ b/eslint.config.js @@ -117,7 +117,12 @@ export default tseslint.config( 'import/no-internal-modules': [ 'error', { - allow: ['react-dom/test-utils', 'memfs/lib/volume.js', 'yargs/**'], + allow: [ + 'react-dom/test-utils', + 'memfs/lib/volume.js', + 'yargs/**', + 'msw/node', + ], }, ], 'import/no-relative-packages': 'error', diff --git a/integration-tests/file-system.test.ts b/integration-tests/file-system.test.ts index d43f047f..5a7028e0 100644 --- a/integration-tests/file-system.test.ts +++ b/integration-tests/file-system.test.ts @@ -4,86 +4,90 @@ * SPDX-License-Identifier: Apache-2.0 */ -import { strict as assert } from 'assert'; -import { test } from 'node:test'; +import { describe, it, expect } from 'vitest'; import { TestRig, printDebugInfo, validateModelOutput } from './test-helper.js'; -test('should be able to read a file', async () => { - const rig = new TestRig(); - await rig.setup('should be able to read a 
file'); - rig.createFile('test.txt', 'hello world'); +describe('file-system', () => { + it('should be able to read a file', async () => { + const rig = new TestRig(); + await rig.setup('should be able to read a file'); + rig.createFile('test.txt', 'hello world'); - const result = await rig.run( - `read the file test.txt and show me its contents`, - ); + const result = await rig.run( + `read the file test.txt and show me its contents`, + ); - const foundToolCall = await rig.waitForToolCall('read_file'); + const foundToolCall = await rig.waitForToolCall('read_file'); - // Add debugging information - if (!foundToolCall || !result.includes('hello world')) { - printDebugInfo(rig, result, { - 'Found tool call': foundToolCall, - 'Contains hello world': result.includes('hello world'), - }); - } + // Add debugging information + if (!foundToolCall || !result.includes('hello world')) { + printDebugInfo(rig, result, { + 'Found tool call': foundToolCall, + 'Contains hello world': result.includes('hello world'), + }); + } - assert.ok(foundToolCall, 'Expected to find a read_file tool call'); + expect( + foundToolCall, + 'Expected to find a read_file tool call', + ).toBeTruthy(); - // Validate model output - will throw if no output, warn if missing expected content - validateModelOutput(result, 'hello world', 'File read test'); -}); - -test('should be able to write a file', async () => { - const rig = new TestRig(); - await rig.setup('should be able to write a file'); - rig.createFile('test.txt', ''); - - const result = await rig.run(`edit test.txt to have a hello world message`); - - // Accept multiple valid tools for editing files - const foundToolCall = await rig.waitForAnyToolCall([ - 'write_file', - 'edit', - 'replace', - ]); - - // Add debugging information - if (!foundToolCall) { - printDebugInfo(rig, result); - } - - assert.ok( - foundToolCall, - 'Expected to find a write_file, edit, or replace tool call', - ); - - // Validate model output - will throw if no output - 
validateModelOutput(result, null, 'File write test'); - - const fileContent = rig.readFile('test.txt'); - - // Add debugging for file content - if (!fileContent.toLowerCase().includes('hello')) { - const writeCalls = rig - .readToolLogs() - .filter((t) => t.toolRequest.name === 'write_file') - .map((t) => t.toolRequest.args); - - printDebugInfo(rig, result, { - 'File content mismatch': true, - 'Expected to contain': 'hello', - 'Actual content': fileContent, - 'Write tool calls': JSON.stringify(writeCalls), - }); - } - - assert.ok( - fileContent.toLowerCase().includes('hello'), - 'Expected file to contain hello', - ); - - // Log success info if verbose - if (process.env.VERBOSE === 'true') { - console.log('File written successfully with hello message.'); - } + // Validate model output - will throw if no output, warn if missing expected content + validateModelOutput(result, 'hello world', 'File read test'); + }); + + it('should be able to write a file', async () => { + const rig = new TestRig(); + await rig.setup('should be able to write a file'); + rig.createFile('test.txt', ''); + + const result = await rig.run(`edit test.txt to have a hello world message`); + + // Accept multiple valid tools for editing files + const foundToolCall = await rig.waitForAnyToolCall([ + 'write_file', + 'edit', + 'replace', + ]); + + // Add debugging information + if (!foundToolCall) { + printDebugInfo(rig, result); + } + + expect( + foundToolCall, + 'Expected to find a write_file, edit, or replace tool call', + ).toBeTruthy(); + + // Validate model output - will throw if no output + validateModelOutput(result, null, 'File write test'); + + const fileContent = rig.readFile('test.txt'); + + // Add debugging for file content + if (!fileContent.toLowerCase().includes('hello')) { + const writeCalls = rig + .readToolLogs() + .filter((t) => t.toolRequest.name === 'write_file') + .map((t) => t.toolRequest.args); + + printDebugInfo(rig, result, { + 'File content mismatch': true, + 'Expected to 
contain': 'hello', + 'Actual content': fileContent, + 'Write tool calls': JSON.stringify(writeCalls), + }); + } + + expect( + fileContent.toLowerCase().includes('hello'), + 'Expected file to contain hello', + ).toBeTruthy(); + + // Log success info if verbose + if (process.env.VERBOSE === 'true') { + console.log('File written successfully with hello message.'); + } + }); }); diff --git a/integration-tests/globalSetup.ts b/integration-tests/globalSetup.ts new file mode 100644 index 00000000..89ca203f --- /dev/null +++ b/integration-tests/globalSetup.ts @@ -0,0 +1,55 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { mkdir, readdir, rm } from 'fs/promises'; +import { join, dirname } from 'path'; +import { fileURLToPath } from 'url'; + +const __dirname = dirname(fileURLToPath(import.meta.url)); +const rootDir = join(__dirname, '..'); +const integrationTestsDir = join(rootDir, '.integration-tests'); +let runDir = ''; // Make runDir accessible in teardown + +export async function setup() { + runDir = join(integrationTestsDir, `${Date.now()}`); + await mkdir(runDir, { recursive: true }); + + // Clean up old test runs, but keep the latest few for debugging + try { + const testRuns = await readdir(integrationTestsDir); + if (testRuns.length > 5) { + const oldRuns = testRuns.sort().slice(0, testRuns.length - 5); + await Promise.all( + oldRuns.map((oldRun) => + rm(join(integrationTestsDir, oldRun), { + recursive: true, + force: true, + }), + ), + ); + } + } catch (e) { + console.error('Error cleaning up old test runs:', e); + } + + process.env.INTEGRATION_TEST_FILE_DIR = runDir; + process.env.GEMINI_CLI_INTEGRATION_TEST = 'true'; + process.env.TELEMETRY_LOG_FILE = join(runDir, 'telemetry.log'); + + if (process.env.KEEP_OUTPUT) { + console.log(`Keeping output for test run in: ${runDir}`); + } + process.env.VERBOSE = process.env.VERBOSE ?? 
'false'; + + console.log(`\nIntegration test output directory: ${runDir}`); +} + +export async function teardown() { + // Cleanup the test run directory unless KEEP_OUTPUT is set + if (process.env.KEEP_OUTPUT !== 'true' && runDir) { + await rm(runDir, { recursive: true, force: true }); + } +} diff --git a/integration-tests/list_directory.test.ts b/integration-tests/list_directory.test.ts index 023eca12..38416f4f 100644 --- a/integration-tests/list_directory.test.ts +++ b/integration-tests/list_directory.test.ts @@ -4,59 +4,63 @@ * SPDX-License-Identifier: Apache-2.0 */ -import { test } from 'node:test'; -import { strict as assert } from 'assert'; +import { describe, it, expect } from 'vitest'; import { TestRig, printDebugInfo, validateModelOutput } from './test-helper.js'; import { existsSync } from 'fs'; import { join } from 'path'; -test('should be able to list a directory', async () => { - const rig = new TestRig(); - await rig.setup('should be able to list a directory'); - rig.createFile('file1.txt', 'file 1 content'); - rig.mkdir('subdir'); - rig.sync(); +describe('list_directory', () => { + it('should be able to list a directory', async () => { + const rig = new TestRig(); + await rig.setup('should be able to list a directory'); + rig.createFile('file1.txt', 'file 1 content'); + rig.mkdir('subdir'); + rig.sync(); - // Poll for filesystem changes to propagate in containers - await rig.poll( - () => { - // Check if the files exist in the test directory - const file1Path = join(rig.testDir!, 'file1.txt'); - const subdirPath = join(rig.testDir!, 'subdir'); - return existsSync(file1Path) && existsSync(subdirPath); - }, - 1000, // 1 second max wait - 50, // check every 50ms - ); - - const prompt = `Can you list the files in the current directory. 
Display them in the style of 'ls'`; - - const result = await rig.run(prompt); - - const foundToolCall = await rig.waitForToolCall('list_directory'); - - // Add debugging information - if ( - !foundToolCall || - !result.includes('file1.txt') || - !result.includes('subdir') - ) { - const allTools = printDebugInfo(rig, result, { - 'Found tool call': foundToolCall, - 'Contains file1.txt': result.includes('file1.txt'), - 'Contains subdir': result.includes('subdir'), - }); - - console.error( - 'List directory calls:', - allTools - .filter((t) => t.toolRequest.name === 'list_directory') - .map((t) => t.toolRequest.args), + // Poll for filesystem changes to propagate in containers + await rig.poll( + () => { + // Check if the files exist in the test directory + const file1Path = join(rig.testDir!, 'file1.txt'); + const subdirPath = join(rig.testDir!, 'subdir'); + return existsSync(file1Path) && existsSync(subdirPath); + }, + 1000, // 1 second max wait + 50, // check every 50ms ); - } - assert.ok(foundToolCall, 'Expected to find a list_directory tool call'); + const prompt = `Can you list the files in the current directory. 
Display them in the style of 'ls'`; - // Validate model output - will throw if no output, warn if missing expected content - validateModelOutput(result, ['file1.txt', 'subdir'], 'List directory test'); + const result = await rig.run(prompt); + + const foundToolCall = await rig.waitForToolCall('list_directory'); + + // Add debugging information + if ( + !foundToolCall || + !result.includes('file1.txt') || + !result.includes('subdir') + ) { + const allTools = printDebugInfo(rig, result, { + 'Found tool call': foundToolCall, + 'Contains file1.txt': result.includes('file1.txt'), + 'Contains subdir': result.includes('subdir'), + }); + + console.error( + 'List directory calls:', + allTools + .filter((t) => t.toolRequest.name === 'list_directory') + .map((t) => t.toolRequest.args), + ); + } + + expect( + foundToolCall, + 'Expected to find a list_directory tool call', + ).toBeTruthy(); + + // Validate model output - will throw if no output, warn if missing expected content + validateModelOutput(result, ['file1.txt', 'subdir'], 'List directory test'); + }); }); diff --git a/integration-tests/mcp_server_cyclic_schema.test.js b/integration-tests/mcp_server_cyclic_schema.test.ts similarity index 92% rename from integration-tests/mcp_server_cyclic_schema.test.js rename to integration-tests/mcp_server_cyclic_schema.test.ts index 1ace98f1..18c1bcde 100644 --- a/integration-tests/mcp_server_cyclic_schema.test.js +++ b/integration-tests/mcp_server_cyclic_schema.test.ts @@ -9,15 +9,11 @@ * and then detect and warn about the potential tools that caused the error. 
*/ -import { test, describe, before } from 'node:test'; -import { strict as assert } from 'node:assert'; +import { describe, it, beforeAll, expect } from 'vitest'; import { TestRig } from './test-helper.js'; import { join } from 'path'; -import { fileURLToPath } from 'url'; import { writeFileSync } from 'fs'; -const __dirname = fileURLToPath(new URL('.', import.meta.url)); - // Create a minimal MCP server that doesn't require external dependencies // This implements the MCP protocol directly using Node.js built-ins const serverScript = `#!/usr/bin/env node @@ -160,7 +156,7 @@ rpc.send({ describe('mcp server with cyclic tool schema is detected', () => { const rig = new TestRig(); - before(async () => { + beforeAll(async () => { // Setup test directory with MCP server configuration await rig.setup('cyclic-schema-mcp-server', { settings: { @@ -174,7 +170,7 @@ describe('mcp server with cyclic tool schema is detected', () => { }); // Create server script in the test directory - const testServerPath = join(rig.testDir, 'mcp-server.cjs'); + const testServerPath = join(rig.testDir!, 'mcp-server.cjs'); writeFileSync(testServerPath, serverScript); // Make the script executable (though running with 'node' should work anyway) @@ -184,15 +180,14 @@ describe('mcp server with cyclic tool schema is detected', () => { } }); - test('should error and suggest disabling the cyclic tool', async () => { + it('should error and suggest disabling the cyclic tool', async () => { // Just run any command to trigger the schema depth error. // If this test starts failing, check `isSchemaDepthError` from // geminiChat.ts to see if it needs to be updated. // Or, possibly it could mean that gemini has fixed the issue. 
const output = await rig.run('hello'); - assert.match( - output, + expect(output).toMatch( /Skipping tool 'tool_with_cyclic_schema' from MCP server 'cyclic-schema-server' because it has missing types in its parameter schema/, ); }); diff --git a/integration-tests/read_many_files.test.ts b/integration-tests/read_many_files.test.ts index 74d2f358..8e839a6a 100644 --- a/integration-tests/read_many_files.test.ts +++ b/integration-tests/read_many_files.test.ts @@ -4,47 +4,48 @@ * SPDX-License-Identifier: Apache-2.0 */ -import { test } from 'node:test'; -import { strict as assert } from 'assert'; +import { describe, it, expect } from 'vitest'; import { TestRig, printDebugInfo, validateModelOutput } from './test-helper.js'; -test('should be able to read multiple files', async () => { - const rig = new TestRig(); - await rig.setup('should be able to read multiple files'); - rig.createFile('file1.txt', 'file 1 content'); - rig.createFile('file2.txt', 'file 2 content'); +describe('read_many_files', () => { + it('should be able to read multiple files', async () => { + const rig = new TestRig(); + await rig.setup('should be able to read multiple files'); + rig.createFile('file1.txt', 'file 1 content'); + rig.createFile('file2.txt', 'file 2 content'); - const prompt = `Please use read_many_files to read file1.txt and file2.txt and show me what's in them`; + const prompt = `Please use read_many_files to read file1.txt and file2.txt and show me what's in them`; - const result = await rig.run(prompt); + const result = await rig.run(prompt); - // Check for either read_many_files or multiple read_file calls - const allTools = rig.readToolLogs(); - const readManyFilesCall = await rig.waitForToolCall('read_many_files'); - const readFileCalls = allTools.filter( - (t) => t.toolRequest.name === 'read_file', - ); + // Check for either read_many_files or multiple read_file calls + const allTools = rig.readToolLogs(); + const readManyFilesCall = await rig.waitForToolCall('read_many_files'); 
+ const readFileCalls = allTools.filter( + (t) => t.toolRequest.name === 'read_file', + ); - // Accept either read_many_files OR at least 2 read_file calls - const foundValidPattern = readManyFilesCall || readFileCalls.length >= 2; + // Accept either read_many_files OR at least 2 read_file calls + const foundValidPattern = readManyFilesCall || readFileCalls.length >= 2; - // Add debugging information - if (!foundValidPattern) { - printDebugInfo(rig, result, { - 'read_many_files called': readManyFilesCall, - 'read_file calls': readFileCalls.length, - }); - } + // Add debugging information + if (!foundValidPattern) { + printDebugInfo(rig, result, { + 'read_many_files called': readManyFilesCall, + 'read_file calls': readFileCalls.length, + }); + } - assert.ok( - foundValidPattern, - 'Expected to find either read_many_files or multiple read_file tool calls', - ); + expect( + foundValidPattern, + 'Expected to find either read_many_files or multiple read_file tool calls', + ).toBeTruthy(); - // Validate model output - will throw if no output, warn if missing expected content - validateModelOutput( - result, - ['file 1 content', 'file 2 content'], - 'Read many files test', - ); + // Validate model output - will throw if no output, warn if missing expected content + validateModelOutput( + result, + ['file 1 content', 'file 2 content'], + 'Read many files test', + ); + }); }); diff --git a/integration-tests/replace.test.ts b/integration-tests/replace.test.ts index 1ac6f5a4..3a2d979b 100644 --- a/integration-tests/replace.test.ts +++ b/integration-tests/replace.test.ts @@ -4,63 +4,60 @@ * SPDX-License-Identifier: Apache-2.0 */ -import { test } from 'node:test'; -import { strict as assert } from 'assert'; +import { describe, it, expect } from 'vitest'; import { TestRig, printDebugInfo, validateModelOutput } from './test-helper.js'; -test('should be able to replace content in a file', async () => { - const rig = new TestRig(); - await rig.setup('should be able to replace 
content in a file'); +describe('replace', () => { + it('should be able to replace content in a file', async () => { + const rig = new TestRig(); + await rig.setup('should be able to replace content in a file'); - const fileName = 'file_to_replace.txt'; - const originalContent = 'original content'; - const expectedContent = 'replaced content'; + const fileName = 'file_to_replace.txt'; + const originalContent = 'original content'; + const expectedContent = 'replaced content'; - rig.createFile(fileName, originalContent); - const prompt = `Can you replace 'original' with 'replaced' in the file 'file_to_replace.txt'`; + rig.createFile(fileName, originalContent); + const prompt = `Can you replace 'original' with 'replaced' in the file 'file_to_replace.txt'`; - const result = await rig.run(prompt); + const result = await rig.run(prompt); - const foundToolCall = await rig.waitForToolCall('replace'); + const foundToolCall = await rig.waitForToolCall('replace'); - // Add debugging information - if (!foundToolCall) { - printDebugInfo(rig, result); - } + // Add debugging information + if (!foundToolCall) { + printDebugInfo(rig, result); + } - assert.ok(foundToolCall, 'Expected to find a replace tool call'); + expect(foundToolCall, 'Expected to find a replace tool call').toBeTruthy(); - // Validate model output - will throw if no output, warn if missing expected content - validateModelOutput( - result, - ['replaced', 'file_to_replace.txt'], - 'Replace content test', - ); - - const newFileContent = rig.readFile(fileName); - - // Add debugging for file content - if (newFileContent !== expectedContent) { - console.error('File content mismatch - Debug info:'); - console.error('Expected:', expectedContent); - console.error('Actual:', newFileContent); - console.error( - 'Tool calls:', - rig.readToolLogs().map((t) => ({ - name: t.toolRequest.name, - args: t.toolRequest.args, - })), + // Validate model output - will throw if no output, warn if missing expected content + 
validateModelOutput( + result, + ['replaced', 'file_to_replace.txt'], + 'Replace content test', ); - } - assert.strictEqual( - newFileContent, - expectedContent, - 'File content should be updated correctly', - ); + const newFileContent = rig.readFile(fileName); - // Log success info if verbose - if (process.env.VERBOSE === 'true') { - console.log('File replaced successfully. New content:', newFileContent); - } + // Add debugging for file content + if (newFileContent !== expectedContent) { + console.error('File content mismatch - Debug info:'); + console.error('Expected:', expectedContent); + console.error('Actual:', newFileContent); + console.error( + 'Tool calls:', + rig.readToolLogs().map((t) => ({ + name: t.toolRequest.name, + args: t.toolRequest.args, + })), + ); + } + + expect(newFileContent).toBe(expectedContent); + + // Log success info if verbose + if (process.env.VERBOSE === 'true') { + console.log('File replaced successfully. New content:', newFileContent); + } + }); }); diff --git a/integration-tests/run-tests.js b/integration-tests/run-tests.js deleted file mode 100644 index b33e1afa..00000000 --- a/integration-tests/run-tests.js +++ /dev/null @@ -1,182 +0,0 @@ -/** - * @license - * Copyright 2025 Google LLC - * SPDX-License-Identifier: Apache-2.0 - */ - -import { spawnSync, spawn } from 'child_process'; -import { mkdirSync, rmSync, createWriteStream } from 'fs'; -import { join, dirname, basename } from 'path'; -import { fileURLToPath } from 'url'; -import { glob } from 'glob'; - -async function main() { - const __dirname = dirname(fileURLToPath(import.meta.url)); - const rootDir = join(__dirname, '..'); - const integrationTestsDir = join(rootDir, '.integration-tests'); - - if (process.env.GEMINI_SANDBOX === 'docker' && !process.env.IS_DOCKER) { - console.log('Building sandbox for Docker...'); - const buildResult = spawnSync('npm', ['run', 'build:all'], { - stdio: 'inherit', - }); - if (buildResult.status !== 0) { - console.error('Sandbox build 
failed.'); - process.exit(1); - } - } - - const runId = `${Date.now()}`; - const runDir = join(integrationTestsDir, runId); - - mkdirSync(runDir, { recursive: true }); - - const args = process.argv.slice(2); - const keepOutput = - process.env.KEEP_OUTPUT === 'true' || args.includes('--keep-output'); - if (keepOutput) { - const keepOutputIndex = args.indexOf('--keep-output'); - if (keepOutputIndex > -1) { - args.splice(keepOutputIndex, 1); - } - console.log(`Keeping output for test run in: ${runDir}`); - } - - const verbose = args.includes('--verbose'); - if (verbose) { - const verboseIndex = args.indexOf('--verbose'); - if (verboseIndex > -1) { - args.splice(verboseIndex, 1); - } - } - - const testPatterns = - args.length > 0 - ? args.map((arg) => `integration-tests/${arg}.test.ts`) - : ['integration-tests/*.test.ts']; - const testFiles = glob.sync(testPatterns, { cwd: rootDir, absolute: true }); - - for (const testFile of testFiles) { - const testFileName = basename(testFile); - console.log(` Found test file: ${testFileName}`); - } - - const MAX_RETRIES = 3; - let allTestsPassed = true; - - for (const testFile of testFiles) { - const testFileName = basename(testFile); - const testFileDir = join(runDir, testFileName); - mkdirSync(testFileDir, { recursive: true }); - - console.log( - `------------- Running test file: ${testFileName} ------------------------------`, - ); - - let attempt = 0; - let testFilePassed = false; - let lastStdout = []; - let lastStderr = []; - - while (attempt < MAX_RETRIES && !testFilePassed) { - attempt++; - if (attempt > 1) { - console.log( - `--- Retrying ${testFileName} (attempt ${attempt} of ${MAX_RETRIES}) ---`, - ); - } - - const nodeArgs = ['--test']; - if (verbose) { - nodeArgs.push('--test-reporter=spec'); - } - nodeArgs.push(testFile); - - const child = spawn('npx', ['tsx', ...nodeArgs], { - stdio: 'pipe', - env: { - ...process.env, - GEMINI_CLI_INTEGRATION_TEST: 'true', - INTEGRATION_TEST_FILE_DIR: testFileDir, - KEEP_OUTPUT: 
keepOutput.toString(), - VERBOSE: verbose.toString(), - TEST_FILE_NAME: testFileName, - TELEMETRY_LOG_FILE: join(testFileDir, 'telemetry.log'), - }, - }); - - let outputStream; - if (keepOutput) { - const outputFile = join(testFileDir, `output-attempt-${attempt}.log`); - outputStream = createWriteStream(outputFile); - console.log(`Output for ${testFileName} written to: ${outputFile}`); - } - - const stdout = []; - const stderr = []; - - child.stdout.on('data', (data) => { - if (verbose) { - process.stdout.write(data); - } else { - stdout.push(data); - } - if (outputStream) { - outputStream.write(data); - } - }); - - child.stderr.on('data', (data) => { - if (verbose) { - process.stderr.write(data); - } else { - stderr.push(data); - } - if (outputStream) { - outputStream.write(data); - } - }); - - const exitCode = await new Promise((resolve) => { - child.on('close', (code) => { - if (outputStream) { - outputStream.end(() => { - resolve(code); - }); - } else { - resolve(code); - } - }); - }); - - if (exitCode === 0) { - testFilePassed = true; - } else { - lastStdout = stdout; - lastStderr = stderr; - } - } - - if (!testFilePassed) { - console.error( - `Test file failed after ${MAX_RETRIES} attempts: ${testFileName}`, - ); - if (!verbose) { - process.stdout.write(Buffer.concat(lastStdout).toString('utf8')); - process.stderr.write(Buffer.concat(lastStderr).toString('utf8')); - } - allTestsPassed = false; - } - } - - if (!keepOutput) { - rmSync(runDir, { recursive: true, force: true }); - } - - if (!allTestsPassed) { - console.error('One or more test files failed.'); - process.exit(1); - } -} - -main(); diff --git a/integration-tests/run_shell_command.test.ts b/integration-tests/run_shell_command.test.ts index 2a5f9ed4..a1aa08ae 100644 --- a/integration-tests/run_shell_command.test.ts +++ b/integration-tests/run_shell_command.test.ts @@ -4,60 +4,67 @@ * SPDX-License-Identifier: Apache-2.0 */ -import { test } from 'node:test'; -import { strict as assert } from 'assert'; 
+import { describe, it, expect } from 'vitest'; import { TestRig, printDebugInfo, validateModelOutput } from './test-helper.js'; -test('should be able to run a shell command', async () => { - const rig = new TestRig(); - await rig.setup('should be able to run a shell command'); +describe('run_shell_command', () => { + it('should be able to run a shell command', async () => { + const rig = new TestRig(); + await rig.setup('should be able to run a shell command'); - const prompt = `Please run the command "echo hello-world" and show me the output`; + const prompt = `Please run the command "echo hello-world" and show me the output`; - const result = await rig.run(prompt); + const result = await rig.run(prompt); - const foundToolCall = await rig.waitForToolCall('run_shell_command'); + const foundToolCall = await rig.waitForToolCall('run_shell_command'); - // Add debugging information - if (!foundToolCall || !result.includes('hello-world')) { - printDebugInfo(rig, result, { - 'Found tool call': foundToolCall, - 'Contains hello-world': result.includes('hello-world'), - }); - } + // Add debugging information + if (!foundToolCall || !result.includes('hello-world')) { + printDebugInfo(rig, result, { + 'Found tool call': foundToolCall, + 'Contains hello-world': result.includes('hello-world'), + }); + } - assert.ok(foundToolCall, 'Expected to find a run_shell_command tool call'); + expect( + foundToolCall, + 'Expected to find a run_shell_command tool call', + ).toBeTruthy(); - // Validate model output - will throw if no output, warn if missing expected content - // Model often reports exit code instead of showing output - validateModelOutput( - result, - ['hello-world', 'exit code 0'], - 'Shell command test', - ); -}); - -test('should be able to run a shell command via stdin', async () => { - const rig = new TestRig(); - await rig.setup('should be able to run a shell command via stdin'); - - const prompt = `Please run the command "echo test-stdin" and show me what it outputs`; 
- - const result = await rig.run({ stdin: prompt }); - - const foundToolCall = await rig.waitForToolCall('run_shell_command'); - - // Add debugging information - if (!foundToolCall || !result.includes('test-stdin')) { - printDebugInfo(rig, result, { - 'Test type': 'Stdin test', - 'Found tool call': foundToolCall, - 'Contains test-stdin': result.includes('test-stdin'), - }); - } - - assert.ok(foundToolCall, 'Expected to find a run_shell_command tool call'); - - // Validate model output - will throw if no output, warn if missing expected content - validateModelOutput(result, 'test-stdin', 'Shell command stdin test'); + // Validate model output - will throw if no output, warn if missing expected content + // Model often reports exit code instead of showing output + validateModelOutput( + result, + ['hello-world', 'exit code 0'], + 'Shell command test', + ); + }); + + it('should be able to run a shell command via stdin', async () => { + const rig = new TestRig(); + await rig.setup('should be able to run a shell command via stdin'); + + const prompt = `Please run the command "echo test-stdin" and show me what it outputs`; + + const result = await rig.run({ stdin: prompt }); + + const foundToolCall = await rig.waitForToolCall('run_shell_command'); + + // Add debugging information + if (!foundToolCall || !result.includes('test-stdin')) { + printDebugInfo(rig, result, { + 'Test type': 'Stdin test', + 'Found tool call': foundToolCall, + 'Contains test-stdin': result.includes('test-stdin'), + }); + } + + expect( + foundToolCall, + 'Expected to find a run_shell_command tool call', + ).toBeTruthy(); + + // Validate model output - will throw if no output, warn if missing expected content + validateModelOutput(result, 'test-stdin', 'Shell command stdin test'); + }); }); diff --git a/integration-tests/save_memory.test.ts b/integration-tests/save_memory.test.ts index 3ec641d4..15b062e9 100644 --- a/integration-tests/save_memory.test.ts +++ b/integration-tests/save_memory.test.ts 
@@ -4,38 +4,42 @@ * SPDX-License-Identifier: Apache-2.0 */ -import { test } from 'node:test'; -import { strict as assert } from 'assert'; +import { describe, it, expect } from 'vitest'; import { TestRig, printDebugInfo, validateModelOutput } from './test-helper.js'; -test('should be able to save to memory', async () => { - const rig = new TestRig(); - await rig.setup('should be able to save to memory'); +describe('save_memory', () => { + it('should be able to save to memory', async () => { + const rig = new TestRig(); + await rig.setup('should be able to save to memory'); - const prompt = `remember that my favorite color is blue. + const prompt = `remember that my favorite color is blue. what is my favorite color? tell me that and surround it with $ symbol`; - const result = await rig.run(prompt); + const result = await rig.run(prompt); - const foundToolCall = await rig.waitForToolCall('save_memory'); + const foundToolCall = await rig.waitForToolCall('save_memory'); - // Add debugging information - if (!foundToolCall || !result.toLowerCase().includes('blue')) { - const allTools = printDebugInfo(rig, result, { - 'Found tool call': foundToolCall, - 'Contains blue': result.toLowerCase().includes('blue'), - }); + // Add debugging information + if (!foundToolCall || !result.toLowerCase().includes('blue')) { + const allTools = printDebugInfo(rig, result, { + 'Found tool call': foundToolCall, + 'Contains blue': result.toLowerCase().includes('blue'), + }); - console.error( - 'Memory tool calls:', - allTools - .filter((t) => t.toolRequest.name === 'save_memory') - .map((t) => t.toolRequest.args), - ); - } + console.error( + 'Memory tool calls:', + allTools + .filter((t) => t.toolRequest.name === 'save_memory') + .map((t) => t.toolRequest.args), + ); + } - assert.ok(foundToolCall, 'Expected to find a save_memory tool call'); + expect( + foundToolCall, + 'Expected to find a save_memory tool call', + ).toBeTruthy(); - // Validate model output - will throw if no output, warn if 
missing expected content - validateModelOutput(result, 'blue', 'Save memory test'); + // Validate model output - will throw if no output, warn if missing expected content + validateModelOutput(result, 'blue', 'Save memory test'); + }); }); diff --git a/integration-tests/simple-mcp-server.test.ts b/integration-tests/simple-mcp-server.test.ts index c4191078..98c81f16 100644 --- a/integration-tests/simple-mcp-server.test.ts +++ b/integration-tests/simple-mcp-server.test.ts @@ -10,8 +10,7 @@ * external dependencies, making it compatible with Docker sandbox mode. */ -import { test, describe, before } from 'node:test'; -import { strict as assert } from 'node:assert'; +import { describe, it, beforeAll, expect } from 'vitest'; import { TestRig, validateModelOutput } from './test-helper.js'; import { join } from 'path'; import { writeFileSync } from 'fs'; @@ -168,7 +167,7 @@ rpc.send({ describe('simple-mcp-server', () => { const rig = new TestRig(); - before(async () => { + beforeAll(async () => { // Setup test directory with MCP server configuration await rig.setup('simple-mcp-server', { settings: { @@ -192,17 +191,20 @@ describe('simple-mcp-server', () => { } }); - test('should add two numbers', async () => { + it('should add two numbers', async () => { // Test directory is already set up in before hook // Just run the command - MCP server config is in settings.json const output = await rig.run('add 5 and 10'); const foundToolCall = await rig.waitForToolCall('add'); - assert.ok(foundToolCall, 'Expected to find an add tool call'); + expect(foundToolCall, 'Expected to find an add tool call').toBeTruthy(); // Validate model output - will throw if no output, fail if missing expected content validateModelOutput(output, '15', 'MCP server test'); - assert.ok(output.includes('15'), 'Expected output to contain the sum (15)'); + expect( + output.includes('15'), + 'Expected output to contain the sum (15)', + ).toBeTruthy(); }); }); diff --git a/integration-tests/test-helper.ts 
b/integration-tests/test-helper.ts index 2bd067b4..4f62a093 100644 --- a/integration-tests/test-helper.ts +++ b/integration-tests/test-helper.ts @@ -10,7 +10,7 @@ import { mkdirSync, writeFileSync, readFileSync } from 'fs'; import { join, dirname } from 'path'; import { fileURLToPath } from 'url'; import { env } from 'process'; -import { fileExists } from '../scripts/telemetry_utils.js'; +import fs from 'fs'; const __dirname = dirname(fileURLToPath(import.meta.url)); @@ -297,15 +297,12 @@ export class TestRig { } readFile(fileName: string) { - const content = readFileSync(join(this.testDir!, fileName), 'utf-8'); + const filePath = join(this.testDir!, fileName); + const content = readFileSync(filePath, 'utf-8'); if (env.KEEP_OUTPUT === 'true' || env.VERBOSE === 'true') { - const testId = `${env.TEST_FILE_NAME!.replace( - '.test.js', - '', - )}:${this.testName!.replace(/ /g, '-')}`; - console.log(`--- FILE: ${testId}/${fileName} ---`); + console.log(`--- FILE: ${filePath} ---`); console.log(content); - console.log(`--- END FILE: ${testId}/${fileName} ---`); + console.log(`--- END FILE: ${filePath} ---`); } return content; } @@ -336,7 +333,7 @@ export class TestRig { // Wait for telemetry file to exist and have content await this.poll( () => { - if (!fileExists(logFilePath)) return false; + if (!fs.existsSync(logFilePath)) return false; try { const content = readFileSync(logFilePath, 'utf-8'); // Check if file has meaningful content (at least one complete JSON object) @@ -547,7 +544,7 @@ export class TestRig { // Try reading from file first const logFilePath = join(this.testDir!, 'telemetry.log'); - if (fileExists(logFilePath)) { + if (fs.existsSync(logFilePath)) { try { const content = readFileSync(logFilePath, 'utf-8'); if (content && content.includes('"event.name"')) { @@ -581,7 +578,7 @@ export class TestRig { } // Check if file exists, if not return empty array (file might not be created yet) - if (!fileExists(logFilePath)) { + if (!fs.existsSync(logFilePath)) { 
return []; } diff --git a/integration-tests/vitest.config.ts b/integration-tests/vitest.config.ts new file mode 100644 index 00000000..e0c6b848 --- /dev/null +++ b/integration-tests/vitest.config.ts @@ -0,0 +1,18 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { defineConfig } from 'vitest/config'; + +export default defineConfig({ + test: { + testTimeout: 300000, // 5 minutes + globalSetup: './globalSetup.ts', + reporters: ['default'], + include: ['**/*.test.ts'], + retry: 2, + fileParallelism: false, + }, +}); diff --git a/integration-tests/web_search.test.ts b/integration-tests/web_search.test.ts index 957691ef..0a4e4351 100644 --- a/integration-tests/web_search.test.ts +++ b/integration-tests/web_search.test.ts @@ -4,78 +4,80 @@ * SPDX-License-Identifier: Apache-2.0 */ -import { test } from 'node:test'; -import { strict as assert } from 'assert'; +import { describe, it, expect } from 'vitest'; import { TestRig, printDebugInfo, validateModelOutput } from './test-helper.js'; -test('should be able to search the web', async () => { - // Skip if Tavily key is not configured - if (!process.env.TAVILY_API_KEY) { - console.warn('Skipping web search test: TAVILY_API_KEY not set'); - return; - } - const rig = new TestRig(); - await rig.setup('should be able to search the web'); - - let result; - try { - result = await rig.run(`what is the weather in London`); - } catch (error) { - // Network errors can occur in CI environments - if ( - error instanceof Error && - (error.message.includes('network') || error.message.includes('timeout')) - ) { - console.warn( - 'Skipping test due to network error:', - (error as Error).message, - ); - return; // Skip the test +describe('web_search', () => { + it('should be able to search the web', async () => { + // Skip if Tavily key is not configured + if (!process.env.TAVILY_API_KEY) { + console.warn('Skipping web search test: TAVILY_API_KEY not set'); + return; } - throw error; // 
Re-throw if not a network error - } - const foundToolCall = await rig.waitForToolCall('web_search'); + const rig = new TestRig(); + await rig.setup('should be able to search the web'); - // Add debugging information - if (!foundToolCall) { - const allTools = printDebugInfo(rig, result); + let result; + try { + result = await rig.run(`what is the weather in London`); + } catch (error) { + // Network errors can occur in CI environments + if ( + error instanceof Error && + (error.message.includes('network') || error.message.includes('timeout')) + ) { + console.warn( + 'Skipping test due to network error:', + (error as Error).message, + ); + return; // Skip the test + } + throw error; // Re-throw if not a network error + } - // Check if the tool call failed due to network issues - const failedSearchCalls = allTools.filter( - (t) => t.toolRequest.name === 'web_search' && !t.toolRequest.success, + const foundToolCall = await rig.waitForToolCall('web_search'); + + // Add debugging information + if (!foundToolCall) { + const allTools = printDebugInfo(rig, result); + + // Check if the tool call failed due to network issues + const failedSearchCalls = allTools.filter( + (t) => t.toolRequest.name === 'web_search' && !t.toolRequest.success, + ); + if (failedSearchCalls.length > 0) { + console.warn( + 'web_search tool was called but failed, possibly due to network issues', + ); + console.warn( + 'Failed calls:', + failedSearchCalls.map((t) => t.toolRequest.args), + ); + return; // Skip the test if network issues + } + } + + expect(foundToolCall, 'Expected to find a call to web_search').toBeTruthy(); + + // Validate model output - will throw if no output, warn if missing expected content + const hasExpectedContent = validateModelOutput( + result, + ['weather', 'london'], + 'Web search test', ); - if (failedSearchCalls.length > 0) { - console.warn( - 'web_search tool was called but failed, possibly due to network issues', - ); - console.warn( - 'Failed calls:', - 
failedSearchCalls.map((t) => t.toolRequest.args), - ); - return; // Skip the test if network issues + + // If content was missing, log the search queries used + if (!hasExpectedContent) { + const searchCalls = rig + .readToolLogs() + .filter((t) => t.toolRequest.name === 'web_search'); + if (searchCalls.length > 0) { + console.warn( + 'Search queries used:', + searchCalls.map((t) => t.toolRequest.args), + ); + } } - } - - assert.ok(foundToolCall, 'Expected to find a call to web_search'); - - // Validate model output - will throw if no output, warn if missing expected content - const hasExpectedContent = validateModelOutput( - result, - ['weather', 'london'], - 'Web search test', - ); - - // If content was missing, log the search queries used - if (!hasExpectedContent) { - const searchCalls = rig - .readToolLogs() - .filter((t) => t.toolRequest.name === 'web_search'); - if (searchCalls.length > 0) { - console.warn( - 'Search queries used:', - searchCalls.map((t) => t.toolRequest.args), - ); - } - } + }); }); diff --git a/integration-tests/write_file.test.ts b/integration-tests/write_file.test.ts index 7809161e..3fe26af6 100644 --- a/integration-tests/write_file.test.ts +++ b/integration-tests/write_file.test.ts @@ -4,8 +4,7 @@ * SPDX-License-Identifier: Apache-2.0 */ -import { test } from 'node:test'; -import { strict as assert } from 'assert'; +import { describe, it, expect } from 'vitest'; import { TestRig, createToolCallErrorMessage, @@ -13,56 +12,57 @@ import { validateModelOutput, } from './test-helper.js'; -test('should be able to write a file', async () => { - const rig = new TestRig(); - await rig.setup('should be able to write a file'); - const prompt = `show me an example of using the write tool. put a dad joke in dad.txt`; +describe('write_file', () => { + it('should be able to write a file', async () => { + const rig = new TestRig(); + await rig.setup('should be able to write a file'); + const prompt = `show me an example of using the write tool. 
put a dad joke in dad.txt`; - const result = await rig.run(prompt); + const result = await rig.run(prompt); - const foundToolCall = await rig.waitForToolCall('write_file'); + const foundToolCall = await rig.waitForToolCall('write_file'); - // Add debugging information - if (!foundToolCall) { - printDebugInfo(rig, result); - } + // Add debugging information + if (!foundToolCall) { + printDebugInfo(rig, result); + } - const allTools = rig.readToolLogs(); - assert.ok( - foundToolCall, - createToolCallErrorMessage( - 'write_file', - allTools.map((t) => t.toolRequest.name), - result, - ), - ); - - // Validate model output - will throw if no output, warn if missing expected content - validateModelOutput(result, 'dad.txt', 'Write file test'); - - const newFilePath = 'dad.txt'; - - const newFileContent = rig.readFile(newFilePath); - - // Add debugging for file content - if (newFileContent === '') { - console.error('File was created but is empty'); - console.error( - 'Tool calls:', - rig.readToolLogs().map((t) => ({ - name: t.toolRequest.name, - args: t.toolRequest.args, - })), + const allTools = rig.readToolLogs(); + expect(foundToolCall, 'Expected to find a write_file tool call').toBeTruthy( + createToolCallErrorMessage( + 'write_file', + allTools.map((t) => t.toolRequest.name), + result, + ), ); - } - assert.notEqual(newFileContent, '', 'Expected file to have content'); + // Validate model output - will throw if no output, warn if missing expected content + validateModelOutput(result, 'dad.txt', 'Write file test'); - // Log success info if verbose - if (process.env.VERBOSE === 'true') { - console.log( - 'File created successfully with content:', - newFileContent.substring(0, 100) + '...', - ); - } + const newFilePath = 'dad.txt'; + + const newFileContent = rig.readFile(newFilePath); + + // Add debugging for file content + if (newFileContent === '') { + console.error('File was created but is empty'); + console.error( + 'Tool calls:', + rig.readToolLogs().map((t) => ({ + 
name: t.toolRequest.name, + args: t.toolRequest.args, + })), + ); + } + + expect(newFileContent).not.toBe(''); + + // Log success info if verbose + if (process.env.VERBOSE === 'true') { + console.log( + 'File created successfully with content:', + newFileContent.substring(0, 100) + '...', + ); + } + }); }); diff --git a/package-lock.json b/package-lock.json index ef109c01..6702a654 100644 --- a/package-lock.json +++ b/package-lock.json @@ -10,6 +10,9 @@ "workspaces": [ "packages/*" ], + "dependencies": { + "node-fetch": "^3.3.2" + }, "bin": { "qwen": "bundle/gemini.js" }, @@ -39,6 +42,7 @@ "memfs": "^4.17.2", "mnemonist": "^0.40.3", "mock-fs": "^5.5.0", + "msw": "^2.10.4", "prettier": "^3.5.3", "react-devtools-core": "^4.28.5", "tsx": "^4.20.3", @@ -204,6 +208,53 @@ "node": ">=18" } }, + "node_modules/@bundled-es-modules/cookie": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/@bundled-es-modules/cookie/-/cookie-2.0.1.tgz", + "integrity": "sha512-8o+5fRPLNbjbdGRRmJj3h6Hh1AQJf2dk3qQ/5ZFb+PXkRNiSoMGGUKlsgLfrxneb72axVJyIYji64E2+nNfYyw==", + "dev": true, + "license": "ISC", + "dependencies": { + "cookie": "^0.7.2" + } + }, + "node_modules/@bundled-es-modules/statuses": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@bundled-es-modules/statuses/-/statuses-1.0.1.tgz", + "integrity": "sha512-yn7BklA5acgcBr+7w064fGV+SGIFySjCKpqjcWgBAIfrAkY+4GQTJJHQMeT3V/sgz23VTEVV8TtOmkvJAhFVfg==", + "dev": true, + "license": "ISC", + "dependencies": { + "statuses": "^2.0.1" + } + }, + "node_modules/@bundled-es-modules/tough-cookie": { + "version": "0.1.6", + "resolved": "https://registry.npmjs.org/@bundled-es-modules/tough-cookie/-/tough-cookie-0.1.6.tgz", + "integrity": "sha512-dvMHbL464C0zI+Yqxbz6kZ5TOEp7GLW+pry/RWndAR8MJQAXZ2rPmIs8tziTZjeIyhSNZgZbCePtfSbdWqStJw==", + "dev": true, + "license": "ISC", + "dependencies": { + "@types/tough-cookie": "^4.0.5", + "tough-cookie": "^4.1.4" + } + }, + 
"node_modules/@bundled-es-modules/tough-cookie/node_modules/tough-cookie": { + "version": "4.1.4", + "resolved": "https://registry.npmjs.org/tough-cookie/-/tough-cookie-4.1.4.tgz", + "integrity": "sha512-Loo5UUvLD9ScZ6jh8beX1T6sO1w2/MpCRpEP7V280GKMVUQ0Jzar2U3UJPsrdbziLEMMhu3Ujnq//rhiFuIeag==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "psl": "^1.1.33", + "punycode": "^2.1.1", + "universalify": "^0.2.0", + "url-parse": "^1.5.3" + }, + "engines": { + "node": ">=6" + } + }, "node_modules/@csstools/color-helpers": { "version": "5.0.2", "resolved": "https://registry.npmjs.org/@csstools/color-helpers/-/color-helpers-5.0.2.tgz", @@ -1031,6 +1082,173 @@ "integrity": "sha512-trnsAYxU3xnS1gPHPyU961coFyLkh4gAD/0zQ5mymY4yOZ+CYvsPqUbOFSw0aDM4y0tV7tiFxL/1XfXPNC6IPg==", "license": "ISC" }, + "node_modules/@inquirer/confirm": { + "version": "5.1.14", + "resolved": "https://registry.npmjs.org/@inquirer/confirm/-/confirm-5.1.14.tgz", + "integrity": "sha512-5yR4IBfe0kXe59r1YCTG8WXkUbl7Z35HK87Sw+WUyGD8wNUx7JvY7laahzeytyE1oLn74bQnL7hstctQxisQ8Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "@inquirer/core": "^10.1.15", + "@inquirer/type": "^3.0.8" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "@types/node": ">=18" + }, + "peerDependenciesMeta": { + "@types/node": { + "optional": true + } + } + }, + "node_modules/@inquirer/core": { + "version": "10.1.15", + "resolved": "https://registry.npmjs.org/@inquirer/core/-/core-10.1.15.tgz", + "integrity": "sha512-8xrp836RZvKkpNbVvgWUlxjT4CraKk2q+I3Ksy+seI2zkcE+y6wNs1BVhgcv8VyImFecUhdQrYLdW32pAjwBdA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@inquirer/figures": "^1.0.13", + "@inquirer/type": "^3.0.8", + "ansi-escapes": "^4.3.2", + "cli-width": "^4.1.0", + "mute-stream": "^2.0.0", + "signal-exit": "^4.1.0", + "wrap-ansi": "^6.2.0", + "yoctocolors-cjs": "^2.1.2" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "@types/node": ">=18" + }, + 
"peerDependenciesMeta": { + "@types/node": { + "optional": true + } + } + }, + "node_modules/@inquirer/core/node_modules/ansi-escapes": { + "version": "4.3.2", + "resolved": "https://registry.npmjs.org/ansi-escapes/-/ansi-escapes-4.3.2.tgz", + "integrity": "sha512-gKXj5ALrKWQLsYG9jlTRmR/xKluxHV+Z9QEwNIgCfM1/uwPMCuzVVnh5mwTd+OuBZcwSIMbqssNWRm1lE51QaQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "type-fest": "^0.21.3" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/@inquirer/core/node_modules/ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/@inquirer/core/node_modules/emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", + "dev": true, + "license": "MIT" + }, + "node_modules/@inquirer/core/node_modules/string-width": { + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "dev": true, + "license": "MIT", + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/@inquirer/core/node_modules/strip-ansi": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "dev": true, + "license": "MIT", + 
"dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/@inquirer/core/node_modules/type-fest": { + "version": "0.21.3", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.21.3.tgz", + "integrity": "sha512-t0rzBq87m3fVcduHDUFhKmyyX+9eo6WQjZvf51Ea/M0Q7+T374Jp1aUiyUl0GKxp8M/OETVHSDvmkyPgvX+X2w==", + "dev": true, + "license": "(MIT OR CC0-1.0)", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/@inquirer/core/node_modules/wrap-ansi": { + "version": "6.2.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-6.2.0.tgz", + "integrity": "sha512-r6lPcBGxZXlIcymEu7InxDMhdW0KDxpLgoFLcguasxCaJ/SOIZwINatK9KY/tf+ZrlywOKU0UDj3ATXUBfxJXA==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-styles": "^4.0.0", + "string-width": "^4.1.0", + "strip-ansi": "^6.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/@inquirer/figures": { + "version": "1.0.13", + "resolved": "https://registry.npmjs.org/@inquirer/figures/-/figures-1.0.13.tgz", + "integrity": "sha512-lGPVU3yO9ZNqA7vTYz26jny41lE7yoQansmqdMLBEfqaGsmdg7V3W9mK9Pvb5IL4EVZ9GnSDGMO/cJXud5dMaw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=18" + } + }, + "node_modules/@inquirer/type": { + "version": "3.0.8", + "resolved": "https://registry.npmjs.org/@inquirer/type/-/type-3.0.8.tgz", + "integrity": "sha512-lg9Whz8onIHRthWaN1Q9EGLa/0LFJjyM8mEUbL1eTi6yMGvBf8gvyDLtxSXztQsxMvhxxNpJYrwa1YHdq+w4Jw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "@types/node": ">=18" + }, + "peerDependenciesMeta": { + "@types/node": { + "optional": true + } + } + }, "node_modules/@isaacs/cliui": { "version": "8.0.2", "resolved": "https://registry.npmjs.org/@isaacs/cliui/-/cliui-8.0.2.tgz", @@ -1229,6 +1447,24 @@ "node": ">=18" } }, + "node_modules/@mswjs/interceptors": { + "version": "0.39.5", + 
"resolved": "https://registry.npmjs.org/@mswjs/interceptors/-/interceptors-0.39.5.tgz", + "integrity": "sha512-B9nHSJYtsv79uo7QdkZ/b/WoKm20IkVSmTc/WCKarmDtFwM0dRx2ouEniqwNkzCSLn3fydzKmnMzjtfdOWt3VQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@open-draft/deferred-promise": "^2.2.0", + "@open-draft/logger": "^0.3.0", + "@open-draft/until": "^2.0.0", + "is-node-process": "^1.2.0", + "outvariant": "^1.4.3", + "strict-event-emitter": "^0.5.1" + }, + "engines": { + "node": ">=18" + } + }, "node_modules/@nodelib/fs.scandir": { "version": "2.1.5", "resolved": "https://registry.npmjs.org/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz", @@ -1267,6 +1503,31 @@ "node": ">= 8" } }, + "node_modules/@open-draft/deferred-promise": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/@open-draft/deferred-promise/-/deferred-promise-2.2.0.tgz", + "integrity": "sha512-CecwLWx3rhxVQF6V4bAgPS5t+So2sTbPgAzafKkVizyi7tlwpcFpdFqq+wqF2OwNBmqFuu6tOyouTuxgpMfzmA==", + "dev": true, + "license": "MIT" + }, + "node_modules/@open-draft/logger": { + "version": "0.3.0", + "resolved": "https://registry.npmjs.org/@open-draft/logger/-/logger-0.3.0.tgz", + "integrity": "sha512-X2g45fzhxH238HKO4xbSr7+wBS8Fvw6ixhTDuvLd5mqh6bJJCFAPwU9mPDxbcrRtfxv4u5IHCEH77BmxvXmmxQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-node-process": "^1.2.0", + "outvariant": "^1.4.0" + } + }, + "node_modules/@open-draft/until": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/@open-draft/until/-/until-2.1.0.tgz", + "integrity": "sha512-U69T3ItWHvLwGg5eJ0n3I62nWuE6ilHlmz7zM0npLBRvPRd7e6NYmg54vvRtP5mZG7kZqZCFVdsTWo7BPtBujg==", + "dev": true, + "license": "MIT" + }, "node_modules/@opentelemetry/api": { "version": "1.9.0", "resolved": "https://registry.npmjs.org/@opentelemetry/api/-/api-1.9.0.tgz", @@ -2226,6 +2487,13 @@ "@types/node": "*" } }, + "node_modules/@types/cookie": { + "version": "0.6.0", + "resolved": 
"https://registry.npmjs.org/@types/cookie/-/cookie-0.6.0.tgz", + "integrity": "sha512-4Kh9a6B2bQciAhf7FSuMRRkUWecJgJu9nPnx3yzpsfXX/c50REIqpHY4C82bXP90qrLtXtkDxTZosYO3UpOwlA==", + "dev": true, + "license": "MIT" + }, "node_modules/@types/cors": { "version": "2.8.19", "resolved": "https://registry.npmjs.org/@types/cors/-/cors-2.8.19.tgz", @@ -2491,12 +2759,26 @@ "integrity": "sha512-UE7oxhQLLd9gub6JKIAhDq06T0F6FnztwMNRvYgjeQSBeMc1ZG/tA47EwfduvkuQS8apbkM/lpLpWsaCeYsXVg==", "license": "MIT" }, + "node_modules/@types/statuses": { + "version": "2.0.6", + "resolved": "https://registry.npmjs.org/@types/statuses/-/statuses-2.0.6.tgz", + "integrity": "sha512-xMAgYwceFhRA2zY+XbEA7mxYbA093wdiW8Vu6gZPGWy9cmOyU9XesH1tNcEWsKFd5Vzrqx5T3D38PWx1FIIXkA==", + "dev": true, + "license": "MIT" + }, "node_modules/@types/tinycolor2": { "version": "1.4.6", "resolved": "https://registry.npmjs.org/@types/tinycolor2/-/tinycolor2-1.4.6.tgz", "integrity": "sha512-iEN8J0BoMnsWBqjVbWH/c0G0Hh7O21lpR2/+PrvAVgWdzL7eexIFm4JN/Wn10PTcmNdtS6U67r499mlWMXOxNw==", "license": "MIT" }, + "node_modules/@types/tough-cookie": { + "version": "4.0.5", + "resolved": "https://registry.npmjs.org/@types/tough-cookie/-/tough-cookie-4.0.5.tgz", + "integrity": "sha512-/Ad8+nIOV7Rl++6f1BdKxFSMgmoqEoYbHRpPcx3JEfv8VRsQe9Z4mCXeJBzxs7mbHY/XOZZuXlRNfhpVPbs6ZA==", + "dev": true, + "license": "MIT" + }, "node_modules/@types/unist": { "version": "3.0.3", "resolved": "https://registry.npmjs.org/@types/unist/-/unist-3.0.3.tgz", @@ -3785,6 +4067,16 @@ "url": "https://github.com/sponsors/sindresorhus" } }, + "node_modules/cli-width": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/cli-width/-/cli-width-4.1.0.tgz", + "integrity": "sha512-ouuZd4/dm2Sw5Gmqy6bGyNNNe1qt9RpmxveLSO7KcgsTnU7RXfsw+/bukWGo1abgBiMAic068rclZsO4IWmmxQ==", + "dev": true, + "license": "ISC", + "engines": { + "node": ">= 12" + } + }, "node_modules/cliui": { "version": "8.0.1", "resolved": "https://registry.npmjs.org/cliui/-/cliui-8.0.1.tgz", @@ 
-4075,6 +4367,15 @@ "devOptional": true, "license": "MIT" }, + "node_modules/data-uri-to-buffer": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/data-uri-to-buffer/-/data-uri-to-buffer-4.0.1.tgz", + "integrity": "sha512-0R9ikRb668HB7QDxT1vkpuUBtqc53YyAwMwGeUFKRojY/NWKvdZ+9UYtRfGmhqNbRkTSVpMbmyhXipFFv2cb/A==", + "license": "MIT", + "engines": { + "node": ">= 12" + } + }, "node_modules/data-urls": { "version": "5.0.0", "resolved": "https://registry.npmjs.org/data-urls/-/data-urls-5.0.0.tgz", @@ -5329,6 +5630,29 @@ "reusify": "^1.0.4" } }, + "node_modules/fetch-blob": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/fetch-blob/-/fetch-blob-3.2.0.tgz", + "integrity": "sha512-7yAQpD2UMJzLi1Dqv7qFYnPbaPx7ZfFK6PiIxQ4PfkGPyNyl2Ugx+a/umUonmKqjhM4DnfbMvdX6otXq83soQQ==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/jimmywarting" + }, + { + "type": "paypal", + "url": "https://paypal.me/jimmywarting" + } + ], + "license": "MIT", + "dependencies": { + "node-domexception": "^1.0.0", + "web-streams-polyfill": "^3.0.3" + }, + "engines": { + "node": "^12.20 || >= 14.13" + } + }, "node_modules/figures": { "version": "6.1.0", "resolved": "https://registry.npmjs.org/figures/-/figures-6.1.0.tgz", @@ -5468,6 +5792,18 @@ "url": "https://github.com/sponsors/isaacs" } }, + "node_modules/formdata-polyfill": { + "version": "4.0.10", + "resolved": "https://registry.npmjs.org/formdata-polyfill/-/formdata-polyfill-4.0.10.tgz", + "integrity": "sha512-buewHzMvYL29jdeQTVILecSaZKnt/RJWjoZCF5OW60Z67/GmSLBkOFM7qh1PI3zFNtJbaZL5eQu1vLfazOwj4g==", + "license": "MIT", + "dependencies": { + "fetch-blob": "^3.1.2" + }, + "engines": { + "node": ">=12.20.0" + } + }, "node_modules/forwarded": { "version": "0.2.0", "resolved": "https://registry.npmjs.org/forwarded/-/forwarded-0.2.0.tgz", @@ -5932,6 +6268,16 @@ "dev": true, "license": "MIT" }, + "node_modules/graphql": { + "version": "16.11.0", + "resolved": 
"https://registry.npmjs.org/graphql/-/graphql-16.11.0.tgz", + "integrity": "sha512-mS1lbMsxgQj6hge1XZ6p7GPhbrtFwUFYi3wRzXAC/FmYnyXMTvvI3td3rjmQ2u8ewXueaSvRPWaEcgVVOT9Jnw==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^12.22.0 || ^14.16.0 || ^16.0.0 || >=17.0.0" + } + }, "node_modules/gtoken": { "version": "7.1.0", "resolved": "https://registry.npmjs.org/gtoken/-/gtoken-7.1.0.tgz", @@ -6094,6 +6440,13 @@ "node": ">= 0.4" } }, + "node_modules/headers-polyfill": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/headers-polyfill/-/headers-polyfill-4.0.3.tgz", + "integrity": "sha512-IScLbePpkvO846sIwOtOTDjutRMWdXdJmXdMvk6gCBHxFO8d+QKOQedyZSxFTTFYRSmlgSTDtXqqq4pcenBXLQ==", + "dev": true, + "license": "MIT" + }, "node_modules/highlight.js": { "version": "11.11.1", "resolved": "https://registry.npmjs.org/highlight.js/-/highlight.js-11.11.1.tgz", @@ -6333,9 +6686,9 @@ } }, "node_modules/ink": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/ink/-/ink-6.0.1.tgz", - "integrity": "sha512-vhhFrCodTHZAPPSdMYzLEbeI0Ug37R9j6yA0kLKok9kSK53lQtj/RJhEQJUjq6OwT4N33nxqSRd/7yXhEhVPIw==", + "version": "6.1.1", + "resolved": "https://registry.npmjs.org/ink/-/ink-6.1.1.tgz", + "integrity": "sha512-Bqw78FX+1TSIGxs6bdvohgoy6mTfqjFJVNyYzXn8HIyZyVmwLX8XdnhUtUwyaelLCqLz8uuFseCbomRZWjyo5g==", "license": "MIT", "dependencies": { "@alcalzone/ansi-tokenize": "^0.1.3", @@ -6953,6 +7306,13 @@ "url": "https://github.com/sponsors/ljharb" } }, + "node_modules/is-node-process": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/is-node-process/-/is-node-process-1.2.0.tgz", + "integrity": "sha512-Vg4o6/fqPxIjtxgUH5QLJhwZ7gW5diGCVlXpuUfELC62CuxM1iHcRe51f2W1FDy04Ai4KJkagKjx3XaqyfRKXw==", + "dev": true, + "license": "MIT" + }, "node_modules/is-npm": { "version": "6.0.0", "resolved": "https://registry.npmjs.org/is-npm/-/is-npm-6.0.0.tgz", @@ -7917,6 +8277,81 @@ "integrity": 
"sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", "license": "MIT" }, + "node_modules/msw": { + "version": "2.10.4", + "resolved": "https://registry.npmjs.org/msw/-/msw-2.10.4.tgz", + "integrity": "sha512-6R1or/qyele7q3RyPwNuvc0IxO8L8/Aim6Sz5ncXEgcWUNxSKE+udriTOWHtpMwmfkLYlacA2y7TIx4cL5lgHA==", + "dev": true, + "hasInstallScript": true, + "license": "MIT", + "dependencies": { + "@bundled-es-modules/cookie": "^2.0.1", + "@bundled-es-modules/statuses": "^1.0.1", + "@bundled-es-modules/tough-cookie": "^0.1.6", + "@inquirer/confirm": "^5.0.0", + "@mswjs/interceptors": "^0.39.1", + "@open-draft/deferred-promise": "^2.2.0", + "@open-draft/until": "^2.1.0", + "@types/cookie": "^0.6.0", + "@types/statuses": "^2.0.4", + "graphql": "^16.8.1", + "headers-polyfill": "^4.0.2", + "is-node-process": "^1.2.0", + "outvariant": "^1.4.3", + "path-to-regexp": "^6.3.0", + "picocolors": "^1.1.1", + "strict-event-emitter": "^0.5.1", + "type-fest": "^4.26.1", + "yargs": "^17.7.2" + }, + "bin": { + "msw": "cli/index.js" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/mswjs" + }, + "peerDependencies": { + "typescript": ">= 4.8.x" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/msw/node_modules/path-to-regexp": { + "version": "6.3.0", + "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-6.3.0.tgz", + "integrity": "sha512-Yhpw4T9C6hPpgPeA28us07OJeqZ5EzQTkbfwuhsUg0c237RomFoETJgmp2sa3F/41gfLE6G5cqcYwznmeEeOlQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/msw/node_modules/type-fest": { + "version": "4.41.0", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-4.41.0.tgz", + "integrity": "sha512-TeTSQ6H5YHvpqVwBRcnLDCBnDOHWYu7IvGbHT6N8AOymcr9PJGjc1GTtiWZTYg0NCgYwvnYWEkVChQAr9bjfwA==", + "dev": true, + "license": "(MIT OR CC0-1.0)", + "engines": { + "node": ">=16" + }, + "funding": { + "url": 
"https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/mute-stream": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/mute-stream/-/mute-stream-2.0.0.tgz", + "integrity": "sha512-WWdIxpyjEn+FhQJQQv9aQAYlHoNVdzIzUySNV1gHUPDSdZJ3yZn7pAAbQcV7B56Mvu881q9FZV+0Vx2xC44VWA==", + "dev": true, + "license": "ISC", + "engines": { + "node": "^18.17.0 || >=20.5.0" + } + }, "node_modules/nanoid": { "version": "3.3.11", "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.11.tgz", @@ -7959,6 +8394,44 @@ "dev": true, "license": "MIT" }, + "node_modules/node-domexception": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/node-domexception/-/node-domexception-1.0.0.tgz", + "integrity": "sha512-/jKZoMpw0F8GRwl4/eLROPA3cfcXtLApP0QzLmUT/HuPCZWyB7IY9ZrMeKw2O/nFIqPQB3PVM9aYm0F312AXDQ==", + "deprecated": "Use your platform's native DOMException instead", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/jimmywarting" + }, + { + "type": "github", + "url": "https://paypal.me/jimmywarting" + } + ], + "license": "MIT", + "engines": { + "node": ">=10.5.0" + } + }, + "node_modules/node-fetch": { + "version": "3.3.2", + "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-3.3.2.tgz", + "integrity": "sha512-dRB78srN/l6gqWulah9SrxeYnxeddIG30+GOqK/9OlLVyLg3HPnr6SqOWTWOXKRwC2eGYCkZ59NNuSgvSrpgOA==", + "license": "MIT", + "dependencies": { + "data-uri-to-buffer": "^4.0.0", + "fetch-blob": "^3.1.4", + "formdata-polyfill": "^4.0.10" + }, + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/node-fetch" + } + }, "node_modules/normalize-package-data": { "version": "6.0.2", "resolved": "https://registry.npmjs.org/normalize-package-data/-/normalize-package-data-6.0.2.tgz", @@ -8403,6 +8876,13 @@ "node": ">= 0.8.0" } }, + "node_modules/outvariant": { + "version": "1.4.3", + "resolved": 
"https://registry.npmjs.org/outvariant/-/outvariant-1.4.3.tgz", + "integrity": "sha512-+Sl2UErvtsoajRDKCE5/dBz4DIvHXQQnAxtQTF04OJxY0+DyZXSo5P5Bb7XYWOh81syohlYL24hbDwxedPUJCA==", + "dev": true, + "license": "MIT" + }, "node_modules/own-keys": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/own-keys/-/own-keys-1.0.1.tgz", @@ -8881,6 +9361,19 @@ "node": ">= 0.10" } }, + "node_modules/psl": { + "version": "1.15.0", + "resolved": "https://registry.npmjs.org/psl/-/psl-1.15.0.tgz", + "integrity": "sha512-JZd3gMVBAVQkSs6HdNZo9Sdo0LNcQeMNP3CozBJb3JYC/QUYZTnKxP+f8oWRX4rHP5EurWxqAHTSwUCjlNKa1w==", + "dev": true, + "license": "MIT", + "dependencies": { + "punycode": "^2.3.1" + }, + "funding": { + "url": "https://github.com/sponsors/lupomontero" + } + }, "node_modules/punycode": { "version": "2.3.1", "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.3.1.tgz", @@ -8928,6 +9421,13 @@ "url": "https://github.com/sponsors/ljharb" } }, + "node_modules/querystringify": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/querystringify/-/querystringify-2.2.0.tgz", + "integrity": "sha512-FIqgj2EUvTa7R50u0rGsyTftzjYmv/a3hO345bZNrqabNqjtgiDMgmo4mkUjd+nzU5oF3dClKqFIPUKybUyqoQ==", + "dev": true, + "license": "MIT" + }, "node_modules/queue-microtask": { "version": "1.2.3", "resolved": "https://registry.npmjs.org/queue-microtask/-/queue-microtask-1.2.3.tgz", @@ -9269,6 +9769,13 @@ "node": ">=0.10.5" } }, + "node_modules/requires-port": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/requires-port/-/requires-port-1.0.0.tgz", + "integrity": "sha512-KigOCHcocU3XODJxsu8i/j8T9tzT4adHiecwORRQ0ZZFcp7ahwXuRU1m+yuO90C5ZUyGeGfocHDI14M3L3yDAQ==", + "dev": true, + "license": "MIT" + }, "node_modules/resolve": { "version": "1.22.10", "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.10.tgz", @@ -9956,6 +10463,13 @@ "node": ">= 0.4" } }, + "node_modules/strict-event-emitter": { + "version": "0.5.1", + "resolved": 
"https://registry.npmjs.org/strict-event-emitter/-/strict-event-emitter-0.5.1.tgz", + "integrity": "sha512-vMgjE/GGEPEFnhFub6pa4FmJBRBVOLpIII2hvCZ8Kzb7K0hlHo7mQv6xYrBvCL2LtAIBwFUK8wvuJgTVSQ5MFQ==", + "dev": true, + "license": "MIT" + }, "node_modules/string-width": { "version": "5.1.2", "resolved": "https://registry.npmjs.org/string-width/-/string-width-5.1.2.tgz", @@ -10835,6 +11349,16 @@ "url": "https://github.com/sponsors/sindresorhus" } }, + "node_modules/universalify": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/universalify/-/universalify-0.2.0.tgz", + "integrity": "sha512-CJ1QgKmNg3CwvAv/kOFmtnEN05f0D/cn9QntgNOQlQF9dgvVTHj3t+8JPdjqawCHk7V/KA+fbUqzZ9XWhcqPUg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 4.0.0" + } + }, "node_modules/unpipe": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/unpipe/-/unpipe-1.0.0.tgz", @@ -11014,6 +11538,17 @@ "punycode": "^2.1.0" } }, + "node_modules/url-parse": { + "version": "1.5.10", + "resolved": "https://registry.npmjs.org/url-parse/-/url-parse-1.5.10.tgz", + "integrity": "sha512-WypcfiRhfeUP9vvF0j6rw0J3hrWrw6iZv3+22h6iRMJ/8z1Tj6XfLP4DsUix5MhMPnXpiHDoKyoZ/bdCkwBCiQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "querystringify": "^2.1.1", + "requires-port": "^1.0.0" + } + }, "node_modules/uuid": { "version": "9.0.1", "resolved": "https://registry.npmjs.org/uuid/-/uuid-9.0.1.tgz", @@ -11271,6 +11806,15 @@ "node": ">=18" } }, + "node_modules/web-streams-polyfill": { + "version": "3.3.3", + "resolved": "https://registry.npmjs.org/web-streams-polyfill/-/web-streams-polyfill-3.3.3.tgz", + "integrity": "sha512-d2JWLCivmZYTSIoge9MsgFCZrt571BikcWGYkjC1khllbTeDlGqZ2D8vD8E/lJa8WGWbb7Plm8/XJYV7IJHZZw==", + "license": "MIT", + "engines": { + "node": ">= 8" + } + }, "node_modules/webidl-conversions": { "version": "7.0.0", "resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-7.0.0.tgz", @@ -11732,6 +12276,19 @@ "url": 
"https://github.com/sponsors/sindresorhus" } }, + "node_modules/yoctocolors-cjs": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/yoctocolors-cjs/-/yoctocolors-cjs-2.1.2.tgz", + "integrity": "sha512-cYVsTjKl8b+FrnidjibDWskAv7UKOfcwaVZdp/it9n1s9fU3IkgDbhdIRKCW4JDsAlECJY0ytoVPT3sK6kideA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, "node_modules/yoga-layout": { "version": "3.2.1", "resolved": "https://registry.npmjs.org/yoga-layout/-/yoga-layout-3.2.1.tgz", @@ -11770,7 +12327,7 @@ "dotenv": "^17.1.0", "glob": "^10.4.1", "highlight.js": "^11.11.1", - "ink": "^6.0.1", + "ink": "^6.1.1", "ink-big-text": "^2.0.0", "ink-gradient": "^3.0.0", "ink-link": "^4.1.0", @@ -11818,27 +12375,6 @@ "node": ">=20" } }, - "packages/cli/node_modules/@google/genai": { - "version": "1.9.0", - "resolved": "https://registry.npmjs.org/@google/genai/-/genai-1.9.0.tgz", - "integrity": "sha512-w9P93OXKPMs9H1mfAx9+p3zJqQGrWBGdvK/SVc7cLZEXNHr/3+vW2eif7ZShA6wU24rNLn9z9MK2vQFUvNRI2Q==", - "license": "Apache-2.0", - "dependencies": { - "google-auth-library": "^9.14.2", - "ws": "^8.18.0" - }, - "engines": { - "node": ">=20.0.0" - }, - "peerDependencies": { - "@modelcontextprotocol/sdk": "^1.11.0" - }, - "peerDependenciesMeta": { - "@modelcontextprotocol/sdk": { - "optional": true - } - } - }, "packages/cli/node_modules/@testing-library/dom": { "version": "10.4.0", "dev": true, @@ -11965,7 +12501,7 @@ "name": "@qwen-code/qwen-code-core", "version": "0.0.7", "dependencies": { - "@google/genai": "1.9.0", + "@google/genai": "1.13.0", "@modelcontextprotocol/sdk": "^1.11.0", "@opentelemetry/api": "^1.9.0", "@opentelemetry/exporter-logs-otlp-grpc": "^0.52.0", @@ -12014,27 +12550,6 @@ "node": ">=20" } }, - "packages/core/node_modules/@google/genai": { - "version": "1.9.0", - "resolved": "https://registry.npmjs.org/@google/genai/-/genai-1.9.0.tgz", - "integrity": 
"sha512-w9P93OXKPMs9H1mfAx9+p3zJqQGrWBGdvK/SVc7cLZEXNHr/3+vW2eif7ZShA6wU24rNLn9z9MK2vQFUvNRI2Q==", - "license": "Apache-2.0", - "dependencies": { - "google-auth-library": "^9.14.2", - "ws": "^8.18.0" - }, - "engines": { - "node": ">=20.0.0" - }, - "peerDependencies": { - "@modelcontextprotocol/sdk": "^1.11.0" - }, - "peerDependenciesMeta": { - "@modelcontextprotocol/sdk": { - "optional": true - } - } - }, "packages/core/node_modules/ajv": { "version": "8.17.1", "resolved": "https://registry.npmjs.org/ajv/-/ajv-8.17.1.tgz", diff --git a/package.json b/package.json index aaf96516..a0dbfcdd 100644 --- a/package.json +++ b/package.json @@ -23,6 +23,7 @@ "auth": "npm run auth:npm && npm run auth:docker", "generate": "node scripts/generate-git-commit-info.js", "build": "node scripts/build.js", + "build-and-start": "npm run build && npm run start", "build:vscode": "node scripts/build_vscode_companion.js", "build:all": "npm run build && npm run build:sandbox && npm run build:vscode", "build:packages": "npm run build --workspaces", @@ -31,11 +32,11 @@ "test": "npm run test --workspaces --if-present", "test:ci": "npm run test:ci --workspaces --if-present && npm run test:scripts", "test:scripts": "vitest run --config ./scripts/tests/vitest.config.ts", - "test:e2e": "npm run test:integration:sandbox:none -- --verbose --keep-output", + "test:e2e": "cross-env VERBOSE=true KEEP_OUTPUT=true npm run test:integration:sandbox:none", "test:integration:all": "npm run test:integration:sandbox:none && npm run test:integration:sandbox:docker && npm run test:integration:sandbox:podman", - "test:integration:sandbox:none": "GEMINI_SANDBOX=false node integration-tests/run-tests.js", - "test:integration:sandbox:docker": "GEMINI_SANDBOX=docker node integration-tests/run-tests.js", - "test:integration:sandbox:podman": "GEMINI_SANDBOX=podman node integration-tests/run-tests.js", + "test:integration:sandbox:none": "GEMINI_SANDBOX=false vitest run --root ./integration-tests", + 
"test:integration:sandbox:docker": "npm run build:sandbox && GEMINI_SANDBOX=docker vitest run --root ./integration-tests", + "test:integration:sandbox:podman": "GEMINI_SANDBOX=podman vitest run --root ./integration-tests", "lint": "eslint . --ext .ts,.tsx && eslint integration-tests", "lint:fix": "eslint . --fix && eslint integration-tests --fix", "lint:ci": "eslint . --ext .ts,.tsx --max-warnings 0 && eslint integration-tests --max-warnings 0", @@ -80,13 +81,17 @@ "json": "^11.0.0", "lodash": "^4.17.21", "memfs": "^4.17.2", + "mnemonist": "^0.40.3", "mock-fs": "^5.5.0", + "msw": "^2.10.4", "prettier": "^3.5.3", "react-devtools-core": "^4.28.5", "tsx": "^4.20.3", "typescript-eslint": "^8.30.1", "vitest": "^3.2.4", - "yargs": "^17.7.2", - "mnemonist": "^0.40.3" + "yargs": "^17.7.2" + }, + "dependencies": { + "node-fetch": "^3.3.2" } } diff --git a/packages/cli/package.json b/packages/cli/package.json index 1247e954..e441ce07 100644 --- a/packages/cli/package.json +++ b/packages/cli/package.json @@ -38,7 +38,7 @@ "dotenv": "^17.1.0", "glob": "^10.4.1", "highlight.js": "^11.11.1", - "ink": "^6.0.1", + "ink": "^6.1.1", "ink-big-text": "^2.0.0", "ink-gradient": "^3.0.0", "ink-link": "^4.1.0", diff --git a/packages/cli/src/acp/acp.ts b/packages/cli/src/acp/acp.ts deleted file mode 100644 index 0a42fdcb..00000000 --- a/packages/cli/src/acp/acp.ts +++ /dev/null @@ -1,464 +0,0 @@ -/** - * @license - * Copyright 2025 Google LLC - * SPDX-License-Identifier: Apache-2.0 - */ - -/* ACP defines a schema for a simple (experimental) JSON-RPC protocol that allows GUI applications to interact with agents. 
*/ - -import { Icon } from '@qwen-code/qwen-code-core'; -import { WritableStream, ReadableStream } from 'node:stream/web'; - -export class ClientConnection implements Client { - #connection: Connection; - - constructor( - agent: (client: Client) => Agent, - input: WritableStream, - output: ReadableStream, - ) { - this.#connection = new Connection(agent(this), input, output); - } - - /** - * Streams part of an assistant response to the client - */ - async streamAssistantMessageChunk( - params: StreamAssistantMessageChunkParams, - ): Promise { - await this.#connection.sendRequest('streamAssistantMessageChunk', params); - } - - /** - * Request confirmation before running a tool - * - * When allowed, the client returns a [`ToolCallId`] which can be used - * to update the tool call's `status` and `content` as it runs. - */ - requestToolCallConfirmation( - params: RequestToolCallConfirmationParams, - ): Promise { - return this.#connection.sendRequest('requestToolCallConfirmation', params); - } - - /** - * pushToolCall allows the agent to start a tool call - * when it does not need to request permission to do so. - * - * The returned id can be used to update the UI for the tool - * call as needed. - */ - pushToolCall(params: PushToolCallParams): Promise { - return this.#connection.sendRequest('pushToolCall', params); - } - - /** - * updateToolCall allows the agent to update the content and status of the tool call. - * - * The new content replaces what is currently displayed in the UI. - * - * The [`ToolCallId`] is included in the response of - * `pushToolCall` or `requestToolCallConfirmation` respectively. 
- */ - async updateToolCall(params: UpdateToolCallParams): Promise { - await this.#connection.sendRequest('updateToolCall', params); - } -} - -type AnyMessage = AnyRequest | AnyResponse; - -type AnyRequest = { - id: number; - method: string; - params?: unknown; -}; - -type AnyResponse = { jsonrpc: '2.0'; id: number } & Result; - -type Result = - | { - result: T; - } - | { - error: ErrorResponse; - }; - -type ErrorResponse = { - code: number; - message: string; - data?: { details?: string }; -}; - -type PendingResponse = { - resolve: (response: unknown) => void; - reject: (error: ErrorResponse) => void; -}; - -class Connection { - #pendingResponses: Map = new Map(); - #nextRequestId: number = 0; - #delegate: D; - #peerInput: WritableStream; - #writeQueue: Promise = Promise.resolve(); - #textEncoder: TextEncoder; - - constructor( - delegate: D, - peerInput: WritableStream, - peerOutput: ReadableStream, - ) { - this.#peerInput = peerInput; - this.#textEncoder = new TextEncoder(); - - this.#delegate = delegate; - this.#receive(peerOutput); - } - - async #receive(output: ReadableStream) { - let content = ''; - const decoder = new TextDecoder(); - for await (const chunk of output) { - content += decoder.decode(chunk, { stream: true }); - const lines = content.split('\n'); - content = lines.pop() || ''; - - for (const line of lines) { - const trimmedLine = line.trim(); - - if (trimmedLine) { - const message = JSON.parse(trimmedLine); - this.#processMessage(message); - } - } - } - } - - async #processMessage(message: AnyMessage) { - if ('method' in message) { - const response = await this.#tryCallDelegateMethod( - message.method, - message.params, - ); - - await this.#sendMessage({ - jsonrpc: '2.0', - id: message.id, - ...response, - }); - } else { - this.#handleResponse(message); - } - } - - async #tryCallDelegateMethod( - method: string, - params?: unknown, - ): Promise> { - const methodName = method as keyof D; - if (typeof this.#delegate[methodName] !== 'function') { - 
return RequestError.methodNotFound(method).toResult(); - } - - try { - const result = await this.#delegate[methodName](params); - return { result: result ?? null }; - } catch (error: unknown) { - if (error instanceof RequestError) { - return error.toResult(); - } - - let details; - - if (error instanceof Error) { - details = error.message; - } else if ( - typeof error === 'object' && - error != null && - 'message' in error && - typeof error.message === 'string' - ) { - details = error.message; - } - - return RequestError.internalError(details).toResult(); - } - } - - #handleResponse(response: AnyResponse) { - const pendingResponse = this.#pendingResponses.get(response.id); - if (pendingResponse) { - if ('result' in response) { - pendingResponse.resolve(response.result); - } else if ('error' in response) { - pendingResponse.reject(response.error); - } - this.#pendingResponses.delete(response.id); - } - } - - async sendRequest(method: string, params?: Req): Promise { - const id = this.#nextRequestId++; - const responsePromise = new Promise((resolve, reject) => { - this.#pendingResponses.set(id, { resolve, reject }); - }); - await this.#sendMessage({ jsonrpc: '2.0', id, method, params }); - return responsePromise as Promise; - } - - async #sendMessage(json: AnyMessage) { - const content = JSON.stringify(json) + '\n'; - this.#writeQueue = this.#writeQueue - .then(async () => { - const writer = this.#peerInput.getWriter(); - try { - await writer.write(this.#textEncoder.encode(content)); - } finally { - writer.releaseLock(); - } - }) - .catch((error) => { - // Continue processing writes on error - console.error('ACP write error:', error); - }); - return this.#writeQueue; - } -} - -export class RequestError extends Error { - data?: { details?: string }; - - constructor( - public code: number, - message: string, - details?: string, - ) { - super(message); - this.name = 'RequestError'; - if (details) { - this.data = { details }; - } - } - - static parseError(details?: 
string): RequestError { - return new RequestError(-32700, 'Parse error', details); - } - - static invalidRequest(details?: string): RequestError { - return new RequestError(-32600, 'Invalid request', details); - } - - static methodNotFound(details?: string): RequestError { - return new RequestError(-32601, 'Method not found', details); - } - - static invalidParams(details?: string): RequestError { - return new RequestError(-32602, 'Invalid params', details); - } - - static internalError(details?: string): RequestError { - return new RequestError(-32603, 'Internal error', details); - } - - toResult(): Result { - return { - error: { - code: this.code, - message: this.message, - data: this.data, - }, - }; - } -} - -// Protocol types - -export const LATEST_PROTOCOL_VERSION = '0.0.9'; - -export type AssistantMessageChunk = - | { - text: string; - } - | { - thought: string; - }; - -export type ToolCallConfirmation = - | { - description?: string | null; - type: 'edit'; - } - | { - description?: string | null; - type: 'execute'; - command: string; - rootCommand: string; - } - | { - description?: string | null; - type: 'mcp'; - serverName: string; - toolDisplayName: string; - toolName: string; - } - | { - description?: string | null; - type: 'fetch'; - urls: string[]; - } - | { - description: string; - type: 'other'; - }; - -export type ToolCallContent = - | { - type: 'markdown'; - markdown: string; - } - | { - type: 'diff'; - newText: string; - oldText: string | null; - path: string; - }; - -export type ToolCallStatus = 'running' | 'finished' | 'error'; - -export type ToolCallId = number; - -export type ToolCallConfirmationOutcome = - | 'allow' - | 'alwaysAllow' - | 'alwaysAllowMcpServer' - | 'alwaysAllowTool' - | 'reject' - | 'cancel'; - -/** - * A part in a user message - */ -export type UserMessageChunk = - | { - text: string; - } - | { - path: string; - }; - -export interface StreamAssistantMessageChunkParams { - chunk: AssistantMessageChunk; -} - -export interface 
RequestToolCallConfirmationParams { - confirmation: ToolCallConfirmation; - content?: ToolCallContent | null; - icon: Icon; - label: string; - locations?: ToolCallLocation[]; -} - -export interface ToolCallLocation { - line?: number | null; - path: string; -} - -export interface PushToolCallParams { - content?: ToolCallContent | null; - icon: Icon; - label: string; - locations?: ToolCallLocation[]; -} - -export interface UpdateToolCallParams { - content: ToolCallContent | null; - status: ToolCallStatus; - toolCallId: ToolCallId; -} - -export interface RequestToolCallConfirmationResponse { - id: ToolCallId; - outcome: ToolCallConfirmationOutcome; -} - -export interface PushToolCallResponse { - id: ToolCallId; -} - -export interface InitializeParams { - /** - * The version of the protocol that the client supports. - * This should be the latest version supported by the client. - */ - protocolVersion: string; -} - -export interface SendUserMessageParams { - chunks: UserMessageChunk[]; -} - -export interface InitializeResponse { - /** - * Indicates whether the agent is authenticated and - * ready to handle requests. - */ - isAuthenticated: boolean; - /** - * The version of the protocol that the agent supports. - * If the agent supports the requested version, it should respond with the same version. - * Otherwise, the agent should respond with the latest version it supports. - */ - protocolVersion: string; -} - -export interface Error { - code: number; - data?: unknown; - message: string; -} - -export interface Client { - streamAssistantMessageChunk( - params: StreamAssistantMessageChunkParams, - ): Promise; - - requestToolCallConfirmation( - params: RequestToolCallConfirmationParams, - ): Promise; - - pushToolCall(params: PushToolCallParams): Promise; - - updateToolCall(params: UpdateToolCallParams): Promise; -} - -export interface Agent { - /** - * Initializes the agent's state. 
It should be called before any other method, - * and no other methods should be called until it has completed. - * - * If the agent is not authenticated, then the client should prompt the user to authenticate, - * and then call the `authenticate` method. - * Otherwise the client can send other messages to the agent. - */ - initialize(params: InitializeParams): Promise; - - /** - * Begins the authentication process. - * - * This method should only be called if `initialize` indicates the user isn't already authenticated. - * The Promise MUST not resolve until authentication is complete. - */ - authenticate(): Promise; - - /** - * Allows the user to send a message to the agent. - * This method should complete after the agent is finished, during - * which time the agent may update the client by calling - * streamAssistantMessageChunk and other methods. - */ - sendUserMessage(params: SendUserMessageParams): Promise; - - /** - * Cancels the current generation. - */ - cancelSendMessage(): Promise; -} diff --git a/packages/cli/src/config/config.integration.test.ts b/packages/cli/src/config/config.integration.test.ts index 2d7f6a53..0f495a6b 100644 --- a/packages/cli/src/config/config.integration.test.ts +++ b/packages/cli/src/config/config.integration.test.ts @@ -13,6 +13,25 @@ import { ConfigParameters, ContentGeneratorConfig, } from '@qwen-code/qwen-code-core'; +import { http, HttpResponse } from 'msw'; +import { setupServer } from 'msw/node'; + +export const server = setupServer(); + +// TODO(richieforeman): Consider moving this to test setup globally. 
+beforeAll(() => { + server.listen({}); +}); + +afterEach(() => { + server.resetHandlers(); +}); + +afterAll(() => { + server.close(); +}); + +const CLEARCUT_URL = 'https://play.googleapis.com/log'; const TEST_CONTENT_GENERATOR_CONFIG: ContentGeneratorConfig = { apiKey: 'test-key', @@ -38,6 +57,8 @@ describe('Configuration Integration Tests', () => { beforeEach(() => { tempDir = fs.mkdtempSync(path.join(tmpdir(), 'qwen-code-test-')); + server.resetHandlers(http.post(CLEARCUT_URL, () => HttpResponse.text())); + originalEnv = { ...process.env }; process.env.GEMINI_API_KEY = 'test-api-key'; vi.clearAllMocks(); @@ -240,4 +261,149 @@ describe('Configuration Integration Tests', () => { expect(config.getExtensionContextFilePaths()).toEqual(contextFiles); }); }); + + describe('Approval Mode Integration Tests', () => { + let parseArguments: typeof import('./config').parseArguments; + + beforeEach(async () => { + // Import the argument parsing function for integration testing + const { parseArguments: parseArgs } = await import('./config'); + parseArguments = parseArgs; + }); + + it('should parse --approval-mode=auto_edit correctly through the full argument parsing flow', async () => { + const originalArgv = process.argv; + + try { + process.argv = [ + 'node', + 'script.js', + '--approval-mode', + 'auto_edit', + '-p', + 'test', + ]; + + const argv = await parseArguments(); + + // Verify that the argument was parsed correctly + expect(argv.approvalMode).toBe('auto_edit'); + expect(argv.prompt).toBe('test'); + expect(argv.yolo).toBe(false); + } finally { + process.argv = originalArgv; + } + }); + + it('should parse --approval-mode=yolo correctly through the full argument parsing flow', async () => { + const originalArgv = process.argv; + + try { + process.argv = [ + 'node', + 'script.js', + '--approval-mode', + 'yolo', + '-p', + 'test', + ]; + + const argv = await parseArguments(); + + expect(argv.approvalMode).toBe('yolo'); + expect(argv.prompt).toBe('test'); + 
expect(argv.yolo).toBe(false); // Should NOT be set when using --approval-mode + } finally { + process.argv = originalArgv; + } + }); + + it('should parse --approval-mode=default correctly through the full argument parsing flow', async () => { + const originalArgv = process.argv; + + try { + process.argv = [ + 'node', + 'script.js', + '--approval-mode', + 'default', + '-p', + 'test', + ]; + + const argv = await parseArguments(); + + expect(argv.approvalMode).toBe('default'); + expect(argv.prompt).toBe('test'); + expect(argv.yolo).toBe(false); + } finally { + process.argv = originalArgv; + } + }); + + it('should parse legacy --yolo flag correctly', async () => { + const originalArgv = process.argv; + + try { + process.argv = ['node', 'script.js', '--yolo', '-p', 'test']; + + const argv = await parseArguments(); + + expect(argv.yolo).toBe(true); + expect(argv.approvalMode).toBeUndefined(); // Should NOT be set when using --yolo + expect(argv.prompt).toBe('test'); + } finally { + process.argv = originalArgv; + } + }); + + it('should reject invalid approval mode values during argument parsing', async () => { + const originalArgv = process.argv; + + try { + process.argv = ['node', 'script.js', '--approval-mode', 'invalid_mode']; + + // Should throw during argument parsing due to yargs validation + await expect(parseArguments()).rejects.toThrow(); + } finally { + process.argv = originalArgv; + } + }); + + it('should reject conflicting --yolo and --approval-mode flags', async () => { + const originalArgv = process.argv; + + try { + process.argv = [ + 'node', + 'script.js', + '--yolo', + '--approval-mode', + 'default', + ]; + + // Should throw during argument parsing due to conflict validation + await expect(parseArguments()).rejects.toThrow(); + } finally { + process.argv = originalArgv; + } + }); + + it('should handle backward compatibility with mixed scenarios', async () => { + const originalArgv = process.argv; + + try { + // Test that no approval mode arguments 
defaults to no flags set + process.argv = ['node', 'script.js', '-p', 'test']; + + const argv = await parseArguments(); + + expect(argv.approvalMode).toBeUndefined(); + expect(argv.yolo).toBe(false); + expect(argv.prompt).toBe('test'); + } finally { + process.argv = originalArgv; + } + }); + }); }); diff --git a/packages/cli/src/config/config.test.ts b/packages/cli/src/config/config.test.ts index 6d41e0c4..78c290b1 100644 --- a/packages/cli/src/config/config.test.ts +++ b/packages/cli/src/config/config.test.ts @@ -13,6 +13,11 @@ import { loadCliConfig, parseArguments } from './config.js'; import { Settings } from './settings.js'; import { Extension } from './extension.js'; import * as ServerConfig from '@qwen-code/qwen-code-core'; +import { isWorkspaceTrusted } from './trustedFolders.js'; + +vi.mock('./trustedFolders.js', () => ({ + isWorkspaceTrusted: vi.fn(), +})); vi.mock('os', async (importOriginal) => { const actualOs = await importOriginal(); @@ -156,6 +161,93 @@ describe('parseArguments', () => { expect(argv.promptInteractive).toBe('interactive prompt'); expect(argv.prompt).toBeUndefined(); }); + + it('should throw an error when both --yolo and --approval-mode are used together', async () => { + process.argv = [ + 'node', + 'script.js', + '--yolo', + '--approval-mode', + 'default', + ]; + + const mockExit = vi.spyOn(process, 'exit').mockImplementation(() => { + throw new Error('process.exit called'); + }); + + const mockConsoleError = vi + .spyOn(console, 'error') + .mockImplementation(() => {}); + + await expect(parseArguments()).rejects.toThrow('process.exit called'); + + expect(mockConsoleError).toHaveBeenCalledWith( + expect.stringContaining( + 'Cannot use both --yolo (-y) and --approval-mode together. 
Use --approval-mode=yolo instead.', + ), + ); + + mockExit.mockRestore(); + mockConsoleError.mockRestore(); + }); + + it('should throw an error when using short flags -y and --approval-mode together', async () => { + process.argv = ['node', 'script.js', '-y', '--approval-mode', 'yolo']; + + const mockExit = vi.spyOn(process, 'exit').mockImplementation(() => { + throw new Error('process.exit called'); + }); + + const mockConsoleError = vi + .spyOn(console, 'error') + .mockImplementation(() => {}); + + await expect(parseArguments()).rejects.toThrow('process.exit called'); + + expect(mockConsoleError).toHaveBeenCalledWith( + expect.stringContaining( + 'Cannot use both --yolo (-y) and --approval-mode together. Use --approval-mode=yolo instead.', + ), + ); + + mockExit.mockRestore(); + mockConsoleError.mockRestore(); + }); + + it('should allow --approval-mode without --yolo', async () => { + process.argv = ['node', 'script.js', '--approval-mode', 'auto_edit']; + const argv = await parseArguments(); + expect(argv.approvalMode).toBe('auto_edit'); + expect(argv.yolo).toBe(false); + }); + + it('should allow --yolo without --approval-mode', async () => { + process.argv = ['node', 'script.js', '--yolo']; + const argv = await parseArguments(); + expect(argv.yolo).toBe(true); + expect(argv.approvalMode).toBeUndefined(); + }); + + it('should reject invalid --approval-mode values', async () => { + process.argv = ['node', 'script.js', '--approval-mode', 'invalid']; + + const mockExit = vi.spyOn(process, 'exit').mockImplementation(() => { + throw new Error('process.exit called'); + }); + + const mockConsoleError = vi + .spyOn(console, 'error') + .mockImplementation(() => {}); + + await expect(parseArguments()).rejects.toThrow('process.exit called'); + + expect(mockConsoleError).toHaveBeenCalledWith( + expect.stringContaining('Invalid values:'), + ); + + mockExit.mockRestore(); + mockConsoleError.mockRestore(); + }); }); describe('loadCliConfig', () => { @@ -834,6 +926,211 @@ 
describe('mergeExcludeTools', () => { }); }); +describe('Approval mode tool exclusion logic', () => { + const originalIsTTY = process.stdin.isTTY; + + beforeEach(() => { + process.stdin.isTTY = false; // Ensure non-interactive mode + }); + + afterEach(() => { + process.stdin.isTTY = originalIsTTY; + }); + + it('should exclude all interactive tools in non-interactive mode with default approval mode', async () => { + process.argv = ['node', 'script.js', '-p', 'test']; + const argv = await parseArguments(); + const settings: Settings = {}; + const extensions: Extension[] = []; + + const config = await loadCliConfig( + settings, + extensions, + 'test-session', + argv, + ); + + const excludedTools = config.getExcludeTools(); + expect(excludedTools).toContain(ShellTool.Name); + expect(excludedTools).toContain(EditTool.Name); + expect(excludedTools).toContain(WriteFileTool.Name); + }); + + it('should exclude all interactive tools in non-interactive mode with explicit default approval mode', async () => { + process.argv = [ + 'node', + 'script.js', + '--approval-mode', + 'default', + '-p', + 'test', + ]; + const argv = await parseArguments(); + const settings: Settings = {}; + const extensions: Extension[] = []; + + const config = await loadCliConfig( + settings, + extensions, + 'test-session', + argv, + ); + + const excludedTools = config.getExcludeTools(); + expect(excludedTools).toContain(ShellTool.Name); + expect(excludedTools).toContain(EditTool.Name); + expect(excludedTools).toContain(WriteFileTool.Name); + }); + + it('should exclude only shell tools in non-interactive mode with auto_edit approval mode', async () => { + process.argv = [ + 'node', + 'script.js', + '--approval-mode', + 'auto_edit', + '-p', + 'test', + ]; + const argv = await parseArguments(); + const settings: Settings = {}; + const extensions: Extension[] = []; + + const config = await loadCliConfig( + settings, + extensions, + 'test-session', + argv, + ); + + const excludedTools = 
config.getExcludeTools(); + expect(excludedTools).toContain(ShellTool.Name); + expect(excludedTools).not.toContain(EditTool.Name); + expect(excludedTools).not.toContain(WriteFileTool.Name); + }); + + it('should exclude no interactive tools in non-interactive mode with yolo approval mode', async () => { + process.argv = [ + 'node', + 'script.js', + '--approval-mode', + 'yolo', + '-p', + 'test', + ]; + const argv = await parseArguments(); + const settings: Settings = {}; + const extensions: Extension[] = []; + + const config = await loadCliConfig( + settings, + extensions, + 'test-session', + argv, + ); + + const excludedTools = config.getExcludeTools(); + expect(excludedTools).not.toContain(ShellTool.Name); + expect(excludedTools).not.toContain(EditTool.Name); + expect(excludedTools).not.toContain(WriteFileTool.Name); + }); + + it('should exclude no interactive tools in non-interactive mode with legacy yolo flag', async () => { + process.argv = ['node', 'script.js', '--yolo', '-p', 'test']; + const argv = await parseArguments(); + const settings: Settings = {}; + const extensions: Extension[] = []; + + const config = await loadCliConfig( + settings, + extensions, + 'test-session', + argv, + ); + + const excludedTools = config.getExcludeTools(); + expect(excludedTools).not.toContain(ShellTool.Name); + expect(excludedTools).not.toContain(EditTool.Name); + expect(excludedTools).not.toContain(WriteFileTool.Name); + }); + + it('should not exclude interactive tools in interactive mode regardless of approval mode', async () => { + process.stdin.isTTY = true; // Interactive mode + + const testCases = [ + { args: ['node', 'script.js'] }, // default + { args: ['node', 'script.js', '--approval-mode', 'default'] }, + { args: ['node', 'script.js', '--approval-mode', 'auto_edit'] }, + { args: ['node', 'script.js', '--approval-mode', 'yolo'] }, + { args: ['node', 'script.js', '--yolo'] }, + ]; + + for (const testCase of testCases) { + process.argv = testCase.args; + const argv = 
await parseArguments(); + const settings: Settings = {}; + const extensions: Extension[] = []; + + const config = await loadCliConfig( + settings, + extensions, + 'test-session', + argv, + ); + + const excludedTools = config.getExcludeTools(); + expect(excludedTools).not.toContain(ShellTool.Name); + expect(excludedTools).not.toContain(EditTool.Name); + expect(excludedTools).not.toContain(WriteFileTool.Name); + } + }); + + it('should merge approval mode exclusions with settings exclusions in auto_edit mode', async () => { + process.argv = [ + 'node', + 'script.js', + '--approval-mode', + 'auto_edit', + '-p', + 'test', + ]; + const argv = await parseArguments(); + const settings: Settings = { excludeTools: ['custom_tool'] }; + const extensions: Extension[] = []; + + const config = await loadCliConfig( + settings, + extensions, + 'test-session', + argv, + ); + + const excludedTools = config.getExcludeTools(); + expect(excludedTools).toContain('custom_tool'); // From settings + expect(excludedTools).toContain(ShellTool.Name); // From approval mode + expect(excludedTools).not.toContain(EditTool.Name); // Should be allowed in auto_edit + expect(excludedTools).not.toContain(WriteFileTool.Name); // Should be allowed in auto_edit + }); + + it('should throw an error for invalid approval mode values in loadCliConfig', async () => { + // Create a mock argv with an invalid approval mode that bypasses argument parsing validation + const invalidArgv: Partial & { approvalMode: string } = { + approvalMode: 'invalid_mode', + promptInteractive: '', + prompt: '', + yolo: false, + }; + + const settings: Settings = {}; + const extensions: Extension[] = []; + + await expect( + loadCliConfig(settings, extensions, 'test-session', invalidArgv), + ).rejects.toThrow( + 'Invalid approval mode: invalid_mode. 
Valid values are: yolo, auto_edit, default', + ); + }); +}); + describe('loadCliConfig with allowed-mcp-server-names', () => { const originalArgv = process.argv; const originalEnv = { ...process.env }; @@ -1084,33 +1381,6 @@ describe('loadCliConfig model selection', () => { }); }); -describe('loadCliConfig ideModeFeature', () => { - const originalArgv = process.argv; - const originalEnv = { ...process.env }; - - beforeEach(() => { - vi.resetAllMocks(); - vi.mocked(os.homedir).mockReturnValue('/mock/home/user'); - process.env.GEMINI_API_KEY = 'test-api-key'; - delete process.env.SANDBOX; - delete process.env.QWEN_CODE_IDE_SERVER_PORT; - }); - - afterEach(() => { - process.argv = originalArgv; - process.env = originalEnv; - vi.restoreAllMocks(); - }); - - it('should be false by default', async () => { - process.argv = ['node', 'script.js']; - const settings: Settings = {}; - const argv = await parseArguments(); - const config = await loadCliConfig(settings, [], 'test-session', argv); - expect(config.getIdeModeFeature()).toBe(false); - }); -}); - describe('loadCliConfig folderTrustFeature', () => { const originalArgv = process.argv; const originalEnv = { ...process.env }; @@ -1428,3 +1698,198 @@ describe('loadCliConfig interactive', () => { expect(config.isInteractive()).toBe(false); }); }); + +describe('loadCliConfig approval mode', () => { + const originalArgv = process.argv; + const originalEnv = { ...process.env }; + + beforeEach(() => { + vi.resetAllMocks(); + vi.mocked(os.homedir).mockReturnValue('/mock/home/user'); + process.env.GEMINI_API_KEY = 'test-api-key'; + process.argv = ['node', 'script.js']; // Reset argv for each test + }); + + afterEach(() => { + process.argv = originalArgv; + process.env = originalEnv; + vi.restoreAllMocks(); + }); + + it('should default to DEFAULT approval mode when no flags are set', async () => { + process.argv = ['node', 'script.js']; + const argv = await parseArguments(); + const config = await loadCliConfig({}, [], 
'test-session', argv); + expect(config.getApprovalMode()).toBe(ServerConfig.ApprovalMode.DEFAULT); + }); + + it('should set YOLO approval mode when --yolo flag is used', async () => { + process.argv = ['node', 'script.js', '--yolo']; + const argv = await parseArguments(); + const config = await loadCliConfig({}, [], 'test-session', argv); + expect(config.getApprovalMode()).toBe(ServerConfig.ApprovalMode.YOLO); + }); + + it('should set YOLO approval mode when -y flag is used', async () => { + process.argv = ['node', 'script.js', '-y']; + const argv = await parseArguments(); + const config = await loadCliConfig({}, [], 'test-session', argv); + expect(config.getApprovalMode()).toBe(ServerConfig.ApprovalMode.YOLO); + }); + + it('should set DEFAULT approval mode when --approval-mode=default', async () => { + process.argv = ['node', 'script.js', '--approval-mode', 'default']; + const argv = await parseArguments(); + const config = await loadCliConfig({}, [], 'test-session', argv); + expect(config.getApprovalMode()).toBe(ServerConfig.ApprovalMode.DEFAULT); + }); + + it('should set AUTO_EDIT approval mode when --approval-mode=auto_edit', async () => { + process.argv = ['node', 'script.js', '--approval-mode', 'auto_edit']; + const argv = await parseArguments(); + const config = await loadCliConfig({}, [], 'test-session', argv); + expect(config.getApprovalMode()).toBe(ServerConfig.ApprovalMode.AUTO_EDIT); + }); + + it('should set YOLO approval mode when --approval-mode=yolo', async () => { + process.argv = ['node', 'script.js', '--approval-mode', 'yolo']; + const argv = await parseArguments(); + const config = await loadCliConfig({}, [], 'test-session', argv); + expect(config.getApprovalMode()).toBe(ServerConfig.ApprovalMode.YOLO); + }); + + it('should prioritize --approval-mode over --yolo when both would be valid (but validation prevents this)', async () => { + // Note: This test documents the intended behavior, but in practice the validation + // prevents both flags from 
being used together + process.argv = ['node', 'script.js', '--approval-mode', 'default']; + const argv = await parseArguments(); + // Manually set yolo to true to simulate what would happen if validation didn't prevent it + argv.yolo = true; + const config = await loadCliConfig({}, [], 'test-session', argv); + expect(config.getApprovalMode()).toBe(ServerConfig.ApprovalMode.DEFAULT); + }); + + it('should fall back to --yolo behavior when --approval-mode is not set', async () => { + process.argv = ['node', 'script.js', '--yolo']; + const argv = await parseArguments(); + const config = await loadCliConfig({}, [], 'test-session', argv); + expect(config.getApprovalMode()).toBe(ServerConfig.ApprovalMode.YOLO); + }); +}); + +describe('loadCliConfig trustedFolder', () => { + const originalArgv = process.argv; + const originalEnv = { ...process.env }; + + beforeEach(() => { + vi.resetAllMocks(); + vi.mocked(os.homedir).mockReturnValue('/mock/home/user'); + process.env.GEMINI_API_KEY = 'test-api-key'; + process.argv = ['node', 'script.js']; // Reset argv for each test + }); + + afterEach(() => { + process.argv = originalArgv; + process.env = originalEnv; + vi.restoreAllMocks(); + }); + + const testCases = [ + // Cases where folderTrustFeature is false (feature disabled) + { + folderTrustFeature: false, + folderTrust: true, + isWorkspaceTrusted: true, + expectedFolderTrust: false, + expectedIsTrustedFolder: true, + description: + 'feature disabled, folderTrust true, workspace trusted -> behave as trusted', + }, + { + folderTrustFeature: false, + folderTrust: true, + isWorkspaceTrusted: false, + expectedFolderTrust: false, + expectedIsTrustedFolder: true, + description: + 'feature disabled, folderTrust true, workspace not trusted -> behave as trusted', + }, + { + folderTrustFeature: false, + folderTrust: false, + isWorkspaceTrusted: true, + expectedFolderTrust: false, + expectedIsTrustedFolder: true, + description: + 'feature disabled, folderTrust false, workspace trusted -> 
behave as trusted', + }, + + // Cases where folderTrustFeature is true but folderTrust setting is false + { + folderTrustFeature: true, + folderTrust: false, + isWorkspaceTrusted: true, + expectedFolderTrust: false, + expectedIsTrustedFolder: true, + description: + 'feature on, folderTrust false, workspace trusted -> behave as trusted', + }, + { + folderTrustFeature: true, + folderTrust: false, + isWorkspaceTrusted: false, + expectedFolderTrust: false, + expectedIsTrustedFolder: true, + description: + 'feature on, folderTrust false, workspace not trusted -> behave as trusted', + }, + + // Cases where feature is fully enabled (folderTrustFeature and folderTrust are true) + { + folderTrustFeature: true, + folderTrust: true, + isWorkspaceTrusted: true, + expectedFolderTrust: true, + expectedIsTrustedFolder: true, + description: + 'feature on, folderTrust on, workspace trusted -> is trusted', + }, + { + folderTrustFeature: true, + folderTrust: true, + isWorkspaceTrusted: false, + expectedFolderTrust: true, + expectedIsTrustedFolder: false, + description: + 'feature on, folderTrust on, workspace NOT trusted -> is NOT trusted', + }, + { + folderTrustFeature: true, + folderTrust: true, + isWorkspaceTrusted: undefined, + expectedFolderTrust: true, + expectedIsTrustedFolder: undefined, + description: + 'feature on, folderTrust on, workspace trust unknown -> is unknown', + }, + ]; + + for (const { + folderTrustFeature, + folderTrust, + isWorkspaceTrusted: mockTrustValue, + expectedFolderTrust, + expectedIsTrustedFolder, + description, + } of testCases) { + it(`should be correct for: ${description}`, async () => { + (isWorkspaceTrusted as vi.Mock).mockReturnValue(mockTrustValue); + const argv = await parseArguments(); + const settings: Settings = { folderTrustFeature, folderTrust }; + const config = await loadCliConfig(settings, [], 'test-session', argv); + + expect(config.getFolderTrust()).toBe(expectedFolderTrust); + 
expect(config.isTrustedFolder()).toBe(expectedIsTrustedFolder); + }); + } +}); diff --git a/packages/cli/src/config/config.ts b/packages/cli/src/config/config.ts index 0ec6bd07..aa45f1b2 100644 --- a/packages/cli/src/config/config.ts +++ b/packages/cli/src/config/config.ts @@ -36,6 +36,8 @@ import { getCliVersion } from '../utils/version.js'; import { loadSandboxConfig } from './sandboxConfig.js'; import { resolvePath } from '../utils/resolvePath.js'; +import { isWorkspaceTrusted } from './trustedFolders.js'; + // Simple console logger for now - replace with actual logger if available const logger = { // eslint-disable-next-line @typescript-eslint/no-explicit-any @@ -58,6 +60,7 @@ export interface CliArgs { showMemoryUsage: boolean | undefined; show_memory_usage: boolean | undefined; yolo: boolean | undefined; + approvalMode: string | undefined; telemetry: boolean | undefined; checkpointing: boolean | undefined; telemetryTarget: string | undefined; @@ -68,7 +71,6 @@ export interface CliArgs { experimentalAcp: boolean | undefined; extensions: string[] | undefined; listExtensions: boolean | undefined; - ideModeFeature: boolean | undefined; openaiLogging: boolean | undefined; openaiApiKey: string | undefined; openaiBaseUrl: string | undefined; @@ -153,6 +155,12 @@ export async function parseArguments(): Promise { 'Automatically accept all actions (aka YOLO mode, see https://www.youtube.com/watch?v=xvFZjo5PgG0 for more details)?', default: false, }) + .option('approval-mode', { + type: 'string', + choices: ['default', 'auto_edit', 'yolo'], + description: + 'Set the approval mode: default (prompt for approval), auto_edit (auto-approve edit tools), yolo (auto-approve all tools)', + }) .option('telemetry', { type: 'boolean', description: @@ -205,10 +213,6 @@ export async function parseArguments(): Promise { type: 'boolean', description: 'List all available extensions and exit.', }) - .option('ide-mode-feature', { - type: 'boolean', - description: 'Run in IDE mode?', - }) 
.option('proxy', { type: 'string', description: @@ -246,6 +250,11 @@ export async function parseArguments(): Promise { 'Cannot use both --prompt (-p) and --prompt-interactive (-i) together', ); } + if (argv.yolo && argv.approvalMode) { + throw new Error( + 'Cannot use both --yolo (-y) and --approval-mode together. Use --approval-mode=yolo instead.', + ); + } return true; }), ) @@ -319,6 +328,7 @@ export async function loadCliConfig( extensions: Extension[], sessionId: string, argv: CliArgs, + cwd: string = process.cwd(), ): Promise { const debugMode = argv.debug || @@ -329,12 +339,11 @@ export async function loadCliConfig( const memoryImportFormat = settings.memoryImportFormat || 'tree'; const ideMode = settings.ideMode ?? false; - const ideModeFeature = - argv.ideModeFeature ?? settings.ideModeFeature ?? false; const folderTrustFeature = settings.folderTrustFeature ?? false; - const folderTrustSetting = settings.folderTrust ?? false; + const folderTrustSetting = settings.folderTrust ?? true; const folderTrust = folderTrustFeature && folderTrustSetting; + const trustedFolder = folderTrust ? isWorkspaceTrusted() : true; const allExtensions = annotateActiveExtensions( extensions, @@ -374,7 +383,7 @@ export async function loadCliConfig( (e) => e.contextFiles, ); - const fileService = new FileDiscoveryService(process.cwd()); + const fileService = new FileDiscoveryService(cwd); const fileFiltering = { ...DEFAULT_MEMORY_FILE_FILTERING_OPTIONS, @@ -387,7 +396,7 @@ export async function loadCliConfig( // Call the (now wrapper) loadHierarchicalGeminiMemory which calls the server's version const { memoryContent, fileCount } = await loadHierarchicalGeminiMemory( - process.cwd(), + cwd, settings.loadMemoryFromIncludeDirectories ? 
includeDirectories : [], debugMode, fileService, @@ -399,20 +408,59 @@ export async function loadCliConfig( let mcpServers = mergeMcpServers(settings, activeExtensions); const question = argv.promptInteractive || argv.prompt || ''; - const approvalMode = - argv.yolo || false ? ApprovalMode.YOLO : ApprovalMode.DEFAULT; + + // Determine approval mode with backward compatibility + let approvalMode: ApprovalMode; + if (argv.approvalMode) { + // New --approval-mode flag takes precedence + switch (argv.approvalMode) { + case 'yolo': + approvalMode = ApprovalMode.YOLO; + break; + case 'auto_edit': + approvalMode = ApprovalMode.AUTO_EDIT; + break; + case 'default': + approvalMode = ApprovalMode.DEFAULT; + break; + default: + throw new Error( + `Invalid approval mode: ${argv.approvalMode}. Valid values are: yolo, auto_edit, default`, + ); + } + } else { + // Fallback to legacy --yolo flag behavior + approvalMode = + argv.yolo || false ? ApprovalMode.YOLO : ApprovalMode.DEFAULT; + } + const interactive = !!argv.promptInteractive || (process.stdin.isTTY && question.length === 0); - // In non-interactive and non-yolo mode, exclude interactive built in tools. - const extraExcludes = - !interactive && approvalMode !== ApprovalMode.YOLO - ? [ShellTool.Name, EditTool.Name, WriteFileTool.Name] - : undefined; + // In non-interactive mode, exclude tools that require a prompt. + const extraExcludes: string[] = []; + if (!interactive && !argv.experimentalAcp) { + switch (approvalMode) { + case ApprovalMode.DEFAULT: + // In default non-interactive mode, all tools that require approval are excluded. + extraExcludes.push(ShellTool.Name, EditTool.Name, WriteFileTool.Name); + break; + case ApprovalMode.AUTO_EDIT: + // In auto-edit non-interactive mode, only tools that still require a prompt are excluded. + extraExcludes.push(ShellTool.Name); + break; + case ApprovalMode.YOLO: + // No extra excludes for YOLO mode. 
+ break; + default: + // This should never happen due to validation earlier, but satisfies the linter + break; + } + } const excludeTools = mergeExcludeTools( settings, activeExtensions, - extraExcludes, + extraExcludes.length > 0 ? extraExcludes : undefined, ); const blockedMcpServers: Array<{ name: string; extensionName: string }> = []; @@ -450,7 +498,7 @@ export async function loadCliConfig( sessionId, embeddingModel: DEFAULT_GEMINI_EMBEDDING_MODEL, sandbox: sandboxConfig, - targetDir: process.cwd(), + targetDir: cwd, includeDirectories, loadMemoryFromIncludeDirectories: settings.loadMemoryFromIncludeDirectories || false, @@ -498,21 +546,20 @@ export async function loadCliConfig( process.env.https_proxy || process.env.HTTP_PROXY || process.env.http_proxy, - cwd: process.cwd(), + cwd, fileDiscoveryService: fileService, bugCommand: settings.bugCommand, model: argv.model || settings.model || DEFAULT_GEMINI_MODEL, extensionContextFilePaths, maxSessionTurns: settings.maxSessionTurns ?? -1, sessionTokenLimit: settings.sessionTokenLimit ?? -1, - experimentalAcp: argv.experimentalAcp || false, + experimentalZedIntegration: argv.experimentalAcp || false, listExtensions: argv.listExtensions || false, extensions: allExtensions, blockedMcpServers, noBrowser: !!process.env.NO_BROWSER, summarizeToolOutput: settings.summarizeToolOutput, ideMode, - ideModeFeature, enableOpenAILogging: (typeof argv.openaiLogging === 'undefined' ? 
settings.enableOpenAILogging @@ -537,6 +584,7 @@ export async function loadCliConfig( folderTrustFeature, folderTrust, interactive, + trustedFolder, }); } diff --git a/packages/cli/src/config/keyBindings.ts b/packages/cli/src/config/keyBindings.ts index 6f4a21a2..640bf9de 100644 --- a/packages/cli/src/config/keyBindings.ts +++ b/packages/cli/src/config/keyBindings.ts @@ -129,20 +129,24 @@ export const defaultKeyBindings: KeyBindingConfig = { // Text input // Original: key.name === 'return' && !key.ctrl && !key.meta && !key.paste + // Must also exclude shift to allow shift+enter for newline [Command.SUBMIT]: [ { key: 'return', ctrl: false, command: false, paste: false, + shift: false, }, ], // Original: key.name === 'return' && (key.ctrl || key.meta || key.paste) // Split into multiple data-driven bindings + // Now also includes shift+enter for multi-line input [Command.NEWLINE]: [ { key: 'return', ctrl: true }, { key: 'return', command: true }, { key: 'return', paste: true }, + { key: 'return', shift: true }, ], // External tools diff --git a/packages/cli/src/config/settingsSchema.test.ts b/packages/cli/src/config/settingsSchema.test.ts index ab820ee1..118b1823 100644 --- a/packages/cli/src/config/settingsSchema.test.ts +++ b/packages/cli/src/config/settingsSchema.test.ts @@ -44,7 +44,6 @@ describe('SettingsSchema', () => { 'telemetry', 'bugCommand', 'summarizeToolOutput', - 'ideModeFeature', 'dnsResolutionOrder', 'excludedProjectEnvVars', 'disableUpdateNag', diff --git a/packages/cli/src/config/settingsSchema.ts b/packages/cli/src/config/settingsSchema.ts index 73ffebdc..6e472067 100644 --- a/packages/cli/src/config/settingsSchema.ts +++ b/packages/cli/src/config/settingsSchema.ts @@ -395,15 +395,7 @@ export const SETTINGS_SCHEMA = { description: 'Settings for summarizing tool output.', showInDialog: false, }, - ideModeFeature: { - type: 'boolean', - label: 'IDE Mode Feature Flag', - category: 'Advanced', - requiresRestart: true, - default: undefined as boolean | 
undefined, - description: 'Internal feature flag for IDE mode.', - showInDialog: false, - }, + dnsResolutionOrder: { type: 'string', label: 'DNS Resolution Order', diff --git a/packages/cli/src/config/trustedFolders.test.ts b/packages/cli/src/config/trustedFolders.test.ts new file mode 100644 index 00000000..67bf9cfc --- /dev/null +++ b/packages/cli/src/config/trustedFolders.test.ts @@ -0,0 +1,203 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +// Mock 'os' first. +import * as osActual from 'os'; +vi.mock('os', async (importOriginal) => { + const actualOs = await importOriginal(); + return { + ...actualOs, + homedir: vi.fn(() => '/mock/home/user'), + platform: vi.fn(() => 'linux'), + }; +}); + +import { + describe, + it, + expect, + vi, + beforeEach, + afterEach, + type Mocked, + type Mock, +} from 'vitest'; +import * as fs from 'fs'; +import stripJsonComments from 'strip-json-comments'; +import * as path from 'path'; + +import { + loadTrustedFolders, + USER_TRUSTED_FOLDERS_PATH, + TrustLevel, + isWorkspaceTrusted, +} from './trustedFolders.js'; + +vi.mock('fs', async (importOriginal) => { + const actualFs = await importOriginal(); + return { + ...actualFs, + existsSync: vi.fn(), + readFileSync: vi.fn(), + writeFileSync: vi.fn(), + mkdirSync: vi.fn(), + }; +}); + +vi.mock('strip-json-comments', () => ({ + default: vi.fn((content) => content), +})); + +describe('Trusted Folders Loading', () => { + let mockFsExistsSync: Mocked; + let mockStripJsonComments: Mocked; + let mockFsWriteFileSync: Mocked; + + beforeEach(() => { + vi.resetAllMocks(); + mockFsExistsSync = vi.mocked(fs.existsSync); + mockStripJsonComments = vi.mocked(stripJsonComments); + mockFsWriteFileSync = vi.mocked(fs.writeFileSync); + vi.mocked(osActual.homedir).mockReturnValue('/mock/home/user'); + (mockStripJsonComments as unknown as Mock).mockImplementation( + (jsonString: string) => jsonString, + ); + (mockFsExistsSync as 
Mock).mockReturnValue(false); + (fs.readFileSync as Mock).mockReturnValue('{}'); + }); + + afterEach(() => { + vi.restoreAllMocks(); + }); + + it('should load empty rules if no files exist', () => { + const { rules, errors } = loadTrustedFolders(); + expect(rules).toEqual([]); + expect(errors).toEqual([]); + }); + + it('should load user rules if only user file exists', () => { + const userPath = USER_TRUSTED_FOLDERS_PATH; + (mockFsExistsSync as Mock).mockImplementation((p) => p === userPath); + const userContent = { + '/user/folder': TrustLevel.TRUST_FOLDER, + }; + (fs.readFileSync as Mock).mockImplementation((p) => { + if (p === userPath) return JSON.stringify(userContent); + return '{}'; + }); + + const { rules, errors } = loadTrustedFolders(); + expect(rules).toEqual([ + { path: '/user/folder', trustLevel: TrustLevel.TRUST_FOLDER }, + ]); + expect(errors).toEqual([]); + }); + + it('should handle JSON parsing errors gracefully', () => { + const userPath = USER_TRUSTED_FOLDERS_PATH; + (mockFsExistsSync as Mock).mockImplementation((p) => p === userPath); + (fs.readFileSync as Mock).mockImplementation((p) => { + if (p === userPath) return 'invalid json'; + return '{}'; + }); + + const { rules, errors } = loadTrustedFolders(); + expect(rules).toEqual([]); + expect(errors.length).toBe(1); + expect(errors[0].path).toBe(userPath); + expect(errors[0].message).toContain('Unexpected token'); + }); + + it('setValue should update the user config and save it', () => { + const loadedFolders = loadTrustedFolders(); + loadedFolders.setValue('/new/path', TrustLevel.TRUST_FOLDER); + + expect(loadedFolders.user.config['/new/path']).toBe( + TrustLevel.TRUST_FOLDER, + ); + expect(mockFsWriteFileSync).toHaveBeenCalledWith( + USER_TRUSTED_FOLDERS_PATH, + JSON.stringify({ '/new/path': TrustLevel.TRUST_FOLDER }, null, 2), + 'utf-8', + ); + }); +}); + +describe('isWorkspaceTrusted', () => { + let mockCwd: string; + const mockRules: Record = {}; + + beforeEach(() => { + vi.spyOn(process, 
'cwd').mockImplementation(() => mockCwd); + vi.spyOn(fs, 'readFileSync').mockImplementation((p) => { + if (p === USER_TRUSTED_FOLDERS_PATH) { + return JSON.stringify(mockRules); + } + return '{}'; + }); + vi.spyOn(fs, 'existsSync').mockImplementation( + (p) => p === USER_TRUSTED_FOLDERS_PATH, + ); + }); + + afterEach(() => { + vi.restoreAllMocks(); + // Clear the object + Object.keys(mockRules).forEach((key) => delete mockRules[key]); + }); + + it('should return true for a directly trusted folder', () => { + mockCwd = '/home/user/projectA'; + mockRules['/home/user/projectA'] = TrustLevel.TRUST_FOLDER; + expect(isWorkspaceTrusted()).toBe(true); + }); + + it('should return true for a child of a trusted folder', () => { + mockCwd = '/home/user/projectA/src'; + mockRules['/home/user/projectA'] = TrustLevel.TRUST_FOLDER; + expect(isWorkspaceTrusted()).toBe(true); + }); + + it('should return true for a child of a trusted parent folder', () => { + mockCwd = '/home/user/projectB'; + mockRules['/home/user/projectB/somefile.txt'] = TrustLevel.TRUST_PARENT; + expect(isWorkspaceTrusted()).toBe(true); + }); + + it('should return false for a directly untrusted folder', () => { + mockCwd = '/home/user/untrusted'; + mockRules['/home/user/untrusted'] = TrustLevel.DO_NOT_TRUST; + expect(isWorkspaceTrusted()).toBe(false); + }); + + it('should return undefined for a child of an untrusted folder', () => { + mockCwd = '/home/user/untrusted/src'; + mockRules['/home/user/untrusted'] = TrustLevel.DO_NOT_TRUST; + expect(isWorkspaceTrusted()).toBeUndefined(); + }); + + it('should return undefined when no rules match', () => { + mockCwd = '/home/user/other'; + mockRules['/home/user/projectA'] = TrustLevel.TRUST_FOLDER; + mockRules['/home/user/untrusted'] = TrustLevel.DO_NOT_TRUST; + expect(isWorkspaceTrusted()).toBeUndefined(); + }); + + it('should prioritize trust over distrust', () => { + mockCwd = '/home/user/projectA/untrusted'; + mockRules['/home/user/projectA'] = 
TrustLevel.TRUST_FOLDER; + mockRules['/home/user/projectA/untrusted'] = TrustLevel.DO_NOT_TRUST; + expect(isWorkspaceTrusted()).toBe(true); + }); + + it('should handle path normalization', () => { + mockCwd = '/home/user/projectA'; + mockRules[`/home/user/../user/${path.basename('/home/user/projectA')}`] = + TrustLevel.TRUST_FOLDER; + expect(isWorkspaceTrusted()).toBe(true); + }); +}); diff --git a/packages/cli/src/config/trustedFolders.ts b/packages/cli/src/config/trustedFolders.ts new file mode 100644 index 00000000..9da27c80 --- /dev/null +++ b/packages/cli/src/config/trustedFolders.ts @@ -0,0 +1,158 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import * as fs from 'fs'; +import * as path from 'path'; +import { homedir } from 'os'; +import { getErrorMessage, isWithinRoot } from '@google/gemini-cli-core'; +import stripJsonComments from 'strip-json-comments'; + +export const TRUSTED_FOLDERS_FILENAME = 'trustedFolders.json'; +export const SETTINGS_DIRECTORY_NAME = '.gemini'; +export const USER_SETTINGS_DIR = path.join(homedir(), SETTINGS_DIRECTORY_NAME); +export const USER_TRUSTED_FOLDERS_PATH = path.join( + USER_SETTINGS_DIR, + TRUSTED_FOLDERS_FILENAME, +); + +export enum TrustLevel { + TRUST_FOLDER = 'TRUST_FOLDER', + TRUST_PARENT = 'TRUST_PARENT', + DO_NOT_TRUST = 'DO_NOT_TRUST', +} + +export interface TrustRule { + path: string; + trustLevel: TrustLevel; +} + +export interface TrustedFoldersError { + message: string; + path: string; +} + +export interface TrustedFoldersFile { + config: Record; + path: string; +} + +export class LoadedTrustedFolders { + constructor( + public user: TrustedFoldersFile, + public errors: TrustedFoldersError[], + ) {} + + get rules(): TrustRule[] { + return Object.entries(this.user.config).map(([path, trustLevel]) => ({ + path, + trustLevel, + })); + } + + setValue(path: string, trustLevel: TrustLevel): void { + this.user.config[path] = trustLevel; + saveTrustedFolders(this.user); 
+ } +} + +export function loadTrustedFolders(): LoadedTrustedFolders { + const errors: TrustedFoldersError[] = []; + const userConfig: Record = {}; + + const userPath = USER_TRUSTED_FOLDERS_PATH; + + // Load user trusted folders + try { + if (fs.existsSync(userPath)) { + const content = fs.readFileSync(userPath, 'utf-8'); + const parsed = JSON.parse(stripJsonComments(content)) as Record< + string, + TrustLevel + >; + if (parsed) { + Object.assign(userConfig, parsed); + } + } + } catch (error: unknown) { + errors.push({ + message: getErrorMessage(error), + path: userPath, + }); + } + + return new LoadedTrustedFolders( + { path: userPath, config: userConfig }, + errors, + ); +} + +export function saveTrustedFolders( + trustedFoldersFile: TrustedFoldersFile, +): void { + try { + // Ensure the directory exists + const dirPath = path.dirname(trustedFoldersFile.path); + if (!fs.existsSync(dirPath)) { + fs.mkdirSync(dirPath, { recursive: true }); + } + + fs.writeFileSync( + trustedFoldersFile.path, + JSON.stringify(trustedFoldersFile.config, null, 2), + 'utf-8', + ); + } catch (error) { + console.error('Error saving trusted folders file:', error); + } +} + +export function isWorkspaceTrusted(): boolean | undefined { + const { rules, errors } = loadTrustedFolders(); + + if (errors.length > 0) { + for (const error of errors) { + console.error( + `Error loading trusted folders config from ${error.path}: ${error.message}`, + ); + } + } + + const trustedPaths: string[] = []; + const untrustedPaths: string[] = []; + + for (const rule of rules) { + switch (rule.trustLevel) { + case TrustLevel.TRUST_FOLDER: + trustedPaths.push(rule.path); + break; + case TrustLevel.TRUST_PARENT: + trustedPaths.push(path.dirname(rule.path)); + break; + case TrustLevel.DO_NOT_TRUST: + untrustedPaths.push(rule.path); + break; + default: + // Do nothing for unknown trust levels. 
+ break; + } + } + + const cwd = process.cwd(); + + for (const trustedPath of trustedPaths) { + if (isWithinRoot(cwd, trustedPath)) { + return true; + } + } + + for (const untrustedPath of untrustedPaths) { + if (path.normalize(cwd) === path.normalize(untrustedPath)) { + return false; + } + } + + return undefined; +} diff --git a/packages/cli/src/gemini.tsx b/packages/cli/src/gemini.tsx index a7e4c75b..6c70b3d9 100644 --- a/packages/cli/src/gemini.tsx +++ b/packages/cli/src/gemini.tsx @@ -41,6 +41,7 @@ import { import { validateAuthMethod } from './config/auth.js'; import { setMaxSizedBoxDebugging } from './ui/components/shared/MaxSizedBox.js'; import { validateNonInteractiveAuth } from './validateNonInterActiveAuth.js'; +import { detectAndEnableKittyProtocol } from './ui/utils/kittyProtocolDetector.js'; import { checkForUpdates } from './ui/utils/updateCheck.js'; import { handleAutoUpdate } from './utils/handleAutoUpdate.js'; import { appEvents, AppEvent } from './utils/events.js'; @@ -106,7 +107,7 @@ async function relaunchWithAdditionalArgs(additionalArgs: string[]) { await new Promise((resolve) => child.on('close', resolve)); process.exit(0); } -import { runAcpPeer } from './acp/acpPeer.js'; +import { runZedIntegration } from './zed-integration/zedIntegration.js'; export function setupUnhandledRejectionHandler() { let unhandledRejectionOccurred = false; @@ -191,7 +192,7 @@ export async function main() { await config.initialize(); - if (config.getIdeMode() && config.getIdeModeFeature()) { + if (config.getIdeMode()) { await config.getIdeClient().connect(); logIdeConnection(config, new IdeConnectionEvent(IdeConnectionType.START)); } @@ -250,8 +251,8 @@ export async function main() { await getOauthClient(settings.merged.selectedAuthType, config); } - if (config.getExperimentalAcp()) { - return runAcpPeer(config, settings); + if (config.getExperimentalZedIntegration()) { + return runZedIntegration(config, settings, extensions, argv); } let input = 
config.getQuestion(); @@ -263,6 +264,8 @@ export async function main() { // Render UI, passing necessary config values. Check that there is no command line question. if (config.isInteractive()) { const version = await getCliVersion(); + // Detect and enable Kitty keyboard protocol once at startup + await detectAndEnableKittyProtocol(); setWindowTitle(basename(workspaceRoot), settings); const instance = render( diff --git a/packages/cli/src/nonInteractiveCli.ts b/packages/cli/src/nonInteractiveCli.ts index 24f5cb10..a4f99046 100644 --- a/packages/cli/src/nonInteractiveCli.ts +++ b/packages/cli/src/nonInteractiveCli.ts @@ -13,10 +13,10 @@ import { isTelemetrySdkInitialized, GeminiEventType, ToolErrorType, + parseAndFormatApiError, } from '@qwen-code/qwen-code-core'; import { Content, Part, FunctionCall } from '@google/genai'; -import { parseAndFormatApiError } from './ui/utils/errorParsing.js'; import { ConsolePatcher } from './ui/utils/ConsolePatcher.js'; export async function runNonInteractive( @@ -143,7 +143,7 @@ export async function runNonInteractive( } finally { consolePatcher.cleanup(); if (isTelemetrySdkInitialized()) { - await shutdownTelemetry(); + await shutdownTelemetry(config); } } } diff --git a/packages/cli/src/services/BuiltinCommandLoader.ts b/packages/cli/src/services/BuiltinCommandLoader.ts index 5343fd10..7304d912 100644 --- a/packages/cli/src/services/BuiltinCommandLoader.ts +++ b/packages/cli/src/services/BuiltinCommandLoader.ts @@ -33,6 +33,7 @@ import { toolsCommand } from '../ui/commands/toolsCommand.js'; import { settingsCommand } from '../ui/commands/settingsCommand.js'; import { vimCommand } from '../ui/commands/vimCommand.js'; import { setupGithubCommand } from '../ui/commands/setupGithubCommand.js'; +import { terminalSetupCommand } from '../ui/commands/terminalSetupCommand.js'; /** * Loads the core, hard-coded slash commands that are an integral part @@ -76,6 +77,7 @@ export class BuiltinCommandLoader implements ICommandLoader { 
settingsCommand, vimCommand, setupGithubCommand, + terminalSetupCommand, ]; return allDefinitions.filter((cmd): cmd is SlashCommand => cmd !== null); diff --git a/packages/cli/src/ui/App.test.tsx b/packages/cli/src/ui/App.test.tsx index 97b0bf97..57cca871 100644 --- a/packages/cli/src/ui/App.test.tsx +++ b/packages/cli/src/ui/App.test.tsx @@ -155,13 +155,13 @@ vi.mock('@qwen-code/qwen-code-core', async (importOriginal) => { setFlashFallbackHandler: vi.fn(), getSessionId: vi.fn(() => 'test-session-id'), getUserTier: vi.fn().mockResolvedValue(undefined), - getIdeModeFeature: vi.fn(() => false), - getIdeMode: vi.fn(() => false), + getIdeMode: vi.fn(() => true), getWorkspaceContext: vi.fn(() => ({ getDirectories: vi.fn(() => []), })), getIdeClient: vi.fn(() => ({ getCurrentIde: vi.fn(() => 'vscode'), + getDetectedIdeDisplayName: vi.fn(() => 'VSCode'), })), }; }); diff --git a/packages/cli/src/ui/App.tsx b/packages/cli/src/ui/App.tsx index fffb4167..675d3c00 100644 --- a/packages/cli/src/ui/App.tsx +++ b/packages/cli/src/ui/App.tsx @@ -82,6 +82,7 @@ import { useTextBuffer } from './components/shared/text-buffer.js'; import { useVimMode, VimModeProvider } from './contexts/VimModeContext.js'; import { useVim } from './hooks/vim.js'; import { useKeypress, Key } from './hooks/useKeypress.js'; +import { useKittyKeyboardProtocol } from './hooks/useKittyKeyboardProtocol.js'; import { keyMatchers, Command } from './keyMatchers.js'; import * as fs from 'fs'; import { UpdateNotification } from './components/UpdateNotification.js'; @@ -132,7 +133,6 @@ const App = ({ config, settings, startupWarnings = [], version }: AppProps) => { registerCleanup(() => config.getIdeClient().disconnect()); }, [config]); const shouldShowIdePrompt = - config.getIdeModeFeature() && currentIDE && !config.getIdeMode() && !settings.merged.hasSeenIdeIntegrationNudge && @@ -254,8 +254,10 @@ const App = ({ config, settings, startupWarnings = [], version }: AppProps) => { const { isSettingsDialogOpen, 
openSettingsDialog, closeSettingsDialog } = useSettingsCommand(); - const { isFolderTrustDialogOpen, handleFolderTrustSelect } = - useFolderTrust(settings); + const { isFolderTrustDialogOpen, handleFolderTrustSelect } = useFolderTrust( + settings, + config, + ); const { isAuthDialogOpen, @@ -608,14 +610,18 @@ const App = ({ config, settings, startupWarnings = [], version }: AppProps) => { const handleIdePromptComplete = useCallback( (result: IdeIntegrationNudgeResult) => { - if (result === 'yes') { - handleSlashCommand('/ide install'); + if (result.userSelection === 'yes') { + if (result.isExtensionPreInstalled) { + handleSlashCommand('/ide enable'); + } else { + handleSlashCommand('/ide install'); + } settings.setValue( SettingScope.User, 'hasSeenIdeIntegrationNudge', true, ); - } else if (result === 'dismiss') { + } else if (result.userSelection === 'dismiss') { settings.setValue( SettingScope.User, 'hasSeenIdeIntegrationNudge', @@ -634,6 +640,7 @@ const App = ({ config, settings, startupWarnings = [], version }: AppProps) => { const { elapsedTime, currentLoadingPhrase } = useLoadingIndicator(streamingState); const showAutoAcceptIndicator = useAutoAcceptIndicator({ config }); + const kittyProtocolStatus = useKittyKeyboardProtocol(); const handleExit = useCallback( ( @@ -726,7 +733,11 @@ const App = ({ config, settings, startupWarnings = [], version }: AppProps) => { ], ); - useKeypress(handleGlobalKeypress, { isActive: true }); + useKeypress(handleGlobalKeypress, { + isActive: true, + kittyProtocolEnabled: kittyProtocolStatus.enabled, + config, + }); useEffect(() => { if (config) { @@ -974,9 +985,9 @@ const App = ({ config, settings, startupWarnings = [], version }: AppProps) => { )} - {shouldShowIdePrompt ? ( + {shouldShowIdePrompt && currentIDE ? ( ) : isFolderTrustDialogOpen ? 
( diff --git a/packages/cli/src/ui/IdeIntegrationNudge.tsx b/packages/cli/src/ui/IdeIntegrationNudge.tsx index f0c6172d..2be69ad7 100644 --- a/packages/cli/src/ui/IdeIntegrationNudge.tsx +++ b/packages/cli/src/ui/IdeIntegrationNudge.tsx @@ -4,44 +4,78 @@ * SPDX-License-Identifier: Apache-2.0 */ -import { Box, Text, useInput } from 'ink'; +import { DetectedIde, getIdeInfo } from '@google/gemini-cli-core'; +import { Box, Text } from 'ink'; import { RadioButtonSelect, RadioSelectItem, } from './components/shared/RadioButtonSelect.js'; +import { useKeypress } from './hooks/useKeypress.js'; -export type IdeIntegrationNudgeResult = 'yes' | 'no' | 'dismiss'; +export type IdeIntegrationNudgeResult = { + userSelection: 'yes' | 'no' | 'dismiss'; + isExtensionPreInstalled: boolean; +}; interface IdeIntegrationNudgeProps { - ideName?: string; + ide: DetectedIde; onComplete: (result: IdeIntegrationNudgeResult) => void; } export function IdeIntegrationNudge({ - ideName, + ide, onComplete, }: IdeIntegrationNudgeProps) { - useInput((_input, key) => { - if (key.escape) { - onComplete('no'); - } - }); + useKeypress( + (key) => { + if (key.name === 'escape') { + onComplete({ + userSelection: 'no', + isExtensionPreInstalled: false, + }); + } + }, + { isActive: true }, + ); + + const { displayName: ideName } = getIdeInfo(ide); + // Assume extension is already installed if the env variables are set. + const isExtensionPreInstalled = + !!process.env.GEMINI_CLI_IDE_SERVER_PORT && + !!process.env.GEMINI_CLI_IDE_WORKSPACE_PATH; const OPTIONS: Array> = [ { label: 'Yes', - value: 'yes', + value: { + userSelection: 'yes', + isExtensionPreInstalled, + }, }, { label: 'No (esc)', - value: 'no', + value: { + userSelection: 'no', + isExtensionPreInstalled, + }, }, { label: "No, don't ask again", - value: 'dismiss', + value: { + userSelection: 'dismiss', + isExtensionPreInstalled, + }, }, ]; + const installText = isExtensionPreInstalled + ? 
`If you select Yes, the CLI will have access to your open files and display diffs directly in ${ + ideName ?? 'your editor' + }.` + : `If you select Yes, we'll install an extension that allows the CLI to access your open files and display diffs directly in ${ + ideName ?? 'your editor' + }.`; + return ( {'> '} - {`Do you want to connect your ${ideName ?? 'your'} editor to Gemini CLI?`} + {`Do you want to connect ${ideName ?? 'your'} editor to Gemini CLI?`} - {`If you select Yes, we'll install an extension that allows the CLI to access your open files and display diffs directly in ${ideName ?? 'your editor'}.`} + {installText} 0) { addItem( - { - type: MessageType.ERROR, - text: errors.join('\n'), - }, + { type: MessageType.ERROR, text: errors.join('\n') }, Date.now(), ); } + return; }, }, { diff --git a/packages/cli/src/ui/commands/ideCommand.test.ts b/packages/cli/src/ui/commands/ideCommand.test.ts index 03689c28..69ec5d80 100644 --- a/packages/cli/src/ui/commands/ideCommand.test.ts +++ b/packages/cli/src/ui/commands/ideCommand.test.ts @@ -40,7 +40,6 @@ describe('ideCommand', () => { } as unknown as CommandContext; mockConfig = { - getIdeModeFeature: vi.fn(), getIdeMode: vi.fn(), getIdeClient: vi.fn(() => ({ reconnect: vi.fn(), @@ -60,14 +59,12 @@ describe('ideCommand', () => { vi.restoreAllMocks(); }); - it('should return null if ideModeFeature is not enabled', () => { - vi.mocked(mockConfig.getIdeModeFeature).mockReturnValue(false); - const command = ideCommand(mockConfig); + it('should return null if config is not provided', () => { + const command = ideCommand(null); expect(command).toBeNull(); }); - it('should return the ide command if ideModeFeature is enabled', () => { - vi.mocked(mockConfig.getIdeModeFeature).mockReturnValue(true); + it('should return the ide command', () => { vi.mocked(mockConfig.getIdeMode).mockReturnValue(true); vi.mocked(mockConfig.getIdeClient).mockReturnValue({ getCurrentIde: () => DetectedIde.VSCode, @@ -85,7 +82,6 @@ 
describe('ideCommand', () => { describe('status subcommand', () => { const mockGetConnectionStatus = vi.fn(); beforeEach(() => { - vi.mocked(mockConfig.getIdeModeFeature).mockReturnValue(true); vi.mocked(mockConfig.getIdeClient).mockReturnValue({ getConnectionStatus: mockGetConnectionStatus, getCurrentIde: () => DetectedIde.VSCode, @@ -162,7 +158,6 @@ describe('ideCommand', () => { describe('install subcommand', () => { const mockInstall = vi.fn(); beforeEach(() => { - vi.mocked(mockConfig.getIdeModeFeature).mockReturnValue(true); vi.mocked(mockConfig.getIdeMode).mockReturnValue(true); vi.mocked(mockConfig.getIdeClient).mockReturnValue({ getCurrentIde: () => DetectedIde.VSCode, diff --git a/packages/cli/src/ui/commands/ideCommand.ts b/packages/cli/src/ui/commands/ideCommand.ts index d5d1cb75..e18ab12d 100644 --- a/packages/cli/src/ui/commands/ideCommand.ts +++ b/packages/cli/src/ui/commands/ideCommand.ts @@ -9,7 +9,7 @@ import { DetectedIde, QWEN_CODE_COMPANION_EXTENSION_NAME, IDEConnectionStatus, - getIdeDisplayName, + getIdeInfo, getIdeInstaller, IdeClient, type File, @@ -116,7 +116,7 @@ async function getIdeStatusMessageWithFiles(ideClient: IdeClient): Promise<{ } export const ideCommand = (config: Config | null): SlashCommand | null => { - if (!config || !config.getIdeModeFeature()) { + if (!config) { return null; } const ideClient = config.getIdeClient(); @@ -133,7 +133,7 @@ export const ideCommand = (config: Config | null): SlashCommand | null => { content: `IDE integration is not supported in your current environment. 
To use this feature, run Gemini CLI in one of these supported IDEs: ${Object.values( DetectedIde, ) - .map((ide) => getIdeDisplayName(ide)) + .map((ide) => getIdeInfo(ide).displayName) .join(', ')}`, }) as const, }; diff --git a/packages/cli/src/ui/commands/mcpCommand.test.ts b/packages/cli/src/ui/commands/mcpCommand.test.ts index 8c7e3199..42ee3612 100644 --- a/packages/cli/src/ui/commands/mcpCommand.test.ts +++ b/packages/cli/src/ui/commands/mcpCommand.test.ts @@ -881,9 +881,14 @@ describe('mcpCommand', () => { }), getToolRegistry: vi.fn().mockResolvedValue(mockToolRegistry), getGeminiClient: vi.fn().mockReturnValue(mockGeminiClient), + getPromptRegistry: vi.fn().mockResolvedValue({ + removePromptsByServer: vi.fn(), + }), }, }, }); + // Mock the reloadCommands function + context.ui.reloadCommands = vi.fn(); const { MCPOAuthProvider } = await import('@qwen-code/qwen-code-core'); @@ -901,6 +906,7 @@ describe('mcpCommand', () => { 'test-server', ); expect(mockGeminiClient.setTools).toHaveBeenCalled(); + expect(context.ui.reloadCommands).toHaveBeenCalledTimes(1); expect(isMessageAction(result)).toBe(true); if (isMessageAction(result)) { @@ -985,6 +991,8 @@ describe('mcpCommand', () => { }, }, }); + // Mock the reloadCommands function, which is new logic. 
+ context.ui.reloadCommands = vi.fn(); const refreshCommand = mcpCommand.subCommands?.find( (cmd) => cmd.name === 'refresh', @@ -1002,6 +1010,7 @@ describe('mcpCommand', () => { ); expect(mockToolRegistry.discoverMcpTools).toHaveBeenCalled(); expect(mockGeminiClient.setTools).toHaveBeenCalled(); + expect(context.ui.reloadCommands).toHaveBeenCalledTimes(1); expect(isMessageAction(result)).toBe(true); if (isMessageAction(result)) { diff --git a/packages/cli/src/ui/commands/mcpCommand.ts b/packages/cli/src/ui/commands/mcpCommand.ts index 2660da7a..db537b7b 100644 --- a/packages/cli/src/ui/commands/mcpCommand.ts +++ b/packages/cli/src/ui/commands/mcpCommand.ts @@ -417,6 +417,9 @@ const authCommand: SlashCommand = { await geminiClient.setTools(); } + // Reload the slash commands to reflect the changes. + context.ui.reloadCommands(); + return { type: 'message', messageType: 'info', @@ -507,6 +510,9 @@ const refreshCommand: SlashCommand = { await geminiClient.setTools(); } + // Reload the slash commands to reflect the changes. 
+ context.ui.reloadCommands(); + return getMcpStatus(context, false, false, false); }, }; diff --git a/packages/cli/src/ui/commands/terminalSetupCommand.test.ts b/packages/cli/src/ui/commands/terminalSetupCommand.test.ts new file mode 100644 index 00000000..85f8735e --- /dev/null +++ b/packages/cli/src/ui/commands/terminalSetupCommand.test.ts @@ -0,0 +1,85 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { describe, it, expect, vi, beforeEach } from 'vitest'; +import { terminalSetupCommand } from './terminalSetupCommand.js'; +import * as terminalSetupModule from '../utils/terminalSetup.js'; +import { CommandContext } from './types.js'; + +vi.mock('../utils/terminalSetup.js'); + +describe('terminalSetupCommand', () => { + beforeEach(() => { + vi.clearAllMocks(); + }); + + it('should have correct metadata', () => { + expect(terminalSetupCommand.name).toBe('terminal-setup'); + expect(terminalSetupCommand.description).toContain('multiline input'); + expect(terminalSetupCommand.kind).toBe('built-in'); + }); + + it('should return success message when terminal setup succeeds', async () => { + vi.spyOn(terminalSetupModule, 'terminalSetup').mockResolvedValue({ + success: true, + message: 'Terminal configured successfully', + }); + + const result = await terminalSetupCommand.action({} as CommandContext, ''); + + expect(result).toEqual({ + type: 'message', + content: 'Terminal configured successfully', + messageType: 'info', + }); + }); + + it('should append restart message when terminal setup requires restart', async () => { + vi.spyOn(terminalSetupModule, 'terminalSetup').mockResolvedValue({ + success: true, + message: 'Terminal configured successfully', + requiresRestart: true, + }); + + const result = await terminalSetupCommand.action({} as CommandContext, ''); + + expect(result).toEqual({ + type: 'message', + content: + 'Terminal configured successfully\n\nPlease restart your terminal for the changes to take 
effect.', + messageType: 'info', + }); + }); + + it('should return error message when terminal setup fails', async () => { + vi.spyOn(terminalSetupModule, 'terminalSetup').mockResolvedValue({ + success: false, + message: 'Failed to detect terminal', + }); + + const result = await terminalSetupCommand.action({} as CommandContext, ''); + + expect(result).toEqual({ + type: 'message', + content: 'Failed to detect terminal', + messageType: 'error', + }); + }); + + it('should handle exceptions from terminal setup', async () => { + vi.spyOn(terminalSetupModule, 'terminalSetup').mockRejectedValue( + new Error('Unexpected error'), + ); + + const result = await terminalSetupCommand.action({} as CommandContext, ''); + + expect(result).toEqual({ + type: 'message', + content: 'Failed to configure terminal: Error: Unexpected error', + messageType: 'error', + }); + }); +}); diff --git a/packages/cli/src/ui/commands/terminalSetupCommand.ts b/packages/cli/src/ui/commands/terminalSetupCommand.ts new file mode 100644 index 00000000..11520c0e --- /dev/null +++ b/packages/cli/src/ui/commands/terminalSetupCommand.ts @@ -0,0 +1,45 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { MessageActionReturn, SlashCommand, CommandKind } from './types.js'; +import { terminalSetup } from '../utils/terminalSetup.js'; + +/** + * Command to configure terminal keybindings for multiline input support. + * + * This command automatically detects and configures VS Code, Cursor, and Windsurf + * to support Shift+Enter and Ctrl+Enter for multiline input. 
+ */ +export const terminalSetupCommand: SlashCommand = { + name: 'terminal-setup', + description: + 'Configure terminal keybindings for multiline input (VS Code, Cursor, Windsurf)', + kind: CommandKind.BUILT_IN, + + action: async (): Promise => { + try { + const result = await terminalSetup(); + + let content = result.message; + if (result.requiresRestart) { + content += + '\n\nPlease restart your terminal for the changes to take effect.'; + } + + return { + type: 'message', + content, + messageType: result.success ? 'info' : 'error', + }; + } catch (error) { + return { + type: 'message', + content: `Failed to configure terminal: ${error}`, + messageType: 'error', + }; + } + }, +}; diff --git a/packages/cli/src/ui/commands/types.ts b/packages/cli/src/ui/commands/types.ts index de21f8eb..4c4925b7 100644 --- a/packages/cli/src/ui/commands/types.ts +++ b/packages/cli/src/ui/commands/types.ts @@ -61,6 +61,7 @@ export interface CommandContext { toggleCorgiMode: () => void; toggleVimEnabled: () => Promise; setGeminiMdFileCount: (count: number) => void; + reloadCommands: () => void; }; // Session-specific data session: { diff --git a/packages/cli/src/ui/components/AuthDialog.tsx b/packages/cli/src/ui/components/AuthDialog.tsx index 1b8e6b8a..6bc0131f 100644 --- a/packages/cli/src/ui/components/AuthDialog.tsx +++ b/packages/cli/src/ui/components/AuthDialog.tsx @@ -4,19 +4,20 @@ * SPDX-License-Identifier: Apache-2.0 */ -import React, { useState } from 'react'; -import { Box, Text, useInput } from 'ink'; -import { Colors } from '../colors.js'; -import { RadioButtonSelect } from './shared/RadioButtonSelect.js'; -import { LoadedSettings, SettingScope } from '../../config/settings.js'; import { AuthType } from '@qwen-code/qwen-code-core'; +import { Box, Text } from 'ink'; +import React, { useState } from 'react'; import { - validateAuthMethod, setOpenAIApiKey, setOpenAIBaseUrl, setOpenAIModel, + validateAuthMethod, } from '../../config/auth.js'; +import { LoadedSettings, 
SettingScope } from '../../config/settings.js'; +import { Colors } from '../colors.js'; +import { useKeypress } from '../hooks/useKeypress.js'; import { OpenAIKeyPrompt } from './OpenAIKeyPrompt.js'; +import { RadioButtonSelect } from './shared/RadioButtonSelect.js'; interface AuthDialogProps { onSelect: (authMethod: AuthType | undefined, scope: SettingScope) => void; @@ -108,27 +109,31 @@ export function AuthDialog({ setErrorMessage('OpenAI API key is required to use OpenAI authentication.'); }; - useInput((_input, key) => { - if (showOpenAIKeyPrompt) { - return; - } + useKeypress( + (key) => { - if (key.escape) { - // Prevent exit if there is an error message. - // This means they user is not authenticated yet. - if (errorMessage) { + if (showOpenAIKeyPrompt) { return; } - if (settings.merged.selectedAuthType === undefined) { - // Prevent exiting if no auth method is set - setErrorMessage( - 'You must select an auth method to proceed. Press Ctrl+C twice to exit.', - ); - return; + + if (key.name === 'escape') { + // Prevent exit if there is an error message. + // This means they user is not authenticated yet. + if (errorMessage) { + return; + } + if (settings.merged.selectedAuthType === undefined) { + // Prevent exiting if no auth method is set + setErrorMessage( + 'You must select an auth method to proceed. 
Press Ctrl+C twice to exit.', + ); + return; + } + onSelect(undefined, SettingScope.User); } - onSelect(undefined, SettingScope.User); - } - }); + }, + { isActive: true }, + ); if (showOpenAIKeyPrompt) { return ( diff --git a/packages/cli/src/ui/components/AuthInProgress.tsx b/packages/cli/src/ui/components/AuthInProgress.tsx index f05efe1d..53377c7c 100644 --- a/packages/cli/src/ui/components/AuthInProgress.tsx +++ b/packages/cli/src/ui/components/AuthInProgress.tsx @@ -5,9 +5,10 @@ */ import React, { useState, useEffect } from 'react'; -import { Box, Text, useInput } from 'ink'; +import { Box, Text } from 'ink'; import Spinner from 'ink-spinner'; import { Colors } from '../colors.js'; +import { useKeypress } from '../hooks/useKeypress.js'; interface AuthInProgressProps { onTimeout: () => void; @@ -18,11 +19,14 @@ export function AuthInProgress({ }: AuthInProgressProps): React.JSX.Element { const [timedOut, setTimedOut] = useState(false); - useInput((input, key) => { - if (key.escape || (key.ctrl && (input === 'c' || input === 'C'))) { - onTimeout(); - } - }); + useKeypress( + (key) => { + if (key.name === 'escape' || (key.ctrl && key.name === 'c')) { + onTimeout(); + } + }, + { isActive: true }, + ); useEffect(() => { const timer = setTimeout(() => { diff --git a/packages/cli/src/ui/components/DebugProfiler.tsx b/packages/cli/src/ui/components/DebugProfiler.tsx index 89c40a91..22c16cfb 100644 --- a/packages/cli/src/ui/components/DebugProfiler.tsx +++ b/packages/cli/src/ui/components/DebugProfiler.tsx @@ -4,9 +4,10 @@ * SPDX-License-Identifier: Apache-2.0 */ -import { Text, useInput } from 'ink'; +import { Text } from 'ink'; import { useEffect, useRef, useState } from 'react'; import { Colors } from '../colors.js'; +import { useKeypress } from '../hooks/useKeypress.js'; export const DebugProfiler = () => { const numRenders = useRef(0); @@ -16,11 +17,14 @@ export const DebugProfiler = () => { numRenders.current++; }); - useInput((input, key) => { - if (key.ctrl && 
input === 'b') { - setShowNumRenders((prev) => !prev); - } - }); + useKeypress( + (key) => { + if (key.ctrl && key.name === 'b') { + setShowNumRenders((prev) => !prev); + } + }, + { isActive: true }, + ); if (!showNumRenders) { return null; diff --git a/packages/cli/src/ui/components/EditorSettingsDialog.tsx b/packages/cli/src/ui/components/EditorSettingsDialog.tsx index 3f32f40d..8732e23b 100644 --- a/packages/cli/src/ui/components/EditorSettingsDialog.tsx +++ b/packages/cli/src/ui/components/EditorSettingsDialog.tsx @@ -5,7 +5,7 @@ */ import React, { useState } from 'react'; -import { Box, Text, useInput } from 'ink'; +import { Box, Text } from 'ink'; import { Colors } from '../colors.js'; import { EDITOR_DISPLAY_NAMES, @@ -15,6 +15,7 @@ import { import { RadioButtonSelect } from './shared/RadioButtonSelect.js'; import { LoadedSettings, SettingScope } from '../../config/settings.js'; import { EditorType, isEditorAvailable } from '@qwen-code/qwen-code-core'; +import { useKeypress } from '../hooks/useKeypress.js'; interface EditorDialogProps { onSelect: (editorType: EditorType | undefined, scope: SettingScope) => void; @@ -33,14 +34,17 @@ export function EditorSettingsDialog({ const [focusedSection, setFocusedSection] = useState<'editor' | 'scope'>( 'editor', ); - useInput((_, key) => { - if (key.tab) { - setFocusedSection((prev) => (prev === 'editor' ? 'scope' : 'editor')); - } - if (key.escape) { - onExit(); - } - }); + useKeypress( + (key) => { + if (key.name === 'tab') { + setFocusedSection((prev) => (prev === 'editor' ? 'scope' : 'editor')); + } + if (key.name === 'escape') { + onExit(); + } + }, + { isActive: true }, + ); const editorItems: EditorDisplay[] = editorSettingsManager.getAvailableEditorDisplays(); @@ -49,8 +53,8 @@ export function EditorSettingsDialog({ settings.forScope(selectedScope).settings.preferredEditor; let editorIndex = currentPreference ? 
editorItems.findIndex( - (item: EditorDisplay) => item.type === currentPreference, - ) + (item: EditorDisplay) => item.type === currentPreference, + ) : 0; if (editorIndex === -1) { console.error(`Editor is not supported: ${currentPreference}`); diff --git a/packages/cli/src/ui/components/FolderTrustDialog.test.tsx b/packages/cli/src/ui/components/FolderTrustDialog.test.tsx index 01394d0f..d1be0b61 100644 --- a/packages/cli/src/ui/components/FolderTrustDialog.test.tsx +++ b/packages/cli/src/ui/components/FolderTrustDialog.test.tsx @@ -5,6 +5,7 @@ */ import { render } from 'ink-testing-library'; +import { waitFor } from '@testing-library/react'; import { vi } from 'vitest'; import { FolderTrustDialog, FolderTrustChoice } from './FolderTrustDialog.js'; @@ -18,12 +19,14 @@ describe('FolderTrustDialog', () => { ); }); - it('should call onSelect with DO_NOT_TRUST when escape is pressed', () => { + it('should call onSelect with DO_NOT_TRUST when escape is pressed', async () => { const onSelect = vi.fn(); const { stdin } = render(); - stdin.write('\u001B'); // Simulate escape key + stdin.write('\x1b'); - expect(onSelect).toHaveBeenCalledWith(FolderTrustChoice.DO_NOT_TRUST); + await waitFor(() => { + expect(onSelect).toHaveBeenCalledWith(FolderTrustChoice.DO_NOT_TRUST); + }); }); }); diff --git a/packages/cli/src/ui/components/FolderTrustDialog.tsx b/packages/cli/src/ui/components/FolderTrustDialog.tsx index 1918998c..30f3ff52 100644 --- a/packages/cli/src/ui/components/FolderTrustDialog.tsx +++ b/packages/cli/src/ui/components/FolderTrustDialog.tsx @@ -4,13 +4,14 @@ * SPDX-License-Identifier: Apache-2.0 */ -import { Box, Text, useInput } from 'ink'; +import { Box, Text } from 'ink'; import React from 'react'; import { Colors } from '../colors.js'; import { RadioButtonSelect, RadioSelectItem, } from './shared/RadioButtonSelect.js'; +import { useKeypress } from '../hooks/useKeypress.js'; export enum FolderTrustChoice { TRUST_FOLDER = 'trust_folder', @@ -25,11 +26,14 @@ 
interface FolderTrustDialogProps { export const FolderTrustDialog: React.FC = ({ onSelect, }) => { - useInput((_, key) => { - if (key.escape) { - onSelect(FolderTrustChoice.DO_NOT_TRUST); - } - }); + useKeypress( + (key) => { + if (key.name === 'escape') { + onSelect(FolderTrustChoice.DO_NOT_TRUST); + } + }, + { isActive: true }, + ); const options: Array> = [ { diff --git a/packages/cli/src/ui/components/InputPrompt.tsx b/packages/cli/src/ui/components/InputPrompt.tsx index 357210a7..7b49868b 100644 --- a/packages/cli/src/ui/components/InputPrompt.tsx +++ b/packages/cli/src/ui/components/InputPrompt.tsx @@ -17,6 +17,7 @@ import { useShellHistory } from '../hooks/useShellHistory.js'; import { useReverseSearchCompletion } from '../hooks/useReverseSearchCompletion.js'; import { useCommandCompletion } from '../hooks/useCommandCompletion.js'; import { useKeypress, Key } from '../hooks/useKeypress.js'; +import { useKittyKeyboardProtocol } from '../hooks/useKittyKeyboardProtocol.js'; import { keyMatchers, Command } from '../keyMatchers.js'; import { CommandContext, SlashCommand } from '../commands/types.js'; import { Config } from '@qwen-code/qwen-code-core'; @@ -66,6 +67,7 @@ export const InputPrompt: React.FC = ({ const [escPressCount, setEscPressCount] = useState(0); const [showEscapePrompt, setShowEscapePrompt] = useState(false); const escapeTimerRef = useRef(null); + const kittyProtocolStatus = useKittyKeyboardProtocol(); const [dirs, setDirs] = useState( config.getWorkspaceContext().getDirectories(), @@ -525,7 +527,11 @@ export const InputPrompt: React.FC = ({ ], ); - useKeypress(handleInput, { isActive: true }); + useKeypress(handleInput, { + isActive: true, + kittyProtocolEnabled: kittyProtocolStatus.enabled, + config, + }); const linesToRender = buffer.viewportVisualLines; const [cursorVisualRowAbsolute, cursorVisualColAbsolute] = diff --git a/packages/cli/src/ui/components/SettingsDialog.tsx b/packages/cli/src/ui/components/SettingsDialog.tsx index 
80e2339f..a09cd76a 100644 --- a/packages/cli/src/ui/components/SettingsDialog.tsx +++ b/packages/cli/src/ui/components/SettingsDialog.tsx @@ -5,7 +5,7 @@ */ import React, { useState, useEffect } from 'react'; -import { Box, Text, useInput } from 'ink'; +import { Box, Text } from 'ink'; import { Colors } from '../colors.js'; import { LoadedSettings, @@ -31,6 +31,7 @@ import { getDefaultValue, } from '../../utils/settingsUtils.js'; import { useVimMode } from '../contexts/VimModeContext.js'; +import { useKeypress } from '../hooks/useKeypress.js'; interface SettingsDialogProps { settings: LoadedSettings; @@ -256,107 +257,111 @@ export function SettingsDialog({ const showScrollUp = true; const showScrollDown = true; - useInput((input, key) => { - if (key.tab) { - setFocusSection((prev) => (prev === 'settings' ? 'scope' : 'settings')); - } - if (focusSection === 'settings') { - if (key.upArrow || input === 'k') { - const newIndex = - activeSettingIndex > 0 ? activeSettingIndex - 1 : items.length - 1; - setActiveSettingIndex(newIndex); - // Adjust scroll offset for wrap-around - if (newIndex === items.length - 1) { - setScrollOffset(Math.max(0, items.length - maxItemsToShow)); - } else if (newIndex < scrollOffset) { - setScrollOffset(newIndex); - } - } else if (key.downArrow || input === 'j') { - const newIndex = - activeSettingIndex < items.length - 1 ? 
activeSettingIndex + 1 : 0; - setActiveSettingIndex(newIndex); - // Adjust scroll offset for wrap-around - if (newIndex === 0) { - setScrollOffset(0); - } else if (newIndex >= scrollOffset + maxItemsToShow) { - setScrollOffset(newIndex - maxItemsToShow + 1); - } - } else if (key.return || input === ' ') { - items[activeSettingIndex]?.toggle(); - } else if ((key.ctrl && input === 'c') || (key.ctrl && input === 'l')) { - // Ctrl+C or Ctrl+L: Clear current setting and reset to default - const currentSetting = items[activeSettingIndex]; - if (currentSetting) { - const defaultValue = getDefaultValue(currentSetting.value); - // Ensure defaultValue is a boolean for setPendingSettingValue - const booleanDefaultValue = - typeof defaultValue === 'boolean' ? defaultValue : false; + useKeypress( + (key) => { + const { name, ctrl } = key; + if (name === 'tab') { + setFocusSection((prev) => (prev === 'settings' ? 'scope' : 'settings')); + } + if (focusSection === 'settings') { + if (name === 'up' || name === 'k') { + const newIndex = + activeSettingIndex > 0 ? activeSettingIndex - 1 : items.length - 1; + setActiveSettingIndex(newIndex); + // Adjust scroll offset for wrap-around + if (newIndex === items.length - 1) { + setScrollOffset(Math.max(0, items.length - maxItemsToShow)); + } else if (newIndex < scrollOffset) { + setScrollOffset(newIndex); + } + } else if (name === 'down' || name === 'j') { + const newIndex = + activeSettingIndex < items.length - 1 ? 
activeSettingIndex + 1 : 0; + setActiveSettingIndex(newIndex); + // Adjust scroll offset for wrap-around + if (newIndex === 0) { + setScrollOffset(0); + } else if (newIndex >= scrollOffset + maxItemsToShow) { + setScrollOffset(newIndex - maxItemsToShow + 1); + } + } else if (name === 'return' || name === 'space') { + items[activeSettingIndex]?.toggle(); + } else if (ctrl && (name === 'c' || name === 'l')) { + // Ctrl+C or Ctrl+L: Clear current setting and reset to default + const currentSetting = items[activeSettingIndex]; + if (currentSetting) { + const defaultValue = getDefaultValue(currentSetting.value); + // Ensure defaultValue is a boolean for setPendingSettingValue + const booleanDefaultValue = + typeof defaultValue === 'boolean' ? defaultValue : false; - // Update pending settings to default value - setPendingSettings((prev) => - setPendingSettingValue( - currentSetting.value, - booleanDefaultValue, - prev, - ), - ); - - // Remove from modified settings since it's now at default - setModifiedSettings((prev) => { - const updated = new Set(prev); - updated.delete(currentSetting.value); - return updated; - }); - - // Remove from restart-required settings if it was there - setRestartRequiredSettings((prev) => { - const updated = new Set(prev); - updated.delete(currentSetting.value); - return updated; - }); - - // If this setting doesn't require restart, save it immediately - if (!requiresRestart(currentSetting.value)) { - const immediateSettings = new Set([currentSetting.value]); - const immediateSettingsObject = setPendingSettingValue( - currentSetting.value, - booleanDefaultValue, - {}, + // Update pending settings to default value + setPendingSettings((prev) => + setPendingSettingValue( + currentSetting.value, + booleanDefaultValue, + prev, + ), ); - saveModifiedSettings( - immediateSettings, - immediateSettingsObject, - settings, - selectedScope, - ); + // Remove from modified settings since it's now at default + setModifiedSettings((prev) => { + const 
updated = new Set(prev); + updated.delete(currentSetting.value); + return updated; + }); + + // Remove from restart-required settings if it was there + setRestartRequiredSettings((prev) => { + const updated = new Set(prev); + updated.delete(currentSetting.value); + return updated; + }); + + // If this setting doesn't require restart, save it immediately + if (!requiresRestart(currentSetting.value)) { + const immediateSettings = new Set([currentSetting.value]); + const immediateSettingsObject = setPendingSettingValue( + currentSetting.value, + booleanDefaultValue, + {}, + ); + + saveModifiedSettings( + immediateSettings, + immediateSettingsObject, + settings, + selectedScope, + ); + } } } } - } - if (showRestartPrompt && input === 'r') { - // Only save settings that require restart (non-restart settings were already saved immediately) - const restartRequiredSettings = - getRestartRequiredFromModified(modifiedSettings); - const restartRequiredSet = new Set(restartRequiredSettings); + if (showRestartPrompt && name === 'r') { + // Only save settings that require restart (non-restart settings were already saved immediately) + const restartRequiredSettings = + getRestartRequiredFromModified(modifiedSettings); + const restartRequiredSet = new Set(restartRequiredSettings); - if (restartRequiredSet.size > 0) { - saveModifiedSettings( - restartRequiredSet, - pendingSettings, - settings, - selectedScope, - ); + if (restartRequiredSet.size > 0) { + saveModifiedSettings( + restartRequiredSet, + pendingSettings, + settings, + selectedScope, + ); + } + + setShowRestartPrompt(false); + setRestartRequiredSettings(new Set()); // Clear restart-required settings + if (onRestartRequest) onRestartRequest(); } - - setShowRestartPrompt(false); - setRestartRequiredSettings(new Set()); // Clear restart-required settings - if (onRestartRequest) onRestartRequest(); - } - if (key.escape) { - onSelect(undefined, selectedScope); - } - }); + if (name === 'escape') { + onSelect(undefined, 
selectedScope); + } + }, + { isActive: true }, + ); return ( = ({ request }) => { const { commands, onConfirm } = request; - useInput((_, key) => { - if (key.escape) { - onConfirm(ToolConfirmationOutcome.Cancel); - } - }); + useKeypress( + (key) => { + if (key.name === 'escape') { + onConfirm(ToolConfirmationOutcome.Cancel); + } + }, + { isActive: true }, + ); const handleSelect = (item: ToolConfirmationOutcome) => { if (item === ToolConfirmationOutcome.Cancel) { diff --git a/packages/cli/src/ui/components/ThemeDialog.tsx b/packages/cli/src/ui/components/ThemeDialog.tsx index 37663447..16ecfc8f 100644 --- a/packages/cli/src/ui/components/ThemeDialog.tsx +++ b/packages/cli/src/ui/components/ThemeDialog.tsx @@ -5,7 +5,7 @@ */ import React, { useCallback, useState } from 'react'; -import { Box, Text, useInput } from 'ink'; +import { Box, Text } from 'ink'; import { Colors } from '../colors.js'; import { themeManager, DEFAULT_THEME } from '../themes/theme-manager.js'; import { RadioButtonSelect } from './shared/RadioButtonSelect.js'; @@ -16,6 +16,7 @@ import { getScopeItems, getScopeMessageForSetting, } from '../../utils/dialogScopeUtils.js'; +import { useKeypress } from '../hooks/useKeypress.js'; interface ThemeDialogProps { /** Callback function when a theme is selected */ @@ -111,14 +112,17 @@ export function ThemeDialog({ 'theme', ); - useInput((input, key) => { - if (key.tab) { - setFocusedSection((prev) => (prev === 'theme' ? 'scope' : 'theme')); - } - if (key.escape) { - onSelect(undefined, selectedScope); - } - }); + useKeypress( + (key) => { + if (key.name === 'tab') { + setFocusedSection((prev) => (prev === 'theme' ? 
'scope' : 'theme')); + } + if (key.name === 'escape') { + onSelect(undefined, selectedScope); + } + }, + { isActive: true }, + ); // Generate scope message for theme setting const otherScopeModifiedMessage = getScopeMessageForSetting( diff --git a/packages/cli/src/ui/components/messages/ToolConfirmationMessage.tsx b/packages/cli/src/ui/components/messages/ToolConfirmationMessage.tsx index 0e6e2eed..0f9c094f 100644 --- a/packages/cli/src/ui/components/messages/ToolConfirmationMessage.tsx +++ b/packages/cli/src/ui/components/messages/ToolConfirmationMessage.tsx @@ -5,7 +5,7 @@ */ import React from 'react'; -import { Box, Text, useInput } from 'ink'; +import { Box, Text } from 'ink'; import { DiffRenderer } from './DiffRenderer.js'; import { Colors } from '../../colors.js'; import { @@ -20,6 +20,7 @@ import { RadioSelectItem, } from '../shared/RadioButtonSelect.js'; import { MaxSizedBox } from '../shared/MaxSizedBox.js'; +import { useKeypress } from '../../hooks/useKeypress.js'; export interface ToolConfirmationMessageProps { confirmationDetails: ToolCallConfirmationDetails; @@ -44,7 +45,7 @@ export const ToolConfirmationMessage: React.FC< const handleConfirm = async (outcome: ToolConfirmationOutcome) => { if (confirmationDetails.type === 'edit') { const ideClient = config?.getIdeClient(); - if (config?.getIdeMode() && config?.getIdeModeFeature()) { + if (config?.getIdeMode()) { const cliOutcome = outcome === ToolConfirmationOutcome.Cancel ? 
'rejected' : 'accepted'; await ideClient?.resolveDiffFromCli( @@ -56,12 +57,15 @@ export const ToolConfirmationMessage: React.FC< onConfirm(outcome); }; - useInput((input, key) => { - if (!isFocused) return; - if (key.escape || (key.ctrl && (input === 'c' || input === 'C'))) { - handleConfirm(ToolConfirmationOutcome.Cancel); - } - }); + useKeypress( + (key) => { + if (!isFocused) return; + if (key.name === 'escape' || (key.ctrl && key.name === 'c')) { + handleConfirm(ToolConfirmationOutcome.Cancel); + } + }, + { isActive: isFocused }, + ); const handleSelect = (item: ToolConfirmationOutcome) => handleConfirm(item); @@ -132,7 +136,7 @@ export const ToolConfirmationMessage: React.FC< value: ToolConfirmationOutcome.ProceedAlways, }, ); - if (config?.getIdeMode() && config?.getIdeModeFeature()) { + if (config?.getIdeMode()) { options.push({ label: 'No (esc)', value: ToolConfirmationOutcome.Cancel, diff --git a/packages/cli/src/ui/components/shared/RadioButtonSelect.tsx b/packages/cli/src/ui/components/shared/RadioButtonSelect.tsx index 8b0057ca..511d3847 100644 --- a/packages/cli/src/ui/components/shared/RadioButtonSelect.tsx +++ b/packages/cli/src/ui/components/shared/RadioButtonSelect.tsx @@ -5,8 +5,9 @@ */ import React, { useEffect, useState, useRef } from 'react'; -import { Text, Box, useInput } from 'ink'; +import { Text, Box } from 'ink'; import { Colors } from '../../colors.js'; +import { useKeypress } from '../../hooks/useKeypress.js'; /** * Represents a single option for the RadioButtonSelect. @@ -85,9 +86,10 @@ export function RadioButtonSelect({ [], ); - useInput( - (input, key) => { - const isNumeric = showNumbers && /^[0-9]$/.test(input); + useKeypress( + (key) => { + const { sequence, name } = key; + const isNumeric = showNumbers && /^[0-9]$/.test(sequence); // Any key press that is not a digit should clear the number input buffer. 
if (!isNumeric && numberInputTimer.current) { @@ -95,21 +97,21 @@ export function RadioButtonSelect({ setNumberInput(''); } - if (input === 'k' || key.upArrow) { + if (name === 'k' || name === 'up') { const newIndex = activeIndex > 0 ? activeIndex - 1 : items.length - 1; setActiveIndex(newIndex); onHighlight?.(items[newIndex]!.value); return; } - if (input === 'j' || key.downArrow) { + if (name === 'j' || name === 'down') { const newIndex = activeIndex < items.length - 1 ? activeIndex + 1 : 0; setActiveIndex(newIndex); onHighlight?.(items[newIndex]!.value); return; } - if (key.return) { + if (name === 'return') { onSelect(items[activeIndex]!.value); return; } @@ -120,7 +122,7 @@ export function RadioButtonSelect({ clearTimeout(numberInputTimer.current); } - const newNumberInput = numberInput + input; + const newNumberInput = numberInput + sequence; setNumberInput(newNumberInput); const targetIndex = Number.parseInt(newNumberInput, 10) - 1; @@ -154,7 +156,7 @@ export function RadioButtonSelect({ } } }, - { isActive: isFocused && items.length > 0 }, + { isActive: !!(isFocused && items.length > 0) }, ); const visibleItems = items.slice(scrollOffset, scrollOffset + maxItemsToShow); diff --git a/packages/cli/src/ui/components/shared/text-buffer.test.ts b/packages/cli/src/ui/components/shared/text-buffer.test.ts index fb75179e..b5f2d8c0 100644 --- a/packages/cli/src/ui/components/shared/text-buffer.test.ts +++ b/packages/cli/src/ui/components/shared/text-buffer.test.ts @@ -5,6 +5,7 @@ */ import { describe, it, expect, beforeEach } from 'vitest'; +import stripAnsi from 'strip-ansi'; import { renderHook, act } from '@testing-library/react'; import { useTextBuffer, @@ -1278,6 +1279,45 @@ Contrary to popular belief, Lorem Ipsum is not simply random text. 
It has roots ); expect(getBufferState(result).text).toBe('Pasted Text'); }); + + it('should not strip popular emojis', () => { + const { result } = renderHook(() => + useTextBuffer({ viewport, isValidPath: () => false }), + ); + const emojis = 'πŸπŸ³πŸ¦€πŸ¦„'; + act(() => + result.current.handleInput({ + name: '', + ctrl: false, + meta: false, + shift: false, + paste: false, + sequence: emojis, + }), + ); + expect(getBufferState(result).text).toBe(emojis); + }); + }); + + describe('stripAnsi', () => { + it('should correctly strip ANSI escape codes', () => { + const textWithAnsi = '\x1B[31mHello\x1B[0m World'; + expect(stripAnsi(textWithAnsi)).toBe('Hello World'); + }); + + it('should handle multiple ANSI codes', () => { + const textWithMultipleAnsi = '\x1B[1m\x1B[34mBold Blue\x1B[0m Text'; + expect(stripAnsi(textWithMultipleAnsi)).toBe('Bold Blue Text'); + }); + + it('should not modify text without ANSI codes', () => { + const plainText = 'Plain text'; + expect(stripAnsi(plainText)).toBe('Plain text'); + }); + + it('should handle empty string', () => { + expect(stripAnsi('')).toBe(''); + }); }); }); diff --git a/packages/cli/src/ui/components/shared/text-buffer.ts b/packages/cli/src/ui/components/shared/text-buffer.ts index 5472c5ef..936f6dee 100644 --- a/packages/cli/src/ui/components/shared/text-buffer.ts +++ b/packages/cli/src/ui/components/shared/text-buffer.ts @@ -5,6 +5,7 @@ */ import stripAnsi from 'strip-ansi'; +import { stripVTControlCharacters } from 'util'; import { spawnSync } from 'child_process'; import fs from 'fs'; import os from 'os'; @@ -496,21 +497,44 @@ export const replaceRangeInternal = ( /** * Strip characters that can break terminal rendering. * - * Strip ANSI escape codes and control characters except for line breaks. - * Control characters such as delete break terminal UI rendering. + * Uses Node.js built-in stripVTControlCharacters to handle VT sequences, + * then filters remaining control characters that can disrupt display. 
+ * + * Characters stripped: + * - ANSI escape sequences (via strip-ansi) + * - VT control sequences (via Node.js util.stripVTControlCharacters) + * - C0 control chars (0x00-0x1F) except CR/LF which are handled elsewhere + * - C1 control chars (0x80-0x9F) that can cause display issues + * + * Characters preserved: + * - All printable Unicode including emojis + * - DEL (0x7F) - handled functionally by applyOperations, not a display issue + * - CR/LF (0x0D/0x0A) - needed for line breaks */ function stripUnsafeCharacters(str: string): string { - const stripped = stripAnsi(str); - return toCodePoints(stripped) + const strippedAnsi = stripAnsi(str); + const strippedVT = stripVTControlCharacters(strippedAnsi); + + return toCodePoints(strippedVT) .filter((char) => { - if (char.length > 1) return false; const code = char.codePointAt(0); - if (code === undefined) { - return false; - } - const isUnsafe = - code === 127 || (code <= 31 && code !== 13 && code !== 10); - return !isUnsafe; + if (code === undefined) return false; + + // Preserve CR/LF for line handling + if (code === 0x0a || code === 0x0d) return true; + + // Remove C0 control chars (except CR/LF) that can break display + // Examples: BELL(0x07) makes noise, BS(0x08) moves cursor, VT(0x0B), FF(0x0C) + if (code >= 0x00 && code <= 0x1f) return false; + + // Remove C1 control chars (0x80-0x9F) - legacy 8-bit control codes + if (code >= 0x80 && code <= 0x9f) return false; + + // Preserve DEL (0x7F) - it's handled functionally by applyOperations as backspace + // and doesn't cause rendering issues when displayed + + // Preserve all other characters including Unicode/emojis + return true; }) .join(''); } diff --git a/packages/cli/src/ui/components/shared/vim-buffer-actions.ts b/packages/cli/src/ui/components/shared/vim-buffer-actions.ts index 0e2e7989..bf04716f 100644 --- a/packages/cli/src/ui/components/shared/vim-buffer-actions.ts +++ b/packages/cli/src/ui/components/shared/vim-buffer-actions.ts @@ -19,6 +19,7 @@ 
import { findWordEndInLine, } from './text-buffer.js'; import { cpLen, toCodePoints } from '../../utils/textUtils.js'; +import { assumeExhaustive } from '../../../utils/checks.js'; // Check if we're at the end of a base word (on the last base character) // Returns true if current position has a base character followed only by combining marks until non-word @@ -806,7 +807,7 @@ export function handleVimAction( default: { // This should never happen if TypeScript is working correctly - const _exhaustiveCheck: never = action; + assumeExhaustive(action); return state; } } diff --git a/packages/cli/src/ui/hooks/slashCommandProcessor.test.ts b/packages/cli/src/ui/hooks/slashCommandProcessor.test.ts index 8a9ded82..ce1ae3f3 100644 --- a/packages/cli/src/ui/hooks/slashCommandProcessor.test.ts +++ b/packages/cli/src/ui/hooks/slashCommandProcessor.test.ts @@ -4,9 +4,8 @@ * SPDX-License-Identifier: Apache-2.0 */ -const { logSlashCommand, SlashCommandEvent } = vi.hoisted(() => ({ +const { logSlashCommand } = vi.hoisted(() => ({ logSlashCommand: vi.fn(), - SlashCommandEvent: vi.fn((command, subCommand) => ({ command, subCommand })), })); vi.mock('@qwen-code/qwen-code-core', async (importOriginal) => { @@ -15,7 +14,6 @@ vi.mock('@qwen-code/qwen-code-core', async (importOriginal) => { return { ...original, logSlashCommand, - SlashCommandEvent, getIdeInstaller: vi.fn().mockReturnValue(null), }; }); @@ -25,10 +23,10 @@ const { mockProcessExit } = vi.hoisted(() => ({ })); vi.mock('node:process', () => { - const mockProcess = { + const mockProcess: Partial = { exit: mockProcessExit, - platform: 'test-platform', - }; + platform: 'sunos', + } as unknown as NodeJS.Process; return { ...mockProcess, default: mockProcess, @@ -68,31 +66,37 @@ vi.mock('../../utils/cleanup.js', () => ({ runExitCleanup: mockRunExitCleanup, })); +import { + SlashCommandStatus, + ToolConfirmationOutcome, + makeFakeConfig, +} from '@qwen-code/qwen-code-core'; import { act, renderHook, waitFor } from 
'@testing-library/react'; -import { vi, describe, it, expect, beforeEach, type Mock } from 'vitest'; -import { useSlashCommandProcessor } from './slashCommandProcessor.js'; +import { beforeEach, describe, expect, it, vi, type Mock } from 'vitest'; +import { LoadedSettings } from '../../config/settings.js'; +import { BuiltinCommandLoader } from '../../services/BuiltinCommandLoader.js'; +import { FileCommandLoader } from '../../services/FileCommandLoader.js'; +import { McpPromptLoader } from '../../services/McpPromptLoader.js'; import { CommandContext, CommandKind, ConfirmShellCommandsActionReturn, SlashCommand, } from '../commands/types.js'; -import { Config, ToolConfirmationOutcome } from '@qwen-code/qwen-code-core'; -import { LoadedSettings } from '../../config/settings.js'; import { MessageType } from '../types.js'; -import { BuiltinCommandLoader } from '../../services/BuiltinCommandLoader.js'; -import { FileCommandLoader } from '../../services/FileCommandLoader.js'; -import { McpPromptLoader } from '../../services/McpPromptLoader.js'; +import { useSlashCommandProcessor } from './slashCommandProcessor.js'; -const createTestCommand = ( +function createTestCommand( overrides: Partial, kind: CommandKind = CommandKind.BUILT_IN, -): SlashCommand => ({ - name: 'test', - description: 'a test command', - kind, - ...overrides, -}); +): SlashCommand { + return { + name: 'test', + description: 'a test command', + kind, + ...overrides, + }; +} describe('useSlashCommandProcessor', () => { const mockAddItem = vi.fn(); @@ -102,15 +106,7 @@ describe('useSlashCommandProcessor', () => { const mockOpenAuthDialog = vi.fn(); const mockSetQuittingMessages = vi.fn(); - const mockConfig = { - getProjectRoot: vi.fn(() => '/mock/cwd'), - getSessionId: vi.fn(() => 'test-session'), - getGeminiClient: vi.fn(() => ({ - setHistory: vi.fn().mockResolvedValue(undefined), - })), - getExtensions: vi.fn(() => []), - getIdeMode: vi.fn(() => false), - } as unknown as Config; + const mockConfig = 
makeFakeConfig({}); const mockSettings = {} as LoadedSettings; @@ -314,6 +310,39 @@ describe('useSlashCommandProcessor', () => { ); }); + it('sets isProcessing to false if the the input is not a command', async () => { + const setMockIsProcessing = vi.fn(); + const result = setupProcessorHook([], [], [], setMockIsProcessing); + + await act(async () => { + await result.current.handleSlashCommand('imnotacommand'); + }); + + expect(setMockIsProcessing).not.toHaveBeenCalled(); + }); + + it('sets isProcessing to false if the command has an error', async () => { + const setMockIsProcessing = vi.fn(); + const failCommand = createTestCommand({ + name: 'fail', + action: vi.fn().mockRejectedValue(new Error('oh no!')), + }); + + const result = setupProcessorHook( + [failCommand], + [], + [], + setMockIsProcessing, + ); + + await act(async () => { + await result.current.handleSlashCommand('/fail'); + }); + + expect(setMockIsProcessing).toHaveBeenNthCalledWith(1, true); + expect(setMockIsProcessing).toHaveBeenNthCalledWith(2, false); + }); + it('should set isProcessing to true during execution and false afterwards', async () => { const mockSetIsProcessing = vi.fn(); const command = createTestCommand({ @@ -329,14 +358,14 @@ describe('useSlashCommandProcessor', () => { }); // It should be true immediately after starting - expect(mockSetIsProcessing).toHaveBeenCalledWith(true); + expect(mockSetIsProcessing).toHaveBeenNthCalledWith(1, true); // It should not have been called with false yet expect(mockSetIsProcessing).not.toHaveBeenCalledWith(false); await executionPromise; // After the promise resolves, it should be called with false - expect(mockSetIsProcessing).toHaveBeenCalledWith(false); + expect(mockSetIsProcessing).toHaveBeenNthCalledWith(2, false); expect(mockSetIsProcessing).toHaveBeenCalledTimes(2); }); }); @@ -884,7 +913,9 @@ describe('useSlashCommandProcessor', () => { const loggingTestCommands: SlashCommand[] = [ createTestCommand({ name: 'logtest', - action: 
mockCommandAction, + action: vi + .fn() + .mockResolvedValue({ type: 'message', content: 'hello world' }), }), createTestCommand({ name: 'logwithsub', @@ -895,6 +926,10 @@ describe('useSlashCommandProcessor', () => { }), ], }), + createTestCommand({ + name: 'fail', + action: vi.fn().mockRejectedValue(new Error('oh no!')), + }), createTestCommand({ name: 'logalias', altNames: ['la'], @@ -905,7 +940,6 @@ describe('useSlashCommandProcessor', () => { beforeEach(() => { mockCommandAction.mockClear(); vi.mocked(logSlashCommand).mockClear(); - vi.mocked(SlashCommandEvent).mockClear(); }); it('should log a simple slash command', async () => { @@ -917,8 +951,45 @@ describe('useSlashCommandProcessor', () => { await result.current.handleSlashCommand('/logtest'); }); - expect(logSlashCommand).toHaveBeenCalledTimes(1); - expect(SlashCommandEvent).toHaveBeenCalledWith('logtest', undefined); + expect(logSlashCommand).toHaveBeenCalledWith( + mockConfig, + expect.objectContaining({ + command: 'logtest', + subcommand: undefined, + status: SlashCommandStatus.SUCCESS, + }), + ); + }); + + it('logs nothing for a bogus command', async () => { + const result = setupProcessorHook(loggingTestCommands); + await waitFor(() => + expect(result.current.slashCommands.length).toBeGreaterThan(0), + ); + await act(async () => { + await result.current.handleSlashCommand('/bogusbogusbogus'); + }); + + expect(logSlashCommand).not.toHaveBeenCalled(); + }); + + it('logs a failure event for a failed command', async () => { + const result = setupProcessorHook(loggingTestCommands); + await waitFor(() => + expect(result.current.slashCommands.length).toBeGreaterThan(0), + ); + await act(async () => { + await result.current.handleSlashCommand('/fail'); + }); + + expect(logSlashCommand).toHaveBeenCalledWith( + mockConfig, + expect.objectContaining({ + command: 'fail', + status: 'error', + subcommand: undefined, + }), + ); }); it('should log a slash command with a subcommand', async () => { @@ -930,8 +1001,13 
@@ describe('useSlashCommandProcessor', () => { await result.current.handleSlashCommand('/logwithsub sub'); }); - expect(logSlashCommand).toHaveBeenCalledTimes(1); - expect(SlashCommandEvent).toHaveBeenCalledWith('logwithsub', 'sub'); + expect(logSlashCommand).toHaveBeenCalledWith( + mockConfig, + expect.objectContaining({ + command: 'logwithsub', + subcommand: 'sub', + }), + ); }); it('should log the command path when an alias is used', async () => { @@ -942,8 +1018,12 @@ describe('useSlashCommandProcessor', () => { await act(async () => { await result.current.handleSlashCommand('/la'); }); - expect(logSlashCommand).toHaveBeenCalledTimes(1); - expect(SlashCommandEvent).toHaveBeenCalledWith('logalias', undefined); + expect(logSlashCommand).toHaveBeenCalledWith( + mockConfig, + expect.objectContaining({ + command: 'logalias', + }), + ); }); it('should not log for unknown commands', async () => { diff --git a/packages/cli/src/ui/hooks/slashCommandProcessor.ts b/packages/cli/src/ui/hooks/slashCommandProcessor.ts index 121453da..a87f3c54 100644 --- a/packages/cli/src/ui/hooks/slashCommandProcessor.ts +++ b/packages/cli/src/ui/hooks/slashCommandProcessor.ts @@ -14,7 +14,8 @@ import { GitService, Logger, logSlashCommand, - SlashCommandEvent, + makeSlashCommandEvent, + SlashCommandStatus, ToolConfirmationOutcome, } from '@qwen-code/qwen-code-core'; import { useSessionStats } from '../contexts/SessionContext.js'; @@ -57,6 +58,11 @@ export const useSlashCommandProcessor = ( ) => { const session = useSessionStats(); const [commands, setCommands] = useState([]); + const [reloadTrigger, setReloadTrigger] = useState(0); + + const reloadCommands = useCallback(() => { + setReloadTrigger((v) => v + 1); + }, []); const [shellConfirmationRequest, setShellConfirmationRequest] = useState { controller.abort(); }; - }, [config, ideMode]); + }, [config, ideMode, reloadTrigger]); const handleSlashCommand = useCallback( async ( @@ -230,77 +238,71 @@ export const useSlashCommandProcessor = 
( oneTimeShellAllowlist?: Set, overwriteConfirmed?: boolean, ): Promise => { + if (typeof rawQuery !== 'string') { + return false; + } + + const trimmed = rawQuery.trim(); + if (!trimmed.startsWith('/') && !trimmed.startsWith('?')) { + return false; + } + setIsProcessing(true); - try { - if (typeof rawQuery !== 'string') { - return false; + + const userMessageTimestamp = Date.now(); + addItem({ type: MessageType.USER, text: trimmed }, userMessageTimestamp); + + const parts = trimmed.substring(1).trim().split(/\s+/); + const commandPath = parts.filter((p) => p); // The parts of the command, e.g., ['memory', 'add'] + + let currentCommands = commands; + let commandToExecute: SlashCommand | undefined; + let pathIndex = 0; + let hasError = false; + const canonicalPath: string[] = []; + + for (const part of commandPath) { + // TODO: For better performance and architectural clarity, this two-pass + // search could be replaced. A more optimal approach would be to + // pre-compute a single lookup map in `CommandService.ts` that resolves + // all name and alias conflicts during the initial loading phase. The + // processor would then perform a single, fast lookup on that map. + + // First pass: check for an exact match on the primary command name. + let foundCommand = currentCommands.find((cmd) => cmd.name === part); + + // Second pass: if no primary name matches, check for an alias. 
+ if (!foundCommand) { + foundCommand = currentCommands.find((cmd) => + cmd.altNames?.includes(part), + ); } - const trimmed = rawQuery.trim(); - if (!trimmed.startsWith('/') && !trimmed.startsWith('?')) { - return false; - } - - const userMessageTimestamp = Date.now(); - addItem( - { type: MessageType.USER, text: trimmed }, - userMessageTimestamp, - ); - - const parts = trimmed.substring(1).trim().split(/\s+/); - const commandPath = parts.filter((p) => p); // The parts of the command, e.g., ['memory', 'add'] - - let currentCommands = commands; - let commandToExecute: SlashCommand | undefined; - let pathIndex = 0; - const canonicalPath: string[] = []; - - for (const part of commandPath) { - // TODO: For better performance and architectural clarity, this two-pass - // search could be replaced. A more optimal approach would be to - // pre-compute a single lookup map in `CommandService.ts` that resolves - // all name and alias conflicts during the initial loading phase. The - // processor would then perform a single, fast lookup on that map. - - // First pass: check for an exact match on the primary command name. - let foundCommand = currentCommands.find((cmd) => cmd.name === part); - - // Second pass: if no primary name matches, check for an alias. - if (!foundCommand) { - foundCommand = currentCommands.find((cmd) => - cmd.altNames?.includes(part), - ); - } - - if (foundCommand) { - commandToExecute = foundCommand; - canonicalPath.push(foundCommand.name); - pathIndex++; - if (foundCommand.subCommands) { - currentCommands = foundCommand.subCommands; - } else { - break; - } + if (foundCommand) { + commandToExecute = foundCommand; + canonicalPath.push(foundCommand.name); + pathIndex++; + if (foundCommand.subCommands) { + currentCommands = foundCommand.subCommands; } else { break; } + } else { + break; } + } + const resolvedCommandPath = canonicalPath; + const subcommand = + resolvedCommandPath.length > 1 + ? 
resolvedCommandPath.slice(1).join(' ') + : undefined; + + try { if (commandToExecute) { const args = parts.slice(pathIndex).join(' '); if (commandToExecute.action) { - if (config) { - const resolvedCommandPath = canonicalPath; - const event = new SlashCommandEvent( - resolvedCommandPath[0], - resolvedCommandPath.length > 1 - ? resolvedCommandPath.slice(1).join(' ') - : undefined, - ); - logSlashCommand(config, event); - } - const fullCommandContext: CommandContext = { ...commandContext, invocation: { @@ -322,7 +324,6 @@ export const useSlashCommandProcessor = ( ]), }; } - const result = await commandToExecute.action( fullCommandContext, args, @@ -495,8 +496,18 @@ export const useSlashCommandProcessor = ( content: `Unknown command: ${trimmed}`, timestamp: new Date(), }); + return { type: 'handled' }; - } catch (e) { + } catch (e: unknown) { + hasError = true; + if (config) { + const event = makeSlashCommandEvent({ + command: resolvedCommandPath[0], + subcommand, + status: SlashCommandStatus.ERROR, + }); + logSlashCommand(config, event); + } addItem( { type: MessageType.ERROR, @@ -506,6 +517,14 @@ export const useSlashCommandProcessor = ( ); return { type: 'handled' }; } finally { + if (config && resolvedCommandPath[0] && !hasError) { + const event = makeSlashCommandEvent({ + command: resolvedCommandPath[0], + subcommand, + status: SlashCommandStatus.SUCCESS, + }); + logSlashCommand(config, event); + } setIsProcessing(false); } }, diff --git a/packages/cli/src/ui/hooks/useAutoAcceptIndicator.test.ts b/packages/cli/src/ui/hooks/useAutoAcceptIndicator.test.ts index ee6c1511..ecf6d206 100644 --- a/packages/cli/src/ui/hooks/useAutoAcceptIndicator.test.ts +++ b/packages/cli/src/ui/hooks/useAutoAcceptIndicator.test.ts @@ -21,9 +21,9 @@ import { Config as ActualConfigType, ApprovalMode, } from '@qwen-code/qwen-code-core'; -import { useInput, type Key as InkKey } from 'ink'; +import { useKeypress, Key } from './useKeypress.js'; -vi.mock('ink'); +vi.mock('./useKeypress.js'); 
vi.mock('@qwen-code/qwen-code-core', async () => { const actualServerModule = (await vi.importActual( @@ -53,13 +53,12 @@ interface MockConfigInstanceShape { getToolRegistry: Mock<() => { discoverTools: Mock<() => void> }>; } -type UseInputKey = InkKey; -type UseInputHandler = (input: string, key: UseInputKey) => void; +type UseKeypressHandler = (key: Key) => void; describe('useAutoAcceptIndicator', () => { let mockConfigInstance: MockConfigInstanceShape; - let capturedUseInputHandler: UseInputHandler; - let mockedInkUseInput: MockedFunction; + let capturedUseKeypressHandler: UseKeypressHandler; + let mockedUseKeypress: MockedFunction; beforeEach(() => { vi.resetAllMocks(); @@ -111,10 +110,12 @@ describe('useAutoAcceptIndicator', () => { return instance; }); - mockedInkUseInput = useInput as MockedFunction; - mockedInkUseInput.mockImplementation((handler: UseInputHandler) => { - capturedUseInputHandler = handler; - }); + mockedUseKeypress = useKeypress as MockedFunction; + mockedUseKeypress.mockImplementation( + (handler: UseKeypressHandler, _options) => { + capturedUseKeypressHandler = handler; + }, + ); // eslint-disable-next-line @typescript-eslint/no-explicit-any mockConfigInstance = new (Config as any)() as MockConfigInstanceShape; @@ -163,7 +164,10 @@ describe('useAutoAcceptIndicator', () => { expect(result.current).toBe(ApprovalMode.DEFAULT); act(() => { - capturedUseInputHandler('', { tab: true, shift: true } as InkKey); + capturedUseKeypressHandler({ + name: 'tab', + shift: true, + } as Key); }); expect(mockConfigInstance.setApprovalMode).toHaveBeenCalledWith( ApprovalMode.AUTO_EDIT, @@ -171,7 +175,7 @@ describe('useAutoAcceptIndicator', () => { expect(result.current).toBe(ApprovalMode.AUTO_EDIT); act(() => { - capturedUseInputHandler('y', { ctrl: true } as InkKey); + capturedUseKeypressHandler({ name: 'y', ctrl: true } as Key); }); expect(mockConfigInstance.setApprovalMode).toHaveBeenCalledWith( ApprovalMode.YOLO, @@ -179,7 +183,7 @@ 
describe('useAutoAcceptIndicator', () => { expect(result.current).toBe(ApprovalMode.YOLO); act(() => { - capturedUseInputHandler('y', { ctrl: true } as InkKey); + capturedUseKeypressHandler({ name: 'y', ctrl: true } as Key); }); expect(mockConfigInstance.setApprovalMode).toHaveBeenCalledWith( ApprovalMode.DEFAULT, @@ -187,7 +191,7 @@ describe('useAutoAcceptIndicator', () => { expect(result.current).toBe(ApprovalMode.DEFAULT); act(() => { - capturedUseInputHandler('y', { ctrl: true } as InkKey); + capturedUseKeypressHandler({ name: 'y', ctrl: true } as Key); }); expect(mockConfigInstance.setApprovalMode).toHaveBeenCalledWith( ApprovalMode.YOLO, @@ -195,7 +199,10 @@ describe('useAutoAcceptIndicator', () => { expect(result.current).toBe(ApprovalMode.YOLO); act(() => { - capturedUseInputHandler('', { tab: true, shift: true } as InkKey); + capturedUseKeypressHandler({ + name: 'tab', + shift: true, + } as Key); }); expect(mockConfigInstance.setApprovalMode).toHaveBeenCalledWith( ApprovalMode.AUTO_EDIT, @@ -203,7 +210,10 @@ describe('useAutoAcceptIndicator', () => { expect(result.current).toBe(ApprovalMode.AUTO_EDIT); act(() => { - capturedUseInputHandler('', { tab: true, shift: true } as InkKey); + capturedUseKeypressHandler({ + name: 'tab', + shift: true, + } as Key); }); expect(mockConfigInstance.setApprovalMode).toHaveBeenCalledWith( ApprovalMode.DEFAULT, @@ -220,37 +230,51 @@ describe('useAutoAcceptIndicator', () => { ); act(() => { - capturedUseInputHandler('', { tab: true, shift: false } as InkKey); + capturedUseKeypressHandler({ + name: 'tab', + shift: false, + } as Key); }); expect(mockConfigInstance.setApprovalMode).not.toHaveBeenCalled(); act(() => { - capturedUseInputHandler('', { tab: false, shift: true } as InkKey); + capturedUseKeypressHandler({ + name: 'unknown', + shift: true, + } as Key); }); expect(mockConfigInstance.setApprovalMode).not.toHaveBeenCalled(); act(() => { - capturedUseInputHandler('a', { tab: false, shift: false } as InkKey); + 
capturedUseKeypressHandler({ + name: 'a', + shift: false, + ctrl: false, + } as Key); }); expect(mockConfigInstance.setApprovalMode).not.toHaveBeenCalled(); act(() => { - capturedUseInputHandler('y', { tab: true } as InkKey); + capturedUseKeypressHandler({ name: 'y', ctrl: false } as Key); }); expect(mockConfigInstance.setApprovalMode).not.toHaveBeenCalled(); act(() => { - capturedUseInputHandler('a', { ctrl: true } as InkKey); + capturedUseKeypressHandler({ name: 'a', ctrl: true } as Key); }); expect(mockConfigInstance.setApprovalMode).not.toHaveBeenCalled(); act(() => { - capturedUseInputHandler('y', { shift: true } as InkKey); + capturedUseKeypressHandler({ name: 'y', shift: true } as Key); }); expect(mockConfigInstance.setApprovalMode).not.toHaveBeenCalled(); act(() => { - capturedUseInputHandler('a', { ctrl: true, shift: true } as InkKey); + capturedUseKeypressHandler({ + name: 'a', + ctrl: true, + shift: true, + } as Key); }); expect(mockConfigInstance.setApprovalMode).not.toHaveBeenCalled(); }); diff --git a/packages/cli/src/ui/hooks/useAutoAcceptIndicator.ts b/packages/cli/src/ui/hooks/useAutoAcceptIndicator.ts index e1d004e4..4cb8cf4c 100644 --- a/packages/cli/src/ui/hooks/useAutoAcceptIndicator.ts +++ b/packages/cli/src/ui/hooks/useAutoAcceptIndicator.ts @@ -4,9 +4,9 @@ * SPDX-License-Identifier: Apache-2.0 */ -import { useState, useEffect } from 'react'; -import { useInput } from 'ink'; import { ApprovalMode, type Config } from '@qwen-code/qwen-code-core'; +import { useEffect, useState } from 'react'; +import { useKeypress } from './useKeypress.js'; export interface UseAutoAcceptIndicatorArgs { config: Config; @@ -23,27 +23,30 @@ export function useAutoAcceptIndicator({ setShowAutoAcceptIndicator(currentConfigValue); }, [currentConfigValue]); - useInput((input, key) => { - let nextApprovalMode: ApprovalMode | undefined; + useKeypress( + (key) => { + let nextApprovalMode: ApprovalMode | undefined; - if (key.ctrl && input === 'y') { - nextApprovalMode = - 
config.getApprovalMode() === ApprovalMode.YOLO - ? ApprovalMode.DEFAULT - : ApprovalMode.YOLO; - } else if (key.tab && key.shift) { - nextApprovalMode = - config.getApprovalMode() === ApprovalMode.AUTO_EDIT - ? ApprovalMode.DEFAULT - : ApprovalMode.AUTO_EDIT; - } + if (key.ctrl && key.name === 'y') { + nextApprovalMode = + config.getApprovalMode() === ApprovalMode.YOLO + ? ApprovalMode.DEFAULT + : ApprovalMode.YOLO; + } else if (key.shift && key.name === 'tab') { + nextApprovalMode = + config.getApprovalMode() === ApprovalMode.AUTO_EDIT + ? ApprovalMode.DEFAULT + : ApprovalMode.AUTO_EDIT; + } - if (nextApprovalMode) { - config.setApprovalMode(nextApprovalMode); - // Update local state immediately for responsiveness - setShowAutoAcceptIndicator(nextApprovalMode); - } - }); + if (nextApprovalMode) { + config.setApprovalMode(nextApprovalMode); + // Update local state immediately for responsiveness + setShowAutoAcceptIndicator(nextApprovalMode); + } + }, + { isActive: true }, + ); return showAutoAcceptIndicator; } diff --git a/packages/cli/src/ui/hooks/useFocus.ts b/packages/cli/src/ui/hooks/useFocus.ts index 6c9a6daa..8a7f9f6c 100644 --- a/packages/cli/src/ui/hooks/useFocus.ts +++ b/packages/cli/src/ui/hooks/useFocus.ts @@ -8,12 +8,12 @@ import { useStdin, useStdout } from 'ink'; import { useEffect, useState } from 'react'; // ANSI escape codes to enable/disable terminal focus reporting -const ENABLE_FOCUS_REPORTING = '\x1b[?1004h'; -const DISABLE_FOCUS_REPORTING = '\x1b[?1004l'; +export const ENABLE_FOCUS_REPORTING = '\x1b[?1004h'; +export const DISABLE_FOCUS_REPORTING = '\x1b[?1004l'; // ANSI escape codes for focus events -const FOCUS_IN = '\x1b[I'; -const FOCUS_OUT = '\x1b[O'; +export const FOCUS_IN = '\x1b[I'; +export const FOCUS_OUT = '\x1b[O'; export const useFocus = () => { const { stdin } = useStdin(); diff --git a/packages/cli/src/ui/hooks/useFolderTrust.test.ts b/packages/cli/src/ui/hooks/useFolderTrust.test.ts index 61552af0..e565ab05 100644 --- 
a/packages/cli/src/ui/hooks/useFolderTrust.test.ts +++ b/packages/cli/src/ui/hooks/useFolderTrust.test.ts @@ -4,15 +4,33 @@ * SPDX-License-Identifier: Apache-2.0 */ -import { renderHook, act } from '@testing-library/react'; import { vi } from 'vitest'; +import { renderHook, act } from '@testing-library/react'; import { useFolderTrust } from './useFolderTrust.js'; -import { LoadedSettings, SettingScope } from '../../config/settings.js'; +import { type Config } from '@google/gemini-cli-core'; +import { LoadedSettings } from '../../config/settings.js'; import { FolderTrustChoice } from '../components/FolderTrustDialog.js'; +import { + LoadedTrustedFolders, + TrustLevel, +} from '../../config/trustedFolders.js'; +import * as process from 'process'; + +import * as trustedFolders from '../../config/trustedFolders.js'; + +vi.mock('process', () => ({ + cwd: vi.fn(), + platform: 'linux', +})); describe('useFolderTrust', () => { - it('should set isFolderTrustDialogOpen to true when folderTrustFeature is true and folderTrust is undefined', () => { - const settings = { + let mockSettings: LoadedSettings; + let mockConfig: Config; + let mockTrustedFolders: LoadedTrustedFolders; + let loadTrustedFoldersSpy: vi.SpyInstance; + + beforeEach(() => { + mockSettings = { merged: { folderTrustFeature: true, folderTrust: undefined, @@ -20,59 +38,110 @@ describe('useFolderTrust', () => { setValue: vi.fn(), } as unknown as LoadedSettings; - const { result } = renderHook(() => useFolderTrust(settings)); + mockConfig = { + isTrustedFolder: vi.fn().mockReturnValue(undefined), + } as unknown as Config; + mockTrustedFolders = { + setValue: vi.fn(), + } as unknown as LoadedTrustedFolders; + + loadTrustedFoldersSpy = vi + .spyOn(trustedFolders, 'loadTrustedFolders') + .mockReturnValue(mockTrustedFolders); + (process.cwd as vi.Mock).mockReturnValue('/test/path'); + }); + + afterEach(() => { + vi.clearAllMocks(); + }); + + it('should not open dialog when folder is already trusted', () => { + 
(mockConfig.isTrustedFolder as vi.Mock).mockReturnValue(true); + const { result } = renderHook(() => + useFolderTrust(mockSettings, mockConfig), + ); + expect(result.current.isFolderTrustDialogOpen).toBe(false); + }); + + it('should not open dialog when folder is already untrusted', () => { + (mockConfig.isTrustedFolder as vi.Mock).mockReturnValue(false); + const { result } = renderHook(() => + useFolderTrust(mockSettings, mockConfig), + ); + expect(result.current.isFolderTrustDialogOpen).toBe(false); + }); + + it('should open dialog when folder trust is undefined', () => { + (mockConfig.isTrustedFolder as vi.Mock).mockReturnValue(undefined); + const { result } = renderHook(() => + useFolderTrust(mockSettings, mockConfig), + ); expect(result.current.isFolderTrustDialogOpen).toBe(true); }); - it('should set isFolderTrustDialogOpen to false when folderTrustFeature is false', () => { - const settings = { - merged: { - folderTrustFeature: false, - folderTrust: undefined, - }, - setValue: vi.fn(), - } as unknown as LoadedSettings; - - const { result } = renderHook(() => useFolderTrust(settings)); - - expect(result.current.isFolderTrustDialogOpen).toBe(false); - }); - - it('should set isFolderTrustDialogOpen to false when folderTrust is defined', () => { - const settings = { - merged: { - folderTrustFeature: true, - folderTrust: true, - }, - setValue: vi.fn(), - } as unknown as LoadedSettings; - - const { result } = renderHook(() => useFolderTrust(settings)); - - expect(result.current.isFolderTrustDialogOpen).toBe(false); - }); - - it('should call setValue and set isFolderTrustDialogOpen to false on handleFolderTrustSelect', () => { - const settings = { - merged: { - folderTrustFeature: true, - folderTrust: undefined, - }, - setValue: vi.fn(), - } as unknown as LoadedSettings; - - const { result } = renderHook(() => useFolderTrust(settings)); + it('should handle TRUST_FOLDER choice', () => { + const { result } = renderHook(() => + useFolderTrust(mockSettings, 
mockConfig), + ); act(() => { result.current.handleFolderTrustSelect(FolderTrustChoice.TRUST_FOLDER); }); - expect(settings.setValue).toHaveBeenCalledWith( - SettingScope.User, - 'folderTrust', - true, + expect(loadTrustedFoldersSpy).toHaveBeenCalled(); + expect(mockTrustedFolders.setValue).toHaveBeenCalledWith( + '/test/path', + TrustLevel.TRUST_FOLDER, ); expect(result.current.isFolderTrustDialogOpen).toBe(false); }); + + it('should handle TRUST_PARENT choice', () => { + const { result } = renderHook(() => + useFolderTrust(mockSettings, mockConfig), + ); + + act(() => { + result.current.handleFolderTrustSelect(FolderTrustChoice.TRUST_PARENT); + }); + + expect(mockTrustedFolders.setValue).toHaveBeenCalledWith( + '/test/path', + TrustLevel.TRUST_PARENT, + ); + expect(result.current.isFolderTrustDialogOpen).toBe(false); + }); + + it('should handle DO_NOT_TRUST choice', () => { + const { result } = renderHook(() => + useFolderTrust(mockSettings, mockConfig), + ); + + act(() => { + result.current.handleFolderTrustSelect(FolderTrustChoice.DO_NOT_TRUST); + }); + + expect(mockTrustedFolders.setValue).toHaveBeenCalledWith( + '/test/path', + TrustLevel.DO_NOT_TRUST, + ); + expect(result.current.isFolderTrustDialogOpen).toBe(false); + }); + + it('should do nothing for default choice', () => { + const { result } = renderHook(() => + useFolderTrust(mockSettings, mockConfig), + ); + + act(() => { + result.current.handleFolderTrustSelect( + 'invalid_choice' as FolderTrustChoice, + ); + }); + + expect(mockTrustedFolders.setValue).not.toHaveBeenCalled(); + expect(mockSettings.setValue).not.toHaveBeenCalled(); + expect(result.current.isFolderTrustDialogOpen).toBe(true); + }); }); diff --git a/packages/cli/src/ui/hooks/useFolderTrust.ts b/packages/cli/src/ui/hooks/useFolderTrust.ts index 90a69132..6458d4aa 100644 --- a/packages/cli/src/ui/hooks/useFolderTrust.ts +++ b/packages/cli/src/ui/hooks/useFolderTrust.ts @@ -5,24 +5,39 @@ */ import { useState, useCallback } from 'react'; 
-import { LoadedSettings, SettingScope } from '../../config/settings.js'; +import { type Config } from '@google/gemini-cli-core'; +import { LoadedSettings } from '../../config/settings.js'; import { FolderTrustChoice } from '../components/FolderTrustDialog.js'; +import { loadTrustedFolders, TrustLevel } from '../../config/trustedFolders.js'; +import * as process from 'process'; -export const useFolderTrust = (settings: LoadedSettings) => { +export const useFolderTrust = (settings: LoadedSettings, config: Config) => { const [isFolderTrustDialogOpen, setIsFolderTrustDialogOpen] = useState( - !!settings.merged.folderTrustFeature && - // TODO: Update to avoid showing dialog for folders that are trusted. - settings.merged.folderTrust === undefined, + config.isTrustedFolder() === undefined, ); - const handleFolderTrustSelect = useCallback( - (_choice: FolderTrustChoice) => { - // TODO: Store folderPath in the trusted folders config file based on the choice. - settings.setValue(SettingScope.User, 'folderTrust', true); - setIsFolderTrustDialogOpen(false); - }, - [settings], - ); + const handleFolderTrustSelect = useCallback((choice: FolderTrustChoice) => { + const trustedFolders = loadTrustedFolders(); + const cwd = process.cwd(); + let trustLevel: TrustLevel; + + switch (choice) { + case FolderTrustChoice.TRUST_FOLDER: + trustLevel = TrustLevel.TRUST_FOLDER; + break; + case FolderTrustChoice.TRUST_PARENT: + trustLevel = TrustLevel.TRUST_PARENT; + break; + case FolderTrustChoice.DO_NOT_TRUST: + trustLevel = TrustLevel.DO_NOT_TRUST; + break; + default: + return; + } + + trustedFolders.setValue(cwd, trustLevel); + setIsFolderTrustDialogOpen(false); + }, []); return { isFolderTrustDialogOpen, diff --git a/packages/cli/src/ui/hooks/useGeminiStream.test.tsx b/packages/cli/src/ui/hooks/useGeminiStream.test.tsx index bf4b7e08..f7e33350 100644 --- a/packages/cli/src/ui/hooks/useGeminiStream.test.tsx +++ b/packages/cli/src/ui/hooks/useGeminiStream.test.tsx @@ -8,7 +8,7 @@ import { 
describe, it, expect, vi, beforeEach, Mock } from 'vitest'; import { renderHook, act, waitFor } from '@testing-library/react'; import { useGeminiStream, mergePartListUnions } from './useGeminiStream.js'; -import { useInput } from 'ink'; +import { useKeypress } from './useKeypress.js'; import { useReactToolScheduler, TrackedToolCall, @@ -51,6 +51,7 @@ const MockedGeminiClientClass = vi.hoisted(() => const MockedUserPromptEvent = vi.hoisted(() => vi.fn().mockImplementation(() => {}), ); +const mockParseAndFormatApiError = vi.hoisted(() => vi.fn()); vi.mock('@qwen-code/qwen-code-core', async (importOriginal) => { const actualCoreModule = (await importOriginal()) as any; @@ -59,6 +60,7 @@ vi.mock('@qwen-code/qwen-code-core', async (importOriginal) => { GitService: vi.fn(), GeminiClient: MockedGeminiClientClass, UserPromptEvent: MockedUserPromptEvent, + parseAndFormatApiError: mockParseAndFormatApiError, }; }); @@ -71,10 +73,9 @@ vi.mock('./useReactToolScheduler.js', async (importOriginal) => { }; }); -vi.mock('ink', async (importOriginal) => { - const actualInkModule = (await importOriginal()) as any; - return { ...(actualInkModule || {}), useInput: vi.fn() }; -}); +vi.mock('./useKeypress.js', () => ({ + useKeypress: vi.fn(), +})); vi.mock('./shellCommandProcessor.js', () => ({ useShellCommandProcessor: vi.fn().mockReturnValue({ @@ -128,11 +129,6 @@ vi.mock('./slashCommandProcessor.js', () => ({ handleSlashCommand: vi.fn().mockReturnValue(false), })); -const mockParseAndFormatApiError = vi.hoisted(() => vi.fn()); -vi.mock('../utils/errorParsing.js', () => ({ - parseAndFormatApiError: mockParseAndFormatApiError, -})); - // --- END MOCKS --- describe('mergePartListUnions', () => { @@ -903,19 +899,23 @@ describe('useGeminiStream', () => { }); describe('User Cancellation', () => { - let useInputCallback: (input: string, key: any) => void; - const mockUseInput = useInput as Mock; + let keypressCallback: (key: any) => void; + const mockUseKeypress = useKeypress as Mock; 
beforeEach(() => { - // Capture the callback passed to useInput - mockUseInput.mockImplementation((callback) => { - useInputCallback = callback; + // Capture the callback passed to useKeypress + mockUseKeypress.mockImplementation((callback, options) => { + if (options.isActive) { + keypressCallback = callback; + } else { + keypressCallback = () => {}; + } }); }); const simulateEscapeKeyPress = () => { act(() => { - useInputCallback('', { escape: true }); + keypressCallback({ name: 'escape' }); }); }; diff --git a/packages/cli/src/ui/hooks/useGeminiStream.ts b/packages/cli/src/ui/hooks/useGeminiStream.ts index 9972bbdc..5b46f15b 100644 --- a/packages/cli/src/ui/hooks/useGeminiStream.ts +++ b/packages/cli/src/ui/hooks/useGeminiStream.ts @@ -4,57 +4,57 @@ * SPDX-License-Identifier: Apache-2.0 */ -import { useState, useRef, useCallback, useEffect, useMemo } from 'react'; -import { useInput } from 'ink'; -import { - Config, - GeminiClient, - GeminiEventType as ServerGeminiEventType, - ServerGeminiStreamEvent as GeminiEvent, - ServerGeminiContentEvent as ContentEvent, - ServerGeminiErrorEvent as ErrorEvent, - ServerGeminiChatCompressedEvent, - ServerGeminiFinishedEvent, - getErrorMessage, - isNodeError, - MessageSenderType, - ToolCallRequestInfo, - logUserPrompt, - GitService, - EditorType, - ThoughtSummary, - UnauthorizedError, - UserPromptEvent, - DEFAULT_GEMINI_FLASH_MODEL, -} from '@qwen-code/qwen-code-core'; import { type Part, type PartListUnion, FinishReason } from '@google/genai'; import { - StreamingState, + Config, + ServerGeminiContentEvent as ContentEvent, + DEFAULT_GEMINI_FLASH_MODEL, + EditorType, + ServerGeminiErrorEvent as ErrorEvent, + GeminiClient, + ServerGeminiStreamEvent as GeminiEvent, + getErrorMessage, + GitService, + isNodeError, + logUserPrompt, + MessageSenderType, + parseAndFormatApiError, + ServerGeminiChatCompressedEvent, + GeminiEventType as ServerGeminiEventType, + ServerGeminiFinishedEvent, + ThoughtSummary, + ToolCallRequestInfo, + 
UnauthorizedError, + UserPromptEvent, +} from '@qwen-code/qwen-code-core'; +import { promises as fs } from 'fs'; +import path from 'path'; +import { useCallback, useEffect, useMemo, useRef, useState } from 'react'; +import { useSessionStats } from '../contexts/SessionContext.js'; +import { HistoryItem, - HistoryItemWithoutId, HistoryItemToolGroup, + HistoryItemWithoutId, MessageType, SlashCommandProcessorResult, + StreamingState, ToolCallStatus, } from '../types.js'; import { isAtCommand } from '../utils/commandUtils.js'; -import { parseAndFormatApiError } from '../utils/errorParsing.js'; -import { useShellCommandProcessor } from './shellCommandProcessor.js'; -import { handleAtCommand } from './atCommandProcessor.js'; import { findLastSafeSplitPoint } from '../utils/markdownUtilities.js'; -import { useStateAndRef } from './useStateAndRef.js'; +import { handleAtCommand } from './atCommandProcessor.js'; +import { useShellCommandProcessor } from './shellCommandProcessor.js'; import { UseHistoryManagerReturn } from './useHistoryManager.js'; +import { useKeypress } from './useKeypress.js'; import { useLogger } from './useLogger.js'; -import { promises as fs } from 'fs'; -import path from 'path'; import { - useReactToolScheduler, mapToDisplay as mapTrackedToolCallsToDisplay, - TrackedToolCall, - TrackedCompletedToolCall, TrackedCancelledToolCall, + TrackedCompletedToolCall, + TrackedToolCall, + useReactToolScheduler, } from './useReactToolScheduler.js'; -import { useSessionStats } from '../contexts/SessionContext.js'; +import { useStateAndRef } from './useStateAndRef.js'; export function mergePartListUnions(list: PartListUnion[]): PartListUnion { const resultParts: PartListUnion = []; @@ -214,11 +214,14 @@ export const useGeminiStream = ( pendingHistoryItemRef, ]); - useInput((_input, key) => { - if (key.escape) { - cancelOngoingRequest(); - } - }); + useKeypress( + (key) => { + if (key.name === 'escape') { + cancelOngoingRequest(); + } + }, + { isActive: streamingState 
=== StreamingState.Responding }, + ); const prepareQueryForGemini = useCallback( async ( diff --git a/packages/cli/src/ui/hooks/useKeypress.test.ts b/packages/cli/src/ui/hooks/useKeypress.test.ts index a30eabf2..946ee054 100644 --- a/packages/cli/src/ui/hooks/useKeypress.test.ts +++ b/packages/cli/src/ui/hooks/useKeypress.test.ts @@ -134,9 +134,14 @@ describe('useKeypress', () => { expect(onKeypress).not.toHaveBeenCalled(); }); - it('should listen for keypress when active', () => { + it.each([ + { key: { name: 'a', sequence: 'a' } }, + { key: { name: 'left', sequence: '\x1b[D' } }, + { key: { name: 'right', sequence: '\x1b[C' } }, + { key: { name: 'up', sequence: '\x1b[A' } }, + { key: { name: 'down', sequence: '\x1b[B' } }, + ])('should listen for keypress when active for key $key.name', ({ key }) => { renderHook(() => useKeypress(onKeypress, { isActive: true })); - const key = { name: 'a', sequence: 'a' }; act(() => stdin.pressKey(key)); expect(onKeypress).toHaveBeenCalledWith(expect.objectContaining(key)); }); @@ -187,7 +192,7 @@ describe('useKeypress', () => { }, isLegacy: true, }, - ])('Paste Handling in $description', ({ setup, isLegacy }) => { + ])('in $description', ({ setup, isLegacy }) => { beforeEach(() => { setup(); stdin.setLegacy(isLegacy); diff --git a/packages/cli/src/ui/hooks/useKeypress.ts b/packages/cli/src/ui/hooks/useKeypress.ts index 6c2b7e8f..920270ee 100644 --- a/packages/cli/src/ui/hooks/useKeypress.ts +++ b/packages/cli/src/ui/hooks/useKeypress.ts @@ -8,6 +8,21 @@ import { useEffect, useRef } from 'react'; import { useStdin } from 'ink'; import readline from 'readline'; import { PassThrough } from 'stream'; +import { + KITTY_CTRL_C, + BACKSLASH_ENTER_DETECTION_WINDOW_MS, + MAX_KITTY_SEQUENCE_LENGTH, +} from '../utils/platformConstants.js'; +import { + KittySequenceOverflowEvent, + logKittySequenceOverflow, + Config, +} from '@google/gemini-cli-core'; +import { FOCUS_IN, FOCUS_OUT } from './useFocus.js'; + +const ESC = '\u001B'; +export 
const PASTE_MODE_PREFIX = `${ESC}[200~`; +export const PASTE_MODE_SUFFIX = `${ESC}[201~`; export interface Key { name: string; @@ -16,6 +31,7 @@ export interface Key { shift: boolean; paste: boolean; sequence: string; + kittyProtocol?: boolean; } /** @@ -30,10 +46,16 @@ export interface Key { * @param onKeypress - The callback function to execute on each keypress. * @param options - Options to control the hook's behavior. * @param options.isActive - Whether the hook should be actively listening for input. + * @param options.kittyProtocolEnabled - Whether Kitty keyboard protocol is enabled. + * @param options.config - Optional config for telemetry logging. */ export function useKeypress( onKeypress: (key: Key) => void, - { isActive }: { isActive: boolean }, + { + isActive, + kittyProtocolEnabled = false, + config, + }: { isActive: boolean; kittyProtocolEnabled?: boolean; config?: Config }, ) { const { stdin, setRawMode } = useStdin(); const onKeypressRef = useRef(onKeypress); @@ -64,8 +86,210 @@ export function useKeypress( let isPaste = false; let pasteBuffer = Buffer.alloc(0); + let kittySequenceBuffer = ''; + let backslashTimeout: NodeJS.Timeout | null = null; + let waitingForEnterAfterBackslash = false; + + // Parse Kitty protocol sequences + const parseKittySequence = (sequence: string): Key | null => { + // Match CSI ; u or ~ + // Format: ESC [ ; u/~ + const kittyPattern = new RegExp(`^${ESC}\\[(\\d+)(;(\\d+))?([u~])$`); + const match = sequence.match(kittyPattern); + if (!match) return null; + + const keyCode = parseInt(match[1], 10); + const modifiers = match[3] ? 
parseInt(match[3], 10) : 1; + + // Decode modifiers (subtract 1 as per Kitty protocol spec) + const modifierBits = modifiers - 1; + const shift = (modifierBits & 1) === 1; + const alt = (modifierBits & 2) === 2; + const ctrl = (modifierBits & 4) === 4; + + // Handle Escape key (code 27) + if (keyCode === 27) { + return { + name: 'escape', + ctrl, + meta: alt, + shift, + paste: false, + sequence, + kittyProtocol: true, + }; + } + + // Handle Enter key (code 13) + if (keyCode === 13) { + return { + name: 'return', + ctrl, + meta: alt, + shift, + paste: false, + sequence, + kittyProtocol: true, + }; + } + + // Handle Ctrl+letter combinations (a-z) + // ASCII codes: a=97, b=98, c=99, ..., z=122 + if (keyCode >= 97 && keyCode <= 122 && ctrl) { + const letter = String.fromCharCode(keyCode); + return { + name: letter, + ctrl: true, + meta: alt, + shift, + paste: false, + sequence, + kittyProtocol: true, + }; + } + + // Handle other keys as needed + return null; + }; const handleKeypress = (_: unknown, key: Key) => { + // Handle VS Code's backslash+return pattern (Shift+Enter) + if (key.name === 'return' && waitingForEnterAfterBackslash) { + // Cancel the timeout since we got the Enter + if (backslashTimeout) { + clearTimeout(backslashTimeout); + backslashTimeout = null; + } + waitingForEnterAfterBackslash = false; + + // Convert to Shift+Enter + onKeypressRef.current({ + ...key, + shift: true, + sequence: '\\\r', // VS Code's Shift+Enter representation + }); + return; + } + + // Handle backslash - hold it to see if Enter follows + if (key.sequence === '\\' && !key.name) { + // Don't pass through the backslash yet - wait to see if Enter follows + waitingForEnterAfterBackslash = true; + + // Set up a timeout to pass through the backslash if no Enter follows + backslashTimeout = setTimeout(() => { + waitingForEnterAfterBackslash = false; + backslashTimeout = null; + // Pass through the backslash since no Enter followed + onKeypressRef.current(key); + }, 
BACKSLASH_ENTER_DETECTION_WINDOW_MS); + + return; + } + + // If we're waiting for Enter after backslash but got something else, + // pass through the backslash first, then the new key + if (waitingForEnterAfterBackslash && key.name !== 'return') { + if (backslashTimeout) { + clearTimeout(backslashTimeout); + backslashTimeout = null; + } + waitingForEnterAfterBackslash = false; + + // Pass through the backslash that was held + onKeypressRef.current({ + name: '', + sequence: '\\', + ctrl: false, + meta: false, + shift: false, + paste: false, + }); + + // Then continue processing the current key normally + } + + // If readline has already identified an arrow key, pass it through + // immediately, bypassing the Kitty protocol sequence buffering. + if (['up', 'down', 'left', 'right'].includes(key.name)) { + onKeypressRef.current(key); + return; + } + + // Always pass through Ctrl+C immediately, regardless of protocol state + // Check both standard format and Kitty protocol sequence + if ( + (key.ctrl && key.name === 'c') || + key.sequence === `${ESC}${KITTY_CTRL_C}` + ) { + kittySequenceBuffer = ''; + // If it's the Kitty sequence, create a proper key object + if (key.sequence === `${ESC}${KITTY_CTRL_C}`) { + onKeypressRef.current({ + name: 'c', + ctrl: true, + meta: false, + shift: false, + paste: false, + sequence: key.sequence, + kittyProtocol: true, + }); + } else { + onKeypressRef.current(key); + } + return; + } + + // If Kitty protocol is enabled, handle CSI sequences + if (kittyProtocolEnabled) { + // If we have a buffer or this starts a CSI sequence + if ( + kittySequenceBuffer || + (key.sequence.startsWith(`${ESC}[`) && + !key.sequence.startsWith(PASTE_MODE_PREFIX) && + !key.sequence.startsWith(PASTE_MODE_SUFFIX) && + !key.sequence.startsWith(FOCUS_IN) && + !key.sequence.startsWith(FOCUS_OUT)) + ) { + kittySequenceBuffer += key.sequence; + + // Try to parse the buffer as a Kitty sequence + const kittyKey = parseKittySequence(kittySequenceBuffer); + if 
(kittyKey) { + kittySequenceBuffer = ''; + onKeypressRef.current(kittyKey); + return; + } + + if (config?.getDebugMode()) { + const codes = Array.from(kittySequenceBuffer).map((ch) => + ch.charCodeAt(0), + ); + // Unless the user is sshing over a slow connection, this likely + // indicates this is not a kitty sequence but we have incorrectly + // interpreted it as such. See the examples above for sequences + // such as FOCUS_IN that are not Kitty sequences. + console.warn('Kitty sequence buffer has char codes:', codes); + } + + // If buffer doesn't match expected pattern and is getting long, flush it + if (kittySequenceBuffer.length > MAX_KITTY_SEQUENCE_LENGTH) { + // Log telemetry for buffer overflow + if (config) { + const event = new KittySequenceOverflowEvent( + kittySequenceBuffer.length, + kittySequenceBuffer, + ); + logKittySequenceOverflow(config, event); + } + // Not a Kitty sequence, treat as regular key + kittySequenceBuffer = ''; + } else { + // Wait for more characters + return; + } + } + } if (key.name === 'paste-start') { isPaste = true; } else if (key.name === 'paste-end') { @@ -84,7 +308,7 @@ export function useKeypress( pasteBuffer = Buffer.concat([pasteBuffer, Buffer.from(key.sequence)]); } else { // Handle special keys - if (key.name === 'return' && key.sequence === '\x1B\r') { + if (key.name === 'return' && key.sequence === `${ESC}\r`) { key.meta = true; } onKeypressRef.current({ ...key, paste: isPaste }); @@ -93,13 +317,13 @@ export function useKeypress( }; const handleRawKeypress = (data: Buffer) => { - const PASTE_MODE_PREFIX = Buffer.from('\x1B[200~'); - const PASTE_MODE_SUFFIX = Buffer.from('\x1B[201~'); + const pasteModePrefixBuffer = Buffer.from(PASTE_MODE_PREFIX); + const pasteModeSuffixBuffer = Buffer.from(PASTE_MODE_SUFFIX); let pos = 0; while (pos < data.length) { - const prefixPos = data.indexOf(PASTE_MODE_PREFIX, pos); - const suffixPos = data.indexOf(PASTE_MODE_SUFFIX, pos); + const prefixPos = data.indexOf(pasteModePrefixBuffer, 
pos); + const suffixPos = data.indexOf(pasteModeSuffixBuffer, pos); // Determine which marker comes first, if any. const isPrefixNext = @@ -115,7 +339,7 @@ export function useKeypress( } else if (isSuffixNext) { nextMarkerPos = suffixPos; } - markerLength = PASTE_MODE_SUFFIX.length; + markerLength = pasteModeSuffixBuffer.length; if (nextMarkerPos === -1) { keypressStream.write(data.slice(pos)); @@ -170,6 +394,12 @@ export function useKeypress( rl.close(); setRawMode(false); + // Clean up any pending backslash timeout + if (backslashTimeout) { + clearTimeout(backslashTimeout); + backslashTimeout = null; + } + // If we are in the middle of a paste, send what we have. if (isPaste) { onKeypressRef.current({ @@ -183,5 +413,5 @@ export function useKeypress( pasteBuffer = Buffer.alloc(0); } }; - }, [isActive, stdin, setRawMode]); + }, [isActive, stdin, setRawMode, kittyProtocolEnabled, config]); } diff --git a/packages/cli/src/ui/hooks/useKittyKeyboardProtocol.ts b/packages/cli/src/ui/hooks/useKittyKeyboardProtocol.ts new file mode 100644 index 00000000..53c7566c --- /dev/null +++ b/packages/cli/src/ui/hooks/useKittyKeyboardProtocol.ts @@ -0,0 +1,31 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { useState } from 'react'; +import { + isKittyProtocolEnabled, + isKittyProtocolSupported, +} from '../utils/kittyProtocolDetector.js'; + +export interface KittyProtocolStatus { + supported: boolean; + enabled: boolean; + checking: boolean; +} + +/** + * Hook that returns the cached Kitty keyboard protocol status. + * Detection is done once at app startup to avoid repeated queries. 
+ */ +export function useKittyKeyboardProtocol(): KittyProtocolStatus { + const [status] = useState({ + supported: isKittyProtocolSupported(), + enabled: isKittyProtocolEnabled(), + checking: false, + }); + + return status; +} diff --git a/packages/cli/src/ui/hooks/useToolScheduler.test.ts b/packages/cli/src/ui/hooks/useToolScheduler.test.ts index d0c4cbeb..c08b6240 100644 --- a/packages/cli/src/ui/hooks/useToolScheduler.test.ts +++ b/packages/cli/src/ui/hooks/useToolScheduler.test.ts @@ -23,7 +23,7 @@ import { ToolCall, // Import from core Status as ToolCallStatusType, ApprovalMode, - Icon, + Kind, BaseTool, AnyDeclarativeTool, AnyToolInvocation, @@ -67,7 +67,7 @@ class MockTool extends BaseTool { name, displayName, 'A mock tool for testing', - Icon.Hammer, + Kind.Other, {}, isOutputMarkdown, canUpdateOutput, diff --git a/packages/cli/src/ui/privacy/CloudFreePrivacyNotice.tsx b/packages/cli/src/ui/privacy/CloudFreePrivacyNotice.tsx index 2d360abc..9ec46415 100644 --- a/packages/cli/src/ui/privacy/CloudFreePrivacyNotice.tsx +++ b/packages/cli/src/ui/privacy/CloudFreePrivacyNotice.tsx @@ -4,12 +4,13 @@ * SPDX-License-Identifier: Apache-2.0 */ -import { Box, Newline, Text, useInput } from 'ink'; +import { Box, Newline, Text } from 'ink'; import { RadioButtonSelect } from '../components/shared/RadioButtonSelect.js'; import { usePrivacySettings } from '../hooks/usePrivacySettings.js'; import { CloudPaidPrivacyNotice } from './CloudPaidPrivacyNotice.js'; import { Config } from '@qwen-code/qwen-code-core'; import { Colors } from '../colors.js'; +import { useKeypress } from '../hooks/useKeypress.js'; interface CloudFreePrivacyNoticeProps { config: Config; @@ -23,11 +24,14 @@ export const CloudFreePrivacyNotice = ({ const { privacyState, updateDataCollectionOptIn } = usePrivacySettings(config); - useInput((input, key) => { - if (privacyState.error && key.escape) { - onExit(); - } - }); + useKeypress( + (key) => { + if (privacyState.error && key.name === 'escape') { + 
onExit(); + } + }, + { isActive: true }, + ); if (privacyState.isLoading) { return Loading...; diff --git a/packages/cli/src/ui/privacy/CloudPaidPrivacyNotice.tsx b/packages/cli/src/ui/privacy/CloudPaidPrivacyNotice.tsx index e50dcd4b..f0adbb68 100644 --- a/packages/cli/src/ui/privacy/CloudPaidPrivacyNotice.tsx +++ b/packages/cli/src/ui/privacy/CloudPaidPrivacyNotice.tsx @@ -4,8 +4,9 @@ * SPDX-License-Identifier: Apache-2.0 */ -import { Box, Newline, Text, useInput } from 'ink'; +import { Box, Newline, Text } from 'ink'; import { Colors } from '../colors.js'; +import { useKeypress } from '../hooks/useKeypress.js'; interface CloudPaidPrivacyNoticeProps { onExit: () => void; @@ -14,11 +15,14 @@ interface CloudPaidPrivacyNoticeProps { export const CloudPaidPrivacyNotice = ({ onExit, }: CloudPaidPrivacyNoticeProps) => { - useInput((input, key) => { - if (key.escape) { - onExit(); - } - }); + useKeypress( + (key) => { + if (key.name === 'escape') { + onExit(); + } + }, + { isActive: true }, + ); return ( diff --git a/packages/cli/src/ui/privacy/GeminiPrivacyNotice.tsx b/packages/cli/src/ui/privacy/GeminiPrivacyNotice.tsx index 57030ac3..c0eaa74f 100644 --- a/packages/cli/src/ui/privacy/GeminiPrivacyNotice.tsx +++ b/packages/cli/src/ui/privacy/GeminiPrivacyNotice.tsx @@ -4,19 +4,23 @@ * SPDX-License-Identifier: Apache-2.0 */ -import { Box, Newline, Text, useInput } from 'ink'; +import { Box, Newline, Text } from 'ink'; import { Colors } from '../colors.js'; +import { useKeypress } from '../hooks/useKeypress.js'; interface GeminiPrivacyNoticeProps { onExit: () => void; } export const GeminiPrivacyNotice = ({ onExit }: GeminiPrivacyNoticeProps) => { - useInput((input, key) => { - if (key.escape) { - onExit(); - } - }); + useKeypress( + (key) => { + if (key.name === 'escape') { + onExit(); + } + }, + { isActive: true }, + ); return ( diff --git a/packages/cli/src/ui/utils/kittyProtocolDetector.ts b/packages/cli/src/ui/utils/kittyProtocolDetector.ts new file mode 100644 
index 00000000..5d77943a --- /dev/null +++ b/packages/cli/src/ui/utils/kittyProtocolDetector.ts @@ -0,0 +1,105 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +let detectionComplete = false; +let protocolSupported = false; +let protocolEnabled = false; + +/** + * Detects Kitty keyboard protocol support. + * Definitive document about this protocol lives at https://sw.kovidgoyal.net/kitty/keyboard-protocol/ + * This function should be called once at app startup. + */ +export async function detectAndEnableKittyProtocol(): Promise { + if (detectionComplete) { + return protocolSupported; + } + + return new Promise((resolve) => { + if (!process.stdin.isTTY || !process.stdout.isTTY) { + detectionComplete = true; + resolve(false); + return; + } + + const originalRawMode = process.stdin.isRaw; + if (!originalRawMode) { + process.stdin.setRawMode(true); + } + + let responseBuffer = ''; + let progressiveEnhancementReceived = false; + let checkFinished = false; + + const handleData = (data: Buffer) => { + responseBuffer += data.toString(); + + // Check for progressive enhancement response (CSI ? u) + if (responseBuffer.includes('\x1b[?') && responseBuffer.includes('u')) { + progressiveEnhancementReceived = true; + } + + // Check for device attributes response (CSI ? 
c) + if (responseBuffer.includes('\x1b[?') && responseBuffer.includes('c')) { + if (!checkFinished) { + checkFinished = true; + process.stdin.removeListener('data', handleData); + + if (!originalRawMode) { + process.stdin.setRawMode(false); + } + + if (progressiveEnhancementReceived) { + // Enable the protocol + process.stdout.write('\x1b[>1u'); + protocolSupported = true; + protocolEnabled = true; + + // Set up cleanup on exit + process.on('exit', disableProtocol); + process.on('SIGTERM', disableProtocol); + } + + detectionComplete = true; + resolve(protocolSupported); + } + } + }; + + process.stdin.on('data', handleData); + + // Send queries + process.stdout.write('\x1b[?u'); // Query progressive enhancement + process.stdout.write('\x1b[c'); // Query device attributes + + // Timeout after 50ms + setTimeout(() => { + if (!checkFinished) { + process.stdin.removeListener('data', handleData); + if (!originalRawMode) { + process.stdin.setRawMode(false); + } + detectionComplete = true; + resolve(false); + } + }, 50); + }); +} + +function disableProtocol() { + if (protocolEnabled) { + process.stdout.write('\x1b[ ; u/~ + * Example: \x1b[13;2u (Shift+Enter) = 8 chars + * Longest reasonable: \x1b[127;15~ = 11 chars (Del with all modifiers) + * We use 12 to provide a small buffer. + */ +export const MAX_KITTY_SEQUENCE_LENGTH = 12; diff --git a/packages/cli/src/ui/utils/terminalSetup.ts b/packages/cli/src/ui/utils/terminalSetup.ts new file mode 100644 index 00000000..7f944847 --- /dev/null +++ b/packages/cli/src/ui/utils/terminalSetup.ts @@ -0,0 +1,340 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +/** + * Terminal setup utility for configuring Shift+Enter and Ctrl+Enter support. + * + * This module provides automatic detection and configuration of various terminal + * emulators to support multiline input through modified Enter keys. 
+ * + * Supported terminals: + * - VS Code: Configures keybindings.json to send \\\r\n + * - Cursor: Configures keybindings.json to send \\\r\n (VS Code fork) + * - Windsurf: Configures keybindings.json to send \\\r\n (VS Code fork) + * + * For VS Code and its forks: + * - Shift+Enter: Sends \\\r\n (backslash followed by CRLF) + * - Ctrl+Enter: Sends \\\r\n (backslash followed by CRLF) + * + * The module will not modify existing shift+enter or ctrl+enter keybindings + * to avoid conflicts with user customizations. + */ + +import { promises as fs } from 'fs'; +import * as os from 'os'; +import * as path from 'path'; +import { exec } from 'child_process'; +import { promisify } from 'util'; +import { isKittyProtocolEnabled } from './kittyProtocolDetector.js'; +import { VSCODE_SHIFT_ENTER_SEQUENCE } from './platformConstants.js'; + +const execAsync = promisify(exec); + +/** + * Removes single-line JSON comments (// ...) from a string to allow parsing + * VS Code style JSON files that may contain comments. + */ +function stripJsonComments(content: string): string { + // Remove single-line comments (// ...) 
+ return content.replace(/^\s*\/\/.*$/gm, ''); +} + +export interface TerminalSetupResult { + success: boolean; + message: string; + requiresRestart?: boolean; +} + +type SupportedTerminal = 'vscode' | 'cursor' | 'windsurf'; + +// Terminal detection +async function detectTerminal(): Promise { + const termProgram = process.env.TERM_PROGRAM; + + // Check VS Code and its forks - check forks first to avoid false positives + // Check for Cursor-specific indicators + if ( + process.env.CURSOR_TRACE_ID || + process.env.VSCODE_GIT_ASKPASS_MAIN?.toLowerCase().includes('cursor') + ) { + return 'cursor'; + } + // Check for Windsurf-specific indicators + if (process.env.VSCODE_GIT_ASKPASS_MAIN?.toLowerCase().includes('windsurf')) { + return 'windsurf'; + } + // Check VS Code last since forks may also set VSCODE env vars + if (termProgram === 'vscode' || process.env.VSCODE_GIT_IPC_HANDLE) { + return 'vscode'; + } + + // Check parent process name + if (os.platform() !== 'win32') { + try { + const { stdout } = await execAsync('ps -o comm= -p $PPID'); + const parentName = stdout.trim(); + + // Check forks before VS Code to avoid false positives + if (parentName.includes('windsurf') || parentName.includes('Windsurf')) + return 'windsurf'; + if (parentName.includes('cursor') || parentName.includes('Cursor')) + return 'cursor'; + if (parentName.includes('code') || parentName.includes('Code')) + return 'vscode'; + } catch (error) { + // Continue detection even if process check fails + console.debug('Parent process detection failed:', error); + } + } + + return null; +} + +// Backup file helper +async function backupFile(filePath: string): Promise { + try { + const timestamp = new Date().toISOString().replace(/[:.]/g, '-'); + const backupPath = `${filePath}.backup.${timestamp}`; + await fs.copyFile(filePath, backupPath); + } catch (error) { + // Log backup errors but continue with operation + console.warn(`Failed to create backup of ${filePath}:`, error); + } +} + +// Helper function 
to get VS Code-style config directory +function getVSCodeStyleConfigDir(appName: string): string | null { + const platform = os.platform(); + + if (platform === 'darwin') { + return path.join( + os.homedir(), + 'Library', + 'Application Support', + appName, + 'User', + ); + } else if (platform === 'win32') { + if (!process.env.APPDATA) { + return null; + } + return path.join(process.env.APPDATA, appName, 'User'); + } else { + return path.join(os.homedir(), '.config', appName, 'User'); + } +} + +// Generic VS Code-style terminal configuration +async function configureVSCodeStyle( + terminalName: string, + appName: string, +): Promise { + const configDir = getVSCodeStyleConfigDir(appName); + + if (!configDir) { + return { + success: false, + message: `Could not determine ${terminalName} config path on Windows: APPDATA environment variable is not set.`, + }; + } + + const keybindingsFile = path.join(configDir, 'keybindings.json'); + + try { + await fs.mkdir(configDir, { recursive: true }); + + let keybindings: unknown[] = []; + try { + const content = await fs.readFile(keybindingsFile, 'utf8'); + await backupFile(keybindingsFile); + try { + const cleanContent = stripJsonComments(content); + const parsedContent = JSON.parse(cleanContent); + if (!Array.isArray(parsedContent)) { + return { + success: false, + message: + `${terminalName} keybindings.json exists but is not a valid JSON array. ` + + `Please fix the file manually or delete it to allow automatic configuration.\n` + + `File: ${keybindingsFile}`, + }; + } + keybindings = parsedContent; + } catch (parseError) { + return { + success: false, + message: + `Failed to parse ${terminalName} keybindings.json. 
The file contains invalid JSON.\n` + + `Please fix the file manually or delete it to allow automatic configuration.\n` + + `File: ${keybindingsFile}\n` + + `Error: ${parseError}`, + }; + } + } catch { + // File doesn't exist, will create new one + } + + const shiftEnterBinding = { + key: 'shift+enter', + command: 'workbench.action.terminal.sendSequence', + when: 'terminalFocus', + args: { text: VSCODE_SHIFT_ENTER_SEQUENCE }, + }; + + const ctrlEnterBinding = { + key: 'ctrl+enter', + command: 'workbench.action.terminal.sendSequence', + when: 'terminalFocus', + args: { text: VSCODE_SHIFT_ENTER_SEQUENCE }, + }; + + // Check if ANY shift+enter or ctrl+enter bindings already exist + const existingShiftEnter = keybindings.find((kb) => { + const binding = kb as { key?: string }; + return binding.key === 'shift+enter'; + }); + + const existingCtrlEnter = keybindings.find((kb) => { + const binding = kb as { key?: string }; + return binding.key === 'ctrl+enter'; + }); + + if (existingShiftEnter || existingCtrlEnter) { + const messages: string[] = []; + if (existingShiftEnter) { + messages.push(`- Shift+Enter binding already exists`); + } + if (existingCtrlEnter) { + messages.push(`- Ctrl+Enter binding already exists`); + } + return { + success: false, + message: + `Existing keybindings detected. 
Will not modify to avoid conflicts.\n` + + messages.join('\n') + + '\n' + + `Please check and modify manually if needed: ${keybindingsFile}`, + }; + } + + // Check if our specific bindings already exist + const hasOurShiftEnter = keybindings.some((kb) => { + const binding = kb as { + command?: string; + args?: { text?: string }; + key?: string; + }; + return ( + binding.key === 'shift+enter' && + binding.command === 'workbench.action.terminal.sendSequence' && + binding.args?.text === '\\\r\n' + ); + }); + + const hasOurCtrlEnter = keybindings.some((kb) => { + const binding = kb as { + command?: string; + args?: { text?: string }; + key?: string; + }; + return ( + binding.key === 'ctrl+enter' && + binding.command === 'workbench.action.terminal.sendSequence' && + binding.args?.text === '\\\r\n' + ); + }); + + if (!hasOurShiftEnter || !hasOurCtrlEnter) { + if (!hasOurShiftEnter) keybindings.unshift(shiftEnterBinding); + if (!hasOurCtrlEnter) keybindings.unshift(ctrlEnterBinding); + + await fs.writeFile(keybindingsFile, JSON.stringify(keybindings, null, 4)); + return { + success: true, + message: `Added Shift+Enter and Ctrl+Enter keybindings to ${terminalName}.\nModified: ${keybindingsFile}`, + requiresRestart: true, + }; + } else { + return { + success: true, + message: `${terminalName} keybindings already configured.`, + }; + } + } catch (error) { + return { + success: false, + message: `Failed to configure ${terminalName}.\nFile: ${keybindingsFile}\nError: ${error}`, + }; + } +} + +// Terminal-specific configuration functions + +async function configureVSCode(): Promise { + return configureVSCodeStyle('VS Code', 'Code'); +} + +async function configureCursor(): Promise { + return configureVSCodeStyle('Cursor', 'Cursor'); +} + +async function configureWindsurf(): Promise { + return configureVSCodeStyle('Windsurf', 'Windsurf'); +} + +/** + * Main terminal setup function that detects and configures the current terminal. + * + * This function: + * 1. 
Detects the current terminal emulator + * 2. Applies appropriate configuration for Shift+Enter and Ctrl+Enter support + * 3. Creates backups of configuration files before modifying them + * + * @returns Promise Result object with success status and message + * + * @example + * const result = await terminalSetup(); + * if (result.success) { + * console.log(result.message); + * if (result.requiresRestart) { + * console.log('Please restart your terminal'); + * } + * } + */ +export async function terminalSetup(): Promise { + // Check if terminal already has optimal keyboard support + if (isKittyProtocolEnabled()) { + return { + success: true, + message: + 'Your terminal is already configured for an optimal experience with multiline input (Shift+Enter and Ctrl+Enter).', + }; + } + + const terminal = await detectTerminal(); + + if (!terminal) { + return { + success: false, + message: + 'Could not detect terminal type. Supported terminals: VS Code, Cursor, and Windsurf.', + }; + } + + switch (terminal) { + case 'vscode': + return configureVSCode(); + case 'cursor': + return configureCursor(); + case 'windsurf': + return configureWindsurf(); + default: + return { + success: false, + message: `Terminal "${terminal}" is not supported yet.`, + }; + } +} diff --git a/packages/cli/src/utils/checks.ts b/packages/cli/src/utils/checks.ts new file mode 100644 index 00000000..0598835f --- /dev/null +++ b/packages/cli/src/utils/checks.ts @@ -0,0 +1,28 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +/* Fail to compile on unexpected values. */ +export function assumeExhaustive(_value: never): void {} + +/** + * Throws an exception on unexpected values. 
+ * + * A common use case is switch statements: + * switch(enumValue) { + * case Enum.A: + * case Enum.B: + * break; + * default: + * checkExhaustive(enumValue); + * } + */ +export function checkExhaustive( + value: never, + msg = `unexpected value ${value}!`, +): never { + assumeExhaustive(value); + throw new Error(msg); +} diff --git a/packages/cli/src/zed-integration/acp.ts b/packages/cli/src/zed-integration/acp.ts new file mode 100644 index 00000000..eef4e1ee --- /dev/null +++ b/packages/cli/src/zed-integration/acp.ts @@ -0,0 +1,366 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +/* ACP defines a schema for a simple (experimental) JSON-RPC protocol that allows GUI applications to interact with agents. */ + +import { z } from 'zod'; +import * as schema from './schema.js'; +export * from './schema.js'; + +import { WritableStream, ReadableStream } from 'node:stream/web'; + +export class AgentSideConnection implements Client { + #connection: Connection; + + constructor( + toAgent: (conn: Client) => Agent, + input: WritableStream, + output: ReadableStream, + ) { + const agent = toAgent(this); + + const handler = async ( + method: string, + params: unknown, + ): Promise => { + switch (method) { + case schema.AGENT_METHODS.initialize: { + const validatedParams = schema.initializeRequestSchema.parse(params); + return agent.initialize(validatedParams); + } + case schema.AGENT_METHODS.session_new: { + const validatedParams = schema.newSessionRequestSchema.parse(params); + return agent.newSession(validatedParams); + } + case schema.AGENT_METHODS.session_load: { + if (!agent.loadSession) { + throw RequestError.methodNotFound(); + } + const validatedParams = schema.loadSessionRequestSchema.parse(params); + return agent.loadSession(validatedParams); + } + case schema.AGENT_METHODS.authenticate: { + const validatedParams = + schema.authenticateRequestSchema.parse(params); + return agent.authenticate(validatedParams); + } + 
case schema.AGENT_METHODS.session_prompt: { + const validatedParams = schema.promptRequestSchema.parse(params); + return agent.prompt(validatedParams); + } + case schema.AGENT_METHODS.session_cancel: { + const validatedParams = schema.cancelNotificationSchema.parse(params); + return agent.cancel(validatedParams); + } + default: + throw RequestError.methodNotFound(method); + } + }; + + this.#connection = new Connection(handler, input, output); + } + + /** + * Streams new content to the client including text, tool calls, etc. + */ + async sessionUpdate(params: schema.SessionNotification): Promise { + return await this.#connection.sendNotification( + schema.CLIENT_METHODS.session_update, + params, + ); + } + + /** + * Request permission before running a tool + * + * The agent specifies a series of permission options with different granularity, + * and the client returns the chosen one. + */ + async requestPermission( + params: schema.RequestPermissionRequest, + ): Promise { + return await this.#connection.sendRequest( + schema.CLIENT_METHODS.session_request_permission, + params, + ); + } + + async readTextFile( + params: schema.ReadTextFileRequest, + ): Promise { + return await this.#connection.sendRequest( + schema.CLIENT_METHODS.fs_read_text_file, + params, + ); + } + + async writeTextFile( + params: schema.WriteTextFileRequest, + ): Promise { + return await this.#connection.sendRequest( + schema.CLIENT_METHODS.fs_write_text_file, + params, + ); + } +} + +type AnyMessage = AnyRequest | AnyResponse | AnyNotification; + +type AnyRequest = { + jsonrpc: '2.0'; + id: string | number; + method: string; + params?: unknown; +}; + +type AnyResponse = { + jsonrpc: '2.0'; + id: string | number; +} & Result; + +type AnyNotification = { + jsonrpc: '2.0'; + method: string; + params?: unknown; +}; + +type Result = + | { + result: T; + } + | { + error: ErrorResponse; + }; + +type ErrorResponse = { + code: number; + message: string; + data?: unknown; +}; + +type PendingResponse = { 
+ resolve: (response: unknown) => void; + reject: (error: ErrorResponse) => void; +}; + +type MethodHandler = (method: string, params: unknown) => Promise; + +class Connection { + #pendingResponses: Map = new Map(); + #nextRequestId: number = 0; + #handler: MethodHandler; + #peerInput: WritableStream; + #writeQueue: Promise = Promise.resolve(); + #textEncoder: TextEncoder; + + constructor( + handler: MethodHandler, + peerInput: WritableStream, + peerOutput: ReadableStream, + ) { + this.#handler = handler; + this.#peerInput = peerInput; + this.#textEncoder = new TextEncoder(); + this.#receive(peerOutput); + } + + async #receive(output: ReadableStream) { + let content = ''; + const decoder = new TextDecoder(); + for await (const chunk of output) { + content += decoder.decode(chunk, { stream: true }); + const lines = content.split('\n'); + content = lines.pop() || ''; + + for (const line of lines) { + const trimmedLine = line.trim(); + + if (trimmedLine) { + const message = JSON.parse(trimmedLine); + this.#processMessage(message); + } + } + } + } + + async #processMessage(message: AnyMessage) { + if ('method' in message && 'id' in message) { + // It's a request + const response = await this.#tryCallHandler( + message.method, + message.params, + ); + + await this.#sendMessage({ + jsonrpc: '2.0', + id: message.id, + ...response, + }); + } else if ('method' in message) { + // It's a notification + await this.#tryCallHandler(message.method, message.params); + } else if ('id' in message) { + // It's a response + this.#handleResponse(message as AnyResponse); + } + } + + async #tryCallHandler( + method: string, + params?: unknown, + ): Promise> { + try { + const result = await this.#handler(method, params); + return { result: result ?? 
null }; + } catch (error: unknown) { + if (error instanceof RequestError) { + return error.toResult(); + } + + if (error instanceof z.ZodError) { + return RequestError.invalidParams( + JSON.stringify(error.format(), undefined, 2), + ).toResult(); + } + + let details; + + if (error instanceof Error) { + details = error.message; + } else if ( + typeof error === 'object' && + error != null && + 'message' in error && + typeof error.message === 'string' + ) { + details = error.message; + } + + return RequestError.internalError(details).toResult(); + } + } + + #handleResponse(response: AnyResponse) { + const pendingResponse = this.#pendingResponses.get(response.id); + if (pendingResponse) { + if ('result' in response) { + pendingResponse.resolve(response.result); + } else if ('error' in response) { + pendingResponse.reject(response.error); + } + this.#pendingResponses.delete(response.id); + } + } + + async sendRequest(method: string, params?: Req): Promise { + const id = this.#nextRequestId++; + const responsePromise = new Promise((resolve, reject) => { + this.#pendingResponses.set(id, { resolve, reject }); + }); + await this.#sendMessage({ jsonrpc: '2.0', id, method, params }); + return responsePromise as Promise; + } + + async sendNotification(method: string, params?: N): Promise { + await this.#sendMessage({ jsonrpc: '2.0', method, params }); + } + + async #sendMessage(json: AnyMessage) { + const content = JSON.stringify(json) + '\n'; + this.#writeQueue = this.#writeQueue + .then(async () => { + const writer = this.#peerInput.getWriter(); + try { + await writer.write(this.#textEncoder.encode(content)); + } finally { + writer.releaseLock(); + } + }) + .catch((error) => { + // Continue processing writes on error + console.error('ACP write error:', error); + }); + return this.#writeQueue; + } +} + +export class RequestError extends Error { + data?: { details?: string }; + + constructor( + public code: number, + message: string, + details?: string, + ) { + super(message); 
+ this.name = 'RequestError'; + if (details) { + this.data = { details }; + } + } + + static parseError(details?: string): RequestError { + return new RequestError(-32700, 'Parse error', details); + } + + static invalidRequest(details?: string): RequestError { + return new RequestError(-32600, 'Invalid request', details); + } + + static methodNotFound(details?: string): RequestError { + return new RequestError(-32601, 'Method not found', details); + } + + static invalidParams(details?: string): RequestError { + return new RequestError(-32602, 'Invalid params', details); + } + + static internalError(details?: string): RequestError { + return new RequestError(-32603, 'Internal error', details); + } + + static authRequired(details?: string): RequestError { + return new RequestError(-32000, 'Authentication required', details); + } + + toResult(): Result { + return { + error: { + code: this.code, + message: this.message, + data: this.data, + }, + }; + } +} + +export interface Client { + requestPermission( + params: schema.RequestPermissionRequest, + ): Promise; + sessionUpdate(params: schema.SessionNotification): Promise; + writeTextFile( + params: schema.WriteTextFileRequest, + ): Promise; + readTextFile( + params: schema.ReadTextFileRequest, + ): Promise; +} + +export interface Agent { + initialize( + params: schema.InitializeRequest, + ): Promise; + newSession( + params: schema.NewSessionRequest, + ): Promise; + loadSession?( + params: schema.LoadSessionRequest, + ): Promise; + authenticate(params: schema.AuthenticateRequest): Promise; + prompt(params: schema.PromptRequest): Promise; + cancel(params: schema.CancelNotification): Promise; +} diff --git a/packages/cli/src/zed-integration/schema.ts b/packages/cli/src/zed-integration/schema.ts new file mode 100644 index 00000000..4c962131 --- /dev/null +++ b/packages/cli/src/zed-integration/schema.ts @@ -0,0 +1,457 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { 
z } from 'zod'; + +export const AGENT_METHODS = { + authenticate: 'authenticate', + initialize: 'initialize', + session_cancel: 'session/cancel', + session_load: 'session/load', + session_new: 'session/new', + session_prompt: 'session/prompt', +}; + +export const CLIENT_METHODS = { + fs_read_text_file: 'fs/read_text_file', + fs_write_text_file: 'fs/write_text_file', + session_request_permission: 'session/request_permission', + session_update: 'session/update', +}; + +export const PROTOCOL_VERSION = 1; + +export type WriteTextFileRequest = z.infer; + +export type ReadTextFileRequest = z.infer; + +export type PermissionOptionKind = z.infer; + +export type Role = z.infer; + +export type TextResourceContents = z.infer; + +export type BlobResourceContents = z.infer; + +export type ToolKind = z.infer; + +export type ToolCallStatus = z.infer; + +export type WriteTextFileResponse = z.infer; + +export type ReadTextFileResponse = z.infer; + +export type RequestPermissionOutcome = z.infer< + typeof requestPermissionOutcomeSchema +>; + +export type CancelNotification = z.infer; + +export type AuthenticateRequest = z.infer; + +export type AuthenticateResponse = z.infer; + +export type NewSessionResponse = z.infer; + +export type LoadSessionResponse = z.infer; + +export type StopReason = z.infer; + +export type PromptResponse = z.infer; + +export type ToolCallLocation = z.infer; + +export type PlanEntry = z.infer; + +export type PermissionOption = z.infer; + +export type Annotations = z.infer; + +export type RequestPermissionResponse = z.infer< + typeof requestPermissionResponseSchema +>; + +export type FileSystemCapability = z.infer; + +export type EnvVariable = z.infer; + +export type McpServer = z.infer; + +export type AgentCapabilities = z.infer; + +export type AuthMethod = z.infer; + +export type ClientResponse = z.infer; + +export type ClientNotification = z.infer; + +export type EmbeddedResourceResource = z.infer< + typeof embeddedResourceResourceSchema +>; + +export type 
NewSessionRequest = z.infer; + +export type LoadSessionRequest = z.infer; + +export type InitializeResponse = z.infer; + +export type ContentBlock = z.infer; + +export type ToolCallContent = z.infer; + +export type ToolCall = z.infer; + +export type ClientCapabilities = z.infer; + +export type PromptRequest = z.infer; + +export type SessionUpdate = z.infer; + +export type AgentResponse = z.infer; + +export type RequestPermissionRequest = z.infer< + typeof requestPermissionRequestSchema +>; + +export type InitializeRequest = z.infer; + +export type SessionNotification = z.infer; + +export type ClientRequest = z.infer; + +export type AgentRequest = z.infer; + +export type AgentNotification = z.infer; + +export const writeTextFileRequestSchema = z.object({ + content: z.string(), + path: z.string(), + sessionId: z.string(), +}); + +export const readTextFileRequestSchema = z.object({ + limit: z.number().optional().nullable(), + line: z.number().optional().nullable(), + path: z.string(), + sessionId: z.string(), +}); + +export const permissionOptionKindSchema = z.union([ + z.literal('allow_once'), + z.literal('allow_always'), + z.literal('reject_once'), + z.literal('reject_always'), +]); + +export const roleSchema = z.union([z.literal('assistant'), z.literal('user')]); + +export const textResourceContentsSchema = z.object({ + mimeType: z.string().optional().nullable(), + text: z.string(), + uri: z.string(), +}); + +export const blobResourceContentsSchema = z.object({ + blob: z.string(), + mimeType: z.string().optional().nullable(), + uri: z.string(), +}); + +export const toolKindSchema = z.union([ + z.literal('read'), + z.literal('edit'), + z.literal('delete'), + z.literal('move'), + z.literal('search'), + z.literal('execute'), + z.literal('think'), + z.literal('fetch'), + z.literal('other'), +]); + +export const toolCallStatusSchema = z.union([ + z.literal('pending'), + z.literal('in_progress'), + z.literal('completed'), + z.literal('failed'), +]); + +export const 
writeTextFileResponseSchema = z.null(); + +export const readTextFileResponseSchema = z.object({ + content: z.string(), +}); + +export const requestPermissionOutcomeSchema = z.union([ + z.object({ + outcome: z.literal('cancelled'), + }), + z.object({ + optionId: z.string(), + outcome: z.literal('selected'), + }), +]); + +export const cancelNotificationSchema = z.object({ + sessionId: z.string(), +}); + +export const authenticateRequestSchema = z.object({ + methodId: z.string(), +}); + +export const authenticateResponseSchema = z.null(); + +export const newSessionResponseSchema = z.object({ + sessionId: z.string(), +}); + +export const loadSessionResponseSchema = z.null(); + +export const stopReasonSchema = z.union([ + z.literal('end_turn'), + z.literal('max_tokens'), + z.literal('refusal'), + z.literal('cancelled'), +]); + +export const promptResponseSchema = z.object({ + stopReason: stopReasonSchema, +}); + +export const toolCallLocationSchema = z.object({ + line: z.number().optional().nullable(), + path: z.string(), +}); + +export const planEntrySchema = z.object({ + content: z.string(), + priority: z.union([z.literal('high'), z.literal('medium'), z.literal('low')]), + status: z.union([ + z.literal('pending'), + z.literal('in_progress'), + z.literal('completed'), + ]), +}); + +export const permissionOptionSchema = z.object({ + kind: permissionOptionKindSchema, + name: z.string(), + optionId: z.string(), +}); + +export const annotationsSchema = z.object({ + audience: z.array(roleSchema).optional().nullable(), + lastModified: z.string().optional().nullable(), + priority: z.number().optional().nullable(), +}); + +export const requestPermissionResponseSchema = z.object({ + outcome: requestPermissionOutcomeSchema, +}); + +export const fileSystemCapabilitySchema = z.object({ + readTextFile: z.boolean(), + writeTextFile: z.boolean(), +}); + +export const envVariableSchema = z.object({ + name: z.string(), + value: z.string(), +}); + +export const mcpServerSchema = 
z.object({ + args: z.array(z.string()), + command: z.string(), + env: z.array(envVariableSchema), + name: z.string(), +}); + +export const agentCapabilitiesSchema = z.object({ + loadSession: z.boolean(), +}); + +export const authMethodSchema = z.object({ + description: z.string().nullable(), + id: z.string(), + name: z.string(), +}); + +export const clientResponseSchema = z.union([ + writeTextFileResponseSchema, + readTextFileResponseSchema, + requestPermissionResponseSchema, +]); + +export const clientNotificationSchema = cancelNotificationSchema; + +export const embeddedResourceResourceSchema = z.union([ + textResourceContentsSchema, + blobResourceContentsSchema, +]); + +export const newSessionRequestSchema = z.object({ + cwd: z.string(), + mcpServers: z.array(mcpServerSchema), +}); + +export const loadSessionRequestSchema = z.object({ + cwd: z.string(), + mcpServers: z.array(mcpServerSchema), + sessionId: z.string(), +}); + +export const initializeResponseSchema = z.object({ + agentCapabilities: agentCapabilitiesSchema, + authMethods: z.array(authMethodSchema), + protocolVersion: z.number(), +}); + +export const contentBlockSchema = z.union([ + z.object({ + annotations: annotationsSchema.optional().nullable(), + text: z.string(), + type: z.literal('text'), + }), + z.object({ + annotations: annotationsSchema.optional().nullable(), + data: z.string(), + mimeType: z.string(), + type: z.literal('image'), + }), + z.object({ + annotations: annotationsSchema.optional().nullable(), + data: z.string(), + mimeType: z.string(), + type: z.literal('audio'), + }), + z.object({ + annotations: annotationsSchema.optional().nullable(), + description: z.string().optional().nullable(), + mimeType: z.string().optional().nullable(), + name: z.string(), + size: z.number().optional().nullable(), + title: z.string().optional().nullable(), + type: z.literal('resource_link'), + uri: z.string(), + }), + z.object({ + annotations: annotationsSchema.optional().nullable(), + resource: 
embeddedResourceResourceSchema, + type: z.literal('resource'), + }), +]); + +export const toolCallContentSchema = z.union([ + z.object({ + content: contentBlockSchema, + type: z.literal('content'), + }), + z.object({ + newText: z.string(), + oldText: z.string().nullable(), + path: z.string(), + type: z.literal('diff'), + }), +]); + +export const toolCallSchema = z.object({ + content: z.array(toolCallContentSchema).optional(), + kind: toolKindSchema, + locations: z.array(toolCallLocationSchema).optional(), + rawInput: z.unknown().optional(), + status: toolCallStatusSchema, + title: z.string(), + toolCallId: z.string(), +}); + +export const clientCapabilitiesSchema = z.object({ + fs: fileSystemCapabilitySchema, +}); + +export const promptRequestSchema = z.object({ + prompt: z.array(contentBlockSchema), + sessionId: z.string(), +}); + +export const sessionUpdateSchema = z.union([ + z.object({ + content: contentBlockSchema, + sessionUpdate: z.literal('user_message_chunk'), + }), + z.object({ + content: contentBlockSchema, + sessionUpdate: z.literal('agent_message_chunk'), + }), + z.object({ + content: contentBlockSchema, + sessionUpdate: z.literal('agent_thought_chunk'), + }), + z.object({ + content: z.array(toolCallContentSchema).optional(), + kind: toolKindSchema, + locations: z.array(toolCallLocationSchema).optional(), + rawInput: z.unknown().optional(), + sessionUpdate: z.literal('tool_call'), + status: toolCallStatusSchema, + title: z.string(), + toolCallId: z.string(), + }), + z.object({ + content: z.array(toolCallContentSchema).optional().nullable(), + kind: toolKindSchema.optional().nullable(), + locations: z.array(toolCallLocationSchema).optional().nullable(), + rawInput: z.unknown().optional(), + sessionUpdate: z.literal('tool_call_update'), + status: toolCallStatusSchema.optional().nullable(), + title: z.string().optional().nullable(), + toolCallId: z.string(), + }), + z.object({ + entries: z.array(planEntrySchema), + sessionUpdate: z.literal('plan'), + }), 
+]); + +export const agentResponseSchema = z.union([ + initializeResponseSchema, + authenticateResponseSchema, + newSessionResponseSchema, + loadSessionResponseSchema, + promptResponseSchema, +]); + +export const requestPermissionRequestSchema = z.object({ + options: z.array(permissionOptionSchema), + sessionId: z.string(), + toolCall: toolCallSchema, +}); + +export const initializeRequestSchema = z.object({ + clientCapabilities: clientCapabilitiesSchema, + protocolVersion: z.number(), +}); + +export const sessionNotificationSchema = z.object({ + sessionId: z.string(), + update: sessionUpdateSchema, +}); + +export const clientRequestSchema = z.union([ + writeTextFileRequestSchema, + readTextFileRequestSchema, + requestPermissionRequestSchema, +]); + +export const agentRequestSchema = z.union([ + initializeRequestSchema, + authenticateRequestSchema, + newSessionRequestSchema, + loadSessionRequestSchema, + promptRequestSchema, +]); + +export const agentNotificationSchema = sessionNotificationSchema; diff --git a/packages/cli/src/acp/acpPeer.ts b/packages/cli/src/zed-integration/zedIntegration.ts similarity index 53% rename from packages/cli/src/acp/acpPeer.ts rename to packages/cli/src/zed-integration/zedIntegration.ts index 18e37f35..f0005f31 100644 --- a/packages/cli/src/acp/acpPeer.ts +++ b/packages/cli/src/zed-integration/zedIntegration.ts @@ -4,33 +4,43 @@ * SPDX-License-Identifier: Apache-2.0 */ -import { WritableStream, ReadableStream } from 'node:stream/web'; +import { ReadableStream, WritableStream } from 'node:stream/web'; +import { Content, FunctionCall, Part, PartListUnion } from '@google/genai'; import { AuthType, + clearCachedCredentialFile, Config, - GeminiChat, - ToolRegistry, - logToolCall, - ToolResult, convertToFunctionResponse, + GeminiChat, + getErrorMessage, + getErrorStatus, + isNodeError, + isWithinRoot, + logToolCall, + MCPServerConfig, ToolCallConfirmationDetails, ToolConfirmationOutcome, - clearCachedCredentialFile, - isNodeError, - 
getErrorMessage, - isWithinRoot, - getErrorStatus, + ToolRegistry, + ToolResult, } from '@qwen-code/qwen-code-core'; -import * as acp from './acp.js'; -import { Agent } from './acp.js'; -import { Readable, Writable } from 'node:stream'; -import { Content, Part, FunctionCall, PartListUnion } from '@google/genai'; -import { LoadedSettings, SettingScope } from '../config/settings.js'; import * as fs from 'fs/promises'; +import { Readable, Writable } from 'node:stream'; import * as path from 'path'; +import { z } from 'zod'; +import { LoadedSettings, SettingScope } from '../config/settings.js'; +import * as acp from './acp.js'; -export async function runAcpPeer(config: Config, settings: LoadedSettings) { +import { randomUUID } from 'crypto'; +import { CliArgs, loadCliConfig } from '../config/config.js'; +import { Extension } from '../config/extension.js'; + +export async function runZedIntegration( + config: Config, + settings: LoadedSettings, + extensions: Extension[], + argv: CliArgs, +) { const stdout = Writable.toWeb(process.stdout) as WritableStream; const stdin = Readable.toWeb(process.stdin) as ReadableStream; @@ -40,76 +50,176 @@ export async function runAcpPeer(config: Config, settings: LoadedSettings) { console.info = console.error; console.debug = console.error; - new acp.ClientConnection( - (client: acp.Client) => new GeminiAgent(config, settings, client), + new acp.AgentSideConnection( + (client: acp.Client) => + new GeminiAgent(config, settings, extensions, argv, client), stdout, stdin, ); } -class GeminiAgent implements Agent { - chat?: GeminiChat; - pendingSend?: AbortController; +class GeminiAgent { + private sessions: Map = new Map(); constructor( private config: Config, private settings: LoadedSettings, + private extensions: Extension[], + private argv: CliArgs, private client: acp.Client, ) {} - async initialize(_: acp.InitializeParams): Promise { + async initialize( + _args: acp.InitializeRequest, + ): Promise { + const authMethods = [ + { + id: 
AuthType.LOGIN_WITH_GOOGLE, + name: 'Log in with Google', + description: null, + }, + { + id: AuthType.USE_GEMINI, + name: 'Use Gemini API key', + description: + 'Requires setting the `GEMINI_API_KEY` environment variable', + }, + { + id: AuthType.USE_VERTEX_AI, + name: 'Vertex AI', + description: null, + }, + ]; + + return { + protocolVersion: acp.PROTOCOL_VERSION, + authMethods, + agentCapabilities: { + loadSession: false, + }, + }; + } + + async authenticate({ methodId }: acp.AuthenticateRequest): Promise { + const method = z.nativeEnum(AuthType).parse(methodId); + + await clearCachedCredentialFile(); + await this.config.refreshAuth(method); + this.settings.setValue(SettingScope.User, 'selectedAuthType', method); + } + + async newSession({ + cwd, + mcpServers, + }: acp.NewSessionRequest): Promise { + const sessionId = randomUUID(); + const config = await this.newSessionConfig(sessionId, cwd, mcpServers); + let isAuthenticated = false; if (this.settings.merged.selectedAuthType) { try { - await this.config.refreshAuth(this.settings.merged.selectedAuthType); + await config.refreshAuth(this.settings.merged.selectedAuthType); isAuthenticated = true; - } catch (error) { - console.error('Failed to refresh auth:', error); + } catch (e) { + console.error(`Authentication failed: ${e}`); } } - return { protocolVersion: acp.LATEST_PROTOCOL_VERSION, isAuthenticated }; + + if (!isAuthenticated) { + throw acp.RequestError.authRequired(); + } + + const geminiClient = config.getGeminiClient(); + const chat = await geminiClient.startChat(); + const session = new Session(sessionId, chat, config, this.client); + this.sessions.set(sessionId, session); + + return { + sessionId, + }; } - async authenticate(): Promise { - await clearCachedCredentialFile(); - await this.config.refreshAuth(AuthType.LOGIN_WITH_GOOGLE); - this.settings.setValue( - SettingScope.User, - 'selectedAuthType', - AuthType.LOGIN_WITH_GOOGLE, + async newSessionConfig( + sessionId: string, + cwd: string, + 
mcpServers: acp.McpServer[], + ): Promise { + const mergedMcpServers = { ...this.settings.merged.mcpServers }; + + for (const { command, args, env: rawEnv, name } of mcpServers) { + const env: Record = {}; + for (const { name: envName, value } of rawEnv) { + env[envName] = value; + } + mergedMcpServers[name] = new MCPServerConfig(command, args, env, cwd); + } + + const settings = { ...this.settings.merged, mcpServers: mergedMcpServers }; + + const config = await loadCliConfig( + settings, + this.extensions, + sessionId, + this.argv, + cwd, ); + + await config.initialize(); + return config; } - async cancelSendMessage(): Promise { - if (!this.pendingSend) { + async cancel(params: acp.CancelNotification): Promise { + const session = this.sessions.get(params.sessionId); + if (!session) { + throw new Error(`Session not found: ${params.sessionId}`); + } + await session.cancelPendingPrompt(); + } + + async prompt(params: acp.PromptRequest): Promise { + const session = this.sessions.get(params.sessionId); + if (!session) { + throw new Error(`Session not found: ${params.sessionId}`); + } + return session.prompt(params); + } +} + +class Session { + private pendingPrompt: AbortController | null = null; + + constructor( + private readonly id: string, + private readonly chat: GeminiChat, + private readonly config: Config, + private readonly client: acp.Client, + ) {} + + async cancelPendingPrompt(): Promise { + if (!this.pendingPrompt) { throw new Error('Not currently generating'); } - this.pendingSend.abort(); - delete this.pendingSend; + this.pendingPrompt.abort(); + this.pendingPrompt = null; } - async sendUserMessage(params: acp.SendUserMessageParams): Promise { - this.pendingSend?.abort(); + async prompt(params: acp.PromptRequest): Promise { + this.pendingPrompt?.abort(); const pendingSend = new AbortController(); - this.pendingSend = pendingSend; - - if (!this.chat) { - const geminiClient = this.config.getGeminiClient(); - this.chat = await geminiClient.startChat(); - } 
+ this.pendingPrompt = pendingSend; const promptId = Math.random().toString(16).slice(2); - const chat = this.chat!; - const toolRegistry: ToolRegistry = await this.config.getToolRegistry(); - const parts = await this.#resolveUserMessage(params, pendingSend.signal); + const chat = this.chat; + + const parts = await this.#resolvePrompt(params.prompt, pendingSend.signal); let nextMessage: Content | null = { role: 'user', parts }; while (nextMessage !== null) { if (pendingSend.signal.aborted) { chat.addHistory(nextMessage); - return; + return { stopReason: 'cancelled' }; } const functionCalls: FunctionCall[] = []; @@ -120,11 +230,6 @@ class GeminiAgent implements Agent { message: nextMessage?.parts ?? [], config: { abortSignal: pendingSend.signal, - tools: [ - { - functionDeclarations: toolRegistry.getFunctionDeclarations(), - }, - ], }, }, promptId, @@ -133,7 +238,7 @@ class GeminiAgent implements Agent { for await (const resp of responseStream) { if (pendingSend.signal.aborted) { - return; + return { stopReason: 'cancelled' }; } if (resp.candidates && resp.candidates.length > 0) { @@ -143,10 +248,16 @@ class GeminiAgent implements Agent { continue; } - this.client.streamAssistantMessageChunk({ - chunk: part.thought - ? { thought: part.text } - : { text: part.text }, + const content: acp.ContentBlock = { + type: 'text', + text: part.text, + }; + + this.sendUpdate({ + sessionUpdate: part.thought + ? 'agent_thought_chunk' + : 'agent_message_chunk', + content, }); } } @@ -170,11 +281,7 @@ class GeminiAgent implements Agent { const toolResponseParts: Part[] = []; for (const fc of functionCalls) { - const response = await this.#runTool( - pendingSend.signal, - promptId, - fc, - ); + const response = await this.runTool(pendingSend.signal, promptId, fc); const parts = Array.isArray(response) ? 
response : [response]; @@ -190,9 +297,20 @@ class GeminiAgent implements Agent { nextMessage = { role: 'user', parts: toolResponseParts }; } } + + return { stopReason: 'end_turn' }; } - async #runTool( + private async sendUpdate(update: acp.SessionUpdate): Promise { + const params: acp.SessionNotification = { + sessionId: this.id, + update, + }; + + await this.client.sessionUpdate(params); + } + + private async runTool( abortSignal: AbortSignal, promptId: string, fc: FunctionCall, @@ -239,68 +357,82 @@ class GeminiAgent implements Agent { ); } - let toolCallId: number | undefined = undefined; - try { - const invocation = tool.build(args); - const confirmationDetails = - await invocation.shouldConfirmExecute(abortSignal); - if (confirmationDetails) { - let content: acp.ToolCallContent | null = null; - if (confirmationDetails.type === 'edit') { - content = { - type: 'diff', - path: confirmationDetails.fileName, - oldText: confirmationDetails.originalContent, - newText: confirmationDetails.newContent, - }; - } + const invocation = tool.build(args); + const confirmationDetails = + await invocation.shouldConfirmExecute(abortSignal); - const result = await this.client.requestToolCallConfirmation({ - label: invocation.getDescription(), - icon: tool.icon, - content, - confirmation: toAcpToolCallConfirmation(confirmationDetails), - locations: invocation.toolLocations(), + if (confirmationDetails) { + const content: acp.ToolCallContent[] = []; + + if (confirmationDetails.type === 'edit') { + content.push({ + type: 'diff', + path: confirmationDetails.fileName, + oldText: confirmationDetails.originalContent, + newText: confirmationDetails.newContent, }); - - await confirmationDetails.onConfirm(toToolCallOutcome(result.outcome)); - switch (result.outcome) { - case 'reject': - return errorResponse( - new Error(`Tool "${fc.name}" not allowed to run by the user.`), - ); - - case 'cancel': - return errorResponse( - new Error(`Tool "${fc.name}" was canceled by the user.`), - ); - 
case 'allow': - case 'alwaysAllow': - case 'alwaysAllowMcpServer': - case 'alwaysAllowTool': - break; - default: { - const resultOutcome: never = result.outcome; - throw new Error(`Unexpected: ${resultOutcome}`); - } - } - toolCallId = result.id; - } else { - const result = await this.client.pushToolCall({ - icon: tool.icon, - label: invocation.getDescription(), - locations: invocation.toolLocations(), - }); - toolCallId = result.id; } - const toolResult: ToolResult = await invocation.execute(abortSignal); - const toolCallContent = toToolCallContent(toolResult); + const params: acp.RequestPermissionRequest = { + sessionId: this.id, + options: toPermissionOptions(confirmationDetails), + toolCall: { + toolCallId: callId, + status: 'pending', + title: invocation.getDescription(), + content, + locations: invocation.toolLocations(), + kind: tool.kind, + }, + }; - await this.client.updateToolCall({ - toolCallId, - status: 'finished', - content: toolCallContent, + const output = await this.client.requestPermission(params); + const outcome = + output.outcome.outcome === 'cancelled' + ? 
ToolConfirmationOutcome.Cancel + : z + .nativeEnum(ToolConfirmationOutcome) + .parse(output.outcome.optionId); + + await confirmationDetails.onConfirm(outcome); + + switch (outcome) { + case ToolConfirmationOutcome.Cancel: + return errorResponse( + new Error(`Tool "${fc.name}" was canceled by the user.`), + ); + case ToolConfirmationOutcome.ProceedOnce: + case ToolConfirmationOutcome.ProceedAlways: + case ToolConfirmationOutcome.ProceedAlwaysServer: + case ToolConfirmationOutcome.ProceedAlwaysTool: + case ToolConfirmationOutcome.ModifyWithEditor: + break; + default: { + const resultOutcome: never = outcome; + throw new Error(`Unexpected: ${resultOutcome}`); + } + } + } else { + await this.sendUpdate({ + sessionUpdate: 'tool_call', + toolCallId: callId, + status: 'in_progress', + title: invocation.getDescription(), + content: [], + locations: invocation.toolLocations(), + kind: tool.kind, + }); + } + + try { + const toolResult: ToolResult = await invocation.execute(abortSignal); + const content = toToolCallContent(toolResult); + + await this.sendUpdate({ + sessionUpdate: 'tool_call_update', + toolCallId: callId, + status: 'completed', + content: content ? [content] : [], }); const durationMs = Date.now() - startTime; @@ -317,31 +449,55 @@ class GeminiAgent implements Agent { return convertToFunctionResponse(fc.name, callId, toolResult.llmContent); } catch (e) { const error = e instanceof Error ? 
e : new Error(String(e)); - if (toolCallId) { - await this.client.updateToolCall({ - toolCallId, - status: 'error', - content: { type: 'markdown', markdown: error.message }, - }); - } + + await this.sendUpdate({ + sessionUpdate: 'tool_call_update', + toolCallId: callId, + status: 'failed', + content: [ + { type: 'content', content: { type: 'text', text: error.message } }, + ], + }); + return errorResponse(error); } } - async #resolveUserMessage( - message: acp.SendUserMessageParams, + async #resolvePrompt( + message: acp.ContentBlock[], abortSignal: AbortSignal, ): Promise { - const atPathCommandParts = message.chunks.filter((part) => 'path' in part); + const parts = message.map((part) => { + switch (part.type) { + case 'text': + return { text: part.text }; + case 'resource_link': + return { + fileData: { + mimeData: part.mimeType, + name: part.name, + fileUri: part.uri, + }, + }; + case 'resource': { + return { + fileData: { + mimeData: part.resource.mimeType, + name: part.resource.uri, + fileUri: part.resource.uri, + }, + }; + } + default: { + throw new Error(`Unexpected chunk type: '${part.type}'`); + } + } + }); + + const atPathCommandParts = parts.filter((part) => 'fileData' in part); if (atPathCommandParts.length === 0) { - return message.chunks.map((chunk) => { - if ('text' in chunk) { - return { text: chunk.text }; - } else { - throw new Error('Unexpected chunk type'); - } - }); + return parts; } // Get centralized file discovery service @@ -362,8 +518,7 @@ class GeminiAgent implements Agent { } for (const atPathPart of atPathCommandParts) { - const pathName = atPathPart.path; - + const pathName = atPathPart.fileData!.fileUri; // Check if path should be ignored by git if (fileDiscovery.shouldGitIgnoreFile(pathName)) { ignoredPaths.push(pathName); @@ -373,10 +528,8 @@ class GeminiAgent implements Agent { console.warn(`Path ${pathName} is ${reason}.`); continue; } - let currentPathSpec = pathName; let resolvedSuccessfully = false; - try { const absolutePath = 
path.resolve(this.config.getTargetDir(), pathName); if (isWithinRoot(absolutePath, this.config.getTargetDir())) { @@ -385,24 +538,22 @@ class GeminiAgent implements Agent { currentPathSpec = pathName.endsWith('/') ? `${pathName}**` : `${pathName}/**`; - this.#debug( + this.debug( `Path ${pathName} resolved to directory, using glob: ${currentPathSpec}`, ); } else { - this.#debug( - `Path ${pathName} resolved to file: ${currentPathSpec}`, - ); + this.debug(`Path ${pathName} resolved to file: ${currentPathSpec}`); } resolvedSuccessfully = true; } else { - this.#debug( + this.debug( `Path ${pathName} is outside the project directory. Skipping.`, ); } } catch (error) { if (isNodeError(error) && error.code === 'ENOENT') { if (this.config.getEnableRecursiveFileSearch() && globTool) { - this.#debug( + this.debug( `Path ${pathName} not found directly, attempting glob search.`, ); try { @@ -426,17 +577,17 @@ class GeminiAgent implements Agent { this.config.getTargetDir(), firstMatchAbsolute, ); - this.#debug( + this.debug( `Glob search for ${pathName} found ${firstMatchAbsolute}, using relative path: ${currentPathSpec}`, ); resolvedSuccessfully = true; } else { - this.#debug( + this.debug( `Glob search for '**/*${pathName}*' did not return a usable path. Path ${pathName} will be skipped.`, ); } } else { - this.#debug( + this.debug( `Glob search for '**/*${pathName}*' found no files or an error. Path ${pathName} will be skipped.`, ); } @@ -446,7 +597,7 @@ class GeminiAgent implements Agent { ); } } else { - this.#debug( + this.debug( `Glob tool not found. 
Path ${pathName} will be skipped.`, ); } @@ -456,23 +607,22 @@ class GeminiAgent implements Agent { ); } } - if (resolvedSuccessfully) { pathSpecsToRead.push(currentPathSpec); atPathToResolvedSpecMap.set(pathName, currentPathSpec); contentLabelsForDisplay.push(pathName); } } - // Construct the initial part of the query for the LLM let initialQueryText = ''; - for (let i = 0; i < message.chunks.length; i++) { - const chunk = message.chunks[i]; + for (let i = 0; i < parts.length; i++) { + const chunk = parts[i]; if ('text' in chunk) { initialQueryText += chunk.text; } else { // type === 'atPath' - const resolvedSpec = atPathToResolvedSpecMap.get(chunk.path); + const resolvedSpec = + chunk.fileData && atPathToResolvedSpecMap.get(chunk.fileData.fileUri); if ( i > 0 && initialQueryText.length > 0 && @@ -480,10 +630,11 @@ class GeminiAgent implements Agent { resolvedSpec ) { // Add space if previous part was text and didn't end with space, or if previous was @path - const prevPart = message.chunks[i - 1]; + const prevPart = parts[i - 1]; if ( 'text' in prevPart || - ('path' in prevPart && atPathToResolvedSpecMap.has(prevPart.path)) + ('fileData' in prevPart && + atPathToResolvedSpecMap.has(prevPart.fileData!.fileUri)) ) { initialQueryText += ' '; } @@ -497,56 +648,64 @@ class GeminiAgent implements Agent { i > 0 && initialQueryText.length > 0 && !initialQueryText.endsWith(' ') && - !chunk.path.startsWith(' ') + !chunk.fileData?.fileUri.startsWith(' ') ) { initialQueryText += ' '; } - initialQueryText += `@${chunk.path}`; + if (chunk.fileData?.fileUri) { + initialQueryText += `@${chunk.fileData.fileUri}`; + } } } } initialQueryText = initialQueryText.trim(); - // Inform user about ignored paths if (ignoredPaths.length > 0) { const ignoreType = respectGitIgnore ? 
'git-ignored' : 'custom-ignored'; - this.#debug( + this.debug( `Ignored ${ignoredPaths.length} ${ignoreType} files: ${ignoredPaths.join(', ')}`, ); } - // Fallback for lone "@" or completely invalid @-commands resulting in empty initialQueryText if (pathSpecsToRead.length === 0) { console.warn('No valid file paths found in @ commands to read.'); return [{ text: initialQueryText }]; } - const processedQueryParts: Part[] = [{ text: initialQueryText }]; - const toolArgs = { paths: pathSpecsToRead, respectGitIgnore, // Use configuration setting }; - let toolCallId: number | undefined = undefined; + const callId = `${readManyFilesTool.name}-${Date.now()}`; + try { const invocation = readManyFilesTool.build(toolArgs); - const toolCall = await this.client.pushToolCall({ - icon: readManyFilesTool.icon, - label: invocation.getDescription(), - }); - toolCallId = toolCall.id; - const result = await invocation.execute(abortSignal); - const content = toToolCallContent(result) || { - type: 'markdown', - markdown: `Successfully read: ${contentLabelsForDisplay.join(', ')}`, - }; - await this.client.updateToolCall({ - toolCallId: toolCall.id, - status: 'finished', - content, + + await this.sendUpdate({ + sessionUpdate: 'tool_call', + toolCallId: callId, + status: 'in_progress', + title: invocation.getDescription(), + content: [], + locations: invocation.toolLocations(), + kind: readManyFilesTool.kind, }); + const result = await invocation.execute(abortSignal); + const content = toToolCallContent(result) || { + type: 'content', + content: { + type: 'text', + text: `Successfully read: ${contentLabelsForDisplay.join(', ')}`, + }, + }; + await this.sendUpdate({ + sessionUpdate: 'tool_call_update', + toolCallId: callId, + status: 'completed', + content: content ? [content] : [], + }); if (Array.isArray(result.llmContent)) { const fileContentRegex = /^--- (.*?) 
---\n\n([\s\S]*?)\n\n$/; processedQueryParts.push({ @@ -576,24 +735,28 @@ class GeminiAgent implements Agent { 'read_many_files tool returned no content or empty content.', ); } - return processedQueryParts; } catch (error: unknown) { - if (toolCallId) { - await this.client.updateToolCall({ - toolCallId, - status: 'error', - content: { - type: 'markdown', - markdown: `Error reading files (${contentLabelsForDisplay.join(', ')}): ${getErrorMessage(error)}`, + await this.sendUpdate({ + sessionUpdate: 'tool_call_update', + toolCallId: callId, + status: 'failed', + content: [ + { + type: 'content', + content: { + type: 'text', + text: `Error reading files (${contentLabelsForDisplay.join(', ')}): ${getErrorMessage(error)}`, + }, }, - }); - } + ], + }); + throw error; } } - #debug(msg: string) { + debug(msg: string) { if (this.config.getDebugMode()) { console.warn(msg); } @@ -604,8 +767,8 @@ function toToolCallContent(toolResult: ToolResult): acp.ToolCallContent | null { if (toolResult.returnDisplay) { if (typeof toolResult.returnDisplay === 'string') { return { - type: 'markdown', - markdown: toolResult.returnDisplay, + type: 'content', + content: { type: 'text', text: toolResult.returnDisplay }, }; } else { return { @@ -620,57 +783,66 @@ function toToolCallContent(toolResult: ToolResult): acp.ToolCallContent | null { } } -function toAcpToolCallConfirmation( - confirmationDetails: ToolCallConfirmationDetails, -): acp.ToolCallConfirmation { - switch (confirmationDetails.type) { - case 'edit': - return { type: 'edit' }; - case 'exec': - return { - type: 'execute', - rootCommand: confirmationDetails.rootCommand, - command: confirmationDetails.command, - }; - case 'mcp': - return { - type: 'mcp', - serverName: confirmationDetails.serverName, - toolName: confirmationDetails.toolName, - toolDisplayName: confirmationDetails.toolDisplayName, - }; - case 'info': - return { - type: 'fetch', - urls: confirmationDetails.urls || [], - description: confirmationDetails.urls?.length - ? 
null - : confirmationDetails.prompt, - }; - default: { - const unreachable: never = confirmationDetails; - throw new Error(`Unexpected: ${unreachable}`); - } - } -} +const basicPermissionOptions = [ + { + optionId: ToolConfirmationOutcome.ProceedOnce, + name: 'Allow', + kind: 'allow_once', + }, + { + optionId: ToolConfirmationOutcome.Cancel, + name: 'Reject', + kind: 'reject_once', + }, +] as const; -function toToolCallOutcome( - outcome: acp.ToolCallConfirmationOutcome, -): ToolConfirmationOutcome { - switch (outcome) { - case 'allow': - return ToolConfirmationOutcome.ProceedOnce; - case 'alwaysAllow': - return ToolConfirmationOutcome.ProceedAlways; - case 'alwaysAllowMcpServer': - return ToolConfirmationOutcome.ProceedAlwaysServer; - case 'alwaysAllowTool': - return ToolConfirmationOutcome.ProceedAlwaysTool; - case 'reject': - case 'cancel': - return ToolConfirmationOutcome.Cancel; +function toPermissionOptions( + confirmation: ToolCallConfirmationDetails, +): acp.PermissionOption[] { + switch (confirmation.type) { + case 'edit': + return [ + { + optionId: ToolConfirmationOutcome.ProceedAlways, + name: 'Allow All Edits', + kind: 'allow_always', + }, + ...basicPermissionOptions, + ]; + case 'exec': + return [ + { + optionId: ToolConfirmationOutcome.ProceedAlways, + name: `Always Allow ${confirmation.rootCommand}`, + kind: 'allow_always', + }, + ...basicPermissionOptions, + ]; + case 'mcp': + return [ + { + optionId: ToolConfirmationOutcome.ProceedAlwaysServer, + name: `Always Allow ${confirmation.serverName}`, + kind: 'allow_always', + }, + { + optionId: ToolConfirmationOutcome.ProceedAlwaysTool, + name: `Always Allow ${confirmation.toolName}`, + kind: 'allow_always', + }, + ...basicPermissionOptions, + ]; + case 'info': + return [ + { + optionId: ToolConfirmationOutcome.ProceedAlways, + name: `Always Allow`, + kind: 'allow_always', + }, + ...basicPermissionOptions, + ]; default: { - const unreachable: never = outcome; + const unreachable: never = confirmation; 
throw new Error(`Unexpected: ${unreachable}`); } } diff --git a/packages/core/index.ts b/packages/core/index.ts index 65a214ae..7b75b365 100644 --- a/packages/core/index.ts +++ b/packages/core/index.ts @@ -15,3 +15,4 @@ export { IdeConnectionEvent, IdeConnectionType, } from './src/telemetry/types.js'; +export { makeFakeConfig } from './src/test-utils/config.js'; diff --git a/packages/core/package.json b/packages/core/package.json index 7b84fd01..3363a669 100644 --- a/packages/core/package.json +++ b/packages/core/package.json @@ -20,7 +20,7 @@ "dist" ], "dependencies": { - "@google/genai": "1.9.0", + "@google/genai": "1.13.0", "@modelcontextprotocol/sdk": "^1.11.0", "@opentelemetry/api": "^1.9.0", "@opentelemetry/exporter-logs-otlp-grpc": "^0.52.0", diff --git a/packages/core/src/code_assist/setup.test.ts b/packages/core/src/code_assist/setup.test.ts index c1260e3f..cba051dd 100644 --- a/packages/core/src/code_assist/setup.test.ts +++ b/packages/core/src/code_assist/setup.test.ts @@ -16,9 +16,17 @@ const mockPaidTier: GeminiUserTier = { id: UserTierId.STANDARD, name: 'paid', description: 'Paid tier', + isDefault: true, }; -describe('setupUser', () => { +const mockFreeTier: GeminiUserTier = { + id: UserTierId.FREE, + name: 'free', + description: 'Free tier', + isDefault: true, +}; + +describe('setupUser for existing user', () => { let mockLoad: ReturnType; let mockOnboardUser: ReturnType; @@ -42,7 +50,7 @@ describe('setupUser', () => { ); }); - it('should use GOOGLE_CLOUD_PROJECT when set', async () => { + it('should use GOOGLE_CLOUD_PROJECT when set and project from server is undefined', async () => { process.env.GOOGLE_CLOUD_PROJECT = 'test-project'; mockLoad.mockResolvedValue({ currentTier: mockPaidTier, @@ -57,8 +65,8 @@ describe('setupUser', () => { ); }); - it('should treat empty GOOGLE_CLOUD_PROJECT as undefined and use project from server', async () => { - process.env.GOOGLE_CLOUD_PROJECT = ''; + it('should ignore GOOGLE_CLOUD_PROJECT when project from 
server is set', async () => { + process.env.GOOGLE_CLOUD_PROJECT = 'test-project'; mockLoad.mockResolvedValue({ cloudaicompanionProject: 'server-project', currentTier: mockPaidTier, @@ -66,7 +74,7 @@ describe('setupUser', () => { const projectId = await setupUser({} as OAuth2Client); expect(CodeAssistServer).toHaveBeenCalledWith( {}, - undefined, + 'test-project', {}, '', undefined, @@ -89,3 +97,119 @@ describe('setupUser', () => { ); }); }); + +describe('setupUser for new user', () => { + let mockLoad: ReturnType; + let mockOnboardUser: ReturnType; + + beforeEach(() => { + vi.resetAllMocks(); + mockLoad = vi.fn(); + mockOnboardUser = vi.fn().mockResolvedValue({ + done: true, + response: { + cloudaicompanionProject: { + id: 'server-project', + }, + }, + }); + vi.mocked(CodeAssistServer).mockImplementation( + () => + ({ + loadCodeAssist: mockLoad, + onboardUser: mockOnboardUser, + }) as unknown as CodeAssistServer, + ); + }); + + it('should use GOOGLE_CLOUD_PROJECT when set and onboard a new paid user', async () => { + process.env.GOOGLE_CLOUD_PROJECT = 'test-project'; + mockLoad.mockResolvedValue({ + allowedTiers: [mockPaidTier], + }); + const userData = await setupUser({} as OAuth2Client); + expect(CodeAssistServer).toHaveBeenCalledWith( + {}, + 'test-project', + {}, + '', + undefined, + ); + expect(mockLoad).toHaveBeenCalled(); + expect(mockOnboardUser).toHaveBeenCalledWith({ + tierId: 'standard-tier', + cloudaicompanionProject: 'test-project', + metadata: { + ideType: 'IDE_UNSPECIFIED', + platform: 'PLATFORM_UNSPECIFIED', + pluginType: 'GEMINI', + duetProject: 'test-project', + }, + }); + expect(userData).toEqual({ + projectId: 'server-project', + userTier: 'standard-tier', + }); + }); + + it('should onboard a new free user when GOOGLE_CLOUD_PROJECT is not set', async () => { + delete process.env.GOOGLE_CLOUD_PROJECT; + mockLoad.mockResolvedValue({ + allowedTiers: [mockFreeTier], + }); + const userData = await setupUser({} as OAuth2Client); + 
expect(CodeAssistServer).toHaveBeenCalledWith( + {}, + undefined, + {}, + '', + undefined, + ); + expect(mockLoad).toHaveBeenCalled(); + expect(mockOnboardUser).toHaveBeenCalledWith({ + tierId: 'free-tier', + cloudaicompanionProject: undefined, + metadata: { + ideType: 'IDE_UNSPECIFIED', + platform: 'PLATFORM_UNSPECIFIED', + pluginType: 'GEMINI', + }, + }); + expect(userData).toEqual({ + projectId: 'server-project', + userTier: 'free-tier', + }); + }); + + it('should use GOOGLE_CLOUD_PROJECT when onboard response has no project ID', async () => { + process.env.GOOGLE_CLOUD_PROJECT = 'test-project'; + mockLoad.mockResolvedValue({ + allowedTiers: [mockPaidTier], + }); + mockOnboardUser.mockResolvedValue({ + done: true, + response: { + cloudaicompanionProject: undefined, + }, + }); + const userData = await setupUser({} as OAuth2Client); + expect(userData).toEqual({ + projectId: 'test-project', + userTier: 'standard-tier', + }); + }); + + it('should throw ProjectIdRequiredError when no project ID is available', async () => { + delete process.env.GOOGLE_CLOUD_PROJECT; + mockLoad.mockResolvedValue({ + allowedTiers: [mockPaidTier], + }); + mockOnboardUser.mockResolvedValue({ + done: true, + response: {}, + }); + await expect(setupUser({} as OAuth2Client)).rejects.toThrow( + ProjectIdRequiredError, + ); + }); +}); diff --git a/packages/core/src/code_assist/setup.ts b/packages/core/src/code_assist/setup.ts index 02c9406c..2e460c98 100644 --- a/packages/core/src/code_assist/setup.ts +++ b/packages/core/src/code_assist/setup.ts @@ -33,32 +33,58 @@ export interface UserData { * @returns the user's actual project id */ export async function setupUser(client: OAuth2Client): Promise { - let projectId = process.env.GOOGLE_CLOUD_PROJECT || undefined; + const projectId = process.env.GOOGLE_CLOUD_PROJECT || undefined; const caServer = new CodeAssistServer(client, projectId, {}, '', undefined); - - const clientMetadata: ClientMetadata = { + const coreClientMetadata: ClientMetadata = { 
ideType: 'IDE_UNSPECIFIED', platform: 'PLATFORM_UNSPECIFIED', pluginType: 'GEMINI', - duetProject: projectId, }; const loadRes = await caServer.loadCodeAssist({ cloudaicompanionProject: projectId, - metadata: clientMetadata, + metadata: { + ...coreClientMetadata, + duetProject: projectId, + }, }); - if (!projectId && loadRes.cloudaicompanionProject) { - projectId = loadRes.cloudaicompanionProject; + if (loadRes.currentTier) { + if (!loadRes.cloudaicompanionProject) { + if (projectId) { + return { + projectId, + userTier: loadRes.currentTier.id, + }; + } + throw new ProjectIdRequiredError(); + } + return { + projectId: loadRes.cloudaicompanionProject, + userTier: loadRes.currentTier.id, + }; } const tier = getOnboardTier(loadRes); - const onboardReq: OnboardUserRequest = { - tierId: tier.id, - cloudaicompanionProject: projectId, - metadata: clientMetadata, - }; + let onboardReq: OnboardUserRequest; + if (tier.id === UserTierId.FREE) { + // The free tier uses a managed google cloud project. Setting a project in the `onboardUser` request causes a `Precondition Failed` error. + onboardReq = { + tierId: tier.id, + cloudaicompanionProject: undefined, + metadata: coreClientMetadata, + }; + } else { + onboardReq = { + tierId: tier.id, + cloudaicompanionProject: projectId, + metadata: { + ...coreClientMetadata, + duetProject: projectId, + }, + }; + } // Poll onboardUser until long running operation is complete. 
let lroRes = await caServer.onboardUser(onboardReq); @@ -67,20 +93,23 @@ export async function setupUser(client: OAuth2Client): Promise { lroRes = await caServer.onboardUser(onboardReq); } - if (!lroRes.response?.cloudaicompanionProject?.id && !projectId) { + if (!lroRes.response?.cloudaicompanionProject?.id) { + if (projectId) { + return { + projectId, + userTier: tier.id, + }; + } throw new ProjectIdRequiredError(); } return { - projectId: lroRes.response?.cloudaicompanionProject?.id || projectId!, + projectId: lroRes.response.cloudaicompanionProject.id, userTier: tier.id, }; } function getOnboardTier(res: LoadCodeAssistResponse): GeminiUserTier { - if (res.currentTier) { - return res.currentTier; - } for (const tier of res.allowedTiers || []) { if (tier.isDefault) { return tier; diff --git a/packages/core/src/config/config.test.ts b/packages/core/src/config/config.test.ts index 6d6789c7..eecfcbc1 100644 --- a/packages/core/src/config/config.test.ts +++ b/packages/core/src/config/config.test.ts @@ -4,7 +4,8 @@ * SPDX-License-Identifier: Apache-2.0 */ -import { describe, it, expect, vi, beforeEach, Mock } from 'vitest'; +import { describe, it, expect, vi, beforeEach } from 'vitest'; +import { Mock } from 'vitest'; import { Config, ConfigParameters, SandboxConfig } from './config.js'; import * as path from 'path'; import { setGeminiMdFilename as mockSetGeminiMdFilename } from '../tools/memoryTool.js'; @@ -14,10 +15,12 @@ import { } from '../telemetry/index.js'; import { AuthType, + ContentGeneratorConfig, createContentGeneratorConfig, } from '../core/contentGenerator.js'; import { GeminiClient } from '../core/client.js'; import { GitService } from '../services/gitService.js'; +import { ClearcutLogger } from '../telemetry/clearcut-logger/clearcut-logger.js'; vi.mock('fs', async (importOriginal) => { const actual = await importOriginal(); @@ -131,11 +134,16 @@ describe('Server Config (config.ts)', () => { telemetry: TELEMETRY_SETTINGS, sessionId: SESSION_ID, model: 
MODEL, + usageStatisticsEnabled: false, }; beforeEach(() => { // Reset mocks if necessary vi.clearAllMocks(); + vi.spyOn( + ClearcutLogger.prototype, + 'logStartSessionEvent', + ).mockImplementation(() => undefined); }); describe('initialize', () => { @@ -254,6 +262,7 @@ describe('Server Config (config.ts)', () => { // Verify that history was restored to the new client expect(mockNewClient.setHistory).toHaveBeenCalledWith( mockExistingHistory, + { stripThoughts: false }, ); }); @@ -287,6 +296,92 @@ describe('Server Config (config.ts)', () => { // Verify that setHistory was not called since there was no existing history expect(mockNewClient.setHistory).not.toHaveBeenCalled(); }); + + it('should strip thoughts when switching from GenAI to Vertex', async () => { + const config = new Config(baseParams); + const mockContentConfig = { + model: 'gemini-pro', + apiKey: 'test-key', + authType: AuthType.USE_GEMINI, + }; + ( + config as unknown as { contentGeneratorConfig: ContentGeneratorConfig } + ).contentGeneratorConfig = mockContentConfig; + + (createContentGeneratorConfig as Mock).mockReturnValue({ + ...mockContentConfig, + authType: AuthType.LOGIN_WITH_GOOGLE, + }); + + const mockExistingHistory = [ + { role: 'user', parts: [{ text: 'Hello' }] }, + ]; + const mockExistingClient = { + isInitialized: vi.fn().mockReturnValue(true), + getHistory: vi.fn().mockReturnValue(mockExistingHistory), + }; + const mockNewClient = { + isInitialized: vi.fn().mockReturnValue(true), + getHistory: vi.fn().mockReturnValue([]), + setHistory: vi.fn(), + initialize: vi.fn().mockResolvedValue(undefined), + }; + + ( + config as unknown as { geminiClient: typeof mockExistingClient } + ).geminiClient = mockExistingClient; + (GeminiClient as Mock).mockImplementation(() => mockNewClient); + + await config.refreshAuth(AuthType.LOGIN_WITH_GOOGLE); + + expect(mockNewClient.setHistory).toHaveBeenCalledWith( + mockExistingHistory, + { stripThoughts: true }, + ); + }); + + it('should not strip thoughts 
when switching from Vertex to GenAI', async () => { + const config = new Config(baseParams); + const mockContentConfig = { + model: 'gemini-pro', + apiKey: 'test-key', + authType: AuthType.LOGIN_WITH_GOOGLE, + }; + ( + config as unknown as { contentGeneratorConfig: ContentGeneratorConfig } + ).contentGeneratorConfig = mockContentConfig; + + (createContentGeneratorConfig as Mock).mockReturnValue({ + ...mockContentConfig, + authType: AuthType.USE_GEMINI, + }); + + const mockExistingHistory = [ + { role: 'user', parts: [{ text: 'Hello' }] }, + ]; + const mockExistingClient = { + isInitialized: vi.fn().mockReturnValue(true), + getHistory: vi.fn().mockReturnValue(mockExistingHistory), + }; + const mockNewClient = { + isInitialized: vi.fn().mockReturnValue(true), + getHistory: vi.fn().mockReturnValue([]), + setHistory: vi.fn(), + initialize: vi.fn().mockResolvedValue(undefined), + }; + + ( + config as unknown as { geminiClient: typeof mockExistingClient } + ).geminiClient = mockExistingClient; + (GeminiClient as Mock).mockImplementation(() => mockNewClient); + + await config.refreshAuth(AuthType.USE_GEMINI); + + expect(mockNewClient.setHistory).toHaveBeenCalledWith( + mockExistingHistory, + { stripThoughts: false }, + ); + }); }); it('Config constructor should store userMemory correctly', () => { @@ -384,6 +479,39 @@ describe('Server Config (config.ts)', () => { expect(fileService).toBeDefined(); }); + describe('Usage Statistics', () => { + it('defaults usage statistics to enabled if not specified', () => { + const config = new Config({ + ...baseParams, + usageStatisticsEnabled: undefined, + }); + + expect(config.getUsageStatisticsEnabled()).toBe(true); + }); + + it.each([{ enabled: true }, { enabled: false }])( + 'sets usage statistics based on the provided value (enabled: $enabled)', + ({ enabled }) => { + const config = new Config({ + ...baseParams, + usageStatisticsEnabled: enabled, + }); + expect(config.getUsageStatisticsEnabled()).toBe(enabled); + }, + ); + + 
it('logs the session start event', () => { + new Config({ + ...baseParams, + usageStatisticsEnabled: true, + }); + + expect( + ClearcutLogger.prototype.logStartSessionEvent, + ).toHaveBeenCalledOnce(); + }); + }); + describe('Telemetry Settings', () => { it('should return default telemetry target if not provided', () => { const params: ConfigParameters = { diff --git a/packages/core/src/config/config.ts b/packages/core/src/config/config.ts index b1a2a096..621a1769 100644 --- a/packages/core/src/config/config.ts +++ b/packages/core/src/config/config.ts @@ -193,13 +193,12 @@ export interface ConfigParameters { extensionContextFilePaths?: string[]; maxSessionTurns?: number; sessionTokenLimit?: number; - experimentalAcp?: boolean; + experimentalZedIntegration?: boolean; listExtensions?: boolean; extensions?: GeminiCLIExtension[]; blockedMcpServers?: Array<{ name: string; extensionName: string }>; noBrowser?: boolean; summarizeToolOutput?: Record; - ideModeFeature?: boolean; folderTrustFeature?: boolean; folderTrust?: boolean; ideMode?: boolean; @@ -220,6 +219,7 @@ export interface ConfigParameters { tavilyApiKey?: string; chatCompression?: ChatCompressionSettings; interactive?: boolean; + trustedFolder?: boolean; } export class Config { @@ -263,7 +263,6 @@ export class Config { private readonly model: string; private readonly extensionContextFilePaths: string[]; private readonly noBrowser: boolean; - private readonly ideModeFeature: boolean; private readonly folderTrustFeature: boolean; private readonly folderTrust: boolean; private ideMode: boolean; @@ -287,7 +286,6 @@ export class Config { private readonly summarizeToolOutput: | Record | undefined; - private readonly experimentalAcp: boolean = false; private readonly enableOpenAILogging: boolean; private readonly sampling_params?: Record; private readonly contentGenerator?: { @@ -295,10 +293,12 @@ export class Config { maxRetries?: number; }; private readonly cliVersion?: string; + private readonly 
experimentalZedIntegration: boolean = false; private readonly loadMemoryFromIncludeDirectories: boolean = false; private readonly tavilyApiKey?: string; private readonly chatCompression: ChatCompressionSettings | undefined; private readonly interactive: boolean; + private readonly trustedFolder: boolean | undefined; private initialized: boolean = false; constructor(params: ConfigParameters) { @@ -354,13 +354,13 @@ export class Config { this.extensionContextFilePaths = params.extensionContextFilePaths ?? []; this.maxSessionTurns = params.maxSessionTurns ?? -1; this.sessionTokenLimit = params.sessionTokenLimit ?? -1; - this.experimentalAcp = params.experimentalAcp ?? false; + this.experimentalZedIntegration = + params.experimentalZedIntegration ?? false; this.listExtensions = params.listExtensions ?? false; this._extensions = params.extensions ?? []; this._blockedMcpServers = params.blockedMcpServers ?? []; this.noBrowser = params.noBrowser ?? false; this.summarizeToolOutput = params.summarizeToolOutput; - this.ideModeFeature = params.ideModeFeature ?? false; this.folderTrustFeature = params.folderTrustFeature ?? false; this.folderTrust = params.folderTrust ?? false; this.ideMode = params.ideMode ?? false; @@ -375,6 +375,7 @@ export class Config { params.loadMemoryFromIncludeDirectories ?? false; this.chatCompression = params.chatCompression; this.interactive = params.interactive ?? 
false; + this.trustedFolder = params.trustedFolder; // Web search this.tavilyApiKey = params.tavilyApiKey; @@ -430,13 +431,21 @@ export class Config { const newGeminiClient = new GeminiClient(this); await newGeminiClient.initialize(newContentGeneratorConfig); + // Vertex and Genai have incompatible encryption and sending history with + // throughtSignature from Genai to Vertex will fail, we need to strip them + const fromGenaiToVertex = + this.contentGeneratorConfig?.authType === AuthType.USE_GEMINI && + authMethod === AuthType.LOGIN_WITH_GOOGLE; + // Only assign to instance properties after successful initialization this.contentGeneratorConfig = newContentGeneratorConfig; this.geminiClient = newGeminiClient; // Restore the conversation history to the new client if (existingHistory.length > 0) { - this.geminiClient.setHistory(existingHistory); + this.geminiClient.setHistory(existingHistory, { + stripThoughts: fromGenaiToVertex, + }); } // Reset the session flag since we're explicitly changing auth and using default model @@ -684,8 +693,8 @@ export class Config { return this.extensionContextFilePaths; } - getExperimentalAcp(): boolean { - return this.experimentalAcp; + getExperimentalZedIntegration(): boolean { + return this.experimentalZedIntegration; } getListExtensions(): boolean { @@ -719,10 +728,6 @@ export class Config { return this.tavilyApiKey; } - getIdeModeFeature(): boolean { - return this.ideModeFeature; - } - getIdeClient(): IdeClient { return this.ideClient; } @@ -739,6 +744,10 @@ export class Config { return this.folderTrust; } + isTrustedFolder(): boolean | undefined { + return this.trustedFolder; + } + setIdeMode(value: boolean): void { this.ideMode = value; } diff --git a/packages/core/src/core/client.test.ts b/packages/core/src/core/client.test.ts index af8249e9..7db24360 100644 --- a/packages/core/src/core/client.test.ts +++ b/packages/core/src/core/client.test.ts @@ -708,7 +708,7 @@ describe('Gemini Client (client.ts)', () => { }); 
describe('sendMessageStream', () => { - it('should include editor context when ideModeFeature is enabled', async () => { + it('should include editor context when ideMode is enabled', async () => { // Arrange vi.mocked(ideContext.getIdeContext).mockReturnValue({ workspaceState: { @@ -732,7 +732,7 @@ describe('Gemini Client (client.ts)', () => { }, }); - vi.spyOn(client['config'], 'getIdeModeFeature').mockReturnValue(true); + vi.spyOn(client['config'], 'getIdeMode').mockReturnValue(true); const mockStream = (async function* () { yield { type: 'content', value: 'Hello' }; @@ -792,7 +792,7 @@ ${JSON.stringify( }); }); - it('should not add context if ideModeFeature is enabled but no open files', async () => { + it('should not add context if ideMode is enabled but no open files', async () => { // Arrange vi.mocked(ideContext.getIdeContext).mockReturnValue({ workspaceState: { @@ -800,7 +800,7 @@ ${JSON.stringify( }, }); - vi.spyOn(client['config'], 'getIdeModeFeature').mockReturnValue(true); + vi.spyOn(client['config'], 'getIdeMode').mockReturnValue(true); const mockStream = (async function* () { yield { type: 'content', value: 'Hello' }; @@ -839,7 +839,7 @@ ${JSON.stringify( ); }); - it('should add context if ideModeFeature is enabled and there is one active file', async () => { + it('should add context if ideMode is enabled and there is one active file', async () => { // Arrange vi.mocked(ideContext.getIdeContext).mockReturnValue({ workspaceState: { @@ -855,7 +855,7 @@ ${JSON.stringify( }, }); - vi.spyOn(client['config'], 'getIdeModeFeature').mockReturnValue(true); + vi.spyOn(client['config'], 'getIdeMode').mockReturnValue(true); const mockStream = (async function* () { yield { type: 'content', value: 'Hello' }; @@ -914,7 +914,7 @@ ${JSON.stringify( }); }); - it('should add context if ideModeFeature is enabled and there are open files but no active file', async () => { + it('should add context if ideMode is enabled and there are open files but no active file', async () 
=> { // Arrange vi.mocked(ideContext.getIdeContext).mockReturnValue({ workspaceState: { @@ -931,7 +931,7 @@ ${JSON.stringify( }, }); - vi.spyOn(client['config'], 'getIdeModeFeature').mockReturnValue(true); + vi.spyOn(client['config'], 'getIdeMode').mockReturnValue(true); const mockStream = (async function* () { yield { type: 'content', value: 'Hello' }; @@ -1267,7 +1267,7 @@ ${JSON.stringify( beforeEach(() => { client['forceFullIdeContext'] = false; // Reset before each delta test vi.spyOn(client, 'tryCompressChat').mockResolvedValue(null); - vi.spyOn(client['config'], 'getIdeModeFeature').mockReturnValue(true); + vi.spyOn(client['config'], 'getIdeMode').mockReturnValue(true); mockTurnRunFn.mockReturnValue(mockStream); const mockChat: Partial = { @@ -1637,4 +1637,73 @@ ${JSON.stringify( ); }); }); + + describe('setHistory', () => { + it('should strip thought signatures when stripThoughts is true', () => { + const mockChat = { + setHistory: vi.fn(), + }; + client['chat'] = mockChat as unknown as GeminiChat; + + const historyWithThoughts: Content[] = [ + { + role: 'user', + parts: [{ text: 'hello' }], + }, + { + role: 'model', + parts: [ + { text: 'thinking...', thoughtSignature: 'thought-123' }, + { + functionCall: { name: 'test', args: {} }, + thoughtSignature: 'thought-456', + }, + ], + }, + ]; + + client.setHistory(historyWithThoughts, { stripThoughts: true }); + + const expectedHistory: Content[] = [ + { + role: 'user', + parts: [{ text: 'hello' }], + }, + { + role: 'model', + parts: [ + { text: 'thinking...' 
}, + { functionCall: { name: 'test', args: {} } }, + ], + }, + ]; + + expect(mockChat.setHistory).toHaveBeenCalledWith(expectedHistory); + }); + + it('should not strip thought signatures when stripThoughts is false', () => { + const mockChat = { + setHistory: vi.fn(), + }; + client['chat'] = mockChat as unknown as GeminiChat; + + const historyWithThoughts: Content[] = [ + { + role: 'user', + parts: [{ text: 'hello' }], + }, + { + role: 'model', + parts: [ + { text: 'thinking...', thoughtSignature: 'thought-123' }, + { text: 'ok', thoughtSignature: 'thought-456' }, + ], + }, + ]; + + client.setHistory(historyWithThoughts, { stripThoughts: false }); + + expect(mockChat.setHistory).toHaveBeenCalledWith(historyWithThoughts); + }); + }); }); diff --git a/packages/core/src/core/client.ts b/packages/core/src/core/client.ts index f190df10..33235a67 100644 --- a/packages/core/src/core/client.ts +++ b/packages/core/src/core/client.ts @@ -162,8 +162,32 @@ export class GeminiClient { return this.getChat().getHistory(); } - setHistory(history: Content[]) { - this.getChat().setHistory(history); + setHistory( + history: Content[], + { stripThoughts = false }: { stripThoughts?: boolean } = {}, + ) { + const historyToSet = stripThoughts + ? 
history.map((content) => { + const newContent = { ...content }; + if (newContent.parts) { + newContent.parts = newContent.parts.map((part) => { + if ( + part && + typeof part === 'object' && + 'thoughtSignature' in part + ) { + const newPart = { ...part }; + delete (newPart as { thoughtSignature?: string }) + .thoughtSignature; + return newPart; + } + return part; + }); + } + return newContent; + }) + : history; + this.getChat().setHistory(historyToSet); this.forceFullIdeContext = true; } @@ -485,7 +509,7 @@ export class GeminiClient { } } - if (this.config.getIdeModeFeature() && this.config.getIdeMode()) { + if (this.config.getIdeMode()) { const { contextParts, newIdeContext } = this.getIdeContextParts( this.forceFullIdeContext || this.getHistory().length === 0, ); diff --git a/packages/core/src/core/contentGenerator.ts b/packages/core/src/core/contentGenerator.ts index 2c90e9c6..ed6a8b66 100644 --- a/packages/core/src/core/contentGenerator.ts +++ b/packages/core/src/core/contentGenerator.ts @@ -16,7 +16,7 @@ import { import { createCodeAssistContentGenerator } from '../code_assist/codeAssist.js'; import { DEFAULT_GEMINI_MODEL, DEFAULT_QWEN_MODEL } from '../config/models.js'; import { Config } from '../config/config.js'; -import { getEffectiveModel } from './modelCheck.js'; + import { UserTierId } from '../code_assist/types.js'; import { LoggingContentGenerator } from './loggingContentGenerator.js'; @@ -106,11 +106,6 @@ export function createContentGeneratorConfig( if (authType === AuthType.USE_GEMINI && geminiApiKey) { contentGeneratorConfig.apiKey = geminiApiKey; contentGeneratorConfig.vertexai = false; - getEffectiveModel( - contentGeneratorConfig.apiKey, - contentGeneratorConfig.model, - contentGeneratorConfig.proxy, - ); return contentGeneratorConfig; } diff --git a/packages/core/src/core/coreToolScheduler.test.ts b/packages/core/src/core/coreToolScheduler.test.ts index a3a25707..71b2d64c 100644 --- a/packages/core/src/core/coreToolScheduler.test.ts +++ 
b/packages/core/src/core/coreToolScheduler.test.ts @@ -9,7 +9,6 @@ import { describe, it, expect, vi } from 'vitest'; import { CoreToolScheduler, ToolCall, - ValidatingToolCall, convertToFunctionResponse, } from './coreToolScheduler.js'; import { @@ -19,7 +18,7 @@ import { ToolConfirmationPayload, ToolResult, Config, - Icon, + Kind, ApprovalMode, } from '../index.js'; import { Part, PartListUnion } from '@google/genai'; @@ -54,7 +53,9 @@ class MockModifiableTool }; } - async shouldConfirmExecute(): Promise { + override async shouldConfirmExecute(): Promise< + ToolCallConfirmationDetails | false + > { if (this.shouldConfirm) { return { type: 'edit', @@ -121,8 +122,6 @@ describe('CoreToolScheduler', () => { abortController.abort(); await scheduler.schedule([request], abortController.signal); - const _waitingCall = onToolCallsUpdate.mock - .calls[1][0][0] as ValidatingToolCall; const confirmationDetails = await mockTool.shouldConfirmExecute( {}, abortController.signal, @@ -389,12 +388,12 @@ describe('CoreToolScheduler edit cancellation', () => { 'mockEditTool', 'mockEditTool', 'A mock edit tool', - Icon.Pencil, + Kind.Edit, {}, ); } - async shouldConfirmExecute( + override async shouldConfirmExecute( _params: Record, _abortSignal: AbortSignal, ): Promise { diff --git a/packages/core/src/core/modelCheck.ts b/packages/core/src/core/modelCheck.ts deleted file mode 100644 index 78530232..00000000 --- a/packages/core/src/core/modelCheck.ts +++ /dev/null @@ -1,24 +0,0 @@ -/** - * @license - * Copyright 2025 Google LLC - * SPDX-License-Identifier: Apache-2.0 - */ - -// 移陀ζœͺδ½Ώη”¨ηš„ε―Όε…₯ - -/** - * Checks if the default "pro" model is rate-limited and returns a fallback "flash" - * model if necessary. This function is designed to be silent. - * @param apiKey The API key to use for the check. - * @param currentConfiguredModel The model currently configured in settings. 
- * @returns An object indicating the model to use, whether a switch occurred, - * and the original model if a switch happened. - */ -export async function getEffectiveModel( - _apiKey: string, - currentConfiguredModel: string, - _proxy: string | undefined, -): Promise { - // Disable Google API Model Check - return currentConfiguredModel; -} diff --git a/packages/core/src/ide/detect-ide.test.ts b/packages/core/src/ide/detect-ide.test.ts new file mode 100644 index 00000000..85249ad6 --- /dev/null +++ b/packages/core/src/ide/detect-ide.test.ts @@ -0,0 +1,68 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { describe, it, expect, afterEach, vi } from 'vitest'; +import { detectIde, DetectedIde } from './detect-ide.js'; + +describe('detectIde', () => { + afterEach(() => { + vi.unstubAllEnvs(); + }); + + it.each([ + { + env: {}, + expected: DetectedIde.VSCode, + }, + { + env: { __COG_BASHRC_SOURCED: '1' }, + expected: DetectedIde.Devin, + }, + { + env: { REPLIT_USER: 'test' }, + expected: DetectedIde.Replit, + }, + { + env: { CURSOR_TRACE_ID: 'test' }, + expected: DetectedIde.Cursor, + }, + { + env: { CODESPACES: 'true' }, + expected: DetectedIde.Codespaces, + }, + { + env: { EDITOR_IN_CLOUD_SHELL: 'true' }, + expected: DetectedIde.CloudShell, + }, + { + env: { CLOUD_SHELL: 'true' }, + expected: DetectedIde.CloudShell, + }, + { + env: { TERM_PRODUCT: 'Trae' }, + expected: DetectedIde.Trae, + }, + { + env: { FIREBASE_DEPLOY_AGENT: 'true' }, + expected: DetectedIde.FirebaseStudio, + }, + { + env: { MONOSPACE_ENV: 'true' }, + expected: DetectedIde.FirebaseStudio, + }, + ])('detects the IDE for $expected', ({ env, expected }) => { + vi.stubEnv('TERM_PROGRAM', 'vscode'); + for (const [key, value] of Object.entries(env)) { + vi.stubEnv(key, value); + } + expect(detectIde()).toBe(expected); + }); + + it('returns undefined for non-vscode', () => { + vi.stubEnv('TERM_PROGRAM', 'definitely-not-vscode'); + 
expect(detectIde()).toBeUndefined(); + }); +}); diff --git a/packages/core/src/ide/detect-ide.ts b/packages/core/src/ide/detect-ide.ts index 759c4103..5cc3cb56 100644 --- a/packages/core/src/ide/detect-ide.ts +++ b/packages/core/src/ide/detect-ide.ts @@ -5,34 +5,54 @@ */ export enum DetectedIde { + Devin = 'devin', + Replit = 'replit', VSCode = 'vscode', - VSCodium = 'vscodium', Cursor = 'cursor', CloudShell = 'cloudshell', Codespaces = 'codespaces', - Windsurf = 'windsurf', FirebaseStudio = 'firebasestudio', Trae = 'trae', } -export function getIdeDisplayName(ide: DetectedIde): string { +export interface IdeInfo { + displayName: string; +} + +export function getIdeInfo(ide: DetectedIde): IdeInfo { switch (ide) { + case DetectedIde.Devin: + return { + displayName: 'Devin', + }; + case DetectedIde.Replit: + return { + displayName: 'Replit', + }; case DetectedIde.VSCode: - return 'VS Code'; - case DetectedIde.VSCodium: - return 'VSCodium'; + return { + displayName: 'VS Code', + }; case DetectedIde.Cursor: - return 'Cursor'; + return { + displayName: 'Cursor', + }; case DetectedIde.CloudShell: - return 'Cloud Shell'; + return { + displayName: 'Cloud Shell', + }; case DetectedIde.Codespaces: - return 'GitHub Codespaces'; - case DetectedIde.Windsurf: - return 'Windsurf'; + return { + displayName: 'GitHub Codespaces', + }; case DetectedIde.FirebaseStudio: - return 'Firebase Studio'; + return { + displayName: 'Firebase Studio', + }; case DetectedIde.Trae: - return 'Trae'; + return { + displayName: 'Trae', + }; default: { // This ensures that if a new IDE is added to the enum, we get a compile-time error. 
const exhaustiveCheck: never = ide; @@ -46,19 +66,25 @@ export function detectIde(): DetectedIde | undefined { if (process.env.TERM_PROGRAM !== 'vscode') { return undefined; } + if (process.env.__COG_BASHRC_SOURCED) { + return DetectedIde.Devin; + } + if (process.env.REPLIT_USER) { + return DetectedIde.Replit; + } if (process.env.CURSOR_TRACE_ID) { return DetectedIde.Cursor; } if (process.env.CODESPACES) { return DetectedIde.Codespaces; } - if (process.env.EDITOR_IN_CLOUD_SHELL) { + if (process.env.EDITOR_IN_CLOUD_SHELL || process.env.CLOUD_SHELL) { return DetectedIde.CloudShell; } if (process.env.TERM_PRODUCT === 'Trae') { return DetectedIde.Trae; } - if (process.env.FIREBASE_DEPLOY_AGENT) { + if (process.env.FIREBASE_DEPLOY_AGENT || process.env.MONOSPACE_ENV) { return DetectedIde.FirebaseStudio; } return DetectedIde.VSCode; diff --git a/packages/core/src/ide/ide-client.ts b/packages/core/src/ide/ide-client.ts index 1d369385..94ebe82b 100644 --- a/packages/core/src/ide/ide-client.ts +++ b/packages/core/src/ide/ide-client.ts @@ -6,11 +6,7 @@ import * as fs from 'node:fs'; import * as path from 'node:path'; -import { - detectIde, - DetectedIde, - getIdeDisplayName, -} from '../ide/detect-ide.js'; +import { detectIde, DetectedIde, getIdeInfo } from '../ide/detect-ide.js'; import { ideContext, IdeContextNotificationSchema, @@ -68,7 +64,7 @@ export class IdeClient { private constructor() { this.currentIde = detectIde(); if (this.currentIde) { - this.currentIdeDisplayName = getIdeDisplayName(this.currentIde); + this.currentIdeDisplayName = getIdeInfo(this.currentIde).displayName; } } @@ -86,7 +82,7 @@ export class IdeClient { `IDE integration is not supported in your current environment. 
To use this feature, run Qwen Code in one of these supported IDEs: ${Object.values( DetectedIde, ) - .map((ide) => getIdeDisplayName(ide)) + .map((ide) => getIdeInfo(ide).displayName) .join(', ')}`, false, ); diff --git a/packages/core/src/index.ts b/packages/core/src/index.ts index 3a1b0c3a..2914f200 100644 --- a/packages/core/src/index.ts +++ b/packages/core/src/index.ts @@ -42,6 +42,7 @@ export * from './utils/systemEncoding.js'; export * from './utils/textUtils.js'; export * from './utils/formatters.js'; export * from './utils/filesearch/fileSearch.js'; +export * from './utils/errorParsing.js'; // Export services export * from './services/fileDiscoveryService.js'; @@ -51,8 +52,8 @@ export * from './services/gitService.js'; export * from './ide/ide-client.js'; export * from './ide/ideContext.js'; export * from './ide/ide-installer.js'; -export { getIdeDisplayName, DetectedIde } from './ide/detect-ide.js'; export * from './ide/constants.js'; +export { getIdeInfo, DetectedIde, IdeInfo } from './ide/detect-ide.js'; // Export Shell Execution Service export * from './services/shellExecutionService.js'; diff --git a/packages/core/src/mcp/oauth-provider.ts b/packages/core/src/mcp/oauth-provider.ts index b876655b..05b57440 100644 --- a/packages/core/src/mcp/oauth-provider.ts +++ b/packages/core/src/mcp/oauth-provider.ts @@ -91,7 +91,6 @@ export class MCPOAuthProvider { private static readonly REDIRECT_PORT = 7777; private static readonly REDIRECT_PATH = '/oauth/callback'; private static readonly HTTP_OK = 200; - private static readonly HTTP_REDIRECT = 302; /** * Register a client dynamically with the OAuth server. 
diff --git a/packages/core/src/mocks/msw.ts b/packages/core/src/mocks/msw.ts new file mode 100644 index 00000000..4bf93138 --- /dev/null +++ b/packages/core/src/mocks/msw.ts @@ -0,0 +1,9 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { setupServer } from 'msw/node'; + +export const server = setupServer(); diff --git a/packages/core/src/telemetry/clearcut-logger/clearcut-logger.test.ts b/packages/core/src/telemetry/clearcut-logger/clearcut-logger.test.ts index f955eb5a..f2ce4d19 100644 --- a/packages/core/src/telemetry/clearcut-logger/clearcut-logger.test.ts +++ b/packages/core/src/telemetry/clearcut-logger/clearcut-logger.test.ts @@ -4,176 +4,290 @@ * SPDX-License-Identifier: Apache-2.0 */ -import { vi, describe, it, expect, beforeEach, afterEach } from 'vitest'; -import * as https from 'https'; -import { ClientRequest, IncomingMessage } from 'http'; -import { Readable, Writable } from 'stream'; - import { - ClearcutLogger, - LogResponse, - LogEventEntry, -} from './clearcut-logger.js'; -import { Config } from '../../config/config.js'; + vi, + describe, + it, + expect, + afterEach, + beforeAll, + afterAll, +} from 'vitest'; + +import { ClearcutLogger, LogEventEntry, TEST_ONLY } from './clearcut-logger.js'; +import { ConfigParameters } from '../../config/config.js'; import * as userAccount from '../../utils/user_account.js'; import * as userId from '../../utils/user_id.js'; +import { EventMetadataKey } from './event-metadata-key.js'; +import { makeFakeConfig } from '../../test-utils/config.js'; +import { http, HttpResponse } from 'msw'; +import { server } from '../../mocks/msw.js'; -// Mock dependencies -vi.mock('https-proxy-agent'); -vi.mock('https'); vi.mock('../../utils/user_account'); vi.mock('../../utils/user_id'); -const mockHttps = vi.mocked(https); const mockUserAccount = vi.mocked(userAccount); const mockUserId = vi.mocked(userId); -describe('ClearcutLogger', () => { - let mockConfig: Config; - let 
logger: ClearcutLogger | undefined; +// TODO(richieforeman): Consider moving this to test setup globally. +beforeAll(() => { + server.listen({}); +}); +afterEach(() => { + server.resetHandlers(); +}); + +afterAll(() => { + server.close(); +}); + +describe('ClearcutLogger', () => { + const NEXT_WAIT_MS = 1234; + const CLEARCUT_URL = 'https://play.googleapis.com/log'; + const MOCK_DATE = new Date('2025-01-02T00:00:00.000Z'); + const EXAMPLE_RESPONSE = `["${NEXT_WAIT_MS}",null,[[["ANDROID_BACKUP",0],["BATTERY_STATS",0],["SMART_SETUP",0],["TRON",0]],-3334737594024971225],[]]`; // A helper to get the internal events array for testing const getEvents = (l: ClearcutLogger): LogEventEntry[][] => l['events'].toArray() as LogEventEntry[][]; const getEventsSize = (l: ClearcutLogger): number => l['events'].size; - const getMaxEvents = (l: ClearcutLogger): number => l['max_events']; - - const getMaxRetryEvents = (l: ClearcutLogger): number => - l['max_retry_events']; - const requeueFailedEvents = (l: ClearcutLogger, events: LogEventEntry[][]) => l['requeueFailedEvents'](events); - beforeEach(() => { - vi.useFakeTimers(); - vi.setSystemTime(new Date()); - - mockConfig = { - getUsageStatisticsEnabled: vi.fn().mockReturnValue(true), - getDebugMode: vi.fn().mockReturnValue(false), - getSessionId: vi.fn().mockReturnValue('test-session-id'), - getProxy: vi.fn().mockReturnValue(undefined), - } as unknown as Config; - - mockUserAccount.getCachedGoogleAccount.mockReturnValue('test@google.com'); - mockUserAccount.getLifetimeGoogleAccounts.mockReturnValue(1); - mockUserId.getInstallationId.mockReturnValue('test-installation-id'); - - logger = ClearcutLogger.getInstance(mockConfig); - expect(logger).toBeDefined(); + afterEach(() => { + vi.unstubAllEnvs(); }); + function setup({ + config = {} as Partial, + lifetimeGoogleAccounts = 1, + cachedGoogleAccount = 'test@google.com', + installationId = 'test-installation-id', + } = {}) { + server.resetHandlers( + http.post(CLEARCUT_URL, () => 
HttpResponse.text(EXAMPLE_RESPONSE)), + ); + + vi.useFakeTimers(); + vi.setSystemTime(MOCK_DATE); + + const loggerConfig = makeFakeConfig({ + ...config, + }); + ClearcutLogger.clearInstance(); + + mockUserAccount.getCachedGoogleAccount.mockReturnValue(cachedGoogleAccount); + mockUserAccount.getLifetimeGoogleAccounts.mockReturnValue( + lifetimeGoogleAccounts, + ); + mockUserId.getInstallationId.mockReturnValue(installationId); + + const logger = ClearcutLogger.getInstance(loggerConfig); + + return { logger, loggerConfig }; + } + afterEach(() => { ClearcutLogger.clearInstance(); vi.useRealTimers(); vi.restoreAllMocks(); }); - it('should not return an instance if usage statistics are disabled', () => { - ClearcutLogger.clearInstance(); - vi.spyOn(mockConfig, 'getUsageStatisticsEnabled').mockReturnValue(false); - const disabledLogger = ClearcutLogger.getInstance(mockConfig); - expect(disabledLogger).toBeUndefined(); + describe('getInstance', () => { + it.each([ + { usageStatisticsEnabled: false, expectedValue: undefined }, + { + usageStatisticsEnabled: true, + expectedValue: expect.any(ClearcutLogger), + }, + ])( + 'returns an instance if usage statistics are enabled', + ({ usageStatisticsEnabled, expectedValue }) => { + ClearcutLogger.clearInstance(); + const { logger } = setup({ + config: { + usageStatisticsEnabled, + }, + }); + expect(logger).toEqual(expectedValue); + }, + ); + + it('is a singleton', () => { + ClearcutLogger.clearInstance(); + const { loggerConfig } = setup(); + const logger1 = ClearcutLogger.getInstance(loggerConfig); + const logger2 = ClearcutLogger.getInstance(loggerConfig); + expect(logger1).toBe(logger2); + }); + }); + + describe('createLogEvent', () => { + it('logs the total number of google accounts', () => { + const { logger } = setup({ + lifetimeGoogleAccounts: 9001, + }); + + const event = logger?.createLogEvent('abc', []); + + expect(event?.event_metadata[0][0]).toEqual({ + gemini_cli_key: 
EventMetadataKey.GEMINI_CLI_GOOGLE_ACCOUNTS_COUNT, + value: '9001', + }); + }); + + it('logs the current surface from a github action', () => { + const { logger } = setup({}); + + vi.stubEnv('GITHUB_SHA', '8675309'); + + const event = logger?.createLogEvent('abc', []); + + expect(event?.event_metadata[0][1]).toEqual({ + gemini_cli_key: EventMetadataKey.GEMINI_CLI_SURFACE, + value: 'GitHub', + }); + }); + + it('honors the value from env.SURFACE over all others', () => { + const { logger } = setup({}); + + vi.stubEnv('TERM_PROGRAM', 'vscode'); + vi.stubEnv('SURFACE', 'ide-1234'); + + const event = logger?.createLogEvent('abc', []); + + expect(event?.event_metadata[0][1]).toEqual({ + gemini_cli_key: EventMetadataKey.GEMINI_CLI_SURFACE, + value: 'ide-1234', + }); + }); + + it.each([ + { + env: { + CURSOR_TRACE_ID: 'abc123', + GITHUB_SHA: undefined, + }, + expectedValue: 'cursor', + }, + { + env: { + TERM_PROGRAM: 'vscode', + GITHUB_SHA: undefined, + }, + expectedValue: 'vscode', + }, + { + env: { + MONOSPACE_ENV: 'true', + GITHUB_SHA: undefined, + }, + expectedValue: 'firebasestudio', + }, + { + env: { + __COG_BASHRC_SOURCED: 'true', + GITHUB_SHA: undefined, + }, + expectedValue: 'devin', + }, + { + env: { + CLOUD_SHELL: 'true', + GITHUB_SHA: undefined, + }, + expectedValue: 'cloudshell', + }, + ])( + 'logs the current surface for as $expectedValue, preempting vscode detection', + ({ env, expectedValue }) => { + const { logger } = setup({}); + for (const [key, value] of Object.entries(env)) { + vi.stubEnv(key, value); + } + vi.stubEnv('TERM_PROGRAM', 'vscode'); + const event = logger?.createLogEvent('abc', []); + expect(event?.event_metadata[0][1]).toEqual({ + gemini_cli_key: EventMetadataKey.GEMINI_CLI_SURFACE, + value: expectedValue, + }); + }, + ); }); describe('enqueueLogEvent', () => { it('should add events to the queue', () => { + const { logger } = setup(); logger!.enqueueLogEvent({ test: 'event1' }); expect(getEventsSize(logger!)).toBe(1); }); it('should evict 
the oldest event when the queue is full', () => { - const maxEvents = getMaxEvents(logger!); + const { logger } = setup(); - for (let i = 0; i < maxEvents; i++) { + for (let i = 0; i < TEST_ONLY.MAX_EVENTS; i++) { logger!.enqueueLogEvent({ event_id: i }); } - expect(getEventsSize(logger!)).toBe(maxEvents); + expect(getEventsSize(logger!)).toBe(TEST_ONLY.MAX_EVENTS); const firstEvent = JSON.parse( getEvents(logger!)[0][0].source_extension_json, ); expect(firstEvent.event_id).toBe(0); // This should push out the first event - logger!.enqueueLogEvent({ event_id: maxEvents }); + logger!.enqueueLogEvent({ event_id: TEST_ONLY.MAX_EVENTS }); - expect(getEventsSize(logger!)).toBe(maxEvents); + expect(getEventsSize(logger!)).toBe(TEST_ONLY.MAX_EVENTS); const newFirstEvent = JSON.parse( getEvents(logger!)[0][0].source_extension_json, ); expect(newFirstEvent.event_id).toBe(1); const lastEvent = JSON.parse( - getEvents(logger!)[maxEvents - 1][0].source_extension_json, + getEvents(logger!)[TEST_ONLY.MAX_EVENTS - 1][0].source_extension_json, ); - expect(lastEvent.event_id).toBe(maxEvents); + expect(lastEvent.event_id).toBe(TEST_ONLY.MAX_EVENTS); }); }); describe('flushToClearcut', () => { - let mockRequest: Writable; - let mockResponse: Readable & Partial; - - beforeEach(() => { - mockRequest = new Writable({ - write(chunk, encoding, callback) { - callback(); + it('allows for usage with a configured proxy agent', async () => { + const { logger } = setup({ + config: { + proxy: 'http://mycoolproxy.whatever.com:3128', }, }); - vi.spyOn(mockRequest, 'on'); - vi.spyOn(mockRequest, 'end').mockReturnThis(); - vi.spyOn(mockRequest, 'destroy').mockReturnThis(); - mockResponse = new Readable({ read() {} }) as Readable & - Partial; + logger!.enqueueLogEvent({ event_id: 1 }); - mockHttps.request.mockImplementation( - ( - _options: string | https.RequestOptions | URL, - ...args: unknown[] - ): ClientRequest => { - const callback = args.find((arg) => typeof arg === 'function') as - | ((res: 
IncomingMessage) => void) - | undefined; + const response = await logger!.flushToClearcut(); - if (callback) { - callback(mockResponse as IncomingMessage); - } - return mockRequest as ClientRequest; - }, - ); + expect(response.nextRequestWaitMs).toBe(NEXT_WAIT_MS); }); it('should clear events on successful flush', async () => { - mockResponse.statusCode = 200; - const mockResponseBody = { nextRequestWaitMs: 1000 }; - // Encoded protobuf for {nextRequestWaitMs: 1000} which is `08 E8 07` - const encodedResponse = Buffer.from([8, 232, 7]); + const { logger } = setup(); logger!.enqueueLogEvent({ event_id: 1 }); - const flushPromise = logger!.flushToClearcut(); + const response = await logger!.flushToClearcut(); - mockResponse.push(encodedResponse); - mockResponse.push(null); // End the stream - - const response: LogResponse = await flushPromise; - - expect(getEventsSize(logger!)).toBe(0); - expect(response.nextRequestWaitMs).toBe( - mockResponseBody.nextRequestWaitMs, - ); + expect(getEvents(logger!)).toEqual([]); + expect(response.nextRequestWaitMs).toBe(NEXT_WAIT_MS); }); it('should handle a network error and requeue events', async () => { + const { logger } = setup(); + + server.resetHandlers(http.post(CLEARCUT_URL, () => HttpResponse.error())); logger!.enqueueLogEvent({ event_id: 1 }); logger!.enqueueLogEvent({ event_id: 2 }); expect(getEventsSize(logger!)).toBe(2); - const flushPromise = logger!.flushToClearcut(); - mockRequest.emit('error', new Error('Network error')); - await flushPromise; + const x = logger!.flushToClearcut(); + await x; expect(getEventsSize(logger!)).toBe(2); const events = getEvents(logger!); @@ -181,18 +295,28 @@ describe('ClearcutLogger', () => { }); it('should handle an HTTP error and requeue events', async () => { - mockResponse.statusCode = 500; - mockResponse.statusMessage = 'Internal Server Error'; + const { logger } = setup(); + + server.resetHandlers( + http.post( + CLEARCUT_URL, + () => + new HttpResponse( + { 'the system is down': 
true }, + { + status: 500, + }, + ), + ), + ); logger!.enqueueLogEvent({ event_id: 1 }); logger!.enqueueLogEvent({ event_id: 2 }); - expect(getEventsSize(logger!)).toBe(2); - const flushPromise = logger!.flushToClearcut(); - mockResponse.emit('end'); // End the response to trigger promise resolution - await flushPromise; + expect(getEvents(logger!).length).toBe(2); + await logger!.flushToClearcut(); - expect(getEventsSize(logger!)).toBe(2); + expect(getEvents(logger!).length).toBe(2); const events = getEvents(logger!); expect(JSON.parse(events[0][0].source_extension_json).event_id).toBe(1); }); @@ -200,7 +324,8 @@ describe('ClearcutLogger', () => { describe('requeueFailedEvents logic', () => { it('should limit the number of requeued events to max_retry_events', () => { - const maxRetryEvents = getMaxRetryEvents(logger!); + const { logger } = setup(); + const maxRetryEvents = TEST_ONLY.MAX_RETRY_EVENTS; const eventsToLogCount = maxRetryEvents + 5; const eventsToSend: LogEventEntry[][] = []; for (let i = 0; i < eventsToLogCount; i++) { @@ -225,7 +350,8 @@ describe('ClearcutLogger', () => { }); it('should not requeue more events than available space in the queue', () => { - const maxEvents = getMaxEvents(logger!); + const { logger } = setup(); + const maxEvents = TEST_ONLY.MAX_EVENTS; const spaceToLeave = 5; const initialEventCount = maxEvents - spaceToLeave; for (let i = 0; i < initialEventCount; i++) { diff --git a/packages/core/src/telemetry/clearcut-logger/clearcut-logger.ts b/packages/core/src/telemetry/clearcut-logger/clearcut-logger.ts index 1e67d1cf..7ccfd440 100644 --- a/packages/core/src/telemetry/clearcut-logger/clearcut-logger.ts +++ b/packages/core/src/telemetry/clearcut-logger/clearcut-logger.ts @@ -4,10 +4,7 @@ * SPDX-License-Identifier: Apache-2.0 */ -import { Buffer } from 'buffer'; -import * as https from 'https'; import { HttpsProxyAgent } from 'https-proxy-agent'; - import { StartSessionEvent, EndSessionEvent, @@ -22,6 +19,7 @@ import { 
SlashCommandEvent, MalformedJsonResponseEvent, IdeConnectionEvent, + KittySequenceOverflowEvent, } from '../types.js'; import { EventMetadataKey } from './event-metadata-key.js'; import { Config } from '../../config/config.js'; @@ -32,6 +30,7 @@ import { } from '../../utils/user_account.js'; import { getInstallationId } from '../../utils/user_id.js'; import { FixedDeque } from 'mnemonist'; +import { DetectedIde, detectIde } from '../../ide/detect-ide.js'; const start_session_event_name = 'start_session'; const new_prompt_event_name = 'new_prompt'; @@ -46,6 +45,7 @@ const next_speaker_check_event_name = 'next_speaker_check'; const slash_command_event_name = 'slash_command'; const malformed_json_response_event_name = 'malformed_json_response'; const ide_connection_event_name = 'ide_connection'; +const kitty_sequence_overflow_event_name = 'kitty_sequence_overflow'; export interface LogResponse { nextRequestWaitMs?: number; @@ -56,19 +56,25 @@ export interface LogEventEntry { source_extension_json: string; } -export type EventValue = { +export interface EventValue { gemini_cli_key: EventMetadataKey | string; value: string; -}; +} -export type LogEvent = { - console_type: string; +export interface LogEvent { + console_type: 'GEMINI_CLI'; application: number; event_name: string; event_metadata: EventValue[][]; client_email?: string; client_install_id?: string; -}; +} + +export interface LogRequest { + log_source_name: 'CONCORD'; + request_time_ms: number; + log_event: LogEventEntry[][]; +} /** * Determine the surface that the user is currently using. Surface is effectively the @@ -80,31 +86,70 @@ export type LogEvent = { * methods might have in their runtimes. 
*/ function determineSurface(): string { - if (process.env.CLOUD_SHELL === 'true') { - return 'CLOUD_SHELL'; - } else if (process.env.MONOSPACE_ENV === 'true') { - return 'FIREBASE_STUDIO'; + if (process.env.SURFACE) { + return process.env.SURFACE; + } else if (process.env.GITHUB_SHA) { + return 'GitHub'; + } else if (process.env.TERM_PROGRAM === 'vscode') { + return detectIde() || DetectedIde.VSCode; } else { - return process.env.SURFACE || 'SURFACE_NOT_SET'; + return 'SURFACE_NOT_SET'; } } +/** + * Clearcut URL to send logging events to. + */ +const CLEARCUT_URL = 'https://play.googleapis.com/log?format=json&hasfast=true'; + +/** + * Interval in which buffered events are sent to clearcut. + */ +const FLUSH_INTERVAL_MS = 1000 * 60; + +/** + * Maximum amount of events to keep in memory. Events added after this amount + * are dropped until the next flush to clearcut, which happens periodically as + * defined by {@link FLUSH_INTERVAL_MS}. + */ +const MAX_EVENTS = 1000; + +/** + * Maximum events to retry after a failed clearcut flush + */ +const MAX_RETRY_EVENTS = 100; + // Singleton class for batch posting log events to Clearcut. When a new event comes in, the elapsed time // is checked and events are flushed to Clearcut if at least a minute has passed since the last flush. export class ClearcutLogger { private static instance: ClearcutLogger; private config?: Config; + + /** + * Queue of pending events that need to be flushed to the server. New events + * are added to this queue and then flushed on demand (via `flushToClearcut`) + */ private readonly events: FixedDeque; - private last_flush_time: number = Date.now(); - private flush_interval_ms: number = 1000 * 60; // Wait at least a minute before flushing events. 
- private readonly max_events: number = 1000; // Maximum events to keep in memory - private readonly max_retry_events: number = 100; // Maximum failed events to retry - private flushing: boolean = false; // Prevent concurrent flush operations - private pendingFlush: boolean = false; // Track if a flush was requested during an ongoing flush + + /** + * The last time that the events were successfully flushed to the server. + */ + private lastFlushTime: number = Date.now(); + + /** + * the value is true when there is a pending flush happening. This prevents + * concurrent flush operations. + */ + private flushing: boolean = false; + + /** + * This value is true when a flush was requested during an ongoing flush. + */ + private pendingFlush: boolean = false; private constructor(config?: Config) { this.config = config; - this.events = new FixedDeque(Array, this.max_events); + this.events = new FixedDeque(Array, MAX_EVENTS); } static getInstance(config?: Config): ClearcutLogger | undefined { @@ -125,7 +170,7 @@ export class ClearcutLogger { enqueueLogEvent(event: object): void { try { // Manually handle overflow for FixedDeque, which throws when full. - const wasAtCapacity = this.events.size >= this.max_events; + const wasAtCapacity = this.events.size >= MAX_EVENTS; if (wasAtCapacity) { this.events.shift(); // Evict oldest element to make space. 
@@ -150,31 +195,14 @@ export class ClearcutLogger { } } - addDefaultFields(data: EventValue[]): void { - const totalAccounts = getLifetimeGoogleAccounts(); - const surface = determineSurface(); - const defaultLogMetadata = [ - { - gemini_cli_key: EventMetadataKey.GEMINI_CLI_GOOGLE_ACCOUNTS_COUNT, - value: totalAccounts.toString(), - }, - { - gemini_cli_key: EventMetadataKey.GEMINI_CLI_SURFACE, - value: surface, - }, - ]; - data.push(...defaultLogMetadata); - } - createLogEvent(name: string, data: EventValue[]): LogEvent { const email = getCachedGoogleAccount(); - // Add default fields that should exist for all logs - this.addDefaultFields(data); + data = addDefaultFields(data); const logEvent: LogEvent = { console_type: 'GEMINI_CLI', - application: 102, + application: 102, // GEMINI_CLI event_name: name, event_metadata: [data], }; @@ -190,7 +218,7 @@ export class ClearcutLogger { } flushIfNeeded(): void { - if (Date.now() - this.last_flush_time < this.flush_interval_ms) { + if (Date.now() - this.lastFlushTime < FLUSH_INTERVAL_MS) { return; } @@ -217,140 +245,67 @@ export class ClearcutLogger { const eventsToSend = this.events.toArray() as LogEventEntry[][]; this.events.clear(); - return new Promise<{ buffer: Buffer; statusCode?: number }>( - (resolve, reject) => { - const request = [ - { - log_source_name: 'CONCORD', - request_time_ms: Date.now(), - log_event: eventsToSend, - }, - ]; - const body = safeJsonStringify(request); - const options = { - hostname: 'play.googleapis.com', - path: '/log', - method: 'POST', - headers: { 'Content-Length': Buffer.byteLength(body) }, - timeout: 30000, // 30-second timeout - }; - const bufs: Buffer[] = []; - const req = https.request( - { - ...options, - agent: this.getProxyAgent(), - }, - (res) => { - res.on('error', reject); // Handle stream errors - res.on('data', (buf) => bufs.push(buf)); - res.on('end', () => { - try { - const buffer = Buffer.concat(bufs); - // Check if we got a successful response - if ( - res.statusCode && 
- res.statusCode >= 200 && - res.statusCode < 300 - ) { - resolve({ buffer, statusCode: res.statusCode }); - } else { - // HTTP error - reject with status code for retry handling - reject( - new Error(`HTTP ${res.statusCode}: ${res.statusMessage}`), - ); - } - } catch (e) { - reject(e); - } - }); - }, - ); - req.on('error', (e) => { - // Network-level error - reject(e); - }); - req.on('timeout', () => { - if (!req.destroyed) { - req.destroy(new Error('Request timeout after 30 seconds')); - } - }); - req.end(body); + const request: LogRequest[] = [ + { + log_source_name: 'CONCORD', + request_time_ms: Date.now(), + log_event: eventsToSend, }, - ) - .then(({ buffer }) => { - try { - this.last_flush_time = Date.now(); - return this.decodeLogResponse(buffer) || {}; - } catch (error: unknown) { - console.error('Error decoding log response:', error); - return {}; - } - }) - .catch((error: unknown) => { - // Handle both network-level and HTTP-level errors + ]; + + let result: LogResponse = {}; + + try { + const response = await fetch(CLEARCUT_URL, { + method: 'POST', + body: safeJsonStringify(request), + headers: { + 'Content-Type': 'application/json', + }, + }); + + const responseBody = await response.text(); + + if (response.status >= 200 && response.status < 300) { + this.lastFlushTime = Date.now(); + const nextRequestWaitMs = Number(JSON.parse(responseBody)[0]); + result = { + ...result, + nextRequestWaitMs, + }; + } else { if (this.config?.getDebugMode()) { - console.error('Error flushing log events:', error); + console.error( + `Error flushing log events: HTTP ${response.status}: ${response.statusText}`, + ); } // Re-queue failed events for retry this.requeueFailedEvents(eventsToSend); + } + } catch (e: unknown) { + if (this.config?.getDebugMode()) { + console.error('Error flushing log events:', e as Error); + } - // Return empty response to maintain the Promise contract - return {}; - }) - .finally(() => { - this.flushing = false; + // Re-queue failed events for 
retry + this.requeueFailedEvents(eventsToSend); + } - // If a flush was requested while we were flushing, flush again - if (this.pendingFlush) { - this.pendingFlush = false; - // Fire and forget the pending flush - this.flushToClearcut().catch((error) => { - if (this.config?.getDebugMode()) { - console.debug('Error in pending flush to Clearcut:', error); - } - }); + this.flushing = false; + + // If a flush was requested while we were flushing, flush again + if (this.pendingFlush) { + this.pendingFlush = false; + // Fire and forget the pending flush + this.flushToClearcut().catch((error) => { + if (this.config?.getDebugMode()) { + console.debug('Error in pending flush to Clearcut:', error); } }); - } - - // Visible for testing. Decodes protobuf-encoded response from Clearcut server. - decodeLogResponse(buf: Buffer): LogResponse | undefined { - // TODO(obrienowen): return specific errors to facilitate debugging. - if (buf.length < 1) { - return undefined; } - // The first byte of the buffer is `field<<3 | type`. We're looking for field - // 1, with type varint, represented by type=0. If the first byte isn't 8, that - // means field 1 is missing or the message is corrupted. Either way, we return - // undefined. - if (buf.readUInt8(0) !== 8) { - return undefined; - } - - let ms = BigInt(0); - let cont = true; - - // In each byte, the most significant bit is the continuation bit. If it's - // set, we keep going. The lowest 7 bits, are data bits. They are concatenated - // in reverse order to form the final number. - for (let i = 1; cont && i < buf.length; i++) { - const byte = buf.readUInt8(i); - ms |= BigInt(byte & 0x7f) << BigInt(7 * (i - 1)); - cont = (byte & 0x80) !== 0; - } - - if (cont) { - // We have fallen off the buffer without seeing a terminating byte. The - // message is corrupted. 
- return undefined; - } - - const returnVal = { - nextRequestWaitMs: Number(ms), - }; - return returnVal; + return result; } logStartSessionEvent(event: StartSessionEvent): void { @@ -687,6 +642,13 @@ export class ClearcutLogger { }); } + if (event.status) { + data.push({ + gemini_cli_key: EventMetadataKey.GEMINI_CLI_SLASH_COMMAND_STATUS, + value: JSON.stringify(event.status), + }); + } + this.enqueueLogEvent(this.createLogEvent(slash_command_event_name, data)); this.flushIfNeeded(); } @@ -718,6 +680,24 @@ export class ClearcutLogger { this.flushIfNeeded(); } + logKittySequenceOverflowEvent(event: KittySequenceOverflowEvent): void { + const data: EventValue[] = [ + { + gemini_cli_key: EventMetadataKey.GEMINI_CLI_KITTY_SEQUENCE_LENGTH, + value: event.sequence_length.toString(), + }, + { + gemini_cli_key: EventMetadataKey.GEMINI_CLI_KITTY_TRUNCATED_SEQUENCE, + value: event.truncated_sequence, + }, + ]; + + this.enqueueLogEvent( + this.createLogEvent(kitty_sequence_overflow_event_name, data), + ); + this.flushIfNeeded(); + } + logEndSessionEvent(event: EndSessionEvent): void { const data: EventValue[] = [ { @@ -752,24 +732,21 @@ export class ClearcutLogger { private requeueFailedEvents(eventsToSend: LogEventEntry[][]): void { // Add the events back to the front of the queue to be retried, but limit retry queue size - const eventsToRetry = eventsToSend.slice(-this.max_retry_events); // Keep only the most recent events + const eventsToRetry = eventsToSend.slice(-MAX_RETRY_EVENTS); // Keep only the most recent events // Log a warning if we're dropping events - if ( - eventsToSend.length > this.max_retry_events && - this.config?.getDebugMode() - ) { + if (eventsToSend.length > MAX_RETRY_EVENTS && this.config?.getDebugMode()) { console.warn( `ClearcutLogger: Dropping ${ - eventsToSend.length - this.max_retry_events + eventsToSend.length - MAX_RETRY_EVENTS } events due to retry queue limit. 
Total events: ${ eventsToSend.length - }, keeping: ${this.max_retry_events}`, + }, keeping: ${MAX_RETRY_EVENTS}`, ); } // Determine how many events can be re-queued - const availableSpace = this.max_events - this.events.size; + const availableSpace = MAX_EVENTS - this.events.size; const numEventsToRequeue = Math.min(eventsToRetry.length, availableSpace); if (numEventsToRequeue === 0) { @@ -792,7 +769,7 @@ export class ClearcutLogger { this.events.unshift(eventsToRequeue[i]); } // Clear any potential overflow - while (this.events.size > this.max_events) { + while (this.events.size > MAX_EVENTS) { this.events.pop(); } @@ -803,3 +780,28 @@ export class ClearcutLogger { } } } + +/** + * Adds default fields to data, and returns a new data array. This fields + * should exist on all log events. + */ +function addDefaultFields(data: EventValue[]): EventValue[] { + const totalAccounts = getLifetimeGoogleAccounts(); + const surface = determineSurface(); + const defaultLogMetadata: EventValue[] = [ + { + gemini_cli_key: EventMetadataKey.GEMINI_CLI_GOOGLE_ACCOUNTS_COUNT, + value: `${totalAccounts}`, + }, + { + gemini_cli_key: EventMetadataKey.GEMINI_CLI_SURFACE, + value: surface, + }, + ]; + return [...data, ...defaultLogMetadata]; +} + +export const TEST_ONLY = { + MAX_RETRY_EVENTS, + MAX_EVENTS, +}; diff --git a/packages/core/src/telemetry/clearcut-logger/event-metadata-key.ts b/packages/core/src/telemetry/clearcut-logger/event-metadata-key.ts index dc2ed796..9e4c93db 100644 --- a/packages/core/src/telemetry/clearcut-logger/event-metadata-key.ts +++ b/packages/core/src/telemetry/clearcut-logger/event-metadata-key.ts @@ -174,6 +174,9 @@ export enum EventMetadataKey { // Logs the subcommand of the slash command. GEMINI_CLI_SLASH_COMMAND_SUBCOMMAND = 42, + // Logs the status of the slash command (e.g. 
'success', 'error') + GEMINI_CLI_SLASH_COMMAND_STATUS = 51, + // ========================================================================== // Next Speaker Check Event Keys // =========================================================================== @@ -209,6 +212,16 @@ export enum EventMetadataKey { // Logs user removed lines in edit/write tool response. GEMINI_CLI_USER_REMOVED_LINES = 50, + + // ========================================================================== + // Kitty Sequence Overflow Event Keys + // =========================================================================== + + // Logs the length of the kitty sequence that overflowed. + GEMINI_CLI_KITTY_SEQUENCE_LENGTH = 53, + + // Logs the truncated kitty sequence. + GEMINI_CLI_KITTY_TRUNCATED_SEQUENCE = 52, } export function getEventMetadataKey( diff --git a/packages/core/src/telemetry/index.ts b/packages/core/src/telemetry/index.ts index 47dc4ff0..f03b7b85 100644 --- a/packages/core/src/telemetry/index.ts +++ b/packages/core/src/telemetry/index.ts @@ -28,6 +28,7 @@ export { logApiResponse, logFlashFallback, logSlashCommand, + logKittySequenceOverflow, } from './loggers.js'; export { StartSessionEvent, @@ -39,7 +40,10 @@ export { ApiResponseEvent, TelemetryEvent, FlashFallbackEvent, + KittySequenceOverflowEvent, SlashCommandEvent, + makeSlashCommandEvent, + SlashCommandStatus, } from './types.js'; export { SpanStatusCode, ValueType } from '@opentelemetry/api'; export { SemanticAttributes } from '@opentelemetry/semantic-conventions'; diff --git a/packages/core/src/telemetry/loggers.ts b/packages/core/src/telemetry/loggers.ts index a4ba104a..a5ab3566 100644 --- a/packages/core/src/telemetry/loggers.ts +++ b/packages/core/src/telemetry/loggers.ts @@ -32,6 +32,7 @@ import { NextSpeakerCheckEvent, LoopDetectedEvent, SlashCommandEvent, + KittySequenceOverflowEvent, } from './types.js'; import { recordApiErrorMetrics, @@ -377,3 +378,21 @@ export function logIdeConnection( }; logger.emit(logRecord); } + 
+export function logKittySequenceOverflow( + config: Config, + event: KittySequenceOverflowEvent, +): void { + ClearcutLogger.getInstance(config)?.logKittySequenceOverflowEvent(event); + if (!isTelemetrySdkInitialized()) return; + const attributes: LogAttributes = { + ...getCommonAttributes(config), + ...event, + }; + const logger = logs.getLogger(SERVICE_NAME); + const logRecord: LogRecord = { + body: `Kitty sequence buffer overflow: ${event.sequence_length} bytes`, + attributes, + }; + logger.emit(logRecord); +} diff --git a/packages/core/src/telemetry/sdk.ts b/packages/core/src/telemetry/sdk.ts index 531c905f..5dc26dfd 100644 --- a/packages/core/src/telemetry/sdk.ts +++ b/packages/core/src/telemetry/sdk.ts @@ -124,24 +124,32 @@ export function initializeTelemetry(config: Config): void { try { sdk.start(); - console.log('OpenTelemetry SDK started successfully.'); + if (config.getDebugMode()) { + console.log('OpenTelemetry SDK started successfully.'); + } telemetryInitialized = true; initializeMetrics(config); } catch (error) { console.error('Error starting OpenTelemetry SDK:', error); } - process.on('SIGTERM', shutdownTelemetry); - process.on('SIGINT', shutdownTelemetry); + process.on('SIGTERM', () => { + shutdownTelemetry(config); + }); + process.on('SIGINT', () => { + shutdownTelemetry(config); + }); } -export async function shutdownTelemetry(): Promise { +export async function shutdownTelemetry(config: Config): Promise { if (!telemetryInitialized || !sdk) { return; } try { await sdk.shutdown(); - console.log('OpenTelemetry SDK shut down successfully.'); + if (config.getDebugMode()) { + console.log('OpenTelemetry SDK shut down successfully.'); + } } catch (error) { console.error('Error shutting down SDK:', error); } finally { diff --git a/packages/core/src/telemetry/telemetry.test.ts b/packages/core/src/telemetry/telemetry.test.ts index 9734e382..15bd2e95 100644 --- a/packages/core/src/telemetry/telemetry.test.ts +++ 
b/packages/core/src/telemetry/telemetry.test.ts @@ -45,7 +45,7 @@ describe('telemetry', () => { afterEach(async () => { // Ensure we shut down telemetry even if a test fails. if (isTelemetrySdkInitialized()) { - await shutdownTelemetry(); + await shutdownTelemetry(mockConfig); } }); @@ -57,7 +57,7 @@ describe('telemetry', () => { it('should shutdown the telemetry service', async () => { initializeTelemetry(mockConfig); - await shutdownTelemetry(); + await shutdownTelemetry(mockConfig); expect(mockNodeSdk.shutdown).toHaveBeenCalled(); }); diff --git a/packages/core/src/telemetry/types.ts b/packages/core/src/telemetry/types.ts index 9d04b39d..9bc9f0a0 100644 --- a/packages/core/src/telemetry/types.ts +++ b/packages/core/src/telemetry/types.ts @@ -14,9 +14,17 @@ import { ToolCallDecision, } from './tool-call-decision.js'; -export class StartSessionEvent { +interface BaseTelemetryEvent { + 'event.name': string; + /** Current timestamp in ISO 8601 format */ + 'event.timestamp': string; +} + +type CommonFields = keyof BaseTelemetryEvent; + +export class StartSessionEvent implements BaseTelemetryEvent { 'event.name': 'cli_config'; - 'event.timestamp': string; // ISO 8601 + 'event.timestamp': string; model: string; embedding_model: string; sandbox_enabled: boolean; @@ -60,9 +68,9 @@ export class StartSessionEvent { } } -export class EndSessionEvent { +export class EndSessionEvent implements BaseTelemetryEvent { 'event.name': 'end_session'; - 'event.timestamp': string; // ISO 8601 + 'event.timestamp': string; session_id?: string; constructor(config?: Config) { @@ -72,9 +80,9 @@ export class EndSessionEvent { } } -export class UserPromptEvent { +export class UserPromptEvent implements BaseTelemetryEvent { 'event.name': 'user_prompt'; - 'event.timestamp': string; // ISO 8601 + 'event.timestamp': string; prompt_length: number; prompt_id: string; auth_type?: string; @@ -95,9 +103,9 @@ export class UserPromptEvent { } } -export class ToolCallEvent { +export class ToolCallEvent 
implements BaseTelemetryEvent { 'event.name': 'tool_call'; - 'event.timestamp': string; // ISO 8601 + 'event.timestamp': string; function_name: string; function_args: Record; duration_ms: number; @@ -142,9 +150,9 @@ export class ToolCallEvent { } } -export class ApiRequestEvent { +export class ApiRequestEvent implements BaseTelemetryEvent { 'event.name': 'api_request'; - 'event.timestamp': string; // ISO 8601 + 'event.timestamp': string; model: string; prompt_id: string; request_text?: string; @@ -158,7 +166,7 @@ export class ApiRequestEvent { } } -export class ApiErrorEvent { +export class ApiErrorEvent implements BaseTelemetryEvent { 'event.name': 'api_error'; 'event.timestamp': string; // ISO 8601 response_id?: string; @@ -193,7 +201,7 @@ export class ApiErrorEvent { } } -export class ApiResponseEvent { +export class ApiResponseEvent implements BaseTelemetryEvent { 'event.name': 'api_response'; 'event.timestamp': string; // ISO 8601 response_id: string; @@ -240,9 +248,9 @@ export class ApiResponseEvent { } } -export class FlashFallbackEvent { +export class FlashFallbackEvent implements BaseTelemetryEvent { 'event.name': 'flash_fallback'; - 'event.timestamp': string; // ISO 8601 + 'event.timestamp': string; auth_type: string; constructor(auth_type: string) { @@ -258,9 +266,9 @@ export enum LoopType { LLM_DETECTED_LOOP = 'llm_detected_loop', } -export class LoopDetectedEvent { +export class LoopDetectedEvent implements BaseTelemetryEvent { 'event.name': 'loop_detected'; - 'event.timestamp': string; // ISO 8601 + 'event.timestamp': string; loop_type: LoopType; prompt_id: string; @@ -272,9 +280,9 @@ export class LoopDetectedEvent { } } -export class NextSpeakerCheckEvent { +export class NextSpeakerCheckEvent implements BaseTelemetryEvent { 'event.name': 'next_speaker_check'; - 'event.timestamp': string; // ISO 8601 + 'event.timestamp': string; prompt_id: string; finish_reason: string; result: string; @@ -288,23 +296,36 @@ export class NextSpeakerCheckEvent { } } 
-export class SlashCommandEvent { +export interface SlashCommandEvent extends BaseTelemetryEvent { 'event.name': 'slash_command'; 'event.timestamp': string; // ISO 8106 command: string; subcommand?: string; - - constructor(command: string, subcommand?: string) { - this['event.name'] = 'slash_command'; - this['event.timestamp'] = new Date().toISOString(); - this.command = command; - this.subcommand = subcommand; - } + status?: SlashCommandStatus; } -export class MalformedJsonResponseEvent { +export function makeSlashCommandEvent({ + command, + subcommand, + status, +}: Omit): SlashCommandEvent { + return { + 'event.name': 'slash_command', + 'event.timestamp': new Date().toISOString(), + command, + subcommand, + status, + }; +} + +export enum SlashCommandStatus { + SUCCESS = 'success', + ERROR = 'error', +} + +export class MalformedJsonResponseEvent implements BaseTelemetryEvent { 'event.name': 'malformed_json_response'; - 'event.timestamp': string; // ISO 8601 + 'event.timestamp': string; model: string; constructor(model: string) { @@ -321,7 +342,7 @@ export enum IdeConnectionType { export class IdeConnectionEvent { 'event.name': 'ide_connection'; - 'event.timestamp': string; // ISO 8601 + 'event.timestamp': string; connection_type: IdeConnectionType; constructor(connection_type: IdeConnectionType) { @@ -331,6 +352,20 @@ export class IdeConnectionEvent { } } +export class KittySequenceOverflowEvent { + 'event.name': 'kitty_sequence_overflow'; + 'event.timestamp': string; // ISO 8601 + sequence_length: number; + truncated_sequence: string; + constructor(sequence_length: number, truncated_sequence: string) { + this['event.name'] = 'kitty_sequence_overflow'; + this['event.timestamp'] = new Date().toISOString(); + this.sequence_length = sequence_length; + // Truncate to first 20 chars for logging (avoid logging sensitive data) + this.truncated_sequence = truncated_sequence.substring(0, 20); + } +} + export type TelemetryEvent = | StartSessionEvent | EndSessionEvent @@ 
-342,6 +377,7 @@ export type TelemetryEvent = | FlashFallbackEvent | LoopDetectedEvent | NextSpeakerCheckEvent - | SlashCommandEvent + | KittySequenceOverflowEvent | MalformedJsonResponseEvent - | IdeConnectionEvent; + | IdeConnectionEvent + | SlashCommandEvent; diff --git a/packages/core/src/test-utils/config.ts b/packages/core/src/test-utils/config.ts new file mode 100644 index 00000000..08faf8c3 --- /dev/null +++ b/packages/core/src/test-utils/config.ts @@ -0,0 +1,36 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { Config, ConfigParameters } from '../config/config.js'; + +/** + * Default parameters used for {@link FAKE_CONFIG} + */ +export const DEFAULT_CONFIG_PARAMETERS: ConfigParameters = { + usageStatisticsEnabled: true, + debugMode: false, + sessionId: 'test-session-id', + proxy: undefined, + model: 'gemini-9001-super-duper', + targetDir: '/', + cwd: '/', +}; + +/** + * Produces a config. Default parameters are set to + * {@link DEFAULT_CONFIG_PARAMETERS}, optionally, fields can be specified to + * override those defaults. + */ +export function makeFakeConfig( + config: Partial<ConfigParameters> = { + ...DEFAULT_CONFIG_PARAMETERS, + }, +): Config { + return new Config({ + ...DEFAULT_CONFIG_PARAMETERS, + ...config, + }); +} diff --git a/packages/core/src/test-utils/tools.ts b/packages/core/src/test-utils/tools.ts index b168db9c..da642212 100644 --- a/packages/core/src/test-utils/tools.ts +++ b/packages/core/src/test-utils/tools.ts @@ -7,9 +7,9 @@ import { vi } from 'vitest'; import { BaseTool, - Icon, ToolCallConfirmationDetails, ToolResult, + Kind, } from '../tools/tools.js'; import { Schema, Type } from '@google/genai'; @@ -29,7 +29,7 @@ export class MockTool extends BaseTool<{ [key: string]: unknown }, ToolResult> { properties: { param: { type: Type.STRING } }, }, ) { - super(name, displayName ??
name, description, Kind.Other, params); } async execute( @@ -45,7 +45,7 @@ export class MockTool extends BaseTool<{ [key: string]: unknown }, ToolResult> { ); } - async shouldConfirmExecute( + override async shouldConfirmExecute( _params: { [key: string]: unknown }, _abortSignal: AbortSignal, ): Promise { diff --git a/packages/core/src/tools/edit.test.ts b/packages/core/src/tools/edit.test.ts index 3e0dba61..b2e31fdd 100644 --- a/packages/core/src/tools/edit.test.ts +++ b/packages/core/src/tools/edit.test.ts @@ -62,7 +62,6 @@ describe('EditTool', () => { getWorkspaceContext: () => createMockWorkspaceContext(rootDir), getIdeClient: () => undefined, getIdeMode: () => false, - getIdeModeFeature: () => false, // getGeminiConfig: () => ({ apiKey: 'test-api-key' }), // This was not a real Config method // Add other properties/methods of Config if EditTool uses them // Minimal other methods to satisfy Config type if needed by EditTool constructor or other direct uses: @@ -810,7 +809,6 @@ describe('EditTool', () => { }), }; (mockConfig as any).getIdeMode = () => true; - (mockConfig as any).getIdeModeFeature = () => true; (mockConfig as any).getIdeClient = () => ideClient; }); diff --git a/packages/core/src/tools/edit.ts b/packages/core/src/tools/edit.ts index 86641300..8d90dfe4 100644 --- a/packages/core/src/tools/edit.ts +++ b/packages/core/src/tools/edit.ts @@ -9,7 +9,7 @@ import * as path from 'path'; import * as Diff from 'diff'; import { BaseDeclarativeTool, - Icon, + Kind, ToolCallConfirmationDetails, ToolConfirmationOutcome, ToolEditConfirmationDetails, @@ -250,7 +250,6 @@ class EditToolInvocation implements ToolInvocation { ); const ideClient = this.config.getIdeClient(); const ideConfirmation = - this.config.getIdeModeFeature() && this.config.getIdeMode() && ideClient?.getConnectionStatus().status === IDEConnectionStatus.Connected ? ideClient.openDiff(this.params.file_path, editData.newContent) @@ -436,7 +435,7 @@ Expectation for required parameters: 4. 
NEVER escape \`old_string\` or \`new_string\`, that would break the exact literal text requirement. **Important:** If ANY of the above are not satisfied, the tool will fail. CRITICAL for \`old_string\`: Must uniquely identify the single instance to change. Include at least 3 lines of context BEFORE and AFTER the target text, matching whitespace and indentation precisely. If this string matches multiple locations, or does not match exactly, the tool will fail. **Multiple replacements:** Set \`expected_replacements\` to the number of occurrences you want to replace. The tool will replace ALL occurrences that match \`old_string\` exactly. Ensure the number of replacements matches your expectation.`, - Icon.Pencil, + Kind.Edit, { properties: { file_path: { @@ -472,7 +471,7 @@ Expectation for required parameters: * @param params Parameters to validate * @returns Error message string or null if valid */ - validateToolParams(params: EditToolParams): string | null { + override validateToolParams(params: EditToolParams): string | null { const errors = SchemaValidator.validate( this.schema.parametersJsonSchema, params, diff --git a/packages/core/src/tools/glob.ts b/packages/core/src/tools/glob.ts index eaedc20f..65454232 100644 --- a/packages/core/src/tools/glob.ts +++ b/packages/core/src/tools/glob.ts @@ -11,7 +11,7 @@ import { SchemaValidator } from '../utils/schemaValidator.js'; import { BaseDeclarativeTool, BaseToolInvocation, - Icon, + Kind, ToolInvocation, ToolResult, } from './tools.js'; @@ -248,7 +248,7 @@ export class GlobTool extends BaseDeclarativeTool { GlobTool.Name, 'FindFiles', 'Efficiently finds files matching specific glob patterns (e.g., `src/**/*.ts`, `**/*.md`), returning absolute paths sorted by modification time (newest first). 
Ideal for quickly locating files based on their name or path structure, especially in large codebases.', - Icon.FileSearch, + Kind.Search, { properties: { pattern: { @@ -281,7 +281,7 @@ export class GlobTool extends BaseDeclarativeTool { /** * Validates the parameters for the tool. */ - validateToolParams(params: GlobToolParams): string | null { + override validateToolParams(params: GlobToolParams): string | null { const errors = SchemaValidator.validate( this.schema.parametersJsonSchema, params, diff --git a/packages/core/src/tools/grep.ts b/packages/core/src/tools/grep.ts index 41e77c0f..e5b834d4 100644 --- a/packages/core/src/tools/grep.ts +++ b/packages/core/src/tools/grep.ts @@ -13,7 +13,7 @@ import { globStream } from 'glob'; import { BaseDeclarativeTool, BaseToolInvocation, - Icon, + Kind, ToolInvocation, ToolResult, } from './tools.js'; @@ -549,7 +549,7 @@ export class GrepTool extends BaseDeclarativeTool { GrepTool.Name, 'SearchText', 'Searches for a regular expression pattern within the content of files in a specified directory (or current working directory). Can filter files by a glob pattern. 
Returns the lines containing matches, along with their file paths and line numbers.', - Icon.Regex, + Kind.Search, { properties: { pattern: { @@ -620,7 +620,7 @@ export class GrepTool extends BaseDeclarativeTool { * @param params Parameters to validate * @returns An error message string if invalid, null otherwise */ - validateToolParams(params: GrepToolParams): string | null { + override validateToolParams(params: GrepToolParams): string | null { const errors = SchemaValidator.validate( this.schema.parametersJsonSchema, params, diff --git a/packages/core/src/tools/ls.test.ts b/packages/core/src/tools/ls.test.ts index fb99d829..2fbeb37a 100644 --- a/packages/core/src/tools/ls.test.ts +++ b/packages/core/src/tools/ls.test.ts @@ -74,9 +74,11 @@ describe('LSTool', () => { const params = { path: '/home/user/project/src', }; - - const error = lsTool.validateToolParams(params); - expect(error).toBeNull(); + vi.mocked(fs.statSync).mockReturnValue({ + isDirectory: () => true, + } as fs.Stats); + const invocation = lsTool.build(params); + expect(invocation).toBeDefined(); }); it('should reject relative paths', () => { @@ -84,8 +86,9 @@ describe('LSTool', () => { path: './src', }; - const error = lsTool.validateToolParams(params); - expect(error).toBe('Path must be absolute: ./src'); + expect(() => lsTool.build(params)).toThrow( + 'Path must be absolute: ./src', + ); }); it('should reject paths outside workspace with clear error message', () => { @@ -93,8 +96,7 @@ describe('LSTool', () => { path: '/etc/passwd', }; - const error = lsTool.validateToolParams(params); - expect(error).toBe( + expect(() => lsTool.build(params)).toThrow( 'Path must be within one of the workspace directories: /home/user/project, /home/user/other-project', ); }); @@ -103,9 +105,11 @@ describe('LSTool', () => { const params = { path: '/home/user/other-project/lib', }; - - const error = lsTool.validateToolParams(params); - expect(error).toBeNull(); + vi.mocked(fs.statSync).mockReturnValue({ + 
isDirectory: () => true, + } as fs.Stats); + const invocation = lsTool.build(params); + expect(invocation).toBeDefined(); }); }); @@ -133,10 +137,8 @@ describe('LSTool', () => { vi.mocked(fs.readdirSync).mockReturnValue(mockFiles as any); - const result = await lsTool.execute( - { path: testPath }, - new AbortController().signal, - ); + const invocation = lsTool.build({ path: testPath }); + const result = await invocation.execute(new AbortController().signal); expect(result.llmContent).toContain('[DIR] subdir'); expect(result.llmContent).toContain('file1.ts'); @@ -161,10 +163,8 @@ describe('LSTool', () => { vi.mocked(fs.readdirSync).mockReturnValue(mockFiles as any); - const result = await lsTool.execute( - { path: testPath }, - new AbortController().signal, - ); + const invocation = lsTool.build({ path: testPath }); + const result = await invocation.execute(new AbortController().signal); expect(result.llmContent).toContain('module1.js'); expect(result.llmContent).toContain('module2.js'); @@ -179,10 +179,8 @@ describe('LSTool', () => { } as fs.Stats); vi.mocked(fs.readdirSync).mockReturnValue([]); - const result = await lsTool.execute( - { path: testPath }, - new AbortController().signal, - ); + const invocation = lsTool.build({ path: testPath }); + const result = await invocation.execute(new AbortController().signal); expect(result.llmContent).toBe( 'Directory /home/user/project/empty is empty.', @@ -207,10 +205,11 @@ describe('LSTool', () => { }); vi.mocked(fs.readdirSync).mockReturnValue(mockFiles as any); - const result = await lsTool.execute( - { path: testPath, ignore: ['*.spec.js'] }, - new AbortController().signal, - ); + const invocation = lsTool.build({ + path: testPath, + ignore: ['*.spec.js'], + }); + const result = await invocation.execute(new AbortController().signal); expect(result.llmContent).toContain('test.js'); expect(result.llmContent).toContain('index.js'); @@ -238,10 +237,8 @@ describe('LSTool', () => { (path: string) => 
path.includes('ignored.js'), ); - const result = await lsTool.execute( - { path: testPath }, - new AbortController().signal, - ); + const invocation = lsTool.build({ path: testPath }); + const result = await invocation.execute(new AbortController().signal); expect(result.llmContent).toContain('file1.js'); expect(result.llmContent).toContain('file2.js'); @@ -269,10 +266,8 @@ describe('LSTool', () => { (path: string) => path.includes('private.js'), ); - const result = await lsTool.execute( - { path: testPath }, - new AbortController().signal, - ); + const invocation = lsTool.build({ path: testPath }); + const result = await invocation.execute(new AbortController().signal); expect(result.llmContent).toContain('file1.js'); expect(result.llmContent).toContain('file2.js'); @@ -287,10 +282,8 @@ describe('LSTool', () => { isDirectory: () => false, } as fs.Stats); - const result = await lsTool.execute( - { path: testPath }, - new AbortController().signal, - ); + const invocation = lsTool.build({ path: testPath }); + const result = await invocation.execute(new AbortController().signal); expect(result.llmContent).toContain('Path is not a directory'); expect(result.returnDisplay).toBe('Error: Path is not a directory.'); @@ -303,10 +296,8 @@ describe('LSTool', () => { throw new Error('ENOENT: no such file or directory'); }); - const result = await lsTool.execute( - { path: testPath }, - new AbortController().signal, - ); + const invocation = lsTool.build({ path: testPath }); + const result = await invocation.execute(new AbortController().signal); expect(result.llmContent).toContain('Error listing directory'); expect(result.returnDisplay).toBe('Error: Failed to list directory.'); @@ -336,10 +327,8 @@ describe('LSTool', () => { vi.mocked(fs.readdirSync).mockReturnValue(mockFiles as any); - const result = await lsTool.execute( - { path: testPath }, - new AbortController().signal, - ); + const invocation = lsTool.build({ path: testPath }); + const result = await 
invocation.execute(new AbortController().signal); const lines = ( typeof result.llmContent === 'string' ? result.llmContent : '' @@ -361,24 +350,18 @@ describe('LSTool', () => { throw new Error('EACCES: permission denied'); }); - const result = await lsTool.execute( - { path: testPath }, - new AbortController().signal, - ); + const invocation = lsTool.build({ path: testPath }); + const result = await invocation.execute(new AbortController().signal); expect(result.llmContent).toContain('Error listing directory'); expect(result.llmContent).toContain('permission denied'); expect(result.returnDisplay).toBe('Error: Failed to list directory.'); }); - it('should validate parameters and return error for invalid params', async () => { - const result = await lsTool.execute( - { path: '../outside' }, - new AbortController().signal, + it('should throw for invalid params at build time', async () => { + expect(() => lsTool.build({ path: '../outside' })).toThrow( + 'Path must be absolute: ../outside', ); - - expect(result.llmContent).toContain('Invalid parameters provided'); - expect(result.returnDisplay).toBe('Error: Failed to execute tool.'); }); it('should handle errors accessing individual files during listing', async () => { @@ -406,10 +389,8 @@ describe('LSTool', () => { .spyOn(console, 'error') .mockImplementation(() => {}); - const result = await lsTool.execute( - { path: testPath }, - new AbortController().signal, - ); + const invocation = lsTool.build({ path: testPath }); + const result = await invocation.execute(new AbortController().signal); // Should still list the accessible file expect(result.llmContent).toContain('accessible.ts'); @@ -428,19 +409,25 @@ describe('LSTool', () => { describe('getDescription', () => { it('should return shortened relative path', () => { const params = { - path: path.join(mockPrimaryDir, 'deeply', 'nested', 'directory'), + path: `${mockPrimaryDir}/deeply/nested/directory`, }; - - const description = lsTool.getDescription(params); + 
vi.mocked(fs.statSync).mockReturnValue({ + isDirectory: () => true, + } as fs.Stats); + const invocation = lsTool.build(params); + const description = invocation.getDescription(); expect(description).toBe(path.join('deeply', 'nested', 'directory')); }); it('should handle paths in secondary workspace', () => { const params = { - path: path.join(mockSecondaryDir, 'lib'), + path: `${mockSecondaryDir}/lib`, }; - - const description = lsTool.getDescription(params); + vi.mocked(fs.statSync).mockReturnValue({ + isDirectory: () => true, + } as fs.Stats); + const invocation = lsTool.build(params); + const description = invocation.getDescription(); expect(description).toBe(path.join('..', 'other-project', 'lib')); }); }); @@ -448,22 +435,25 @@ describe('LSTool', () => { describe('workspace boundary validation', () => { it('should accept paths in primary workspace directory', () => { const params = { path: `${mockPrimaryDir}/src` }; - expect(lsTool.validateToolParams(params)).toBeNull(); + vi.mocked(fs.statSync).mockReturnValue({ + isDirectory: () => true, + } as fs.Stats); + expect(lsTool.build(params)).toBeDefined(); }); it('should accept paths in secondary workspace directory', () => { const params = { path: `${mockSecondaryDir}/lib` }; - expect(lsTool.validateToolParams(params)).toBeNull(); + vi.mocked(fs.statSync).mockReturnValue({ + isDirectory: () => true, + } as fs.Stats); + expect(lsTool.build(params)).toBeDefined(); }); it('should reject paths outside all workspace directories', () => { const params = { path: '/etc/passwd' }; - const error = lsTool.validateToolParams(params); - expect(error).toContain( + expect(() => lsTool.build(params)).toThrow( 'Path must be within one of the workspace directories', ); - expect(error).toContain(mockPrimaryDir); - expect(error).toContain(mockSecondaryDir); }); it('should list files from secondary workspace directory', async () => { @@ -483,10 +473,8 @@ describe('LSTool', () => { vi.mocked(fs.readdirSync).mockReturnValue(mockFiles 
as any); - const result = await lsTool.execute( - { path: testPath }, - new AbortController().signal, - ); + const invocation = lsTool.build({ path: testPath }); + const result = await invocation.execute(new AbortController().signal); expect(result.llmContent).toContain('test1.spec.ts'); expect(result.llmContent).toContain('test2.spec.ts'); diff --git a/packages/core/src/tools/ls.ts b/packages/core/src/tools/ls.ts index 79820246..918c0b2b 100644 --- a/packages/core/src/tools/ls.ts +++ b/packages/core/src/tools/ls.ts @@ -6,7 +6,13 @@ import fs from 'fs'; import path from 'path'; -import { BaseTool, Icon, ToolResult } from './tools.js'; +import { + BaseDeclarativeTool, + BaseToolInvocation, + Kind, + ToolInvocation, + ToolResult, +} from './tools.js'; import { SchemaValidator } from '../utils/schemaValidator.js'; import { makeRelative, shortenPath } from '../utils/paths.js'; import { Config, DEFAULT_FILE_FILTERING_OPTIONS } from '../config/config.js'; @@ -64,79 +70,12 @@ export interface FileEntry { modifiedTime: Date; } -/** - * Implementation of the LS tool logic - */ -export class LSTool extends BaseTool { - static readonly Name = 'list_directory'; - - constructor(private config: Config) { - super( - LSTool.Name, - 'ReadFolder', - 'Lists the names of files and subdirectories directly within a specified directory path. Can optionally ignore entries matching provided glob patterns.', - Icon.Folder, - { - properties: { - path: { - description: - 'The absolute path to the directory to list (must be absolute, not relative)', - type: 'string', - }, - ignore: { - description: 'List of glob patterns to ignore', - items: { - type: 'string', - }, - type: 'array', - }, - file_filtering_options: { - description: - 'Optional: Whether to respect ignore patterns from .gitignore or .geminiignore', - type: 'object', - properties: { - respect_git_ignore: { - description: - 'Optional: Whether to respect .gitignore patterns when listing files. Only available in git repositories. 
Defaults to true.', - type: 'boolean', - }, - respect_gemini_ignore: { - description: - 'Optional: Whether to respect .geminiignore patterns when listing files. Defaults to true.', - type: 'boolean', - }, - }, - }, - }, - required: ['path'], - type: 'object', - }, - ); - } - - /** - * Validates the parameters for the tool - * @param params Parameters to validate - * @returns An error message string if invalid, null otherwise - */ - validateToolParams(params: LSToolParams): string | null { - const errors = SchemaValidator.validate( - this.schema.parametersJsonSchema, - params, - ); - if (errors) { - return errors; - } - if (!path.isAbsolute(params.path)) { - return `Path must be absolute: ${params.path}`; - } - - const workspaceContext = this.config.getWorkspaceContext(); - if (!workspaceContext.isPathWithinWorkspace(params.path)) { - const directories = workspaceContext.getDirectories(); - return `Path must be within one of the workspace directories: ${directories.join(', ')}`; - } - return null; +class LSToolInvocation extends BaseToolInvocation { + constructor( + private readonly config: Config, + params: LSToolParams, + ) { + super(params); } /** @@ -165,11 +104,13 @@ export class LSTool extends BaseTool { /** * Gets a description of the file reading operation - * @param params Parameters for the file reading * @returns A string describing the file being read */ - getDescription(params: LSToolParams): string { - const relativePath = makeRelative(params.path, this.config.getTargetDir()); + getDescription(): string { + const relativePath = makeRelative( + this.params.path, + this.config.getTargetDir(), + ); return shortenPath(relativePath); } @@ -184,49 +125,37 @@ export class LSTool extends BaseTool { /** * Executes the LS operation with the given parameters - * @param params Parameters for the LS operation * @returns Result of the LS operation */ - async execute( - params: LSToolParams, - _signal: AbortSignal, - ): Promise { - const validationError = 
this.validateToolParams(params); - if (validationError) { - return this.errorResult( - `Error: Invalid parameters provided. Reason: ${validationError}`, - `Failed to execute tool.`, - ); - } - + async execute(_signal: AbortSignal): Promise { try { - const stats = fs.statSync(params.path); + const stats = fs.statSync(this.params.path); if (!stats) { // fs.statSync throws on non-existence, so this check might be redundant // but keeping for clarity. Error message adjusted. return this.errorResult( - `Error: Directory not found or inaccessible: ${params.path}`, + `Error: Directory not found or inaccessible: ${this.params.path}`, `Directory not found or inaccessible.`, ); } if (!stats.isDirectory()) { return this.errorResult( - `Error: Path is not a directory: ${params.path}`, + `Error: Path is not a directory: ${this.params.path}`, `Path is not a directory.`, ); } - const files = fs.readdirSync(params.path); + const files = fs.readdirSync(this.params.path); const defaultFileIgnores = this.config.getFileFilteringOptions() ?? DEFAULT_FILE_FILTERING_OPTIONS; const fileFilteringOptions = { respectGitIgnore: - params.file_filtering_options?.respect_git_ignore ?? + this.params.file_filtering_options?.respect_git_ignore ?? defaultFileIgnores.respectGitIgnore, respectGeminiIgnore: - params.file_filtering_options?.respect_gemini_ignore ?? + this.params.file_filtering_options?.respect_gemini_ignore ?? 
defaultFileIgnores.respectGeminiIgnore, }; @@ -241,17 +170,17 @@ export class LSTool extends BaseTool { if (files.length === 0) { // Changed error message to be more neutral for LLM return { - llmContent: `Directory ${params.path} is empty.`, + llmContent: `Directory ${this.params.path} is empty.`, returnDisplay: `Directory is empty.`, }; } for (const file of files) { - if (this.shouldIgnore(file, params.ignore)) { + if (this.shouldIgnore(file, this.params.ignore)) { continue; } - const fullPath = path.join(params.path, file); + const fullPath = path.join(this.params.path, file); const relativePath = path.relative( this.config.getTargetDir(), fullPath, @@ -301,7 +230,7 @@ export class LSTool extends BaseTool { .map((entry) => `${entry.isDirectory ? '[DIR] ' : ''}${entry.name}`) .join('\n'); - let resultMessage = `Directory listing for ${params.path}:\n${directoryContent}`; + let resultMessage = `Directory listing for ${this.params.path}:\n${directoryContent}`; const ignoredMessages = []; if (gitIgnoredCount > 0) { ignoredMessages.push(`${gitIgnoredCount} git-ignored`); @@ -329,3 +258,87 @@ export class LSTool extends BaseTool { } } } + +/** + * Implementation of the LS tool logic + */ +export class LSTool extends BaseDeclarativeTool { + static readonly Name = 'list_directory'; + + constructor(private config: Config) { + super( + LSTool.Name, + 'ReadFolder', + 'Lists the names of files and subdirectories directly within a specified directory path. 
Can optionally ignore entries matching provided glob patterns.', + Kind.Search, + { + properties: { + path: { + description: + 'The absolute path to the directory to list (must be absolute, not relative)', + type: 'string', + }, + ignore: { + description: 'List of glob patterns to ignore', + items: { + type: 'string', + }, + type: 'array', + }, + file_filtering_options: { + description: + 'Optional: Whether to respect ignore patterns from .gitignore or .geminiignore', + type: 'object', + properties: { + respect_git_ignore: { + description: + 'Optional: Whether to respect .gitignore patterns when listing files. Only available in git repositories. Defaults to true.', + type: 'boolean', + }, + respect_gemini_ignore: { + description: + 'Optional: Whether to respect .geminiignore patterns when listing files. Defaults to true.', + type: 'boolean', + }, + }, + }, + }, + required: ['path'], + type: 'object', + }, + ); + } + + /** + * Validates the parameters for the tool + * @param params Parameters to validate + * @returns An error message string if invalid, null otherwise + */ + override validateToolParams(params: LSToolParams): string | null { + const errors = SchemaValidator.validate( + this.schema.parametersJsonSchema, + params, + ); + if (errors) { + return errors; + } + if (!path.isAbsolute(params.path)) { + return `Path must be absolute: ${params.path}`; + } + + const workspaceContext = this.config.getWorkspaceContext(); + if (!workspaceContext.isPathWithinWorkspace(params.path)) { + const directories = workspaceContext.getDirectories(); + return `Path must be within one of the workspace directories: ${directories.join( + ', ', + )}`; + } + return null; + } + + protected createInvocation( + params: LSToolParams, + ): ToolInvocation { + return new LSToolInvocation(this.config, params); + } +} diff --git a/packages/core/src/tools/mcp-tool.test.ts b/packages/core/src/tools/mcp-tool.test.ts index f8a9a8ba..36602d49 100644 --- a/packages/core/src/tools/mcp-tool.test.ts 
+++ b/packages/core/src/tools/mcp-tool.test.ts @@ -73,11 +73,21 @@ describe('DiscoveredMCPTool', () => { required: ['param'], }; + let tool: DiscoveredMCPTool; + beforeEach(() => { mockCallTool.mockClear(); mockToolMethod.mockClear(); + tool = new DiscoveredMCPTool( + mockCallableToolInstance, + serverName, + serverToolName, + baseDescription, + inputSchema, + ); // Clear allowlist before each relevant test, especially for shouldConfirmExecute - (DiscoveredMCPTool as any).allowlist.clear(); + const invocation = tool.build({}) as any; + invocation.constructor.allowlist.clear(); }); afterEach(() => { @@ -86,14 +96,6 @@ describe('DiscoveredMCPTool', () => { describe('constructor', () => { it('should set properties correctly', () => { - const tool = new DiscoveredMCPTool( - mockCallableToolInstance, - serverName, - serverToolName, - baseDescription, - inputSchema, - ); - expect(tool.name).toBe(serverToolName); expect(tool.schema.name).toBe(serverToolName); expect(tool.schema.description).toBe(baseDescription); @@ -105,7 +107,7 @@ describe('DiscoveredMCPTool', () => { it('should accept and store a custom timeout', () => { const customTimeout = 5000; - const tool = new DiscoveredMCPTool( + const toolWithTimeout = new DiscoveredMCPTool( mockCallableToolInstance, serverName, serverToolName, @@ -113,19 +115,12 @@ describe('DiscoveredMCPTool', () => { inputSchema, customTimeout, ); - expect(tool.timeout).toBe(customTimeout); + expect(toolWithTimeout.timeout).toBe(customTimeout); }); }); describe('execute', () => { it('should call mcpTool.callTool with correct parameters and format display output', async () => { - const tool = new DiscoveredMCPTool( - mockCallableToolInstance, - serverName, - serverToolName, - baseDescription, - inputSchema, - ); const params = { param: 'testValue' }; const mockToolSuccessResultObject = { success: true, @@ -147,7 +142,10 @@ describe('DiscoveredMCPTool', () => { ]; mockCallTool.mockResolvedValue(mockMcpToolResponseParts); - const toolResult: 
ToolResult = await tool.execute(params); + const invocation = tool.build(params); + const toolResult: ToolResult = await invocation.execute( + new AbortController().signal, + ); expect(mockCallTool).toHaveBeenCalledWith([ { name: serverToolName, args: params }, @@ -163,17 +161,13 @@ describe('DiscoveredMCPTool', () => { }); it('should handle empty result from getStringifiedResultForDisplay', async () => { - const tool = new DiscoveredMCPTool( - mockCallableToolInstance, - serverName, - serverToolName, - baseDescription, - inputSchema, - ); const params = { param: 'testValue' }; const mockMcpToolResponsePartsEmpty: Part[] = []; mockCallTool.mockResolvedValue(mockMcpToolResponsePartsEmpty); - const toolResult: ToolResult = await tool.execute(params); + const invocation = tool.build(params); + const toolResult: ToolResult = await invocation.execute( + new AbortController().signal, + ); expect(toolResult.returnDisplay).toBe('```json\n[]\n```'); expect(toolResult.llmContent).toEqual([ { text: '[Error: Could not parse tool response]' }, @@ -181,28 +175,17 @@ describe('DiscoveredMCPTool', () => { }); it('should propagate rejection if mcpTool.callTool rejects', async () => { - const tool = new DiscoveredMCPTool( - mockCallableToolInstance, - serverName, - serverToolName, - baseDescription, - inputSchema, - ); const params = { param: 'failCase' }; const expectedError = new Error('MCP call failed'); mockCallTool.mockRejectedValue(expectedError); - await expect(tool.execute(params)).rejects.toThrow(expectedError); + const invocation = tool.build(params); + await expect( + invocation.execute(new AbortController().signal), + ).rejects.toThrow(expectedError); }); it('should handle a simple text response correctly', async () => { - const tool = new DiscoveredMCPTool( - mockCallableToolInstance, - serverName, - serverToolName, - baseDescription, - inputSchema, - ); const params = { query: 'test' }; const successMessage = 'This is a success message.'; @@ -221,7 +204,8 @@ 
describe('DiscoveredMCPTool', () => { ]; mockCallTool.mockResolvedValue(sdkResponse); - const toolResult = await tool.execute(params); + const invocation = tool.build(params); + const toolResult = await invocation.execute(new AbortController().signal); // 1. Assert that the llmContent sent to the scheduler is a clean Part array. expect(toolResult.llmContent).toEqual([{ text: successMessage }]); @@ -236,13 +220,6 @@ describe('DiscoveredMCPTool', () => { }); it('should handle an AudioBlock response', async () => { - const tool = new DiscoveredMCPTool( - mockCallableToolInstance, - serverName, - serverToolName, - baseDescription, - inputSchema, - ); const params = { action: 'play' }; const sdkResponse: Part[] = [ { @@ -262,7 +239,8 @@ describe('DiscoveredMCPTool', () => { ]; mockCallTool.mockResolvedValue(sdkResponse); - const toolResult = await tool.execute(params); + const invocation = tool.build(params); + const toolResult = await invocation.execute(new AbortController().signal); expect(toolResult.llmContent).toEqual([ { @@ -279,13 +257,6 @@ describe('DiscoveredMCPTool', () => { }); it('should handle a ResourceLinkBlock response', async () => { - const tool = new DiscoveredMCPTool( - mockCallableToolInstance, - serverName, - serverToolName, - baseDescription, - inputSchema, - ); const params = { resource: 'get' }; const sdkResponse: Part[] = [ { @@ -306,7 +277,8 @@ describe('DiscoveredMCPTool', () => { ]; mockCallTool.mockResolvedValue(sdkResponse); - const toolResult = await tool.execute(params); + const invocation = tool.build(params); + const toolResult = await invocation.execute(new AbortController().signal); expect(toolResult.llmContent).toEqual([ { @@ -319,13 +291,6 @@ describe('DiscoveredMCPTool', () => { }); it('should handle an embedded text ResourceBlock response', async () => { - const tool = new DiscoveredMCPTool( - mockCallableToolInstance, - serverName, - serverToolName, - baseDescription, - inputSchema, - ); const params = { resource: 'get' }; const 
sdkResponse: Part[] = [ { @@ -348,7 +313,8 @@ describe('DiscoveredMCPTool', () => { ]; mockCallTool.mockResolvedValue(sdkResponse); - const toolResult = await tool.execute(params); + const invocation = tool.build(params); + const toolResult = await invocation.execute(new AbortController().signal); expect(toolResult.llmContent).toEqual([ { text: 'This is the text content.' }, @@ -357,13 +323,6 @@ describe('DiscoveredMCPTool', () => { }); it('should handle an embedded binary ResourceBlock response', async () => { - const tool = new DiscoveredMCPTool( - mockCallableToolInstance, - serverName, - serverToolName, - baseDescription, - inputSchema, - ); const params = { resource: 'get' }; const sdkResponse: Part[] = [ { @@ -386,7 +345,8 @@ describe('DiscoveredMCPTool', () => { ]; mockCallTool.mockResolvedValue(sdkResponse); - const toolResult = await tool.execute(params); + const invocation = tool.build(params); + const toolResult = await invocation.execute(new AbortController().signal); expect(toolResult.llmContent).toEqual([ { @@ -405,13 +365,6 @@ describe('DiscoveredMCPTool', () => { }); it('should handle a mix of content block types', async () => { - const tool = new DiscoveredMCPTool( - mockCallableToolInstance, - serverName, - serverToolName, - baseDescription, - inputSchema, - ); const params = { action: 'complex' }; const sdkResponse: Part[] = [ { @@ -433,7 +386,8 @@ describe('DiscoveredMCPTool', () => { ]; mockCallTool.mockResolvedValue(sdkResponse); - const toolResult = await tool.execute(params); + const invocation = tool.build(params); + const toolResult = await invocation.execute(new AbortController().signal); expect(toolResult.llmContent).toEqual([ { text: 'First part.' 
}, @@ -454,13 +408,6 @@ describe('DiscoveredMCPTool', () => { }); it('should ignore unknown content block types', async () => { - const tool = new DiscoveredMCPTool( - mockCallableToolInstance, - serverName, - serverToolName, - baseDescription, - inputSchema, - ); const params = { action: 'test' }; const sdkResponse: Part[] = [ { @@ -477,7 +424,8 @@ describe('DiscoveredMCPTool', () => { ]; mockCallTool.mockResolvedValue(sdkResponse); - const toolResult = await tool.execute(params); + const invocation = tool.build(params); + const toolResult = await invocation.execute(new AbortController().signal); expect(toolResult.llmContent).toEqual([{ text: 'Valid part.' }]); expect(toolResult.returnDisplay).toBe( @@ -486,13 +434,6 @@ describe('DiscoveredMCPTool', () => { }); it('should handle a complex mix of content block types', async () => { - const tool = new DiscoveredMCPTool( - mockCallableToolInstance, - serverName, - serverToolName, - baseDescription, - inputSchema, - ); const params = { action: 'super-complex' }; const sdkResponse: Part[] = [ { @@ -527,7 +468,8 @@ describe('DiscoveredMCPTool', () => { ]; mockCallTool.mockResolvedValue(sdkResponse); - const toolResult = await tool.execute(params); + const invocation = tool.build(params); + const toolResult = await invocation.execute(new AbortController().signal); expect(toolResult.llmContent).toEqual([ { text: 'Here is a resource.' 
}, @@ -552,10 +494,8 @@ describe('DiscoveredMCPTool', () => { }); describe('shouldConfirmExecute', () => { - // beforeEach is already clearing allowlist - it('should return false if trust is true', async () => { - const tool = new DiscoveredMCPTool( + const trustedTool = new DiscoveredMCPTool( mockCallableToolInstance, serverName, serverToolName, @@ -564,50 +504,32 @@ describe('DiscoveredMCPTool', () => { undefined, true, ); + const invocation = trustedTool.build({}); expect( - await tool.shouldConfirmExecute({}, new AbortController().signal), + await invocation.shouldConfirmExecute(new AbortController().signal), ).toBe(false); }); it('should return false if server is allowlisted', async () => { - (DiscoveredMCPTool as any).allowlist.add(serverName); - const tool = new DiscoveredMCPTool( - mockCallableToolInstance, - serverName, - serverToolName, - baseDescription, - inputSchema, - ); + const invocation = tool.build({}) as any; + invocation.constructor.allowlist.add(serverName); expect( - await tool.shouldConfirmExecute({}, new AbortController().signal), + await invocation.shouldConfirmExecute(new AbortController().signal), ).toBe(false); }); it('should return false if tool is allowlisted', async () => { const toolAllowlistKey = `${serverName}.${serverToolName}`; - (DiscoveredMCPTool as any).allowlist.add(toolAllowlistKey); - const tool = new DiscoveredMCPTool( - mockCallableToolInstance, - serverName, - serverToolName, - baseDescription, - inputSchema, - ); + const invocation = tool.build({}) as any; + invocation.constructor.allowlist.add(toolAllowlistKey); expect( - await tool.shouldConfirmExecute({}, new AbortController().signal), + await invocation.shouldConfirmExecute(new AbortController().signal), ).toBe(false); }); it('should return confirmation details if not trusted and not allowlisted', async () => { - const tool = new DiscoveredMCPTool( - mockCallableToolInstance, - serverName, - serverToolName, - baseDescription, - inputSchema, - ); - const confirmation 
= await tool.shouldConfirmExecute( - {}, + const invocation = tool.build({}); + const confirmation = await invocation.shouldConfirmExecute( new AbortController().signal, ); expect(confirmation).not.toBe(false); @@ -629,15 +551,8 @@ describe('DiscoveredMCPTool', () => { }); it('should add server to allowlist on ProceedAlwaysServer', async () => { - const tool = new DiscoveredMCPTool( - mockCallableToolInstance, - serverName, - serverToolName, - baseDescription, - inputSchema, - ); - const confirmation = await tool.shouldConfirmExecute( - {}, + const invocation = tool.build({}) as any; + const confirmation = await invocation.shouldConfirmExecute( new AbortController().signal, ); expect(confirmation).not.toBe(false); @@ -650,7 +565,7 @@ describe('DiscoveredMCPTool', () => { await confirmation.onConfirm( ToolConfirmationOutcome.ProceedAlwaysServer, ); - expect((DiscoveredMCPTool as any).allowlist.has(serverName)).toBe(true); + expect(invocation.constructor.allowlist.has(serverName)).toBe(true); } else { throw new Error( 'Confirmation details or onConfirm not in expected format', @@ -659,16 +574,9 @@ describe('DiscoveredMCPTool', () => { }); it('should add tool to allowlist on ProceedAlwaysTool', async () => { - const tool = new DiscoveredMCPTool( - mockCallableToolInstance, - serverName, - serverToolName, - baseDescription, - inputSchema, - ); const toolAllowlistKey = `${serverName}.${serverToolName}`; - const confirmation = await tool.shouldConfirmExecute( - {}, + const invocation = tool.build({}) as any; + const confirmation = await invocation.shouldConfirmExecute( new AbortController().signal, ); expect(confirmation).not.toBe(false); @@ -679,7 +587,7 @@ describe('DiscoveredMCPTool', () => { typeof confirmation.onConfirm === 'function' ) { await confirmation.onConfirm(ToolConfirmationOutcome.ProceedAlwaysTool); - expect((DiscoveredMCPTool as any).allowlist.has(toolAllowlistKey)).toBe( + expect(invocation.constructor.allowlist.has(toolAllowlistKey)).toBe( true, ); } 
else { @@ -690,15 +598,8 @@ describe('DiscoveredMCPTool', () => { }); it('should handle Cancel confirmation outcome', async () => { - const tool = new DiscoveredMCPTool( - mockCallableToolInstance, - serverName, - serverToolName, - baseDescription, - inputSchema, - ); - const confirmation = await tool.shouldConfirmExecute( - {}, + const invocation = tool.build({}) as any; + const confirmation = await invocation.shouldConfirmExecute( new AbortController().signal, ); expect(confirmation).not.toBe(false); @@ -710,11 +611,9 @@ describe('DiscoveredMCPTool', () => { ) { // Cancel should not add anything to allowlist await confirmation.onConfirm(ToolConfirmationOutcome.Cancel); - expect((DiscoveredMCPTool as any).allowlist.has(serverName)).toBe( - false, - ); + expect(invocation.constructor.allowlist.has(serverName)).toBe(false); expect( - (DiscoveredMCPTool as any).allowlist.has( + invocation.constructor.allowlist.has( `${serverName}.${serverToolName}`, ), ).toBe(false); @@ -726,15 +625,8 @@ describe('DiscoveredMCPTool', () => { }); it('should handle ProceedOnce confirmation outcome', async () => { - const tool = new DiscoveredMCPTool( - mockCallableToolInstance, - serverName, - serverToolName, - baseDescription, - inputSchema, - ); - const confirmation = await tool.shouldConfirmExecute( - {}, + const invocation = tool.build({}) as any; + const confirmation = await invocation.shouldConfirmExecute( new AbortController().signal, ); expect(confirmation).not.toBe(false); @@ -746,11 +638,9 @@ describe('DiscoveredMCPTool', () => { ) { // ProceedOnce should not add anything to allowlist await confirmation.onConfirm(ToolConfirmationOutcome.ProceedOnce); - expect((DiscoveredMCPTool as any).allowlist.has(serverName)).toBe( - false, - ); + expect(invocation.constructor.allowlist.has(serverName)).toBe(false); expect( - (DiscoveredMCPTool as any).allowlist.has( + invocation.constructor.allowlist.has( `${serverName}.${serverToolName}`, ), ).toBe(false); diff --git 
a/packages/core/src/tools/mcp-tool.ts b/packages/core/src/tools/mcp-tool.ts index 4b9a9818..fbb104fd 100644 --- a/packages/core/src/tools/mcp-tool.ts +++ b/packages/core/src/tools/mcp-tool.ts @@ -5,14 +5,16 @@ */ import { - BaseTool, - ToolResult, + BaseDeclarativeTool, + BaseToolInvocation, + Kind, ToolCallConfirmationDetails, ToolConfirmationOutcome, + ToolInvocation, ToolMcpConfirmationDetails, - Icon, + ToolResult, } from './tools.js'; -import { CallableTool, Part, FunctionCall } from '@google/genai'; +import { CallableTool, FunctionCall, Part } from '@google/genai'; type ToolParams = Record; @@ -50,15 +52,90 @@ type McpContentBlock = | McpResourceBlock | McpResourceLinkBlock; -export class DiscoveredMCPTool extends BaseTool { +class DiscoveredMCPToolInvocation extends BaseToolInvocation< + ToolParams, + ToolResult +> { private static readonly allowlist: Set = new Set(); + constructor( + private readonly mcpTool: CallableTool, + readonly serverName: string, + readonly serverToolName: string, + readonly displayName: string, + readonly timeout?: number, + readonly trust?: boolean, + params: ToolParams = {}, + ) { + super(params); + } + + override async shouldConfirmExecute( + _abortSignal: AbortSignal, + ): Promise { + const serverAllowListKey = this.serverName; + const toolAllowListKey = `${this.serverName}.${this.serverToolName}`; + + if (this.trust) { + return false; // server is trusted, no confirmation needed + } + + if ( + DiscoveredMCPToolInvocation.allowlist.has(serverAllowListKey) || + DiscoveredMCPToolInvocation.allowlist.has(toolAllowListKey) + ) { + return false; // server and/or tool already allowlisted + } + + const confirmationDetails: ToolMcpConfirmationDetails = { + type: 'mcp', + title: 'Confirm MCP Tool Execution', + serverName: this.serverName, + toolName: this.serverToolName, // Display original tool name in confirmation + toolDisplayName: this.displayName, // Display global registry name exposed to model and user + onConfirm: async (outcome: 
ToolConfirmationOutcome) => { + if (outcome === ToolConfirmationOutcome.ProceedAlwaysServer) { + DiscoveredMCPToolInvocation.allowlist.add(serverAllowListKey); + } else if (outcome === ToolConfirmationOutcome.ProceedAlwaysTool) { + DiscoveredMCPToolInvocation.allowlist.add(toolAllowListKey); + } + }, + }; + return confirmationDetails; + } + + async execute(): Promise { + const functionCalls: FunctionCall[] = [ + { + name: this.serverToolName, + args: this.params, + }, + ]; + + const rawResponseParts = await this.mcpTool.callTool(functionCalls); + const transformedParts = transformMcpContentToParts(rawResponseParts); + + return { + llmContent: transformedParts, + returnDisplay: getStringifiedResultForDisplay(rawResponseParts), + }; + } + + getDescription(): string { + return this.displayName; + } +} + +export class DiscoveredMCPTool extends BaseDeclarativeTool< + ToolParams, + ToolResult +> { constructor( private readonly mcpTool: CallableTool, readonly serverName: string, readonly serverToolName: string, description: string, - readonly parameterSchema: unknown, + override readonly parameterSchema: unknown, readonly timeout?: number, readonly trust?: boolean, nameOverride?: string, @@ -67,7 +144,7 @@ export class DiscoveredMCPTool extends BaseTool { nameOverride ?? 
generateValidName(serverToolName), `${serverToolName} (${serverName} MCP Server)`, description, - Icon.Hammer, + Kind.Other, parameterSchema, true, // isOutputMarkdown false, // canUpdateOutput @@ -87,56 +164,18 @@ export class DiscoveredMCPTool extends BaseTool { ); } - async shouldConfirmExecute( - _params: ToolParams, - _abortSignal: AbortSignal, - ): Promise { - const serverAllowListKey = this.serverName; - const toolAllowListKey = `${this.serverName}.${this.serverToolName}`; - - if (this.trust) { - return false; // server is trusted, no confirmation needed - } - - if ( - DiscoveredMCPTool.allowlist.has(serverAllowListKey) || - DiscoveredMCPTool.allowlist.has(toolAllowListKey) - ) { - return false; // server and/or tool already allowlisted - } - - const confirmationDetails: ToolMcpConfirmationDetails = { - type: 'mcp', - title: 'Confirm MCP Tool Execution', - serverName: this.serverName, - toolName: this.serverToolName, // Display original tool name in confirmation - toolDisplayName: this.name, // Display global registry name exposed to model and user - onConfirm: async (outcome: ToolConfirmationOutcome) => { - if (outcome === ToolConfirmationOutcome.ProceedAlwaysServer) { - DiscoveredMCPTool.allowlist.add(serverAllowListKey); - } else if (outcome === ToolConfirmationOutcome.ProceedAlwaysTool) { - DiscoveredMCPTool.allowlist.add(toolAllowListKey); - } - }, - }; - return confirmationDetails; - } - - async execute(params: ToolParams): Promise { - const functionCalls: FunctionCall[] = [ - { - name: this.serverToolName, - args: params, - }, - ]; - - const rawResponseParts = await this.mcpTool.callTool(functionCalls); - const transformedParts = transformMcpContentToParts(rawResponseParts); - - return { - llmContent: transformedParts, - returnDisplay: getStringifiedResultForDisplay(rawResponseParts), - }; + protected createInvocation( + params: ToolParams, + ): ToolInvocation { + return new DiscoveredMCPToolInvocation( + this.mcpTool, + this.serverName, + 
this.serverToolName, + this.displayName, + this.timeout, + this.trust, + params, + ); } } diff --git a/packages/core/src/tools/memoryTool.test.ts b/packages/core/src/tools/memoryTool.test.ts index b01471f7..7eede859 100644 --- a/packages/core/src/tools/memoryTool.test.ts +++ b/packages/core/src/tools/memoryTool.test.ts @@ -202,9 +202,11 @@ describe('MemoryTool', () => { expect(memoryTool.schema.parametersJsonSchema).toBeDefined(); }); - it('should call performAddMemoryEntry with correct parameters and return success', async () => { + it('should call performAddMemoryEntry with correct parameters and return success for global scope', async () => { const params = { fact: 'The sky is blue', scope: 'global' as const }; - const result = await memoryTool.execute(params, mockAbortSignal); + const invocation = memoryTool.build(params); + const result = await invocation.execute(mockAbortSignal); + // Use getCurrentGeminiMdFilename for the default expectation before any setGeminiMdFilename calls in a test const expectedFilePath = path.join( os.homedir(), @@ -231,16 +233,44 @@ describe('MemoryTool', () => { expect(result.returnDisplay).toBe(successMessage); }); + it('should call performAddMemoryEntry with correct parameters and return success for project scope', async () => { + const params = { fact: 'The sky is blue', scope: 'project' as const }; + const invocation = memoryTool.build(params); + const result = await invocation.execute(mockAbortSignal); + + // For project scope, expect the file to be in current working directory + const expectedFilePath = path.join( + process.cwd(), + getCurrentGeminiMdFilename(), + ); + + // For this test, we expect the actual fs methods to be passed + const expectedFsArgument = { + readFile: fs.readFile, + writeFile: fs.writeFile, + mkdir: fs.mkdir, + }; + + expect(performAddMemoryEntrySpy).toHaveBeenCalledWith( + params.fact, + expectedFilePath, + expectedFsArgument, + ); + const successMessage = `Okay, I've remembered that in project 
memory: "${params.fact}"`; + expect(result.llmContent).toBe( + JSON.stringify({ success: true, message: successMessage }), + ); + expect(result.returnDisplay).toBe(successMessage); + }); + it('should return an error if fact is empty', async () => { const params = { fact: ' ' }; // Empty fact - const result = await memoryTool.execute(params, mockAbortSignal); - const errorMessage = 'Parameter "fact" must be a non-empty string.'; - - expect(performAddMemoryEntrySpy).not.toHaveBeenCalled(); - expect(result.llmContent).toBe( - JSON.stringify({ success: false, error: errorMessage }), + expect(memoryTool.validateToolParams(params)).toBe( + 'Parameter "fact" must be a non-empty string.', + ); + expect(() => memoryTool.build(params)).toThrow( + 'Parameter "fact" must be a non-empty string.', ); - expect(result.returnDisplay).toBe(`Error: ${errorMessage}`); }); it('should handle errors from performAddMemoryEntry', async () => { @@ -250,7 +280,8 @@ describe('MemoryTool', () => { ); performAddMemoryEntrySpy.mockRejectedValue(underlyingError); - const result = await memoryTool.execute(params, mockAbortSignal); + const invocation = memoryTool.build(params); + const result = await invocation.execute(mockAbortSignal); expect(result.llmContent).toBe( JSON.stringify({ @@ -262,6 +293,18 @@ describe('MemoryTool', () => { `Error saving memory: ${underlyingError.message}`, ); }); + + it('should return error when executing without scope parameter', async () => { + const params = { fact: 'Test fact' }; + const invocation = memoryTool.build(params); + const result = await invocation.execute(mockAbortSignal); + + expect(result.llmContent).toContain( + 'Please specify where to save this memory', + ); + expect(result.returnDisplay).toContain('Global:'); + expect(result.returnDisplay).toContain('Project:'); + }); }); describe('shouldConfirmExecute', () => { @@ -269,18 +312,14 @@ describe('MemoryTool', () => { beforeEach(() => { memoryTool = new MemoryTool(); - // Clear the allowlist before 
each test - (MemoryTool as unknown as { allowlist: Set }).allowlist.clear(); // Mock fs.readFile to return empty string (file doesn't exist) vi.mocked(fs.readFile).mockResolvedValue(''); }); - it('should return confirmation details when memory file is not allowlisted', async () => { + it('should return confirmation details when memory file is not allowlisted for global scope', async () => { const params = { fact: 'Test fact', scope: 'global' as const }; - const result = await memoryTool.shouldConfirmExecute( - params, - mockAbortSignal, - ); + const invocation = memoryTool.build(params); + const result = await invocation.shouldConfirmExecute(mockAbortSignal); expect(result).toBeDefined(); expect(result).not.toBe(false); @@ -301,7 +340,30 @@ describe('MemoryTool', () => { } }); - it('should return false when memory file is already allowlisted', async () => { + it('should return confirmation details when memory file is not allowlisted for project scope', async () => { + const params = { fact: 'Test fact', scope: 'project' as const }; + const invocation = memoryTool.build(params); + const result = await invocation.shouldConfirmExecute(mockAbortSignal); + + expect(result).toBeDefined(); + expect(result).not.toBe(false); + + if (result && result.type === 'edit') { + const expectedPath = path.join(process.cwd(), 'QWEN.md'); + expect(result.title).toBe( + `Confirm Memory Save: ${expectedPath} (project)`, + ); + expect(result.fileName).toBe(expectedPath); + expect(result.fileDiff).toContain('Index: QWEN.md'); + expect(result.fileDiff).toContain('+## Qwen Added Memories'); + expect(result.fileDiff).toContain('+- Test fact'); + expect(result.originalContent).toBe(''); + expect(result.newContent).toContain('## Qwen Added Memories'); + expect(result.newContent).toContain('- Test fact'); + } + }); + + it('should return false when memory file is already allowlisted for global scope', async () => { const params = { fact: 'Test fact', scope: 'global' as const }; const 
memoryFilePath = path.join( os.homedir(), @@ -309,20 +371,36 @@ describe('MemoryTool', () => { getCurrentGeminiMdFilename(), ); - // Add the memory file to the allowlist with the new key format - (MemoryTool as unknown as { allowlist: Set }).allowlist.add( - `${memoryFilePath}_global`, - ); + const invocation = memoryTool.build(params); + // Add the memory file to the allowlist with the scope-specific key format + // eslint-disable-next-line @typescript-eslint/no-explicit-any + (invocation.constructor as any).allowlist.add(`${memoryFilePath}_global`); - const result = await memoryTool.shouldConfirmExecute( - params, - mockAbortSignal, - ); + const result = await invocation.shouldConfirmExecute(mockAbortSignal); expect(result).toBe(false); }); - it('should add memory file to allowlist when ProceedAlways is confirmed', async () => { + it('should return false when memory file is already allowlisted for project scope', async () => { + const params = { fact: 'Test fact', scope: 'project' as const }; + const memoryFilePath = path.join( + process.cwd(), + getCurrentGeminiMdFilename(), + ); + + const invocation = memoryTool.build(params); + // Add the memory file to the allowlist with the scope-specific key format + // eslint-disable-next-line @typescript-eslint/no-explicit-any + (invocation.constructor as any).allowlist.add( + `${memoryFilePath}_project`, + ); + + const result = await invocation.shouldConfirmExecute(mockAbortSignal); + + expect(result).toBe(false); + }); + + it('should add memory file to allowlist when ProceedAlways is confirmed for global scope', async () => { const params = { fact: 'Test fact', scope: 'global' as const }; const memoryFilePath = path.join( os.homedir(), @@ -330,10 +408,8 @@ describe('MemoryTool', () => { getCurrentGeminiMdFilename(), ); - const result = await memoryTool.shouldConfirmExecute( - params, - mockAbortSignal, - ); + const invocation = memoryTool.build(params); + const result = await 
invocation.shouldConfirmExecute(mockAbortSignal); expect(result).toBeDefined(); expect(result).not.toBe(false); @@ -342,27 +418,53 @@ describe('MemoryTool', () => { // Simulate the onConfirm callback await result.onConfirm(ToolConfirmationOutcome.ProceedAlways); - // Check that the memory file was added to the allowlist with the new key format + // Check that the memory file was added to the allowlist with the scope-specific key format expect( - (MemoryTool as unknown as { allowlist: Set }).allowlist.has( + // eslint-disable-next-line @typescript-eslint/no-explicit-any + (invocation.constructor as any).allowlist.has( `${memoryFilePath}_global`, ), ).toBe(true); } }); + it('should add memory file to allowlist when ProceedAlways is confirmed for project scope', async () => { + const params = { fact: 'Test fact', scope: 'project' as const }; + const memoryFilePath = path.join( + process.cwd(), + getCurrentGeminiMdFilename(), + ); + + const invocation = memoryTool.build(params); + const result = await invocation.shouldConfirmExecute(mockAbortSignal); + + expect(result).toBeDefined(); + expect(result).not.toBe(false); + + if (result && result.type === 'edit') { + // Simulate the onConfirm callback + await result.onConfirm(ToolConfirmationOutcome.ProceedAlways); + + // Check that the memory file was added to the allowlist with the scope-specific key format + expect( + // eslint-disable-next-line @typescript-eslint/no-explicit-any + (invocation.constructor as any).allowlist.has( + `${memoryFilePath}_project`, + ), + ).toBe(true); + } + }); + it('should not add memory file to allowlist when other outcomes are confirmed', async () => { - const params = { fact: 'Test fact' }; + const params = { fact: 'Test fact', scope: 'global' as const }; const memoryFilePath = path.join( os.homedir(), '.qwen', getCurrentGeminiMdFilename(), ); - const result = await memoryTool.shouldConfirmExecute( - params, - mockAbortSignal, - ); + const invocation = memoryTool.build(params); + const 
result = await invocation.shouldConfirmExecute(mockAbortSignal); expect(result).toBeDefined(); expect(result).not.toBe(false); @@ -370,22 +472,16 @@ describe('MemoryTool', () => { if (result && result.type === 'edit') { // Simulate the onConfirm callback with different outcomes await result.onConfirm(ToolConfirmationOutcome.ProceedOnce); - expect( - (MemoryTool as unknown as { allowlist: Set }).allowlist.has( - memoryFilePath, - ), - ).toBe(false); + // eslint-disable-next-line @typescript-eslint/no-explicit-any + const allowlist = (invocation.constructor as any).allowlist; + expect(allowlist.has(`${memoryFilePath}_global`)).toBe(false); await result.onConfirm(ToolConfirmationOutcome.Cancel); - expect( - (MemoryTool as unknown as { allowlist: Set }).allowlist.has( - memoryFilePath, - ), - ).toBe(false); + expect(allowlist.has(`${memoryFilePath}_global`)).toBe(false); } }); - it('should handle existing memory file with content', async () => { + it('should handle existing memory file with content for global scope', async () => { const params = { fact: 'New fact', scope: 'global' as const }; const existingContent = 'Some existing content.\n\n## Qwen Added Memories\n- Old fact\n'; @@ -393,10 +489,8 @@ describe('MemoryTool', () => { // Mock fs.readFile to return existing content vi.mocked(fs.readFile).mockResolvedValue(existingContent); - const result = await memoryTool.shouldConfirmExecute( - params, - mockAbortSignal, - ); + const invocation = memoryTool.build(params); + const result = await invocation.shouldConfirmExecute(mockAbortSignal); expect(result).toBeDefined(); expect(result).not.toBe(false); @@ -416,10 +510,8 @@ describe('MemoryTool', () => { it('should prompt for scope selection when scope is not specified', async () => { const params = { fact: 'Test fact' }; - const result = await memoryTool.shouldConfirmExecute( - params, - mockAbortSignal, - ); + const invocation = memoryTool.build(params); + const result = await 
invocation.shouldConfirmExecute(mockAbortSignal); expect(result).toBeDefined(); expect(result).not.toBe(false); @@ -435,15 +527,58 @@ describe('MemoryTool', () => { } }); - it('should return error when executing without scope parameter', async () => { + it('should show correct file paths in scope selection prompt', async () => { const params = { fact: 'Test fact' }; - const result = await memoryTool.execute(params, mockAbortSignal); + const invocation = memoryTool.build(params); + const result = await invocation.shouldConfirmExecute(mockAbortSignal); - expect(result.llmContent).toContain( - 'Please specify where to save this memory', - ); - expect(result.returnDisplay).toContain('Global:'); - expect(result.returnDisplay).toContain('Project:'); + expect(result).toBeDefined(); + expect(result).not.toBe(false); + + if (result && result.type === 'edit') { + const globalPath = path.join('~', '.qwen', 'QWEN.md'); + const projectPath = path.join(process.cwd(), 'QWEN.md'); + + expect(result.fileDiff).toContain(`Global: ${globalPath}`); + expect(result.fileDiff).toContain(`Project: ${projectPath}`); + expect(result.fileDiff).toContain('(shared across all projects)'); + expect(result.fileDiff).toContain('(current project only)'); + } + }); + }); + + describe('getDescription', () => { + let memoryTool: MemoryTool; + + beforeEach(() => { + memoryTool = new MemoryTool(); + }); + + it('should return correct description for global scope', () => { + const params = { fact: 'Test fact', scope: 'global' as const }; + const invocation = memoryTool.build(params); + const description = invocation.getDescription(); + + const expectedPath = path.join('~', '.qwen', 'QWEN.md'); + expect(description).toBe(`${expectedPath} (global)`); + }); + + it('should return correct description for project scope', () => { + const params = { fact: 'Test fact', scope: 'project' as const }; + const invocation = memoryTool.build(params); + const description = invocation.getDescription(); + + const 
expectedPath = path.join(process.cwd(), 'QWEN.md'); + expect(description).toBe(`${expectedPath} (project)`); + }); + + it('should default to global scope when scope is not specified', () => { + const params = { fact: 'Test fact' }; + const invocation = memoryTool.build(params); + const description = invocation.getDescription(); + + const expectedPath = path.join('~', '.qwen', 'QWEN.md'); + expect(description).toBe(`${expectedPath} (global)`); }); }); }); diff --git a/packages/core/src/tools/memoryTool.ts b/packages/core/src/tools/memoryTool.ts index 4b8fe065..492d2330 100644 --- a/packages/core/src/tools/memoryTool.ts +++ b/packages/core/src/tools/memoryTool.ts @@ -5,11 +5,12 @@ */ import { - BaseTool, + BaseDeclarativeTool, + BaseToolInvocation, + Kind, ToolResult, ToolEditConfirmationDetails, ToolConfirmationOutcome, - Icon, } from './tools.js'; import { FunctionDeclaration } from '@google/genai'; import * as fs from 'fs/promises'; @@ -19,6 +20,7 @@ import * as Diff from 'diff'; import { DEFAULT_DIFF_OPTIONS } from './diffOptions.js'; import { tildeifyPath } from '../utils/paths.js'; import { ModifiableDeclarativeTool, ModifyContext } from './modifiable-tool.js'; +import { SchemaValidator } from '../utils/schemaValidator.js'; const memoryToolSchemaData: FunctionDeclaration = { name: 'save_memory', @@ -131,94 +133,82 @@ function ensureNewlineSeparation(currentContent: string): string { return '\n\n'; } -export class MemoryTool - extends BaseTool - implements ModifiableDeclarativeTool -{ - private static readonly allowlist: Set = new Set(); +/** + * Reads the current content of the memory file + */ +async function readMemoryFileContent( + scope: 'global' | 'project' = 'global', +): Promise { + try { + return await fs.readFile(getMemoryFilePath(scope), 'utf-8'); + } catch (err) { + const error = err as Error & { code?: string }; + if (!(error instanceof Error) || error.code !== 'ENOENT') throw err; + return ''; + } +} - static readonly Name: string = 
memoryToolSchemaData.name!; - constructor() { - super( - MemoryTool.Name, - 'Save Memory', - memoryToolDescription, - Icon.LightBulb, - memoryToolSchemaData.parametersJsonSchema as Record, +/** + * Computes the new content that would result from adding a memory entry + */ +function computeNewContent(currentContent: string, fact: string): string { + let processedText = fact.trim(); + processedText = processedText.replace(/^(-+\s*)+/, '').trim(); + const newMemoryItem = `- ${processedText}`; + + const headerIndex = currentContent.indexOf(MEMORY_SECTION_HEADER); + + if (headerIndex === -1) { + // Header not found, append header and then the entry + const separator = ensureNewlineSeparation(currentContent); + return ( + currentContent + + `${separator}${MEMORY_SECTION_HEADER}\n${newMemoryItem}\n` + ); + } else { + // Header found, find where to insert the new memory entry + const startOfSectionContent = headerIndex + MEMORY_SECTION_HEADER.length; + let endOfSectionIndex = currentContent.indexOf( + '\n## ', + startOfSectionContent, + ); + if (endOfSectionIndex === -1) { + endOfSectionIndex = currentContent.length; // End of file + } + + const beforeSectionMarker = currentContent + .substring(0, startOfSectionContent) + .trimEnd(); + let sectionContent = currentContent + .substring(startOfSectionContent, endOfSectionIndex) + .trimEnd(); + const afterSectionMarker = currentContent.substring(endOfSectionIndex); + + sectionContent += `\n${newMemoryItem}`; + return ( + `${beforeSectionMarker}\n${sectionContent.trimStart()}\n${afterSectionMarker}`.trimEnd() + + '\n' ); } +} - getDescription(params: SaveMemoryParams): string { - const scope = params.scope || 'global'; +class MemoryToolInvocation extends BaseToolInvocation< + SaveMemoryParams, + ToolResult +> { + private static readonly allowlist: Set = new Set(); + + getDescription(): string { + const scope = this.params.scope || 'global'; const memoryFilePath = getMemoryFilePath(scope); return `in 
${tildeifyPath(memoryFilePath)} (${scope})`; } - /** - * Reads the current content of the memory file - */ - private async readMemoryFileContent( - scope: 'global' | 'project' = 'global', - ): Promise { - try { - return await fs.readFile(getMemoryFilePath(scope), 'utf-8'); - } catch (err) { - const error = err as Error & { code?: string }; - if (!(error instanceof Error) || error.code !== 'ENOENT') throw err; - return ''; - } - } - - /** - * Computes the new content that would result from adding a memory entry - */ - private computeNewContent(currentContent: string, fact: string): string { - let processedText = fact.trim(); - processedText = processedText.replace(/^(-+\s*)+/, '').trim(); - const newMemoryItem = `- ${processedText}`; - - const headerIndex = currentContent.indexOf(MEMORY_SECTION_HEADER); - - if (headerIndex === -1) { - // Header not found, append header and then the entry - const separator = ensureNewlineSeparation(currentContent); - return ( - currentContent + - `${separator}${MEMORY_SECTION_HEADER}\n${newMemoryItem}\n` - ); - } else { - // Header found, find where to insert the new memory entry - const startOfSectionContent = headerIndex + MEMORY_SECTION_HEADER.length; - let endOfSectionIndex = currentContent.indexOf( - '\n## ', - startOfSectionContent, - ); - if (endOfSectionIndex === -1) { - endOfSectionIndex = currentContent.length; // End of file - } - - const beforeSectionMarker = currentContent - .substring(0, startOfSectionContent) - .trimEnd(); - let sectionContent = currentContent - .substring(startOfSectionContent, endOfSectionIndex) - .trimEnd(); - const afterSectionMarker = currentContent.substring(endOfSectionIndex); - - sectionContent += `\n${newMemoryItem}`; - return ( - `${beforeSectionMarker}\n${sectionContent.trimStart()}\n${afterSectionMarker}`.trimEnd() + - '\n' - ); - } - } - - async shouldConfirmExecute( - params: SaveMemoryParams, + override async shouldConfirmExecute( _abortSignal: AbortSignal, ): Promise { // If scope is 
not specified, prompt the user to choose - if (!params.scope) { + if (!this.params.scope) { const globalPath = tildeifyPath(getMemoryFilePath('global')); const projectPath = tildeifyPath(getMemoryFilePath('project')); @@ -227,9 +217,9 @@ export class MemoryTool title: `Choose Memory Storage Location`, fileName: 'Memory Storage Options', filePath: '', - fileDiff: `Choose where to save this memory:\n\n"${params.fact}"\n\nOptions:\n- Global: ${globalPath} (shared across all projects)\n- Project: ${projectPath} (current project only)\n\nPlease specify the scope parameter: "global" or "project"`, + fileDiff: `Choose where to save this memory:\n\n"${this.params.fact}"\n\nOptions:\n- Global: ${globalPath} (shared across all projects)\n- Project: ${projectPath} (current project only)\n\nPlease specify the scope parameter: "global" or "project"`, originalContent: '', - newContent: `Memory to save: ${params.fact}\n\nScope options:\n- global: ${globalPath}\n- project: ${projectPath}`, + newContent: `Memory to save: ${this.params.fact}\n\nScope options:\n- global: ${globalPath}\n- project: ${projectPath}`, onConfirm: async (_outcome: ToolConfirmationOutcome) => { // This will be handled by the execution flow }, @@ -237,19 +227,19 @@ export class MemoryTool return confirmationDetails; } - const scope = params.scope; + const scope = this.params.scope; const memoryFilePath = getMemoryFilePath(scope); const allowlistKey = `${memoryFilePath}_${scope}`; - if (MemoryTool.allowlist.has(allowlistKey)) { + if (MemoryToolInvocation.allowlist.has(allowlistKey)) { return false; } // Read current content of the memory file - const currentContent = await this.readMemoryFileContent(scope); + const currentContent = await readMemoryFileContent(scope); // Calculate the new content that will be written to the memory file - const newContent = this.computeNewContent(currentContent, params.fact); + const newContent = computeNewContent(currentContent, this.params.fact); const fileName = 
path.basename(memoryFilePath); const fileDiff = Diff.createPatch( @@ -271,13 +261,120 @@ export class MemoryTool newContent, onConfirm: async (outcome: ToolConfirmationOutcome) => { if (outcome === ToolConfirmationOutcome.ProceedAlways) { - MemoryTool.allowlist.add(allowlistKey); + MemoryToolInvocation.allowlist.add(allowlistKey); } }, }; return confirmationDetails; } + async execute(_signal: AbortSignal): Promise { + const { fact, modified_by_user, modified_content } = this.params; + + if (!fact || typeof fact !== 'string' || fact.trim() === '') { + const errorMessage = 'Parameter "fact" must be a non-empty string.'; + return { + llmContent: JSON.stringify({ success: false, error: errorMessage }), + returnDisplay: `Error: ${errorMessage}`, + }; + } + + // If scope is not specified, prompt the user to choose + if (!this.params.scope) { + const errorMessage = + 'Please specify where to save this memory. Use scope parameter: "global" for user-level (~/.qwen/QWEN.md) or "project" for current project (./QWEN.md).'; + return { + llmContent: JSON.stringify({ success: false, error: errorMessage }), + returnDisplay: `${errorMessage}\n\nGlobal: ${tildeifyPath(getMemoryFilePath('global'))}\nProject: ${tildeifyPath(getMemoryFilePath('project'))}`, + }; + } + + const scope = this.params.scope; + const memoryFilePath = getMemoryFilePath(scope); + + try { + if (modified_by_user && modified_content !== undefined) { + // User modified the content in external editor, write it directly + await fs.mkdir(path.dirname(memoryFilePath), { + recursive: true, + }); + await fs.writeFile(memoryFilePath, modified_content, 'utf-8'); + const successMessage = `Okay, I've updated the ${scope} memory file with your modifications.`; + return { + llmContent: JSON.stringify({ + success: true, + message: successMessage, + }), + returnDisplay: successMessage, + }; + } else { + // Use the normal memory entry logic + await MemoryTool.performAddMemoryEntry(fact, memoryFilePath, { + readFile: fs.readFile, 
+ writeFile: fs.writeFile, + mkdir: fs.mkdir, + }); + const successMessage = `Okay, I've remembered that in ${scope} memory: "${fact}"`; + return { + llmContent: JSON.stringify({ + success: true, + message: successMessage, + }), + returnDisplay: successMessage, + }; + } + } catch (error) { + const errorMessage = + error instanceof Error ? error.message : String(error); + console.error( + `[MemoryTool] Error executing save_memory for fact "${fact}" in ${scope}: ${errorMessage}`, + ); + return { + llmContent: JSON.stringify({ + success: false, + error: `Failed to save memory. Detail: ${errorMessage}`, + }), + returnDisplay: `Error saving memory: ${errorMessage}`, + }; + } + } +} + +export class MemoryTool + extends BaseDeclarativeTool + implements ModifiableDeclarativeTool +{ + static readonly Name: string = memoryToolSchemaData.name!; + constructor() { + super( + MemoryTool.Name, + 'Save Memory', + memoryToolDescription, + Kind.Think, + memoryToolSchemaData.parametersJsonSchema as Record, + ); + } + + override validateToolParams(params: SaveMemoryParams): string | null { + const errors = SchemaValidator.validate( + this.schema.parametersJsonSchema, + params, + ); + if (errors) { + return errors; + } + + if (params.fact.trim() === '') { + return 'Parameter "fact" must be a non-empty string.'; + } + + return null; + } + + protected createInvocation(params: SaveMemoryParams) { + return new MemoryToolInvocation(params); + } + static async performAddMemoryEntry( text: string, memoryFilePath: string, @@ -348,90 +445,16 @@ export class MemoryTool } } - async execute( - params: SaveMemoryParams, - _signal: AbortSignal, - ): Promise { - const { fact, modified_by_user, modified_content } = params; - - if (!fact || typeof fact !== 'string' || fact.trim() === '') { - const errorMessage = 'Parameter "fact" must be a non-empty string.'; - return { - llmContent: JSON.stringify({ success: false, error: errorMessage }), - returnDisplay: `Error: ${errorMessage}`, - }; - } - - // If 
scope is not specified, prompt the user to choose - if (!params.scope) { - const errorMessage = - 'Please specify where to save this memory. Use scope parameter: "global" for user-level (~/.qwen/QWEN.md) or "project" for current project (./QWEN.md).'; - return { - llmContent: JSON.stringify({ success: false, error: errorMessage }), - returnDisplay: `${errorMessage}\n\nGlobal: ${tildeifyPath(getMemoryFilePath('global'))}\nProject: ${tildeifyPath(getMemoryFilePath('project'))}`, - }; - } - - const scope = params.scope; - const memoryFilePath = getMemoryFilePath(scope); - - try { - if (modified_by_user && modified_content !== undefined) { - // User modified the content in external editor, write it directly - await fs.mkdir(path.dirname(memoryFilePath), { - recursive: true, - }); - await fs.writeFile(memoryFilePath, modified_content, 'utf-8'); - const successMessage = `Okay, I've updated the ${scope} memory file with your modifications.`; - return { - llmContent: JSON.stringify({ - success: true, - message: successMessage, - }), - returnDisplay: successMessage, - }; - } else { - // Use the normal memory entry logic - await MemoryTool.performAddMemoryEntry(fact, memoryFilePath, { - readFile: fs.readFile, - writeFile: fs.writeFile, - mkdir: fs.mkdir, - }); - const successMessage = `Okay, I've remembered that in ${scope} memory: "${fact}"`; - return { - llmContent: JSON.stringify({ - success: true, - message: successMessage, - }), - returnDisplay: successMessage, - }; - } - } catch (error) { - const errorMessage = - error instanceof Error ? error.message : String(error); - console.error( - `[MemoryTool] Error executing save_memory for fact "${fact}" in ${scope}: ${errorMessage}`, - ); - return { - llmContent: JSON.stringify({ - success: false, - error: `Failed to save memory. 
Detail: ${errorMessage}`, - }), - returnDisplay: `Error saving memory: ${errorMessage}`, - }; - } - } - getModifyContext(_abortSignal: AbortSignal): ModifyContext { return { getFilePath: (params: SaveMemoryParams) => getMemoryFilePath(params.scope || 'global'), getCurrentContent: async (params: SaveMemoryParams): Promise => - this.readMemoryFileContent(params.scope || 'global'), + readMemoryFileContent(params.scope || 'global'), getProposedContent: async (params: SaveMemoryParams): Promise => { const scope = params.scope || 'global'; - const currentContent = await this.readMemoryFileContent(scope); - return this.computeNewContent(currentContent, params.fact); + const currentContent = await readMemoryFileContent(scope); + return computeNewContent(currentContent, params.fact); }, createUpdatedParams: ( _oldContent: string, diff --git a/packages/core/src/tools/read-file.ts b/packages/core/src/tools/read-file.ts index 0c040b66..f02db506 100644 --- a/packages/core/src/tools/read-file.ts +++ b/packages/core/src/tools/read-file.ts @@ -10,7 +10,7 @@ import { makeRelative, shortenPath } from '../utils/paths.js'; import { BaseDeclarativeTool, BaseToolInvocation, - Icon, + Kind, ToolInvocation, ToolLocation, ToolResult, @@ -173,7 +173,7 @@ export class ReadFileTool extends BaseDeclarativeTool< ReadFileTool.Name, 'ReadFile', `Reads and returns the content of a specified file. If the file is large, the content will be truncated. The tool's response will clearly indicate if truncation has occurred and will provide details on how to read more of the file using the 'offset' and 'limit' parameters. Handles text, images (PNG, JPG, GIF, WEBP, SVG, BMP), and PDF files. 
For text files, it can read specific line ranges.`, - Icon.FileSearch, + Kind.Read, { properties: { absolute_path: { @@ -198,7 +198,9 @@ export class ReadFileTool extends BaseDeclarativeTool< ); } - protected validateToolParams(params: ReadFileToolParams): string | null { + protected override validateToolParams( + params: ReadFileToolParams, + ): string | null { const errors = SchemaValidator.validate( this.schema.parametersJsonSchema, params, diff --git a/packages/core/src/tools/read-many-files.test.ts b/packages/core/src/tools/read-many-files.test.ts index c6b34665..af5012cd 100644 --- a/packages/core/src/tools/read-many-files.test.ts +++ b/packages/core/src/tools/read-many-files.test.ts @@ -121,66 +121,71 @@ describe('ReadManyFilesTool', () => { } }); - describe('validateParams', () => { - it('should return null for valid relative paths within root', () => { + describe('build', () => { + it('should return an invocation for valid relative paths within root', () => { const params = { paths: ['file1.txt', 'subdir/file2.txt'] }; - expect(tool.validateParams(params)).toBeNull(); + const invocation = tool.build(params); + expect(invocation).toBeDefined(); }); - it('should return null for valid glob patterns within root', () => { + it('should return an invocation for valid glob patterns within root', () => { const params = { paths: ['*.txt', 'subdir/**/*.js'] }; - expect(tool.validateParams(params)).toBeNull(); + const invocation = tool.build(params); + expect(invocation).toBeDefined(); }); - it('should return null for paths trying to escape the root (e.g., ../) as execute handles this', () => { + it('should return an invocation for paths trying to escape the root (e.g., ../) as execute handles this', () => { const params = { paths: ['../outside.txt'] }; - expect(tool.validateParams(params)).toBeNull(); + const invocation = tool.build(params); + expect(invocation).toBeDefined(); }); - it('should return null for absolute paths as execute handles this', () => { + 
it('should return an invocation for absolute paths as execute handles this', () => { const params = { paths: [path.join(tempDirOutsideRoot, 'absolute.txt')] }; - expect(tool.validateParams(params)).toBeNull(); + const invocation = tool.build(params); + expect(invocation).toBeDefined(); }); - it('should return error if paths array is empty', () => { + it('should throw error if paths array is empty', () => { const params = { paths: [] }; - expect(tool.validateParams(params)).toBe( + expect(() => tool.build(params)).toThrow( 'params/paths must NOT have fewer than 1 items', ); }); - it('should return null for valid exclude and include patterns', () => { + it('should return an invocation for valid exclude and include patterns', () => { const params = { paths: ['src/**/*.ts'], exclude: ['**/*.test.ts'], include: ['src/utils/*.ts'], }; - expect(tool.validateParams(params)).toBeNull(); + const invocation = tool.build(params); + expect(invocation).toBeDefined(); }); - it('should return error if paths array contains an empty string', () => { + it('should throw error if paths array contains an empty string', () => { const params = { paths: ['file1.txt', ''] }; - expect(tool.validateParams(params)).toBe( + expect(() => tool.build(params)).toThrow( 'params/paths/1 must NOT have fewer than 1 characters', ); }); - it('should return error if include array contains non-string elements', () => { + it('should throw error if include array contains non-string elements', () => { const params = { paths: ['file1.txt'], include: ['*.ts', 123] as string[], }; - expect(tool.validateParams(params)).toBe( + expect(() => tool.build(params)).toThrow( 'params/include/1 must be string', ); }); - it('should return error if exclude array contains non-string elements', () => { + it('should throw error if exclude array contains non-string elements', () => { const params = { paths: ['file1.txt'], exclude: ['*.log', {}] as string[], }; - expect(tool.validateParams(params)).toBe( + expect(() => 
tool.build(params)).toThrow( 'params/exclude/1 must be string', ); }); @@ -201,7 +206,8 @@ describe('ReadManyFilesTool', () => { it('should read a single specified file', async () => { createFile('file1.txt', 'Content of file1'); const params = { paths: ['file1.txt'] }; - const result = await tool.execute(params, new AbortController().signal); + const invocation = tool.build(params); + const result = await invocation.execute(new AbortController().signal); const expectedPath = path.join(tempRootDir, 'file1.txt'); expect(result.llmContent).toEqual([ `--- ${expectedPath} ---\n\nContent of file1\n\n`, @@ -215,7 +221,8 @@ describe('ReadManyFilesTool', () => { createFile('file1.txt', 'Content1'); createFile('subdir/file2.js', 'Content2'); const params = { paths: ['file1.txt', 'subdir/file2.js'] }; - const result = await tool.execute(params, new AbortController().signal); + const invocation = tool.build(params); + const result = await invocation.execute(new AbortController().signal); const content = result.llmContent as string[]; const expectedPath1 = path.join(tempRootDir, 'file1.txt'); const expectedPath2 = path.join(tempRootDir, 'subdir/file2.js'); @@ -239,7 +246,8 @@ describe('ReadManyFilesTool', () => { createFile('another.txt', 'Another text'); createFile('sub/data.json', '{}'); const params = { paths: ['*.txt'] }; - const result = await tool.execute(params, new AbortController().signal); + const invocation = tool.build(params); + const result = await invocation.execute(new AbortController().signal); const content = result.llmContent as string[]; const expectedPath1 = path.join(tempRootDir, 'file.txt'); const expectedPath2 = path.join(tempRootDir, 'another.txt'); @@ -263,7 +271,8 @@ describe('ReadManyFilesTool', () => { createFile('src/main.ts', 'Main content'); createFile('src/main.test.ts', 'Test content'); const params = { paths: ['src/**/*.ts'], exclude: ['**/*.test.ts'] }; - const result = await tool.execute(params, new AbortController().signal); + const 
invocation = tool.build(params); + const result = await invocation.execute(new AbortController().signal); const content = result.llmContent as string[]; const expectedPath = path.join(tempRootDir, 'src/main.ts'); expect(content).toEqual([`--- ${expectedPath} ---\n\nMain content\n\n`]); @@ -277,7 +286,8 @@ describe('ReadManyFilesTool', () => { it('should handle nonexistent specific files gracefully', async () => { const params = { paths: ['nonexistent-file.txt'] }; - const result = await tool.execute(params, new AbortController().signal); + const invocation = tool.build(params); + const result = await invocation.execute(new AbortController().signal); expect(result.llmContent).toEqual([ 'No files matching the criteria were found or all were skipped.', ]); @@ -290,7 +300,8 @@ describe('ReadManyFilesTool', () => { createFile('node_modules/some-lib/index.js', 'lib code'); createFile('src/app.js', 'app code'); const params = { paths: ['**/*.js'] }; - const result = await tool.execute(params, new AbortController().signal); + const invocation = tool.build(params); + const result = await invocation.execute(new AbortController().signal); const content = result.llmContent as string[]; const expectedPath = path.join(tempRootDir, 'src/app.js'); expect(content).toEqual([`--- ${expectedPath} ---\n\napp code\n\n`]); @@ -306,7 +317,8 @@ describe('ReadManyFilesTool', () => { createFile('node_modules/some-lib/index.js', 'lib code'); createFile('src/app.js', 'app code'); const params = { paths: ['**/*.js'], useDefaultExcludes: false }; - const result = await tool.execute(params, new AbortController().signal); + const invocation = tool.build(params); + const result = await invocation.execute(new AbortController().signal); const content = result.llmContent as string[]; const expectedPath1 = path.join( tempRootDir, @@ -334,7 +346,8 @@ describe('ReadManyFilesTool', () => { Buffer.from([0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a, 0x0a]), ); const params = { paths: ['*.png'] }; // Explicitly 
requesting .png - const result = await tool.execute(params, new AbortController().signal); + const invocation = tool.build(params); + const result = await invocation.execute(new AbortController().signal); expect(result.llmContent).toEqual([ { inlineData: { @@ -356,7 +369,8 @@ describe('ReadManyFilesTool', () => { Buffer.from([0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a, 0x0a]), ); const params = { paths: ['myExactImage.png'] }; // Explicitly requesting by full name - const result = await tool.execute(params, new AbortController().signal); + const invocation = tool.build(params); + const result = await invocation.execute(new AbortController().signal); expect(result.llmContent).toEqual([ { inlineData: { @@ -373,7 +387,8 @@ describe('ReadManyFilesTool', () => { createBinaryFile('document.pdf', Buffer.from('%PDF-1.4...')); createFile('notes.txt', 'text notes'); const params = { paths: ['*'] }; // Generic glob, not specific to .pdf - const result = await tool.execute(params, new AbortController().signal); + const invocation = tool.build(params); + const result = await invocation.execute(new AbortController().signal); const content = result.llmContent as string[]; const expectedPath = path.join(tempRootDir, 'notes.txt'); expect( @@ -392,7 +407,8 @@ describe('ReadManyFilesTool', () => { it('should include PDF files as inlineData parts if explicitly requested by extension', async () => { createBinaryFile('important.pdf', Buffer.from('%PDF-1.4...')); const params = { paths: ['*.pdf'] }; // Explicitly requesting .pdf files - const result = await tool.execute(params, new AbortController().signal); + const invocation = tool.build(params); + const result = await invocation.execute(new AbortController().signal); expect(result.llmContent).toEqual([ { inlineData: { @@ -406,7 +422,8 @@ describe('ReadManyFilesTool', () => { it('should include PDF files as inlineData parts if explicitly requested by name', async () => { createBinaryFile('report-final.pdf', Buffer.from('%PDF-1.4...')); 
const params = { paths: ['report-final.pdf'] }; - const result = await tool.execute(params, new AbortController().signal); + const invocation = tool.build(params); + const result = await invocation.execute(new AbortController().signal); expect(result.llmContent).toEqual([ { inlineData: { @@ -422,7 +439,8 @@ describe('ReadManyFilesTool', () => { createFile('bar.ts', ''); createFile('foo.quux', ''); const params = { paths: ['foo.bar', 'bar.ts', 'foo.quux'] }; - const result = await tool.execute(params, new AbortController().signal); + const invocation = tool.build(params); + const result = await invocation.execute(new AbortController().signal); expect(result.returnDisplay).not.toContain('foo.bar'); expect(result.returnDisplay).not.toContain('foo.quux'); expect(result.returnDisplay).toContain('bar.ts'); @@ -451,7 +469,8 @@ describe('ReadManyFilesTool', () => { fs.writeFileSync(path.join(tempDir2, 'file2.txt'), 'Content2'); const params = { paths: ['*.txt'] }; - const result = await tool.execute(params, new AbortController().signal); + const invocation = tool.build(params); + const result = await invocation.execute(new AbortController().signal); const content = result.llmContent as string[]; if (!Array.isArray(content)) { throw new Error(`llmContent is not an array: ${content}`); @@ -486,7 +505,8 @@ describe('ReadManyFilesTool', () => { createFile('large-file.txt', longContent); const params = { paths: ['*.txt'] }; - const result = await tool.execute(params, new AbortController().signal); + const invocation = tool.build(params); + const result = await invocation.execute(new AbortController().signal); const content = result.llmContent as string[]; const normalFileContent = content.find((c) => c.includes('file1.txt')); @@ -541,7 +561,8 @@ describe('ReadManyFilesTool', () => { }); const params = { paths: files }; - const result = await tool.execute(params, new AbortController().signal); + const invocation = tool.build(params); + const result = await invocation.execute(new 
AbortController().signal); // Verify all files were processed const content = result.llmContent as string[]; @@ -569,7 +590,8 @@ describe('ReadManyFilesTool', () => { ], }; - const result = await tool.execute(params, new AbortController().signal); + const invocation = tool.build(params); + const result = await invocation.execute(new AbortController().signal); const content = result.llmContent as string[]; // Should successfully process valid files despite one failure @@ -606,7 +628,8 @@ describe('ReadManyFilesTool', () => { return 'text'; }); - await tool.execute({ paths: files }, new AbortController().signal); + const invocation = tool.build({ paths: files }); + await invocation.execute(new AbortController().signal); console.log('Execution order:', executionOrder); diff --git a/packages/core/src/tools/read-many-files.ts b/packages/core/src/tools/read-many-files.ts index 1c92b4f3..aaf524c4 100644 --- a/packages/core/src/tools/read-many-files.ts +++ b/packages/core/src/tools/read-many-files.ts @@ -4,7 +4,13 @@ * SPDX-License-Identifier: Apache-2.0 */ -import { BaseTool, Icon, ToolResult } from './tools.js'; +import { + BaseDeclarativeTool, + BaseToolInvocation, + Kind, + ToolInvocation, + ToolResult, +} from './tools.js'; import { SchemaValidator } from '../utils/schemaValidator.js'; import { getErrorMessage } from '../utils/errors.js'; import * as path from 'path'; @@ -138,120 +144,28 @@ const DEFAULT_EXCLUDES: string[] = [ const DEFAULT_OUTPUT_SEPARATOR_FORMAT = '--- {filePath} ---'; -/** - * Tool implementation for finding and reading multiple text files from the local filesystem - * within a specified target directory. The content is concatenated. - * It is intended to run in an environment with access to the local file system (e.g., a Node.js backend). 
- */ -export class ReadManyFilesTool extends BaseTool< +class ReadManyFilesToolInvocation extends BaseToolInvocation< ReadManyFilesParams, ToolResult > { - static readonly Name: string = 'read_many_files'; - - constructor(private config: Config) { - const parameterSchema = { - type: 'object', - properties: { - paths: { - type: 'array', - items: { - type: 'string', - minLength: 1, - }, - minItems: 1, - description: - "Required. An array of glob patterns or paths relative to the tool's target directory. Examples: ['src/**/*.ts'], ['README.md', 'docs/']", - }, - include: { - type: 'array', - items: { - type: 'string', - minLength: 1, - }, - description: - 'Optional. Additional glob patterns to include. These are merged with `paths`. Example: ["*.test.ts"] to specifically add test files if they were broadly excluded.', - default: [], - }, - exclude: { - type: 'array', - items: { - type: 'string', - minLength: 1, - }, - description: - 'Optional. Glob patterns for files/directories to exclude. Added to default excludes if useDefaultExcludes is true. Example: ["**/*.log", "temp/"]', - default: [], - }, - recursive: { - type: 'boolean', - description: - 'Optional. Whether to search recursively (primarily controlled by `**` in glob patterns). Defaults to true.', - default: true, - }, - useDefaultExcludes: { - type: 'boolean', - description: - 'Optional. Whether to apply a list of default exclusion patterns (e.g., node_modules, .git, binary files). Defaults to true.', - default: true, - }, - file_filtering_options: { - description: - 'Whether to respect ignore patterns from .gitignore or .geminiignore', - type: 'object', - properties: { - respect_git_ignore: { - description: - 'Optional: Whether to respect .gitignore patterns when listing files. Only available in git repositories. Defaults to true.', - type: 'boolean', - }, - respect_gemini_ignore: { - description: - 'Optional: Whether to respect .geminiignore patterns when listing files. 
Defaults to true.', - type: 'boolean', - }, - }, - }, - }, - required: ['paths'], - }; - - super( - ReadManyFilesTool.Name, - 'ReadManyFiles', - `Reads content from multiple files specified by paths or glob patterns within a configured target directory. For text files, it concatenates their content into a single string. It is primarily designed for text-based files. However, it can also process image (e.g., .png, .jpg) and PDF (.pdf) files if their file names or extensions are explicitly included in the 'paths' argument. For these explicitly requested non-text files, their data is read and included in a format suitable for model consumption (e.g., base64 encoded). - -This tool is useful when you need to understand or analyze a collection of files, such as: -- Getting an overview of a codebase or parts of it (e.g., all TypeScript files in the 'src' directory). -- Finding where specific functionality is implemented if the user asks broad questions about code. -- Reviewing documentation files (e.g., all Markdown files in the 'docs' directory). -- Gathering context from multiple configuration files. -- When the user asks to "read all files in X directory" or "show me the content of all Y files". - -Use this tool when the user's query implies needing the content of several files simultaneously for context, analysis, or summarization. For text files, it uses default UTF-8 encoding and a '--- {filePath} ---' separator between file contents. Ensure paths are relative to the target directory. Glob patterns like 'src/**/*.js' are supported. Avoid using for single files if a more specific single-file reading tool is available, unless the user specifically requests to process a list containing just one file via this tool. Other binary files (not explicitly requested as image/PDF) are generally skipped. 
Default excludes apply to common non-text files (except for explicitly requested images/PDFs) and large dependency directories unless 'useDefaultExcludes' is false.`, - Icon.FileSearch, - parameterSchema, - ); + constructor( + private readonly config: Config, + params: ReadManyFilesParams, + ) { + super(params); } - validateParams(params: ReadManyFilesParams): string | null { - const errors = SchemaValidator.validate( - this.schema.parametersJsonSchema, - params, - ); - if (errors) { - return errors; - } - return null; - } - - getDescription(params: ReadManyFilesParams): string { - const allPatterns = [...params.paths, ...(params.include || [])]; - const pathDesc = `using patterns: \`${allPatterns.join('`, `')}\` (within target directory: \`${this.config.getTargetDir()}\`)`; + getDescription(): string { + const allPatterns = [...this.params.paths, ...(this.params.include || [])]; + const pathDesc = `using patterns: +${allPatterns.join('`, `')} + (within target directory: +${this.config.getTargetDir()} +) `; // Determine the final list of exclusion patterns exactly as in execute method - const paramExcludes = params.exclude || []; - const paramUseDefaultExcludes = params.useDefaultExcludes !== false; + const paramExcludes = this.params.exclude || []; + const paramUseDefaultExcludes = this.params.useDefaultExcludes !== false; const geminiIgnorePatterns = this.config .getFileService() .getGeminiIgnorePatterns(); @@ -260,7 +174,16 @@ Use this tool when the user's query implies needing the content of several files ? [...DEFAULT_EXCLUDES, ...paramExcludes, ...geminiIgnorePatterns] : [...paramExcludes, ...geminiIgnorePatterns]; - let excludeDesc = `Excluding: ${finalExclusionPatternsForDescription.length > 0 ? `patterns like \`${finalExclusionPatternsForDescription.slice(0, 2).join('`, `')}${finalExclusionPatternsForDescription.length > 2 ? '...`' : '`'}` : 'none specified'}`; + let excludeDesc = `Excluding: ${ + finalExclusionPatternsForDescription.length > 0 + ? 
`patterns like +${finalExclusionPatternsForDescription + .slice(0, 2) + .join( + '`, `', + )}${finalExclusionPatternsForDescription.length > 2 ? '...`' : '`'}` + : 'none specified' + }`; // Add a note if .geminiignore patterns contributed to the final list of exclusions if (geminiIgnorePatterns.length > 0) { @@ -272,37 +195,29 @@ Use this tool when the user's query implies needing the content of several files } } - return `Will attempt to read and concatenate files ${pathDesc}. ${excludeDesc}. File encoding: ${DEFAULT_ENCODING}. Separator: "${DEFAULT_OUTPUT_SEPARATOR_FORMAT.replace('{filePath}', 'path/to/file.ext')}".`; + return `Will attempt to read and concatenate files ${pathDesc}. ${excludeDesc}. File encoding: ${DEFAULT_ENCODING}. Separator: "${DEFAULT_OUTPUT_SEPARATOR_FORMAT.replace( + '{filePath}', + 'path/to/file.ext', + )}".`; } - async execute( - params: ReadManyFilesParams, - signal: AbortSignal, - ): Promise { - const validationError = this.validateParams(params); - if (validationError) { - return { - llmContent: `Error: Invalid parameters for ${this.displayName}. Reason: ${validationError}`, - returnDisplay: `## Parameter Error\n\n${validationError}`, - }; - } - + async execute(signal: AbortSignal): Promise { const { paths: inputPatterns, include = [], exclude = [], useDefaultExcludes = true, - } = params; + } = this.params; const defaultFileIgnores = this.config.getFileFilteringOptions() ?? DEFAULT_FILE_FILTERING_OPTIONS; const fileFilteringOptions = { respectGitIgnore: - params.file_filtering_options?.respect_git_ignore ?? + this.params.file_filtering_options?.respect_git_ignore ?? defaultFileIgnores.respectGitIgnore, // Use the property from the returned object respectGeminiIgnore: - params.file_filtering_options?.respect_gemini_ignore ?? + this.params.file_filtering_options?.respect_gemini_ignore ?? 
defaultFileIgnores.respectGeminiIgnore, // Use the property from the returned object }; // Get centralized file discovery service @@ -614,3 +529,119 @@ Use this tool when the user's query implies needing the content of several files }; } } + +/** + * Tool implementation for finding and reading multiple text files from the local filesystem + * within a specified target directory. The content is concatenated. + * It is intended to run in an environment with access to the local file system (e.g., a Node.js backend). + */ +export class ReadManyFilesTool extends BaseDeclarativeTool< + ReadManyFilesParams, + ToolResult +> { + static readonly Name: string = 'read_many_files'; + + constructor(private config: Config) { + const parameterSchema = { + type: 'object', + properties: { + paths: { + type: 'array', + items: { + type: 'string', + minLength: 1, + }, + minItems: 1, + description: + "Required. An array of glob patterns or paths relative to the tool's target directory. Examples: ['src/**/*.ts'], ['README.md', 'docs/']", + }, + include: { + type: 'array', + items: { + type: 'string', + minLength: 1, + }, + description: + 'Optional. Additional glob patterns to include. These are merged with `paths`. Example: "*.test.ts" to specifically add test files if they were broadly excluded.', + default: [], + }, + exclude: { + type: 'array', + items: { + type: 'string', + minLength: 1, + }, + description: + 'Optional. Glob patterns for files/directories to exclude. Added to default excludes if useDefaultExcludes is true. Example: "**/*.log", "temp/"', + default: [], + }, + recursive: { + type: 'boolean', + description: + 'Optional. Whether to search recursively (primarily controlled by `**` in glob patterns). Defaults to true.', + default: true, + }, + useDefaultExcludes: { + type: 'boolean', + description: + 'Optional. Whether to apply a list of default exclusion patterns (e.g., node_modules, .git, binary files). 
Defaults to true.', + default: true, + }, + file_filtering_options: { + description: + 'Whether to respect ignore patterns from .gitignore or .geminiignore', + type: 'object', + properties: { + respect_git_ignore: { + description: + 'Optional: Whether to respect .gitignore patterns when listing files. Only available in git repositories. Defaults to true.', + type: 'boolean', + }, + respect_gemini_ignore: { + description: + 'Optional: Whether to respect .geminiignore patterns when listing files. Defaults to true.', + type: 'boolean', + }, + }, + }, + }, + required: ['paths'], + }; + + super( + ReadManyFilesTool.Name, + 'ReadManyFiles', + `Reads content from multiple files specified by paths or glob patterns within a configured target directory. For text files, it concatenates their content into a single string. It is primarily designed for text-based files. However, it can also process image (e.g., .png, .jpg) and PDF (.pdf) files if their file names or extensions are explicitly included in the 'paths' argument. For these explicitly requested non-text files, their data is read and included in a format suitable for model consumption (e.g., base64 encoded). + +This tool is useful when you need to understand or analyze a collection of files, such as: +- Getting an overview of a codebase or parts of it (e.g., all TypeScript files in the 'src' directory). +- Finding where specific functionality is implemented if the user asks broad questions about code. +- Reviewing documentation files (e.g., all Markdown files in the 'docs' directory). +- Gathering context from multiple configuration files. +- When the user asks to "read all files in X directory" or "show me the content of all Y files". + +Use this tool when the user's query implies needing the content of several files simultaneously for context, analysis, or summarization. For text files, it uses default UTF-8 encoding and a '--- {filePath} ---' separator between file contents. 
Ensure paths are relative to the target directory. Glob patterns like 'src/**/*.js' are supported. Avoid using for single files if a more specific single-file reading tool is available, unless the user specifically requests to process a list containing just one file via this tool. Other binary files (not explicitly requested as image/PDF) are generally skipped. Default excludes apply to common non-text files (except for explicitly requested images/PDFs) and large dependency directories unless 'useDefaultExcludes' is false.`, + Kind.Read, + parameterSchema, + ); + } + + protected override validateToolParams( + params: ReadManyFilesParams, + ): string | null { + const errors = SchemaValidator.validate( + this.schema.parametersJsonSchema, + params, + ); + if (errors) { + return errors; + } + return null; + } + + protected createInvocation( + params: ReadManyFilesParams, + ): ToolInvocation { + return new ReadManyFilesToolInvocation(this.config, params); + } +} diff --git a/packages/core/src/tools/shell.test.ts b/packages/core/src/tools/shell.test.ts index de939494..34c6292a 100644 --- a/packages/core/src/tools/shell.test.ts +++ b/packages/core/src/tools/shell.test.ts @@ -25,7 +25,6 @@ vi.mock('../utils/summarizer.js'); import { isCommandAllowed } from '../utils/shell-utils.js'; import { ShellTool } from './shell.js'; -import { ToolErrorType } from './tool-error.js'; import { type Config } from '../config/config.js'; import { type ShellExecutionResult, @@ -98,22 +97,25 @@ describe('ShellTool', () => { }); }); - describe('validateToolParams', () => { - it('should return null for a valid command', () => { - expect(shellTool.validateToolParams({ command: 'ls -l' })).toBeNull(); + describe('build', () => { + it('should return an invocation for a valid command', () => { + const invocation = shellTool.build({ command: 'ls -l' }); + expect(invocation).toBeDefined(); }); - it('should return an error for an empty command', () => { - expect(shellTool.validateToolParams({ 
command: ' ' })).toBe( + it('should throw an error for an empty command', () => { + expect(() => shellTool.build({ command: ' ' })).toThrow( 'Command cannot be empty.', ); }); - it('should return an error for a non-existent directory', () => { + it('should throw an error for a non-existent directory', () => { vi.mocked(fs.existsSync).mockReturnValue(false); - expect( - shellTool.validateToolParams({ command: 'ls', directory: 'rel/path' }), - ).toBe("Directory 'rel/path' is not a registered workspace directory."); + expect(() => + shellTool.build({ command: 'ls', directory: 'rel/path' }), + ).toThrow( + "Directory 'rel/path' is not a registered workspace directory.", + ); }); }); @@ -139,10 +141,8 @@ describe('ShellTool', () => { }; it('should wrap command on linux and parse pgrep output', async () => { - const promise = shellTool.execute( - { command: 'my-command &' }, - mockAbortSignal, - ); + const invocation = shellTool.build({ command: 'my-command &' }); + const promise = invocation.execute(mockAbortSignal); resolveShellExecution({ pid: 54321 }); vi.mocked(fs.existsSync).mockReturnValue(true); @@ -164,8 +164,9 @@ describe('ShellTool', () => { it('should not wrap command on windows', async () => { vi.mocked(os.platform).mockReturnValue('win32'); - const promise = shellTool.execute({ command: 'dir' }, mockAbortSignal); - resolveExecutionPromise({ + const invocation = shellTool.build({ command: 'dir' }); + const promise = invocation.execute(mockAbortSignal); + resolveShellExecution({ rawOutput: Buffer.from(''), output: '', stdout: '', @@ -187,10 +188,8 @@ describe('ShellTool', () => { it('should format error messages correctly', async () => { const error = new Error('wrapped command failed'); - const promise = shellTool.execute( - { command: 'user-command' }, - mockAbortSignal, - ); + const invocation = shellTool.build({ command: 'user-command' }); + const promise = invocation.execute(mockAbortSignal); resolveShellExecution({ error, exitCode: 1, @@ -209,40 +208,19 
@@ describe('ShellTool', () => { expect(result.llmContent).not.toContain('pgrep'); }); - it('should return error with error property for invalid parameters', async () => { - const result = await shellTool.execute( - { command: '' }, // Empty command is invalid - mockAbortSignal, + it('should throw an error for invalid parameters', () => { + expect(() => shellTool.build({ command: '' })).toThrow( + 'Command cannot be empty.', ); - - expect(result.llmContent).toContain( - 'Could not execute command due to invalid parameters:', - ); - expect(result.returnDisplay).toBe('Command cannot be empty.'); - expect(result.error).toEqual({ - message: 'Command cannot be empty.', - type: ToolErrorType.INVALID_TOOL_PARAMS, - }); }); - it('should return error with error property for invalid directory', async () => { + it('should throw an error for invalid directory', () => { vi.mocked(fs.existsSync).mockReturnValue(false); - const result = await shellTool.execute( - { command: 'ls', directory: 'nonexistent' }, - mockAbortSignal, + expect(() => + shellTool.build({ command: 'ls', directory: 'nonexistent' }), + ).toThrow( + `Directory 'nonexistent' is not a registered workspace directory.`, ); - - expect(result.llmContent).toContain( - 'Could not execute command due to invalid parameters:', - ); - expect(result.returnDisplay).toBe( - "Directory 'nonexistent' is not a registered workspace directory.", - ); - expect(result.error).toEqual({ - message: - "Directory 'nonexistent' is not a registered workspace directory.", - type: ToolErrorType.INVALID_TOOL_PARAMS, - }); }); it('should summarize output when configured', async () => { @@ -253,7 +231,8 @@ describe('ShellTool', () => { 'summarized output', ); - const promise = shellTool.execute({ command: 'ls' }, mockAbortSignal); + const invocation = shellTool.build({ command: 'ls' }); + const promise = invocation.execute(mockAbortSignal); resolveExecutionPromise({ output: 'long output', rawOutput: Buffer.from('long output'), @@ -285,9 +264,8 
@@ describe('ShellTool', () => { }); vi.mocked(fs.existsSync).mockReturnValue(true); // Pretend the file exists - await expect( - shellTool.execute({ command: 'a-command' }, mockAbortSignal), - ).rejects.toThrow(error); + const invocation = shellTool.build({ command: 'a-command' }); + await expect(invocation.execute(mockAbortSignal)).rejects.toThrow(error); const tmpFile = path.join(os.tmpdir(), 'shell_pgrep_abcdef.tmp'); expect(vi.mocked(fs.unlinkSync)).toHaveBeenCalledWith(tmpFile); @@ -304,11 +282,8 @@ describe('ShellTool', () => { }); it('should throttle text output updates', async () => { - const promise = shellTool.execute( - { command: 'stream' }, - mockAbortSignal, - updateOutputMock, - ); + const invocation = shellTool.build({ command: 'stream' }); + const promise = invocation.execute(mockAbortSignal, updateOutputMock); // First chunk, should be throttled. mockShellOutputCallback({ @@ -347,11 +322,8 @@ describe('ShellTool', () => { }); it('should immediately show binary detection message and throttle progress', async () => { - const promise = shellTool.execute( - { command: 'cat img' }, - mockAbortSignal, - updateOutputMock, - ); + const invocation = shellTool.build({ command: 'cat img' }); + const promise = invocation.execute(mockAbortSignal, updateOutputMock); mockShellOutputCallback({ type: 'binary_detected' }); expect(updateOutputMock).toHaveBeenCalledOnce(); @@ -399,8 +371,8 @@ describe('ShellTool', () => { describe('shouldConfirmExecute', () => { it('should request confirmation for a new command and whitelist it on "Always"', async () => { const params = { command: 'npm install' }; - const confirmation = await shellTool.shouldConfirmExecute( - params, + const invocation = shellTool.build(params); + const confirmation = await invocation.shouldConfirmExecute( new AbortController().signal, ); @@ -413,19 +385,15 @@ describe('ShellTool', () => { ); // Should now be whitelisted - const secondConfirmation = await shellTool.shouldConfirmExecute( - { command: 
'npm test' }, + const secondInvocation = shellTool.build({ command: 'npm test' }); + const secondConfirmation = await secondInvocation.shouldConfirmExecute( new AbortController().signal, ); expect(secondConfirmation).toBe(false); }); - it('should skip confirmation if validation fails', async () => { - const confirmation = await shellTool.shouldConfirmExecute( - { command: '' }, - new AbortController().signal, - ); - expect(confirmation).toBe(false); + it('should throw an error if validation fails', () => { + expect(() => shellTool.build({ command: '' })).toThrow(); }); }); @@ -581,8 +549,8 @@ describe('validateToolParams', () => { }); }); -describe('validateToolParams', () => { - it('should return null for valid directory', () => { +describe('build', () => { + it('should return an invocation for valid directory', () => { const config = { getCoreTools: () => undefined, getExcludeTools: () => undefined, @@ -591,14 +559,14 @@ describe('validateToolParams', () => { createMockWorkspaceContext('/root', ['/users/test']), } as unknown as Config; const shellTool = new ShellTool(config); - const result = shellTool.validateToolParams({ + const invocation = shellTool.build({ command: 'ls', directory: 'test', }); - expect(result).toBeNull(); + expect(invocation).toBeDefined(); }); - it('should return error for directory outside workspace', () => { + it('should throw an error for directory outside workspace', () => { const config = { getCoreTools: () => undefined, getExcludeTools: () => undefined, @@ -607,10 +575,11 @@ describe('validateToolParams', () => { createMockWorkspaceContext('/root', ['/users/test']), } as unknown as Config; const shellTool = new ShellTool(config); - const result = shellTool.validateToolParams({ - command: 'ls', - directory: 'test2', - }); - expect(result).toContain('is not a registered workspace directory'); + expect(() => + shellTool.build({ + command: 'ls', + directory: 'test2', + }), + ).toThrow('is not a registered workspace directory'); }); }); 
diff --git a/packages/core/src/tools/shell.ts b/packages/core/src/tools/shell.ts index 31ef5d48..eef7ea3e 100644 --- a/packages/core/src/tools/shell.ts +++ b/packages/core/src/tools/shell.ts @@ -10,14 +10,15 @@ import os from 'os'; import crypto from 'crypto'; import { Config } from '../config/config.js'; import { - BaseTool, + BaseDeclarativeTool, + BaseToolInvocation, + ToolInvocation, ToolResult, ToolCallConfirmationDetails, ToolExecuteConfirmationDetails, ToolConfirmationOutcome, - Icon, + Kind, } from './tools.js'; -import { ToolErrorType } from './tool-error.js'; import { SchemaValidator } from '../utils/schemaValidator.js'; import { getErrorMessage } from '../utils/errors.js'; import { summarizeToolOutput } from '../utils/summarizer.js'; @@ -40,120 +41,36 @@ export interface ShellToolParams { directory?: string; } -export class ShellTool extends BaseTool { - static Name: string = 'run_shell_command'; - private allowlist: Set = new Set(); - - constructor(private readonly config: Config) { - super( - ShellTool.Name, - 'Shell', - `This tool executes a given shell command as \`bash -c \`. Command can start background processes using \`&\`. Command is executed as a subprocess that leads its own process group. Command process group can be terminated as \`kill -- -PGID\` or signaled as \`kill -s SIGNAL -- -PGID\`. - - The following information is returned: - - Command: Executed command. - Directory: Directory (relative to project root) where command was executed, or \`(root)\`. - Stdout: Output on stdout stream. Can be \`(empty)\` or partial on error and for any unwaited background processes. - Stderr: Output on stderr stream. Can be \`(empty)\` or partial on error and for any unwaited background processes. - Error: Error or \`(none)\` if no error was reported for the subprocess. - Exit Code: Exit code or \`(none)\` if terminated by signal. - Signal: Signal number or \`(none)\` if no signal was received. 
- Background PIDs: List of background processes started or \`(none)\`. - Process Group PGID: Process group started or \`(none)\``, - Icon.Terminal, - { - type: 'object', - properties: { - command: { - type: 'string', - description: 'Exact bash command to execute as `bash -c `', - }, - description: { - type: 'string', - description: - 'Brief description of the command for the user. Be specific and concise. Ideally a single sentence. Can be up to 3 sentences for clarity. No line breaks.', - }, - directory: { - type: 'string', - description: - '(OPTIONAL) Directory to run the command in, if not the project root directory. Must be relative to the project root directory and must already exist.', - }, - }, - required: ['command'], - }, - false, // output is not markdown - true, // output can be updated - ); +class ShellToolInvocation extends BaseToolInvocation< + ShellToolParams, + ToolResult +> { + constructor( + private readonly config: Config, + params: ShellToolParams, + private readonly allowlist: Set, + ) { + super(params); } - getDescription(params: ShellToolParams): string { - let description = `${params.command}`; + getDescription(): string { + let description = `${this.params.command}`; // append optional [in directory] // note description is needed even if validation fails due to absolute path - if (params.directory) { - description += ` [in ${params.directory}]`; + if (this.params.directory) { + description += ` [in ${this.params.directory}]`; } // append optional (description), replacing any line breaks with spaces - if (params.description) { - description += ` (${params.description.replace(/\n/g, ' ')})`; + if (this.params.description) { + description += ` (${this.params.description.replace(/\n/g, ' ')})`; } return description; } - validateToolParams(params: ShellToolParams): string | null { - const commandCheck = isCommandAllowed(params.command, this.config); - if (!commandCheck.allowed) { - if (!commandCheck.reason) { - console.error( - 'Unexpected: 
isCommandAllowed returned false without a reason', - ); - return `Command is not allowed: ${params.command}`; - } - return commandCheck.reason; - } - const errors = SchemaValidator.validate( - this.schema.parametersJsonSchema, - params, - ); - if (errors) { - return errors; - } - if (!params.command.trim()) { - return 'Command cannot be empty.'; - } - if (getCommandRoots(params.command).length === 0) { - return 'Could not identify command root to obtain permission from user.'; - } - if (params.directory) { - if (path.isAbsolute(params.directory)) { - return 'Directory cannot be absolute. Please refer to workspace directories by their name.'; - } - const workspaceDirs = this.config.getWorkspaceContext().getDirectories(); - const matchingDirs = workspaceDirs.filter( - (dir) => path.basename(dir) === params.directory, - ); - - if (matchingDirs.length === 0) { - return `Directory '${params.directory}' is not a registered workspace directory.`; - } - - if (matchingDirs.length > 1) { - return `Directory name '${params.directory}' is ambiguous as it matches multiple workspace directories.`; - } - } - return null; - } - - async shouldConfirmExecute( - params: ShellToolParams, + override async shouldConfirmExecute( _abortSignal: AbortSignal, ): Promise { - if (this.validateToolParams(params)) { - return false; // skip confirmation, execute call will fail immediately - } - - const command = stripShellWrapper(params.command); + const command = stripShellWrapper(this.params.command); const rootCommands = [...new Set(getCommandRoots(command))]; const commandsToConfirm = rootCommands.filter( (command) => !this.allowlist.has(command), @@ -166,7 +83,7 @@ export class ShellTool extends BaseTool { const confirmationDetails: ToolExecuteConfirmationDetails = { type: 'exec', title: 'Confirm Shell Command', - command: params.command, + command: this.params.command, rootCommand: commandsToConfirm.join(', '), onConfirm: async (outcome: ToolConfirmationOutcome) => { if (outcome === 
ToolConfirmationOutcome.ProceedAlways) { @@ -178,25 +95,10 @@ export class ShellTool extends BaseTool { } async execute( - params: ShellToolParams, signal: AbortSignal, updateOutput?: (output: string) => void, ): Promise { - const strippedCommand = stripShellWrapper(params.command); - const validationError = this.validateToolParams({ - ...params, - command: strippedCommand, - }); - if (validationError) { - return { - llmContent: `Could not execute command due to invalid parameters: ${validationError}`, - returnDisplay: validationError, - error: { - message: validationError, - type: ToolErrorType.INVALID_TOOL_PARAMS, - }, - }; - } + const strippedCommand = stripShellWrapper(this.params.command); if (signal.aborted) { return { @@ -227,7 +129,7 @@ export class ShellTool extends BaseTool { const cwd = path.resolve( this.config.getTargetDir(), - params.directory || '', + this.params.directory || '', ); let cumulativeStdout = ''; @@ -327,12 +229,12 @@ export class ShellTool extends BaseTool { // Create a formatted error string for display, replacing the wrapper command // with the user-facing command. const finalError = result.error - ? result.error.message.replace(commandToExecute, params.command) + ? result.error.message.replace(commandToExecute, this.params.command) : '(none)'; llmContent = [ - `Command: ${params.command}`, - `Directory: ${params.directory || '(root)'}`, + `Command: ${this.params.command}`, + `Directory: ${this.params.directory || '(root)'}`, `Stdout: ${result.stdout || '(empty)'}`, `Stderr: ${result.stderr || '(empty)'}`, `Error: ${finalError}`, // Use the cleaned error string. 
@@ -369,12 +271,12 @@ export class ShellTool extends BaseTool { } const summarizeConfig = this.config.getSummarizeToolOutputConfig(); - if (summarizeConfig && summarizeConfig[this.name]) { + if (summarizeConfig && summarizeConfig[ShellTool.Name]) { const summary = await summarizeToolOutput( llmContent, this.config.getGeminiClient(), signal, - summarizeConfig[this.name].tokenBudget, + summarizeConfig[ShellTool.Name].tokenBudget, ); return { llmContent: summary, @@ -429,3 +331,106 @@ Co-authored-by: ${gitCoAuthorSettings.name} <${gitCoAuthorSettings.email}>`; return command; } } + +export class ShellTool extends BaseDeclarativeTool< + ShellToolParams, + ToolResult +> { + static Name: string = 'run_shell_command'; + private allowlist: Set = new Set(); + + constructor(private readonly config: Config) { + super( + ShellTool.Name, + 'Shell', + `This tool executes a given shell command as \`bash -c \`. Command can start background processes using \`&\`. Command is executed as a subprocess that leads its own process group. Command process group can be terminated as \`kill -- -PGID\` or signaled as \`kill -s SIGNAL -- -PGID\`. + + The following information is returned: + + Command: Executed command. + Directory: Directory (relative to project root) where command was executed, or \`(root)\`. + Stdout: Output on stdout stream. Can be \`(empty)\` or partial on error and for any unwaited background processes. + Stderr: Output on stderr stream. Can be \`(empty)\` or partial on error and for any unwaited background processes. + Error: Error or \`(none)\` if no error was reported for the subprocess. + Exit Code: Exit code or \`(none)\` if terminated by signal. + Signal: Signal number or \`(none)\` if no signal was received. + Background PIDs: List of background processes started or \`(none)\`. 
+ Process Group PGID: Process group started or \`(none)\``, + Kind.Execute, + { + type: 'object', + properties: { + command: { + type: 'string', + description: 'Exact bash command to execute as `bash -c `', + }, + description: { + type: 'string', + description: + 'Brief description of the command for the user. Be specific and concise. Ideally a single sentence. Can be up to 3 sentences for clarity. No line breaks.', + }, + directory: { + type: 'string', + description: + '(OPTIONAL) Directory to run the command in, if not the project root directory. Must be relative to the project root directory and must already exist.', + }, + }, + required: ['command'], + }, + false, // output is not markdown + true, // output can be updated + ); + } + + protected override validateToolParams( + params: ShellToolParams, + ): string | null { + const commandCheck = isCommandAllowed(params.command, this.config); + if (!commandCheck.allowed) { + if (!commandCheck.reason) { + console.error( + 'Unexpected: isCommandAllowed returned false without a reason', + ); + return `Command is not allowed: ${params.command}`; + } + return commandCheck.reason; + } + const errors = SchemaValidator.validate( + this.schema.parametersJsonSchema, + params, + ); + if (errors) { + return errors; + } + if (!params.command.trim()) { + return 'Command cannot be empty.'; + } + if (getCommandRoots(params.command).length === 0) { + return 'Could not identify command root to obtain permission from user.'; + } + if (params.directory) { + if (path.isAbsolute(params.directory)) { + return 'Directory cannot be absolute. 
Please refer to workspace directories by their name.'; + } + const workspaceDirs = this.config.getWorkspaceContext().getDirectories(); + const matchingDirs = workspaceDirs.filter( + (dir) => path.basename(dir) === params.directory, + ); + + if (matchingDirs.length === 0) { + return `Directory '${params.directory}' is not a registered workspace directory.`; + } + + if (matchingDirs.length > 1) { + return `Directory name '${params.directory}' is ambiguous as it matches multiple workspace directories.`; + } + } + return null; + } + + protected createInvocation( + params: ShellToolParams, + ): ToolInvocation { + return new ShellToolInvocation(this.config, params, this.allowlist); + } +} diff --git a/packages/core/src/tools/tool-registry.ts b/packages/core/src/tools/tool-registry.ts index 17d324b3..416ee99e 100644 --- a/packages/core/src/tools/tool-registry.ts +++ b/packages/core/src/tools/tool-registry.ts @@ -5,7 +5,7 @@ */ import { FunctionDeclaration } from '@google/genai'; -import { AnyDeclarativeTool, Icon, ToolResult, BaseTool } from './tools.js'; +import { AnyDeclarativeTool, Kind, ToolResult, BaseTool } from './tools.js'; import { Config } from '../config/config.js'; import { spawn } from 'node:child_process'; import { StringDecoder } from 'node:string_decoder'; @@ -19,8 +19,8 @@ export class DiscoveredTool extends BaseTool { constructor( private readonly config: Config, name: string, - readonly description: string, - readonly parameterSchema: Record, + override readonly description: string, + override readonly parameterSchema: Record, ) { const discoveryCmd = config.getToolDiscoveryCommand()!; const callCommand = config.getToolCallCommand()!; @@ -44,7 +44,7 @@ Signal: Signal number or \`(none)\` if no signal was received. name, name, description, - Icon.Hammer, + Kind.Other, parameterSchema, false, // isOutputMarkdown false, // canUpdateOutput @@ -158,6 +158,18 @@ export class ToolRegistry { } } + /** + * Removes all tools from a specific MCP server. 
+ * @param serverName The name of the server to remove tools from. + */ + removeMcpToolsByServer(serverName: string): void { + for (const [name, tool] of this.tools.entries()) { + if (tool instanceof DiscoveredMCPTool && tool.serverName === serverName) { + this.tools.delete(name); + } + } + } + /** * Discovers tools from project (if available and configured). * Can be called multiple times to update discovered tools. diff --git a/packages/core/src/tools/tools.ts b/packages/core/src/tools/tools.ts index 4b13174c..00f2a842 100644 --- a/packages/core/src/tools/tools.ts +++ b/packages/core/src/tools/tools.ts @@ -145,9 +145,9 @@ export interface ToolBuilder< description: string; /** - * The icon to display when interacting via ACP. + * The kind of tool for categorization and permissions */ - icon: Icon; + kind: Kind; /** * Function declaration schema from @google/genai. @@ -185,7 +185,7 @@ export abstract class DeclarativeTool< readonly name: string, readonly displayName: string, readonly description: string, - readonly icon: Icon, + readonly kind: Kind, readonly parameterSchema: unknown, readonly isOutputMarkdown: boolean = true, readonly canUpdateOutput: boolean = false, @@ -284,19 +284,19 @@ export abstract class BaseTool< * @param parameterSchema JSON Schema defining the parameters */ constructor( - readonly name: string, - readonly displayName: string, - readonly description: string, - readonly icon: Icon, - readonly parameterSchema: unknown, - readonly isOutputMarkdown: boolean = true, - readonly canUpdateOutput: boolean = false, + override readonly name: string, + override readonly displayName: string, + override readonly description: string, + override readonly kind: Kind, + override readonly parameterSchema: unknown, + override readonly isOutputMarkdown: boolean = true, + override readonly canUpdateOutput: boolean = false, ) { super( name, displayName, description, - icon, + kind, parameterSchema, isOutputMarkdown, canUpdateOutput, @@ -320,7 +320,7 @@ export 
abstract class BaseTool< * @returns An error message string if invalid, null otherwise */ // eslint-disable-next-line @typescript-eslint/no-unused-vars - validateToolParams(params: TParams): string | null { + override validateToolParams(params: TParams): string | null { // Implementation would typically use a JSON Schema validator // This is a placeholder that should be implemented by derived classes return null; @@ -570,15 +570,16 @@ export enum ToolConfirmationOutcome { Cancel = 'cancel', } -export enum Icon { - FileSearch = 'fileSearch', - Folder = 'folder', - Globe = 'globe', - Hammer = 'hammer', - LightBulb = 'lightBulb', - Pencil = 'pencil', - Regex = 'regex', - Terminal = 'terminal', +export enum Kind { + Read = 'read', + Edit = 'edit', + Delete = 'delete', + Move = 'move', + Search = 'search', + Execute = 'execute', + Think = 'think', + Fetch = 'fetch', + Other = 'other', } export interface ToolLocation { diff --git a/packages/core/src/tools/web-fetch.test.ts b/packages/core/src/tools/web-fetch.test.ts index 1b82e726..8fe883a4 100644 --- a/packages/core/src/tools/web-fetch.test.ts +++ b/packages/core/src/tools/web-fetch.test.ts @@ -23,7 +23,10 @@ describe('WebFetchTool', () => { url: 'https://example.com', prompt: 'summarize this page', }; - const confirmationDetails = await tool.shouldConfirmExecute(params); + const invocation = tool.build(params); + const confirmationDetails = await invocation.shouldConfirmExecute( + new AbortController().signal, + ); expect(confirmationDetails).toEqual({ type: 'info', @@ -41,7 +44,10 @@ describe('WebFetchTool', () => { url: 'https://github.com/google/gemini-react/blob/main/README.md', prompt: 'summarize the README', }; - const confirmationDetails = await tool.shouldConfirmExecute(params); + const invocation = tool.build(params); + const confirmationDetails = await invocation.shouldConfirmExecute( + new AbortController().signal, + ); expect(confirmationDetails).toEqual({ type: 'info', @@ -62,7 +68,10 @@ 
describe('WebFetchTool', () => { url: 'https://example.com', prompt: 'summarize this page', }; - const confirmationDetails = await tool.shouldConfirmExecute(params); + const invocation = tool.build(params); + const confirmationDetails = await invocation.shouldConfirmExecute( + new AbortController().signal, + ); expect(confirmationDetails).toBe(false); }); @@ -77,7 +86,10 @@ describe('WebFetchTool', () => { url: 'https://example.com', prompt: 'summarize this page', }; - const confirmationDetails = await tool.shouldConfirmExecute(params); + const invocation = tool.build(params); + const confirmationDetails = await invocation.shouldConfirmExecute( + new AbortController().signal, + ); if ( confirmationDetails && diff --git a/packages/core/src/tools/web-search.ts b/packages/core/src/tools/web-search.ts index adeda1a7..b663c59f 100644 --- a/packages/core/src/tools/web-search.ts +++ b/packages/core/src/tools/web-search.ts @@ -4,7 +4,7 @@ * SPDX-License-Identifier: Apache-2.0 */ -import { BaseTool, Icon, ToolResult } from './tools.js'; +import { BaseTool, Kind, ToolResult } from './tools.js'; import { Type } from '@google/genai'; import { SchemaValidator } from '../utils/schemaValidator.js'; import { getErrorMessage } from '../utils/errors.js'; @@ -55,7 +55,7 @@ export class WebSearchTool extends BaseTool< WebSearchTool.Name, 'TavilySearch', 'Performs a web search using the Tavily API and returns a concise answer with sources. 
Requires the TAVILY_API_KEY environment variable.', - Icon.Globe, + Kind.Search, { type: Type.OBJECT, properties: { @@ -89,7 +89,7 @@ export class WebSearchTool extends BaseTool< return null; } - getDescription(params: WebSearchToolParams): string { + override getDescription(params: WebSearchToolParams): string { return `Searching the web for: "${params.query}"`; } diff --git a/packages/core/src/tools/write-file.test.ts b/packages/core/src/tools/write-file.test.ts index 9911daa1..d6e5657e 100644 --- a/packages/core/src/tools/write-file.test.ts +++ b/packages/core/src/tools/write-file.test.ts @@ -58,7 +58,6 @@ const mockConfigInternal = { getGeminiClient: vi.fn(), // Initialize as a plain mock function getIdeClient: vi.fn(), getIdeMode: vi.fn(() => false), - getIdeModeFeature: vi.fn(() => false), getWorkspaceContext: () => createMockWorkspaceContext(rootDir), getApiKey: () => 'test-key', getModel: () => 'test-model', diff --git a/packages/core/src/tools/write-file.ts b/packages/core/src/tools/write-file.ts index 5cdba419..01c92865 100644 --- a/packages/core/src/tools/write-file.ts +++ b/packages/core/src/tools/write-file.ts @@ -15,7 +15,8 @@ import { ToolEditConfirmationDetails, ToolConfirmationOutcome, ToolCallConfirmationDetails, - Icon, + Kind, + ToolLocation, } from './tools.js'; import { ToolErrorType } from './tool-error.js'; import { SchemaValidator } from '../utils/schemaValidator.js'; @@ -82,7 +83,7 @@ export class WriteFileTool `Writes content to a specified file in the local filesystem. The user has the ability to modify \`content\`. 
If modified, this will be stated in the response.`, - Icon.Pencil, + Kind.Edit, { properties: { file_path: { @@ -101,7 +102,11 @@ export class WriteFileTool ); } - validateToolParams(params: WriteFileToolParams): string | null { + override toolLocations(params: WriteFileToolParams): ToolLocation[] { + return [{ path: params.file_path }]; + } + + override validateToolParams(params: WriteFileToolParams): string | null { const errors = SchemaValidator.validate( this.schema.parametersJsonSchema, params, @@ -139,7 +144,7 @@ export class WriteFileTool return null; } - getDescription(params: WriteFileToolParams): string { + override getDescription(params: WriteFileToolParams): string { if (!params.file_path) { return `Model did not provide valid parameters for write file tool, missing or empty "file_path"`; } @@ -153,7 +158,7 @@ export class WriteFileTool /** * Handles the confirmation prompt for the WriteFile tool. */ - async shouldConfirmExecute( + override async shouldConfirmExecute( params: WriteFileToolParams, abortSignal: AbortSignal, ): Promise { @@ -195,7 +200,6 @@ export class WriteFileTool const ideClient = this.config.getIdeClient(); const ideConfirmation = - this.config.getIdeModeFeature() && this.config.getIdeMode() && ideClient.getConnectionStatus().status === IDEConnectionStatus.Connected ? 
ideClient.openDiff(params.file_path, correctedContent) diff --git a/packages/cli/src/ui/utils/errorParsing.test.ts b/packages/core/src/utils/errorParsing.test.ts similarity index 98% rename from packages/cli/src/ui/utils/errorParsing.test.ts rename to packages/core/src/utils/errorParsing.test.ts index 3d388add..72a84d82 100644 --- a/packages/cli/src/ui/utils/errorParsing.test.ts +++ b/packages/core/src/utils/errorParsing.test.ts @@ -6,16 +6,13 @@ import { describe, it, expect } from 'vitest'; import { parseAndFormatApiError } from './errorParsing.js'; -import { - AuthType, - UserTierId, - DEFAULT_GEMINI_FLASH_MODEL, - isProQuotaExceededError, -} from '@qwen-code/qwen-code-core'; +import { isProQuotaExceededError } from './quotaErrorDetection.js'; +import { DEFAULT_GEMINI_FLASH_MODEL } from '../config/models.js'; +import { UserTierId } from '../code_assist/types.js'; +import { AuthType } from '../core/contentGenerator.js'; +import { StructuredError } from '../core/turn.js'; describe('parseAndFormatApiError', () => { - const _enterpriseMessage = - 'upgrade to a Gemini Code Assist Standard or Enterprise plan with higher limits'; const vertexMessage = 'request a quota increase through Vertex'; const geminiMessage = 'request a quota increase through AI Studio'; diff --git a/packages/cli/src/ui/utils/errorParsing.ts b/packages/core/src/utils/errorParsing.ts similarity index 97% rename from packages/cli/src/ui/utils/errorParsing.ts rename to packages/core/src/utils/errorParsing.ts index 10616514..aa15a652 100644 --- a/packages/cli/src/ui/utils/errorParsing.ts +++ b/packages/core/src/utils/errorParsing.ts @@ -5,15 +5,17 @@ */ import { - AuthType, - UserTierId, - DEFAULT_GEMINI_FLASH_MODEL, - DEFAULT_GEMINI_MODEL, isProQuotaExceededError, isGenericQuotaExceededError, isApiError, isStructuredError, -} from '@qwen-code/qwen-code-core'; +} from './quotaErrorDetection.js'; +import { + DEFAULT_GEMINI_MODEL, + DEFAULT_GEMINI_FLASH_MODEL, +} from '../config/models.js'; +import { 
UserTierId } from '../code_assist/types.js'; +import { AuthType } from '../core/contentGenerator.js'; // Free Tier message functions const getRateLimitErrorMessageGoogleFree = ( diff --git a/packages/core/src/utils/filesearch/fileSearch.ts b/packages/core/src/utils/filesearch/fileSearch.ts index 480d5815..dff8d0ec 100644 --- a/packages/core/src/utils/filesearch/fileSearch.ts +++ b/packages/core/src/utils/filesearch/fileSearch.ts @@ -289,7 +289,7 @@ export class FileSearch { * Builds the in-memory cache for fast pattern matching. */ private buildResultCache(): void { - this.resultCache = new ResultCache(this.allFiles, this.absoluteDir); + this.resultCache = new ResultCache(this.allFiles); // The v1 algorithm is much faster since it only looks at the first // occurence of the pattern. We use it for search spaces that have >20k // files, because the v2 algorithm is just too slow in those cases. diff --git a/packages/core/src/utils/filesearch/result-cache.test.ts b/packages/core/src/utils/filesearch/result-cache.test.ts index 0b1b4e17..fcfa3f00 100644 --- a/packages/core/src/utils/filesearch/result-cache.test.ts +++ b/packages/core/src/utils/filesearch/result-cache.test.ts @@ -4,7 +4,6 @@ * SPDX-License-Identifier: Apache-2.0 */ -import path from 'node:path'; import { test, expect } from 'vitest'; import { ResultCache } from './result-cache.js'; @@ -17,7 +16,7 @@ test('ResultCache basic usage', async () => { 'subdir/other.js', 'subdir/nested/file.md', ]; - const cache = new ResultCache(files, path.resolve('.')); + const cache = new ResultCache(files); const { files: resultFiles, isExactMatch } = await cache.get('*.js'); expect(resultFiles).toEqual(files); expect(isExactMatch).toBe(false); @@ -25,7 +24,7 @@ test('ResultCache basic usage', async () => { test('ResultCache cache hit/miss', async () => { const files = ['foo.txt', 'bar.js', 'baz.md']; - const cache = new ResultCache(files, path.resolve('.')); + const cache = new ResultCache(files); // First call: miss const 
{ files: result1Files, isExactMatch: isExactMatch1 } = await cache.get('*.js'); @@ -44,7 +43,7 @@ test('ResultCache cache hit/miss', async () => { test('ResultCache best base query', async () => { const files = ['foo.txt', 'foobar.js', 'baz.md']; - const cache = new ResultCache(files, path.resolve('.')); + const cache = new ResultCache(files); // Cache a broader query cache.set('foo', ['foo.txt', 'foobar.js']); diff --git a/packages/core/src/utils/filesearch/result-cache.ts b/packages/core/src/utils/filesearch/result-cache.ts index 77b99aec..cf0c2b4b 100644 --- a/packages/core/src/utils/filesearch/result-cache.ts +++ b/packages/core/src/utils/filesearch/result-cache.ts @@ -13,10 +13,7 @@ export class ResultCache { private hits = 0; private misses = 0; - constructor( - private readonly allFiles: string[], - private readonly absoluteDir: string, - ) { + constructor(private readonly allFiles: string[]) { this.cache = new Map(); } diff --git a/packages/core/src/utils/memoryImportProcessor.test.ts b/packages/core/src/utils/memoryImportProcessor.test.ts index 94fc1193..300d44fb 100644 --- a/packages/core/src/utils/memoryImportProcessor.test.ts +++ b/packages/core/src/utils/memoryImportProcessor.test.ts @@ -11,7 +11,7 @@ import { marked } from 'marked'; import { processImports, validateImportPath } from './memoryImportProcessor.js'; // Helper function to create platform-agnostic test paths -const testPath = (...segments: string[]) => { +function testPath(...segments: string[]): string { // Start with the first segment as is (might be an absolute path on Windows) let result = segments[0]; @@ -27,9 +27,8 @@ const testPath = (...segments: string[]) => { } return path.normalize(result); -}; +} -// Mock fs/promises vi.mock('fs/promises'); const mockedFs = vi.mocked(fs); @@ -509,21 +508,21 @@ describe('memoryImportProcessor', () => { expect(result.importTree.imports).toHaveLength(2); // First import: nested.md - // Prefix with underscore to indicate they're intentionally unused 
- const _expectedNestedPath = testPath(projectRoot, 'src', 'nested.md'); - const _expectedInnerPath = testPath(projectRoot, 'src', 'inner.md'); - const _expectedSimplePath = testPath(projectRoot, 'src', 'simple.md'); - // Check that the paths match using includes to handle potential absolute/relative differences - expect(result.importTree.imports![0].path).toContain('nested.md'); + const expectedNestedPath = testPath(projectRoot, 'src', 'nested.md'); + + expect(result.importTree.imports![0].path).toContain(expectedNestedPath); expect(result.importTree.imports![0].imports).toHaveLength(1); + + const expectedInnerPath = testPath(projectRoot, 'src', 'inner.md'); expect(result.importTree.imports![0].imports![0].path).toContain( - 'inner.md', + expectedInnerPath, ); expect(result.importTree.imports![0].imports![0].imports).toBeUndefined(); // Second import: simple.md - expect(result.importTree.imports![1].path).toContain('simple.md'); + const expectedSimplePath = testPath(projectRoot, 'src', 'simple.md'); + expect(result.importTree.imports![1].path).toContain(expectedSimplePath); expect(result.importTree.imports![1].imports).toBeUndefined(); }); @@ -724,21 +723,20 @@ describe('memoryImportProcessor', () => { expect(result.importTree.imports).toHaveLength(2); // First import: nested.md - // Prefix with underscore to indicate they're intentionally unused - const _expectedNestedPath = testPath(projectRoot, 'src', 'nested.md'); - const _expectedInnerPath = testPath(projectRoot, 'src', 'inner.md'); - const _expectedSimplePath = testPath(projectRoot, 'src', 'simple.md'); + const expectedNestedPath = testPath(projectRoot, 'src', 'nested.md'); + const expectedInnerPath = testPath(projectRoot, 'src', 'inner.md'); + const expectedSimplePath = testPath(projectRoot, 'src', 'simple.md'); // Check that the paths match using includes to handle potential absolute/relative differences - expect(result.importTree.imports![0].path).toContain('nested.md'); + 
expect(result.importTree.imports![0].path).toContain(expectedNestedPath); expect(result.importTree.imports![0].imports).toHaveLength(1); expect(result.importTree.imports![0].imports![0].path).toContain( - 'inner.md', + expectedInnerPath, ); expect(result.importTree.imports![0].imports![0].imports).toBeUndefined(); // Second import: simple.md - expect(result.importTree.imports![1].path).toContain('simple.md'); + expect(result.importTree.imports![1].path).toContain(expectedSimplePath); expect(result.importTree.imports![1].imports).toBeUndefined(); }); @@ -899,7 +897,7 @@ describe('memoryImportProcessor', () => { // Test relative paths - resolve them against basePath const relativePath = './file.md'; - const _resolvedRelativePath = path.resolve(basePath, relativePath); + path.resolve(basePath, relativePath); expect(validateImportPath(relativePath, basePath, [basePath])).toBe(true); // Test parent directory access (should be allowed if parent is in allowed paths) @@ -907,12 +905,12 @@ describe('memoryImportProcessor', () => { if (parentPath !== basePath) { // Only test if parent is different const parentRelativePath = '../file.md'; - const _resolvedParentPath = path.resolve(basePath, parentRelativePath); + path.resolve(basePath, parentRelativePath); expect( validateImportPath(parentRelativePath, basePath, [parentPath]), ).toBe(true); - const _resolvedSubPath = path.resolve(basePath, 'sub'); + path.resolve(basePath, 'sub'); const resultSub = validateImportPath('sub', basePath, [basePath]); expect(resultSub).toBe(true); } diff --git a/packages/core/src/utils/memoryImportProcessor.ts b/packages/core/src/utils/memoryImportProcessor.ts index 620517c0..adcf3a2f 100644 --- a/packages/core/src/utils/memoryImportProcessor.ts +++ b/packages/core/src/utils/memoryImportProcessor.ts @@ -261,7 +261,7 @@ export async function processImports( // Process imports in reverse order to handle indices correctly for (let i = imports.length - 1; i >= 0; i--) { - const { start, _end, path: 
importPath } = imports[i]; + const { start, path: importPath } = imports[i]; // Skip if inside a code region if ( diff --git a/packages/core/src/utils/quotaErrorDetection.ts b/packages/core/src/utils/quotaErrorDetection.ts index a5ccf12e..bcc594f8 100644 --- a/packages/core/src/utils/quotaErrorDetection.ts +++ b/packages/core/src/utils/quotaErrorDetection.ts @@ -4,6 +4,8 @@ * SPDX-License-Identifier: Apache-2.0 */ +import { StructuredError } from '../core/turn.js'; + export interface ApiError { error: { code: number; @@ -13,11 +15,6 @@ export interface ApiError { }; } -interface StructuredError { - message: string; - status?: number; -} - export function isApiError(error: unknown): error is ApiError { return ( typeof error === 'object' && diff --git a/scripts/build_sandbox.js b/scripts/build_sandbox.js index 51c8566f..5cb4cd61 100644 --- a/scripts/build_sandbox.js +++ b/scripts/build_sandbox.js @@ -20,6 +20,7 @@ import { execSync } from 'child_process'; import { chmodSync, existsSync, readFileSync, rmSync, writeFileSync } from 'fs'; import { join } from 'path'; +import os from 'os'; import yargs from 'yargs'; import { hideBin } from 'yargs/helpers'; import cliPkgJson from '../packages/cli/package.json' with { type: 'json' }; @@ -121,12 +122,28 @@ chmodSync( const buildStdout = process.env.VERBOSE ? 'inherit' : 'ignore'; +// Determine the appropriate shell based on OS +const isWindows = os.platform() === 'win32'; +const shellToUse = isWindows ? 'powershell.exe' : '/bin/bash'; + function buildImage(imageName, dockerfile) { console.log(`building ${imageName} ... (can be slow first time)`); - const buildCommand = - sandboxCommand === 'podman' - ? `${sandboxCommand} build --authfile=<(echo '{}')` - : `${sandboxCommand} build`; + + let buildCommandArgs = ''; + let tempAuthFile = ''; + + if (sandboxCommand === 'podman') { + if (isWindows) { + // PowerShell doesn't support <() process substitution. + // Create a temporary auth file that we will clean up after. 
+ tempAuthFile = join(os.tmpdir(), `gemini-auth-${Date.now()}.json`); + writeFileSync(tempAuthFile, '{}'); + buildCommandArgs = `--authfile="${tempAuthFile}"`; + } else { + // Use bash-specific syntax for Linux/macOS + buildCommandArgs = `--authfile=<(echo '{}')`; + } + } const npmPackageVersion = JSON.parse( readFileSync(join(process.cwd(), 'package.json'), 'utf-8'), @@ -136,27 +153,34 @@ function buildImage(imageName, dockerfile) { process.env.GEMINI_SANDBOX_IMAGE_TAG || imageName.split(':')[1]; const finalImageName = `${imageName.split(':')[0]}:${imageTag}`; - execSync( - `${buildCommand} ${ - process.env.BUILD_SANDBOX_FLAGS || '' - } --build-arg CLI_VERSION_ARG=${npmPackageVersion} -f "${dockerfile}" -t "${finalImageName}" .`, - { stdio: buildStdout, shell: '/bin/bash' }, - ); - console.log(`built ${finalImageName}`); - - // If an output file path was provided via command-line, write the final image URI to it. - if (argv.outputFile) { - console.log( - `Writing final image URI for CI artifact to: ${argv.outputFile}`, + try { + execSync( + `${sandboxCommand} build ${buildCommandArgs} ${ + process.env.BUILD_SANDBOX_FLAGS || '' + } --build-arg CLI_VERSION_ARG=${npmPackageVersion} -f "${dockerfile}" -t "${imageName}" .`, + { stdio: buildStdout, shell: shellToUse }, ); - // The publish step only supports one image. If we build multiple, only the last one - // will be published. Throw an error to make this failure explicit if the file already exists. - if (existsSync(argv.outputFile)) { - throw new Error( - `CI artifact file ${argv.outputFile} already exists. Refusing to overwrite.`, + console.log(`built ${finalImageName}`); + + // If an output file path was provided via command-line, write the final image URI to it. + if (argv.outputFile) { + console.log( + `Writing final image URI for CI artifact to: ${argv.outputFile}`, ); + // The publish step only supports one image. If we build multiple, only the last one + // will be published. 
Throw an error to make this failure explicit if the file already exists. + if (existsSync(argv.outputFile)) { + throw new Error( + `CI artifact file ${argv.outputFile} already exists. Refusing to overwrite.`, + ); + } + writeFileSync(argv.outputFile, finalImageName); + } + } finally { + // If we created a temp file, delete it now. + if (tempAuthFile) { + rmSync(tempAuthFile, { force: true }); } - writeFileSync(argv.outputFile, finalImageName); } } diff --git a/scripts/copy_bundle_assets.js b/scripts/copy_bundle_assets.js index 79d2a080..5a3af3e9 100644 --- a/scripts/copy_bundle_assets.js +++ b/scripts/copy_bundle_assets.js @@ -37,12 +37,4 @@ for (const file of sbFiles) { copyFileSync(join(root, file), join(bundleDir, basename(file))); } -// Find and copy all .vsix files from packages to the root of the bundle directory -const vsixFiles = glob.sync('packages/vscode-ide-companion/*.vsix', { - cwd: root, -}); -for (const file of vsixFiles) { - copyFileSync(join(root, file), join(bundleDir, basename(file))); -} - console.log('Assets copied to bundle/'); diff --git a/scripts/create_alias.sh b/scripts/create_alias.sh index ccaf3dd4..ecb01bb3 100755 --- a/scripts/create_alias.sh +++ b/scripts/create_alias.sh @@ -1,38 +1,39 @@ -#!/bin/bash +#!/usr/bin/env bash +set -euo pipefail # This script creates an alias for the Gemini CLI # Determine the project directory PROJECT_DIR=$(cd "$(dirname "$0")/.." && pwd) -ALIAS_COMMAND="alias gemini='node $PROJECT_DIR/scripts/start.js'" +ALIAS_COMMAND="alias gemini='node "${PROJECT_DIR}/scripts/start.js"'" # Detect shell and set config file path -if [[ "$SHELL" == *"/bash" ]]; then - CONFIG_FILE="$HOME/.bashrc" -elif [[ "$SHELL" == *"/zsh" ]]; then - CONFIG_FILE="$HOME/.zshrc" +if [[ "${SHELL}" == *"/bash" ]]; then + CONFIG_FILE="${HOME}/.bashrc" +elif [[ "${SHELL}" == *"/zsh" ]]; then + CONFIG_FILE="${HOME}/.zshrc" else echo "Unsupported shell. Only bash and zsh are supported." 
exit 1 fi -echo "This script will add the following alias to your shell configuration file ($CONFIG_FILE):" -echo " $ALIAS_COMMAND" +echo "This script will add the following alias to your shell configuration file (${CONFIG_FILE}):" +echo " ${ALIAS_COMMAND}" echo "" # Check if the alias already exists -if grep -q "alias gemini=" "$CONFIG_FILE"; then - echo "A 'gemini' alias already exists in $CONFIG_FILE. No changes were made." +if grep -q "alias gemini=" "${CONFIG_FILE}"; then + echo "A 'gemini' alias already exists in ${CONFIG_FILE}. No changes were made." exit 0 fi read -p "Do you want to proceed? (y/n) " -n 1 -r echo "" -if [[ $REPLY =~ ^[Yy]$ ]]; then - echo "$ALIAS_COMMAND" >> "$CONFIG_FILE" +if [[ "${REPLY}" =~ ^[Yy]$ ]]; then + echo "${ALIAS_COMMAND}" >> "${CONFIG_FILE}" echo "" - echo "Alias added to $CONFIG_FILE." - echo "Please run 'source $CONFIG_FILE' or open a new terminal to use the 'gemini' command." + echo "Alias added to ${CONFIG_FILE}." + echo "Please run 'source ${CONFIG_FILE}' or open a new terminal to use the 'gemini' command." else echo "Aborted. No changes were made." fi diff --git a/tsconfig.json b/tsconfig.json index e761d3e1..5f303ddc 100644 --- a/tsconfig.json +++ b/tsconfig.json @@ -3,7 +3,16 @@ "strict": true, "esModuleInterop": true, "skipLibCheck": true, + "noImplicitAny": true, + "noImplicitOverride": true, + "noImplicitReturns": true, + "noImplicitThis": true, "forceConsistentCasingInFileNames": true, + "noUnusedLocals": true, + "strictBindCallApply": true, + "strictFunctionTypes": true, + "strictNullChecks": true, + "strictPropertyInitialization": true, "resolveJsonModule": true, "sourceMap": true, "composite": true,