From c5c6966d08b042b3b287c1909afa60f950bca147 Mon Sep 17 00:00:00 2001 From: doonrevver86 Date: Tue, 12 Aug 2025 19:18:06 +0100 Subject: [PATCH 01/45] ensure sandbox build script is cross-platform (#2603) Co-authored-by: Allen Hutchison --- scripts/build_sandbox.js | 68 +++++++++++++++++++++++++++------------- 1 file changed, 46 insertions(+), 22 deletions(-) diff --git a/scripts/build_sandbox.js b/scripts/build_sandbox.js index 7eece434..ef6b0686 100644 --- a/scripts/build_sandbox.js +++ b/scripts/build_sandbox.js @@ -20,6 +20,7 @@ import { execSync } from 'child_process'; import { chmodSync, existsSync, readFileSync, rmSync, writeFileSync } from 'fs'; import { join } from 'path'; +import os from 'os'; import yargs from 'yargs'; import { hideBin } from 'yargs/helpers'; import cliPkgJson from '../packages/cli/package.json' with { type: 'json' }; @@ -117,12 +118,28 @@ chmodSync( const buildStdout = process.env.VERBOSE ? 'inherit' : 'ignore'; +// Determine the appropriate shell based on OS +const isWindows = os.platform() === 'win32'; +const shellToUse = isWindows ? 'powershell.exe' : '/bin/bash'; + function buildImage(imageName, dockerfile) { console.log(`building ${imageName} ... (can be slow first time)`); - const buildCommand = - sandboxCommand === 'podman' - ? `${sandboxCommand} build --authfile=<(echo '{}')` - : `${sandboxCommand} build`; + + let buildCommandArgs = ''; + let tempAuthFile = ''; + + if (sandboxCommand === 'podman') { + if (isWindows) { + // PowerShell doesn't support <() process substitution. + // Create a temporary auth file that we will clean up after. 
+ tempAuthFile = join(os.tmpdir(), `gemini-auth-${Date.now()}.json`); + writeFileSync(tempAuthFile, '{}'); + buildCommandArgs = `--authfile="${tempAuthFile}"`; + } else { + // Use bash-specific syntax for Linux/macOS + buildCommandArgs = `--authfile=<(echo '{}')`; + } + } const npmPackageVersion = JSON.parse( readFileSync(join(process.cwd(), 'package.json'), 'utf-8'), @@ -132,27 +149,34 @@ function buildImage(imageName, dockerfile) { process.env.GEMINI_SANDBOX_IMAGE_TAG || imageName.split(':')[1]; const finalImageName = `${imageName.split(':')[0]}:${imageTag}`; - execSync( - `${buildCommand} ${ - process.env.BUILD_SANDBOX_FLAGS || '' - } --build-arg CLI_VERSION_ARG=${npmPackageVersion} -f "${dockerfile}" -t "${finalImageName}" .`, - { stdio: buildStdout, shell: '/bin/bash' }, - ); - console.log(`built ${finalImageName}`); - - // If an output file path was provided via command-line, write the final image URI to it. - if (argv.outputFile) { - console.log( - `Writing final image URI for CI artifact to: ${argv.outputFile}`, + try { + execSync( + `${sandboxCommand} build ${buildCommandArgs} ${ + process.env.BUILD_SANDBOX_FLAGS || '' + } --build-arg CLI_VERSION_ARG=${npmPackageVersion} -f "${dockerfile}" -t "${imageName}" .`, + { stdio: buildStdout, shell: shellToUse }, ); - // The publish step only supports one image. If we build multiple, only the last one - // will be published. Throw an error to make this failure explicit if the file already exists. - if (existsSync(argv.outputFile)) { - throw new Error( - `CI artifact file ${argv.outputFile} already exists. Refusing to overwrite.`, + console.log(`built ${finalImageName}`); + + // If an output file path was provided via command-line, write the final image URI to it. + if (argv.outputFile) { + console.log( + `Writing final image URI for CI artifact to: ${argv.outputFile}`, ); + // The publish step only supports one image. If we build multiple, only the last one + // will be published. 
Throw an error to make this failure explicit if the file already exists. + if (existsSync(argv.outputFile)) { + throw new Error( + `CI artifact file ${argv.outputFile} already exists. Refusing to overwrite.`, + ); + } + writeFileSync(argv.outputFile, finalImageName); + } + } finally { + // If we created a temp file, delete it now. + if (tempAuthFile) { + rmSync(tempAuthFile, { force: true }); } - writeFileSync(argv.outputFile, finalImageName); } } From 7cc6b8c270a67803f9387eeead0d3d7ac914303a Mon Sep 17 00:00:00 2001 From: Richie Foreman Date: Tue, 12 Aug 2025 14:31:59 -0400 Subject: [PATCH 02/45] chore(usage telemetry): Freshen up Clearcut logging (#6013) Co-authored-by: christine betts Co-authored-by: Jacob Richman Co-authored-by: matt korwel --- eslint.config.js | 7 +- package-lock.json | 557 ++++++++++++++++++ package.json | 8 +- .../cli/src/config/config.integration.test.ts | 21 + packages/core/src/config/config.test.ts | 42 +- packages/core/src/mocks/msw.ts | 9 + .../clearcut-logger/clearcut-logger.test.ts | 253 ++++---- .../clearcut-logger/clearcut-logger.ts | 306 +++++----- packages/core/src/test-utils/config.ts | 36 ++ 9 files changed, 969 insertions(+), 270 deletions(-) create mode 100644 packages/core/src/mocks/msw.ts create mode 100644 packages/core/src/test-utils/config.ts diff --git a/eslint.config.js b/eslint.config.js index f35d4f35..fc751418 100644 --- a/eslint.config.js +++ b/eslint.config.js @@ -118,7 +118,12 @@ export default tseslint.config( 'import/no-internal-modules': [ 'error', { - allow: ['react-dom/test-utils', 'memfs/lib/volume.js', 'yargs/**'], + allow: [ + 'react-dom/test-utils', + 'memfs/lib/volume.js', + 'yargs/**', + 'msw/node', + ], }, ], 'import/no-relative-packages': 'error', diff --git a/package-lock.json b/package-lock.json index bcce33b5..acdd67f5 100644 --- a/package-lock.json +++ b/package-lock.json @@ -10,6 +10,9 @@ "workspaces": [ "packages/*" ], + "dependencies": { + "node-fetch": "^3.3.2" + }, "bin": { "gemini": 
"bundle/gemini.js" }, @@ -37,6 +40,7 @@ "memfs": "^4.17.2", "mnemonist": "^0.40.3", "mock-fs": "^5.5.0", + "msw": "^2.10.4", "prettier": "^3.5.3", "react-devtools-core": "^4.28.5", "tsx": "^4.20.3", @@ -202,6 +206,53 @@ "node": ">=18" } }, + "node_modules/@bundled-es-modules/cookie": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/@bundled-es-modules/cookie/-/cookie-2.0.1.tgz", + "integrity": "sha512-8o+5fRPLNbjbdGRRmJj3h6Hh1AQJf2dk3qQ/5ZFb+PXkRNiSoMGGUKlsgLfrxneb72axVJyIYji64E2+nNfYyw==", + "dev": true, + "license": "ISC", + "dependencies": { + "cookie": "^0.7.2" + } + }, + "node_modules/@bundled-es-modules/statuses": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@bundled-es-modules/statuses/-/statuses-1.0.1.tgz", + "integrity": "sha512-yn7BklA5acgcBr+7w064fGV+SGIFySjCKpqjcWgBAIfrAkY+4GQTJJHQMeT3V/sgz23VTEVV8TtOmkvJAhFVfg==", + "dev": true, + "license": "ISC", + "dependencies": { + "statuses": "^2.0.1" + } + }, + "node_modules/@bundled-es-modules/tough-cookie": { + "version": "0.1.6", + "resolved": "https://registry.npmjs.org/@bundled-es-modules/tough-cookie/-/tough-cookie-0.1.6.tgz", + "integrity": "sha512-dvMHbL464C0zI+Yqxbz6kZ5TOEp7GLW+pry/RWndAR8MJQAXZ2rPmIs8tziTZjeIyhSNZgZbCePtfSbdWqStJw==", + "dev": true, + "license": "ISC", + "dependencies": { + "@types/tough-cookie": "^4.0.5", + "tough-cookie": "^4.1.4" + } + }, + "node_modules/@bundled-es-modules/tough-cookie/node_modules/tough-cookie": { + "version": "4.1.4", + "resolved": "https://registry.npmjs.org/tough-cookie/-/tough-cookie-4.1.4.tgz", + "integrity": "sha512-Loo5UUvLD9ScZ6jh8beX1T6sO1w2/MpCRpEP7V280GKMVUQ0Jzar2U3UJPsrdbziLEMMhu3Ujnq//rhiFuIeag==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "psl": "^1.1.33", + "punycode": "^2.1.1", + "universalify": "^0.2.0", + "url-parse": "^1.5.3" + }, + "engines": { + "node": ">=6" + } + }, "node_modules/@csstools/color-helpers": { "version": "5.0.2", "resolved": 
"https://registry.npmjs.org/@csstools/color-helpers/-/color-helpers-5.0.2.tgz", @@ -1041,6 +1092,173 @@ "integrity": "sha512-trnsAYxU3xnS1gPHPyU961coFyLkh4gAD/0zQ5mymY4yOZ+CYvsPqUbOFSw0aDM4y0tV7tiFxL/1XfXPNC6IPg==", "license": "ISC" }, + "node_modules/@inquirer/confirm": { + "version": "5.1.14", + "resolved": "https://registry.npmjs.org/@inquirer/confirm/-/confirm-5.1.14.tgz", + "integrity": "sha512-5yR4IBfe0kXe59r1YCTG8WXkUbl7Z35HK87Sw+WUyGD8wNUx7JvY7laahzeytyE1oLn74bQnL7hstctQxisQ8Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "@inquirer/core": "^10.1.15", + "@inquirer/type": "^3.0.8" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "@types/node": ">=18" + }, + "peerDependenciesMeta": { + "@types/node": { + "optional": true + } + } + }, + "node_modules/@inquirer/core": { + "version": "10.1.15", + "resolved": "https://registry.npmjs.org/@inquirer/core/-/core-10.1.15.tgz", + "integrity": "sha512-8xrp836RZvKkpNbVvgWUlxjT4CraKk2q+I3Ksy+seI2zkcE+y6wNs1BVhgcv8VyImFecUhdQrYLdW32pAjwBdA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@inquirer/figures": "^1.0.13", + "@inquirer/type": "^3.0.8", + "ansi-escapes": "^4.3.2", + "cli-width": "^4.1.0", + "mute-stream": "^2.0.0", + "signal-exit": "^4.1.0", + "wrap-ansi": "^6.2.0", + "yoctocolors-cjs": "^2.1.2" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "@types/node": ">=18" + }, + "peerDependenciesMeta": { + "@types/node": { + "optional": true + } + } + }, + "node_modules/@inquirer/core/node_modules/ansi-escapes": { + "version": "4.3.2", + "resolved": "https://registry.npmjs.org/ansi-escapes/-/ansi-escapes-4.3.2.tgz", + "integrity": "sha512-gKXj5ALrKWQLsYG9jlTRmR/xKluxHV+Z9QEwNIgCfM1/uwPMCuzVVnh5mwTd+OuBZcwSIMbqssNWRm1lE51QaQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "type-fest": "^0.21.3" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + 
"node_modules/@inquirer/core/node_modules/ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/@inquirer/core/node_modules/emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", + "dev": true, + "license": "MIT" + }, + "node_modules/@inquirer/core/node_modules/string-width": { + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "dev": true, + "license": "MIT", + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/@inquirer/core/node_modules/strip-ansi": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/@inquirer/core/node_modules/type-fest": { + "version": "0.21.3", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.21.3.tgz", + "integrity": "sha512-t0rzBq87m3fVcduHDUFhKmyyX+9eo6WQjZvf51Ea/M0Q7+T374Jp1aUiyUl0GKxp8M/OETVHSDvmkyPgvX+X2w==", + "dev": true, + "license": "(MIT OR CC0-1.0)", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + 
"node_modules/@inquirer/core/node_modules/wrap-ansi": { + "version": "6.2.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-6.2.0.tgz", + "integrity": "sha512-r6lPcBGxZXlIcymEu7InxDMhdW0KDxpLgoFLcguasxCaJ/SOIZwINatK9KY/tf+ZrlywOKU0UDj3ATXUBfxJXA==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-styles": "^4.0.0", + "string-width": "^4.1.0", + "strip-ansi": "^6.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/@inquirer/figures": { + "version": "1.0.13", + "resolved": "https://registry.npmjs.org/@inquirer/figures/-/figures-1.0.13.tgz", + "integrity": "sha512-lGPVU3yO9ZNqA7vTYz26jny41lE7yoQansmqdMLBEfqaGsmdg7V3W9mK9Pvb5IL4EVZ9GnSDGMO/cJXud5dMaw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=18" + } + }, + "node_modules/@inquirer/type": { + "version": "3.0.8", + "resolved": "https://registry.npmjs.org/@inquirer/type/-/type-3.0.8.tgz", + "integrity": "sha512-lg9Whz8onIHRthWaN1Q9EGLa/0LFJjyM8mEUbL1eTi6yMGvBf8gvyDLtxSXztQsxMvhxxNpJYrwa1YHdq+w4Jw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "@types/node": ">=18" + }, + "peerDependenciesMeta": { + "@types/node": { + "optional": true + } + } + }, "node_modules/@isaacs/cliui": { "version": "8.0.2", "resolved": "https://registry.npmjs.org/@isaacs/cliui/-/cliui-8.0.2.tgz", @@ -1239,6 +1457,24 @@ "node": ">=18" } }, + "node_modules/@mswjs/interceptors": { + "version": "0.39.5", + "resolved": "https://registry.npmjs.org/@mswjs/interceptors/-/interceptors-0.39.5.tgz", + "integrity": "sha512-B9nHSJYtsv79uo7QdkZ/b/WoKm20IkVSmTc/WCKarmDtFwM0dRx2ouEniqwNkzCSLn3fydzKmnMzjtfdOWt3VQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@open-draft/deferred-promise": "^2.2.0", + "@open-draft/logger": "^0.3.0", + "@open-draft/until": "^2.0.0", + "is-node-process": "^1.2.0", + "outvariant": "^1.4.3", + "strict-event-emitter": "^0.5.1" + }, + "engines": { + "node": ">=18" + } + }, 
"node_modules/@nodelib/fs.scandir": { "version": "2.1.5", "resolved": "https://registry.npmjs.org/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz", @@ -1277,6 +1513,31 @@ "node": ">= 8" } }, + "node_modules/@open-draft/deferred-promise": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/@open-draft/deferred-promise/-/deferred-promise-2.2.0.tgz", + "integrity": "sha512-CecwLWx3rhxVQF6V4bAgPS5t+So2sTbPgAzafKkVizyi7tlwpcFpdFqq+wqF2OwNBmqFuu6tOyouTuxgpMfzmA==", + "dev": true, + "license": "MIT" + }, + "node_modules/@open-draft/logger": { + "version": "0.3.0", + "resolved": "https://registry.npmjs.org/@open-draft/logger/-/logger-0.3.0.tgz", + "integrity": "sha512-X2g45fzhxH238HKO4xbSr7+wBS8Fvw6ixhTDuvLd5mqh6bJJCFAPwU9mPDxbcrRtfxv4u5IHCEH77BmxvXmmxQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-node-process": "^1.2.0", + "outvariant": "^1.4.0" + } + }, + "node_modules/@open-draft/until": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/@open-draft/until/-/until-2.1.0.tgz", + "integrity": "sha512-U69T3ItWHvLwGg5eJ0n3I62nWuE6ilHlmz7zM0npLBRvPRd7e6NYmg54vvRtP5mZG7kZqZCFVdsTWo7BPtBujg==", + "dev": true, + "license": "MIT" + }, "node_modules/@opentelemetry/api": { "version": "1.9.0", "resolved": "https://registry.npmjs.org/@opentelemetry/api/-/api-1.9.0.tgz", @@ -2224,6 +2485,13 @@ "@types/node": "*" } }, + "node_modules/@types/cookie": { + "version": "0.6.0", + "resolved": "https://registry.npmjs.org/@types/cookie/-/cookie-0.6.0.tgz", + "integrity": "sha512-4Kh9a6B2bQciAhf7FSuMRRkUWecJgJu9nPnx3yzpsfXX/c50REIqpHY4C82bXP90qrLtXtkDxTZosYO3UpOwlA==", + "dev": true, + "license": "MIT" + }, "node_modules/@types/cors": { "version": "2.8.19", "resolved": "https://registry.npmjs.org/@types/cors/-/cors-2.8.19.tgz", @@ -2491,12 +2759,26 @@ "integrity": "sha512-UE7oxhQLLd9gub6JKIAhDq06T0F6FnztwMNRvYgjeQSBeMc1ZG/tA47EwfduvkuQS8apbkM/lpLpWsaCeYsXVg==", "license": "MIT" }, + "node_modules/@types/statuses": { + "version": "2.0.6", + "resolved": 
"https://registry.npmjs.org/@types/statuses/-/statuses-2.0.6.tgz", + "integrity": "sha512-xMAgYwceFhRA2zY+XbEA7mxYbA093wdiW8Vu6gZPGWy9cmOyU9XesH1tNcEWsKFd5Vzrqx5T3D38PWx1FIIXkA==", + "dev": true, + "license": "MIT" + }, "node_modules/@types/tinycolor2": { "version": "1.4.6", "resolved": "https://registry.npmjs.org/@types/tinycolor2/-/tinycolor2-1.4.6.tgz", "integrity": "sha512-iEN8J0BoMnsWBqjVbWH/c0G0Hh7O21lpR2/+PrvAVgWdzL7eexIFm4JN/Wn10PTcmNdtS6U67r499mlWMXOxNw==", "license": "MIT" }, + "node_modules/@types/tough-cookie": { + "version": "4.0.5", + "resolved": "https://registry.npmjs.org/@types/tough-cookie/-/tough-cookie-4.0.5.tgz", + "integrity": "sha512-/Ad8+nIOV7Rl++6f1BdKxFSMgmoqEoYbHRpPcx3JEfv8VRsQe9Z4mCXeJBzxs7mbHY/XOZZuXlRNfhpVPbs6ZA==", + "dev": true, + "license": "MIT" + }, "node_modules/@types/unist": { "version": "3.0.3", "resolved": "https://registry.npmjs.org/@types/unist/-/unist-3.0.3.tgz", @@ -3779,6 +4061,16 @@ "url": "https://github.com/sponsors/sindresorhus" } }, + "node_modules/cli-width": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/cli-width/-/cli-width-4.1.0.tgz", + "integrity": "sha512-ouuZd4/dm2Sw5Gmqy6bGyNNNe1qt9RpmxveLSO7KcgsTnU7RXfsw+/bukWGo1abgBiMAic068rclZsO4IWmmxQ==", + "dev": true, + "license": "ISC", + "engines": { + "node": ">= 12" + } + }, "node_modules/cliui": { "version": "8.0.1", "resolved": "https://registry.npmjs.org/cliui/-/cliui-8.0.1.tgz", @@ -4069,6 +4361,15 @@ "devOptional": true, "license": "MIT" }, + "node_modules/data-uri-to-buffer": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/data-uri-to-buffer/-/data-uri-to-buffer-4.0.1.tgz", + "integrity": "sha512-0R9ikRb668HB7QDxT1vkpuUBtqc53YyAwMwGeUFKRojY/NWKvdZ+9UYtRfGmhqNbRkTSVpMbmyhXipFFv2cb/A==", + "license": "MIT", + "engines": { + "node": ">= 12" + } + }, "node_modules/data-urls": { "version": "5.0.0", "resolved": "https://registry.npmjs.org/data-urls/-/data-urls-5.0.0.tgz", @@ -5323,6 +5624,29 @@ "reusify": "^1.0.4" } }, + 
"node_modules/fetch-blob": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/fetch-blob/-/fetch-blob-3.2.0.tgz", + "integrity": "sha512-7yAQpD2UMJzLi1Dqv7qFYnPbaPx7ZfFK6PiIxQ4PfkGPyNyl2Ugx+a/umUonmKqjhM4DnfbMvdX6otXq83soQQ==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/jimmywarting" + }, + { + "type": "paypal", + "url": "https://paypal.me/jimmywarting" + } + ], + "license": "MIT", + "dependencies": { + "node-domexception": "^1.0.0", + "web-streams-polyfill": "^3.0.3" + }, + "engines": { + "node": "^12.20 || >= 14.13" + } + }, "node_modules/figures": { "version": "6.1.0", "resolved": "https://registry.npmjs.org/figures/-/figures-6.1.0.tgz", @@ -5462,6 +5786,18 @@ "url": "https://github.com/sponsors/isaacs" } }, + "node_modules/formdata-polyfill": { + "version": "4.0.10", + "resolved": "https://registry.npmjs.org/formdata-polyfill/-/formdata-polyfill-4.0.10.tgz", + "integrity": "sha512-buewHzMvYL29jdeQTVILecSaZKnt/RJWjoZCF5OW60Z67/GmSLBkOFM7qh1PI3zFNtJbaZL5eQu1vLfazOwj4g==", + "license": "MIT", + "dependencies": { + "fetch-blob": "^3.1.2" + }, + "engines": { + "node": ">=12.20.0" + } + }, "node_modules/forwarded": { "version": "0.2.0", "resolved": "https://registry.npmjs.org/forwarded/-/forwarded-0.2.0.tgz", @@ -5930,6 +6266,16 @@ "dev": true, "license": "MIT" }, + "node_modules/graphql": { + "version": "16.11.0", + "resolved": "https://registry.npmjs.org/graphql/-/graphql-16.11.0.tgz", + "integrity": "sha512-mS1lbMsxgQj6hge1XZ6p7GPhbrtFwUFYi3wRzXAC/FmYnyXMTvvI3td3rjmQ2u8ewXueaSvRPWaEcgVVOT9Jnw==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^12.22.0 || ^14.16.0 || ^16.0.0 || >=17.0.0" + } + }, "node_modules/gtoken": { "version": "7.1.0", "resolved": "https://registry.npmjs.org/gtoken/-/gtoken-7.1.0.tgz", @@ -6092,6 +6438,13 @@ "node": ">= 0.4" } }, + "node_modules/headers-polyfill": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/headers-polyfill/-/headers-polyfill-4.0.3.tgz", + 
"integrity": "sha512-IScLbePpkvO846sIwOtOTDjutRMWdXdJmXdMvk6gCBHxFO8d+QKOQedyZSxFTTFYRSmlgSTDtXqqq4pcenBXLQ==", + "dev": true, + "license": "MIT" + }, "node_modules/highlight.js": { "version": "11.11.1", "resolved": "https://registry.npmjs.org/highlight.js/-/highlight.js-11.11.1.tgz", @@ -6951,6 +7304,13 @@ "url": "https://github.com/sponsors/ljharb" } }, + "node_modules/is-node-process": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/is-node-process/-/is-node-process-1.2.0.tgz", + "integrity": "sha512-Vg4o6/fqPxIjtxgUH5QLJhwZ7gW5diGCVlXpuUfELC62CuxM1iHcRe51f2W1FDy04Ai4KJkagKjx3XaqyfRKXw==", + "dev": true, + "license": "MIT" + }, "node_modules/is-npm": { "version": "6.0.0", "resolved": "https://registry.npmjs.org/is-npm/-/is-npm-6.0.0.tgz", @@ -7906,6 +8266,81 @@ "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", "license": "MIT" }, + "node_modules/msw": { + "version": "2.10.4", + "resolved": "https://registry.npmjs.org/msw/-/msw-2.10.4.tgz", + "integrity": "sha512-6R1or/qyele7q3RyPwNuvc0IxO8L8/Aim6Sz5ncXEgcWUNxSKE+udriTOWHtpMwmfkLYlacA2y7TIx4cL5lgHA==", + "dev": true, + "hasInstallScript": true, + "license": "MIT", + "dependencies": { + "@bundled-es-modules/cookie": "^2.0.1", + "@bundled-es-modules/statuses": "^1.0.1", + "@bundled-es-modules/tough-cookie": "^0.1.6", + "@inquirer/confirm": "^5.0.0", + "@mswjs/interceptors": "^0.39.1", + "@open-draft/deferred-promise": "^2.2.0", + "@open-draft/until": "^2.1.0", + "@types/cookie": "^0.6.0", + "@types/statuses": "^2.0.4", + "graphql": "^16.8.1", + "headers-polyfill": "^4.0.2", + "is-node-process": "^1.2.0", + "outvariant": "^1.4.3", + "path-to-regexp": "^6.3.0", + "picocolors": "^1.1.1", + "strict-event-emitter": "^0.5.1", + "type-fest": "^4.26.1", + "yargs": "^17.7.2" + }, + "bin": { + "msw": "cli/index.js" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/mswjs" + }, + "peerDependencies": { + 
"typescript": ">= 4.8.x" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/msw/node_modules/path-to-regexp": { + "version": "6.3.0", + "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-6.3.0.tgz", + "integrity": "sha512-Yhpw4T9C6hPpgPeA28us07OJeqZ5EzQTkbfwuhsUg0c237RomFoETJgmp2sa3F/41gfLE6G5cqcYwznmeEeOlQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/msw/node_modules/type-fest": { + "version": "4.41.0", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-4.41.0.tgz", + "integrity": "sha512-TeTSQ6H5YHvpqVwBRcnLDCBnDOHWYu7IvGbHT6N8AOymcr9PJGjc1GTtiWZTYg0NCgYwvnYWEkVChQAr9bjfwA==", + "dev": true, + "license": "(MIT OR CC0-1.0)", + "engines": { + "node": ">=16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/mute-stream": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/mute-stream/-/mute-stream-2.0.0.tgz", + "integrity": "sha512-WWdIxpyjEn+FhQJQQv9aQAYlHoNVdzIzUySNV1gHUPDSdZJ3yZn7pAAbQcV7B56Mvu881q9FZV+0Vx2xC44VWA==", + "dev": true, + "license": "ISC", + "engines": { + "node": "^18.17.0 || >=20.5.0" + } + }, "node_modules/nanoid": { "version": "3.3.11", "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.11.tgz", @@ -7948,6 +8383,44 @@ "dev": true, "license": "MIT" }, + "node_modules/node-domexception": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/node-domexception/-/node-domexception-1.0.0.tgz", + "integrity": "sha512-/jKZoMpw0F8GRwl4/eLROPA3cfcXtLApP0QzLmUT/HuPCZWyB7IY9ZrMeKw2O/nFIqPQB3PVM9aYm0F312AXDQ==", + "deprecated": "Use your platform's native DOMException instead", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/jimmywarting" + }, + { + "type": "github", + "url": "https://paypal.me/jimmywarting" + } + ], + "license": "MIT", + "engines": { + "node": ">=10.5.0" + } + }, + "node_modules/node-fetch": { + "version": "3.3.2", + "resolved": 
"https://registry.npmjs.org/node-fetch/-/node-fetch-3.3.2.tgz", + "integrity": "sha512-dRB78srN/l6gqWulah9SrxeYnxeddIG30+GOqK/9OlLVyLg3HPnr6SqOWTWOXKRwC2eGYCkZ59NNuSgvSrpgOA==", + "license": "MIT", + "dependencies": { + "data-uri-to-buffer": "^4.0.0", + "fetch-blob": "^3.1.4", + "formdata-polyfill": "^4.0.10" + }, + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/node-fetch" + } + }, "node_modules/normalize-package-data": { "version": "6.0.2", "resolved": "https://registry.npmjs.org/normalize-package-data/-/normalize-package-data-6.0.2.tgz", @@ -8392,6 +8865,13 @@ "node": ">= 0.8.0" } }, + "node_modules/outvariant": { + "version": "1.4.3", + "resolved": "https://registry.npmjs.org/outvariant/-/outvariant-1.4.3.tgz", + "integrity": "sha512-+Sl2UErvtsoajRDKCE5/dBz4DIvHXQQnAxtQTF04OJxY0+DyZXSo5P5Bb7XYWOh81syohlYL24hbDwxedPUJCA==", + "dev": true, + "license": "MIT" + }, "node_modules/own-keys": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/own-keys/-/own-keys-1.0.1.tgz", @@ -8870,6 +9350,19 @@ "node": ">= 0.10" } }, + "node_modules/psl": { + "version": "1.15.0", + "resolved": "https://registry.npmjs.org/psl/-/psl-1.15.0.tgz", + "integrity": "sha512-JZd3gMVBAVQkSs6HdNZo9Sdo0LNcQeMNP3CozBJb3JYC/QUYZTnKxP+f8oWRX4rHP5EurWxqAHTSwUCjlNKa1w==", + "dev": true, + "license": "MIT", + "dependencies": { + "punycode": "^2.3.1" + }, + "funding": { + "url": "https://github.com/sponsors/lupomontero" + } + }, "node_modules/punycode": { "version": "2.3.1", "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.3.1.tgz", @@ -8909,6 +9402,13 @@ "url": "https://github.com/sponsors/ljharb" } }, + "node_modules/querystringify": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/querystringify/-/querystringify-2.2.0.tgz", + "integrity": "sha512-FIqgj2EUvTa7R50u0rGsyTftzjYmv/a3hO345bZNrqabNqjtgiDMgmo4mkUjd+nzU5oF3dClKqFIPUKybUyqoQ==", + "dev": true, + "license": 
"MIT" + }, "node_modules/queue-microtask": { "version": "1.2.3", "resolved": "https://registry.npmjs.org/queue-microtask/-/queue-microtask-1.2.3.tgz", @@ -9246,6 +9746,13 @@ "node": ">=0.10.5" } }, + "node_modules/requires-port": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/requires-port/-/requires-port-1.0.0.tgz", + "integrity": "sha512-KigOCHcocU3XODJxsu8i/j8T9tzT4adHiecwORRQ0ZZFcp7ahwXuRU1m+yuO90C5ZUyGeGfocHDI14M3L3yDAQ==", + "dev": true, + "license": "MIT" + }, "node_modules/resolve": { "version": "1.22.10", "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.10.tgz", @@ -9933,6 +10440,13 @@ "node": ">= 0.4" } }, + "node_modules/strict-event-emitter": { + "version": "0.5.1", + "resolved": "https://registry.npmjs.org/strict-event-emitter/-/strict-event-emitter-0.5.1.tgz", + "integrity": "sha512-vMgjE/GGEPEFnhFub6pa4FmJBRBVOLpIII2hvCZ8Kzb7K0hlHo7mQv6xYrBvCL2LtAIBwFUK8wvuJgTVSQ5MFQ==", + "dev": true, + "license": "MIT" + }, "node_modules/string-width": { "version": "5.1.2", "resolved": "https://registry.npmjs.org/string-width/-/string-width-5.1.2.tgz", @@ -10806,6 +11320,16 @@ "url": "https://github.com/sponsors/sindresorhus" } }, + "node_modules/universalify": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/universalify/-/universalify-0.2.0.tgz", + "integrity": "sha512-CJ1QgKmNg3CwvAv/kOFmtnEN05f0D/cn9QntgNOQlQF9dgvVTHj3t+8JPdjqawCHk7V/KA+fbUqzZ9XWhcqPUg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 4.0.0" + } + }, "node_modules/unpipe": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/unpipe/-/unpipe-1.0.0.tgz", @@ -10985,6 +11509,17 @@ "punycode": "^2.1.0" } }, + "node_modules/url-parse": { + "version": "1.5.10", + "resolved": "https://registry.npmjs.org/url-parse/-/url-parse-1.5.10.tgz", + "integrity": "sha512-WypcfiRhfeUP9vvF0j6rw0J3hrWrw6iZv3+22h6iRMJ/8z1Tj6XfLP4DsUix5MhMPnXpiHDoKyoZ/bdCkwBCiQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "querystringify": 
"^2.1.1", + "requires-port": "^1.0.0" + } + }, "node_modules/uuid": { "version": "9.0.1", "resolved": "https://registry.npmjs.org/uuid/-/uuid-9.0.1.tgz", @@ -11242,6 +11777,15 @@ "node": ">=18" } }, + "node_modules/web-streams-polyfill": { + "version": "3.3.3", + "resolved": "https://registry.npmjs.org/web-streams-polyfill/-/web-streams-polyfill-3.3.3.tgz", + "integrity": "sha512-d2JWLCivmZYTSIoge9MsgFCZrt571BikcWGYkjC1khllbTeDlGqZ2D8vD8E/lJa8WGWbb7Plm8/XJYV7IJHZZw==", + "license": "MIT", + "engines": { + "node": ">= 8" + } + }, "node_modules/webidl-conversions": { "version": "7.0.0", "resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-7.0.0.tgz", @@ -11703,6 +12247,19 @@ "url": "https://github.com/sponsors/sindresorhus" } }, + "node_modules/yoctocolors-cjs": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/yoctocolors-cjs/-/yoctocolors-cjs-2.1.2.tgz", + "integrity": "sha512-cYVsTjKl8b+FrnidjibDWskAv7UKOfcwaVZdp/it9n1s9fU3IkgDbhdIRKCW4JDsAlECJY0ytoVPT3sK6kideA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, "node_modules/yoga-layout": { "version": "3.2.1", "resolved": "https://registry.npmjs.org/yoga-layout/-/yoga-layout-3.2.1.tgz", diff --git a/package.json b/package.json index 637fc445..0e27676f 100644 --- a/package.json +++ b/package.json @@ -79,13 +79,17 @@ "json": "^11.0.0", "lodash": "^4.17.21", "memfs": "^4.17.2", + "mnemonist": "^0.40.3", "mock-fs": "^5.5.0", + "msw": "^2.10.4", "prettier": "^3.5.3", "react-devtools-core": "^4.28.5", "tsx": "^4.20.3", "typescript-eslint": "^8.30.1", "vitest": "^3.2.4", - "yargs": "^17.7.2", - "mnemonist": "^0.40.3" + "yargs": "^17.7.2" + }, + "dependencies": { + "node-fetch": "^3.3.2" } } diff --git a/packages/cli/src/config/config.integration.test.ts b/packages/cli/src/config/config.integration.test.ts index 5d83986e..87a74578 100644 --- 
a/packages/cli/src/config/config.integration.test.ts +++ b/packages/cli/src/config/config.integration.test.ts @@ -13,6 +13,25 @@ import { ConfigParameters, ContentGeneratorConfig, } from '@google/gemini-cli-core'; +import { http, HttpResponse } from 'msw'; +import { setupServer } from 'msw/node'; + +export const server = setupServer(); + +// TODO(richieforeman): Consider moving this to test setup globally. +beforeAll(() => { + server.listen({}); +}); + +afterEach(() => { + server.resetHandlers(); +}); + +afterAll(() => { + server.close(); +}); + +const CLEARCUT_URL = 'https://play.googleapis.com/log'; const TEST_CONTENT_GENERATOR_CONFIG: ContentGeneratorConfig = { apiKey: 'test-key', @@ -37,6 +56,8 @@ describe('Configuration Integration Tests', () => { let originalEnv: NodeJS.ProcessEnv; beforeEach(() => { + server.resetHandlers(http.post(CLEARCUT_URL, () => HttpResponse.text())); + tempDir = fs.mkdtempSync(path.join(tmpdir(), 'gemini-cli-test-')); originalEnv = { ...process.env }; process.env.GEMINI_API_KEY = 'test-api-key'; diff --git a/packages/core/src/config/config.test.ts b/packages/core/src/config/config.test.ts index 8e6ca38f..6c57d058 100644 --- a/packages/core/src/config/config.test.ts +++ b/packages/core/src/config/config.test.ts @@ -4,7 +4,8 @@ * SPDX-License-Identifier: Apache-2.0 */ -import { describe, it, expect, vi, beforeEach, Mock } from 'vitest'; +import { describe, it, expect, vi, beforeEach } from 'vitest'; +import { Mock } from 'vitest'; import { Config, ConfigParameters, SandboxConfig } from './config.js'; import * as path from 'path'; import { setGeminiMdFilename as mockSetGeminiMdFilename } from '../tools/memoryTool.js'; @@ -18,6 +19,7 @@ import { } from '../core/contentGenerator.js'; import { GeminiClient } from '../core/client.js'; import { GitService } from '../services/gitService.js'; +import { ClearcutLogger } from '../telemetry/clearcut-logger/clearcut-logger.js'; vi.mock('fs', async (importOriginal) => { const actual = await 
importOriginal(); @@ -119,11 +121,16 @@ describe('Server Config (config.ts)', () => { telemetry: TELEMETRY_SETTINGS, sessionId: SESSION_ID, model: MODEL, + usageStatisticsEnabled: false, }; beforeEach(() => { // Reset mocks if necessary vi.clearAllMocks(); + vi.spyOn( + ClearcutLogger.prototype, + 'logStartSessionEvent', + ).mockImplementation(() => undefined); }); describe('initialize', () => { @@ -372,6 +379,39 @@ describe('Server Config (config.ts)', () => { expect(fileService).toBeDefined(); }); + describe('Usage Statistics', () => { + it('defaults usage statistics to enabled if not specified', () => { + const config = new Config({ + ...baseParams, + usageStatisticsEnabled: undefined, + }); + + expect(config.getUsageStatisticsEnabled()).toBe(true); + }); + + it.each([{ enabled: true }, { enabled: false }])( + 'sets usage statistics based on the provided value (enabled: $enabled)', + ({ enabled }) => { + const config = new Config({ + ...baseParams, + usageStatisticsEnabled: enabled, + }); + expect(config.getUsageStatisticsEnabled()).toBe(enabled); + }, + ); + + it('logs the session start event', () => { + new Config({ + ...baseParams, + usageStatisticsEnabled: true, + }); + + expect( + ClearcutLogger.prototype.logStartSessionEvent, + ).toHaveBeenCalledOnce(); + }); + }); + describe('Telemetry Settings', () => { it('should return default telemetry target if not provided', () => { const params: ConfigParameters = { diff --git a/packages/core/src/mocks/msw.ts b/packages/core/src/mocks/msw.ts new file mode 100644 index 00000000..4bf93138 --- /dev/null +++ b/packages/core/src/mocks/msw.ts @@ -0,0 +1,9 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { setupServer } from 'msw/node'; + +export const server = setupServer(); diff --git a/packages/core/src/telemetry/clearcut-logger/clearcut-logger.test.ts b/packages/core/src/telemetry/clearcut-logger/clearcut-logger.test.ts index f955eb5a..96129ad3 100644 --- 
a/packages/core/src/telemetry/clearcut-logger/clearcut-logger.test.ts +++ b/packages/core/src/telemetry/clearcut-logger/clearcut-logger.test.ts @@ -4,33 +4,49 @@ * SPDX-License-Identifier: Apache-2.0 */ -import { vi, describe, it, expect, beforeEach, afterEach } from 'vitest'; -import * as https from 'https'; -import { ClientRequest, IncomingMessage } from 'http'; -import { Readable, Writable } from 'stream'; - import { - ClearcutLogger, - LogResponse, - LogEventEntry, -} from './clearcut-logger.js'; -import { Config } from '../../config/config.js'; + vi, + describe, + it, + expect, + afterEach, + beforeAll, + afterAll, +} from 'vitest'; + +import { ClearcutLogger, LogEventEntry, TEST_ONLY } from './clearcut-logger.js'; +import { ConfigParameters } from '../../config/config.js'; import * as userAccount from '../../utils/user_account.js'; import * as userId from '../../utils/user_id.js'; +import { EventMetadataKey } from './event-metadata-key.js'; +import { makeFakeConfig } from '../../test-utils/config.js'; +import { http, HttpResponse } from 'msw'; +import { server } from '../../mocks/msw.js'; -// Mock dependencies -vi.mock('https-proxy-agent'); -vi.mock('https'); vi.mock('../../utils/user_account'); vi.mock('../../utils/user_id'); -const mockHttps = vi.mocked(https); const mockUserAccount = vi.mocked(userAccount); const mockUserId = vi.mocked(userId); +// TODO(richieforeman): Consider moving this to test setup globally. 
+beforeAll(() => { + server.listen({}); +}); + +afterEach(() => { + server.resetHandlers(); +}); + +afterAll(() => { + server.close(); +}); + describe('ClearcutLogger', () => { - let mockConfig: Config; - let logger: ClearcutLogger | undefined; + const NEXT_WAIT_MS = 1234; + const CLEARCUT_URL = 'https://play.googleapis.com/log'; + const MOCK_DATE = new Date('2025-01-02T00:00:00.000Z'); + const EXAMPLE_RESPONSE = `["${NEXT_WAIT_MS}",null,[[["ANDROID_BACKUP",0],["BATTERY_STATS",0],["SMART_SETUP",0],["TRON",0]],-3334737594024971225],[]]`; // A helper to get the internal events array for testing const getEvents = (l: ClearcutLogger): LogEventEntry[][] => @@ -38,32 +54,37 @@ describe('ClearcutLogger', () => { const getEventsSize = (l: ClearcutLogger): number => l['events'].size; - const getMaxEvents = (l: ClearcutLogger): number => l['max_events']; - - const getMaxRetryEvents = (l: ClearcutLogger): number => - l['max_retry_events']; - const requeueFailedEvents = (l: ClearcutLogger, events: LogEventEntry[][]) => l['requeueFailedEvents'](events); - beforeEach(() => { + function setup({ + config = {} as Partial, + lifetimeGoogleAccounts = 1, + cachedGoogleAccount = 'test@google.com', + installationId = 'test-installation-id', + } = {}) { + server.resetHandlers( + http.post(CLEARCUT_URL, () => HttpResponse.text(EXAMPLE_RESPONSE)), + ); + vi.useFakeTimers(); - vi.setSystemTime(new Date()); + vi.setSystemTime(MOCK_DATE); - mockConfig = { - getUsageStatisticsEnabled: vi.fn().mockReturnValue(true), - getDebugMode: vi.fn().mockReturnValue(false), - getSessionId: vi.fn().mockReturnValue('test-session-id'), - getProxy: vi.fn().mockReturnValue(undefined), - } as unknown as Config; + const loggerConfig = makeFakeConfig({ + ...config, + }); + ClearcutLogger.clearInstance(); - mockUserAccount.getCachedGoogleAccount.mockReturnValue('test@google.com'); - mockUserAccount.getLifetimeGoogleAccounts.mockReturnValue(1); - mockUserId.getInstallationId.mockReturnValue('test-installation-id'); 
+ mockUserAccount.getCachedGoogleAccount.mockReturnValue(cachedGoogleAccount); + mockUserAccount.getLifetimeGoogleAccounts.mockReturnValue( + lifetimeGoogleAccounts, + ); + mockUserId.getInstallationId.mockReturnValue(installationId); - logger = ClearcutLogger.getInstance(mockConfig); - expect(logger).toBeDefined(); - }); + const logger = ClearcutLogger.getInstance(loggerConfig); + + return { logger, loggerConfig }; + } afterEach(() => { ClearcutLogger.clearInstance(); @@ -71,109 +92,131 @@ describe('ClearcutLogger', () => { vi.restoreAllMocks(); }); - it('should not return an instance if usage statistics are disabled', () => { - ClearcutLogger.clearInstance(); - vi.spyOn(mockConfig, 'getUsageStatisticsEnabled').mockReturnValue(false); - const disabledLogger = ClearcutLogger.getInstance(mockConfig); - expect(disabledLogger).toBeUndefined(); + describe('getInstance', () => { + it.each([ + { usageStatisticsEnabled: false, expectedValue: undefined }, + { + usageStatisticsEnabled: true, + expectedValue: expect.any(ClearcutLogger), + }, + ])( + 'returns an instance if usage statistics are enabled', + ({ usageStatisticsEnabled, expectedValue }) => { + ClearcutLogger.clearInstance(); + const { logger } = setup({ + config: { + usageStatisticsEnabled, + }, + }); + expect(logger).toEqual(expectedValue); + }, + ); + + it('is a singleton', () => { + ClearcutLogger.clearInstance(); + const { loggerConfig } = setup(); + const logger1 = ClearcutLogger.getInstance(loggerConfig); + const logger2 = ClearcutLogger.getInstance(loggerConfig); + expect(logger1).toBe(logger2); + }); + }); + + describe('createLogEvent', () => { + it('logs the total number of google accounts', () => { + const { logger } = setup({ + lifetimeGoogleAccounts: 9001, + }); + + const event = logger?.createLogEvent('abc', []); + + expect(event?.event_metadata[0][0]).toEqual({ + gemini_cli_key: EventMetadataKey.GEMINI_CLI_GOOGLE_ACCOUNTS_COUNT, + value: '9001', + }); + }); + + it('logs the current surface', () => { 
+ const { logger } = setup({}); + + const event = logger?.createLogEvent('abc', []); + + expect(event?.event_metadata[0][1]).toEqual({ + gemini_cli_key: EventMetadataKey.GEMINI_CLI_SURFACE, + value: 'SURFACE_NOT_SET', + }); + }); }); describe('enqueueLogEvent', () => { it('should add events to the queue', () => { + const { logger } = setup(); logger!.enqueueLogEvent({ test: 'event1' }); expect(getEventsSize(logger!)).toBe(1); }); it('should evict the oldest event when the queue is full', () => { - const maxEvents = getMaxEvents(logger!); + const { logger } = setup(); - for (let i = 0; i < maxEvents; i++) { + for (let i = 0; i < TEST_ONLY.MAX_EVENTS; i++) { logger!.enqueueLogEvent({ event_id: i }); } - expect(getEventsSize(logger!)).toBe(maxEvents); + expect(getEventsSize(logger!)).toBe(TEST_ONLY.MAX_EVENTS); const firstEvent = JSON.parse( getEvents(logger!)[0][0].source_extension_json, ); expect(firstEvent.event_id).toBe(0); // This should push out the first event - logger!.enqueueLogEvent({ event_id: maxEvents }); + logger!.enqueueLogEvent({ event_id: TEST_ONLY.MAX_EVENTS }); - expect(getEventsSize(logger!)).toBe(maxEvents); + expect(getEventsSize(logger!)).toBe(TEST_ONLY.MAX_EVENTS); const newFirstEvent = JSON.parse( getEvents(logger!)[0][0].source_extension_json, ); expect(newFirstEvent.event_id).toBe(1); const lastEvent = JSON.parse( - getEvents(logger!)[maxEvents - 1][0].source_extension_json, + getEvents(logger!)[TEST_ONLY.MAX_EVENTS - 1][0].source_extension_json, ); - expect(lastEvent.event_id).toBe(maxEvents); + expect(lastEvent.event_id).toBe(TEST_ONLY.MAX_EVENTS); }); }); describe('flushToClearcut', () => { - let mockRequest: Writable; - let mockResponse: Readable & Partial; - - beforeEach(() => { - mockRequest = new Writable({ - write(chunk, encoding, callback) { - callback(); + it('allows for usage with a configured proxy agent', async () => { + const { logger } = setup({ + config: { + proxy: 'http://mycoolproxy.whatever.com:3128', }, }); - 
vi.spyOn(mockRequest, 'on'); - vi.spyOn(mockRequest, 'end').mockReturnThis(); - vi.spyOn(mockRequest, 'destroy').mockReturnThis(); - mockResponse = new Readable({ read() {} }) as Readable & - Partial; + logger!.enqueueLogEvent({ event_id: 1 }); - mockHttps.request.mockImplementation( - ( - _options: string | https.RequestOptions | URL, - ...args: unknown[] - ): ClientRequest => { - const callback = args.find((arg) => typeof arg === 'function') as - | ((res: IncomingMessage) => void) - | undefined; + const response = await logger!.flushToClearcut(); - if (callback) { - callback(mockResponse as IncomingMessage); - } - return mockRequest as ClientRequest; - }, - ); + expect(response.nextRequestWaitMs).toBe(NEXT_WAIT_MS); }); it('should clear events on successful flush', async () => { - mockResponse.statusCode = 200; - const mockResponseBody = { nextRequestWaitMs: 1000 }; - // Encoded protobuf for {nextRequestWaitMs: 1000} which is `08 E8 07` - const encodedResponse = Buffer.from([8, 232, 7]); + const { logger } = setup(); logger!.enqueueLogEvent({ event_id: 1 }); - const flushPromise = logger!.flushToClearcut(); + const response = await logger!.flushToClearcut(); - mockResponse.push(encodedResponse); - mockResponse.push(null); // End the stream - - const response: LogResponse = await flushPromise; - - expect(getEventsSize(logger!)).toBe(0); - expect(response.nextRequestWaitMs).toBe( - mockResponseBody.nextRequestWaitMs, - ); + expect(getEvents(logger!)).toEqual([]); + expect(response.nextRequestWaitMs).toBe(NEXT_WAIT_MS); }); it('should handle a network error and requeue events', async () => { + const { logger } = setup(); + + server.resetHandlers(http.post(CLEARCUT_URL, () => HttpResponse.error())); logger!.enqueueLogEvent({ event_id: 1 }); logger!.enqueueLogEvent({ event_id: 2 }); expect(getEventsSize(logger!)).toBe(2); - const flushPromise = logger!.flushToClearcut(); - mockRequest.emit('error', new Error('Network error')); - await flushPromise; + const x = 
logger!.flushToClearcut(); + await x; expect(getEventsSize(logger!)).toBe(2); const events = getEvents(logger!); @@ -181,18 +224,28 @@ describe('ClearcutLogger', () => { }); it('should handle an HTTP error and requeue events', async () => { - mockResponse.statusCode = 500; - mockResponse.statusMessage = 'Internal Server Error'; + const { logger } = setup(); + + server.resetHandlers( + http.post( + CLEARCUT_URL, + () => + new HttpResponse( + { 'the system is down': true }, + { + status: 500, + }, + ), + ), + ); logger!.enqueueLogEvent({ event_id: 1 }); logger!.enqueueLogEvent({ event_id: 2 }); - expect(getEventsSize(logger!)).toBe(2); - const flushPromise = logger!.flushToClearcut(); - mockResponse.emit('end'); // End the response to trigger promise resolution - await flushPromise; + expect(getEvents(logger!).length).toBe(2); + await logger!.flushToClearcut(); - expect(getEventsSize(logger!)).toBe(2); + expect(getEvents(logger!).length).toBe(2); const events = getEvents(logger!); expect(JSON.parse(events[0][0].source_extension_json).event_id).toBe(1); }); @@ -200,7 +253,8 @@ describe('ClearcutLogger', () => { describe('requeueFailedEvents logic', () => { it('should limit the number of requeued events to max_retry_events', () => { - const maxRetryEvents = getMaxRetryEvents(logger!); + const { logger } = setup(); + const maxRetryEvents = TEST_ONLY.MAX_RETRY_EVENTS; const eventsToLogCount = maxRetryEvents + 5; const eventsToSend: LogEventEntry[][] = []; for (let i = 0; i < eventsToLogCount; i++) { @@ -225,7 +279,8 @@ describe('ClearcutLogger', () => { }); it('should not requeue more events than available space in the queue', () => { - const maxEvents = getMaxEvents(logger!); + const { logger } = setup(); + const maxEvents = TEST_ONLY.MAX_EVENTS; const spaceToLeave = 5; const initialEventCount = maxEvents - spaceToLeave; for (let i = 0; i < initialEventCount; i++) { diff --git a/packages/core/src/telemetry/clearcut-logger/clearcut-logger.ts 
b/packages/core/src/telemetry/clearcut-logger/clearcut-logger.ts index 1e67d1cf..a41f832d 100644 --- a/packages/core/src/telemetry/clearcut-logger/clearcut-logger.ts +++ b/packages/core/src/telemetry/clearcut-logger/clearcut-logger.ts @@ -4,10 +4,7 @@ * SPDX-License-Identifier: Apache-2.0 */ -import { Buffer } from 'buffer'; -import * as https from 'https'; import { HttpsProxyAgent } from 'https-proxy-agent'; - import { StartSessionEvent, EndSessionEvent, @@ -56,19 +53,25 @@ export interface LogEventEntry { source_extension_json: string; } -export type EventValue = { +export interface EventValue { gemini_cli_key: EventMetadataKey | string; value: string; -}; +} -export type LogEvent = { - console_type: string; +export interface LogEvent { + console_type: 'GEMINI_CLI'; application: number; event_name: string; event_metadata: EventValue[][]; client_email?: string; client_install_id?: string; -}; +} + +export interface LogRequest { + log_source_name: 'CONCORD'; + request_time_ms: number; + log_event: LogEventEntry[][]; +} /** * Determine the surface that the user is currently using. Surface is effectively the @@ -89,22 +92,59 @@ function determineSurface(): string { } } +/** + * Clearcut URL to send logging events to. + */ +const CLEARCUT_URL = 'https://play.googleapis.com/log?format=json&hasfast=true'; + +/** + * Interval in which buffered events are sent to clearcut. + */ +const FLUSH_INTERVAL_MS = 1000 * 60; + +/** + * Maximum amount of events to keep in memory. Events added after this amount + * are dropped until the next flush to clearcut, which happens periodically as + * defined by {@link FLUSH_INTERVAL_MS}. + */ +const MAX_EVENTS = 1000; + +/** + * Maximum events to retry after a failed clearcut flush + */ +const MAX_RETRY_EVENTS = 100; + // Singleton class for batch posting log events to Clearcut. When a new event comes in, the elapsed time // is checked and events are flushed to Clearcut if at least a minute has passed since the last flush. 
export class ClearcutLogger { private static instance: ClearcutLogger; private config?: Config; + + /** + * Queue of pending events that need to be flushed to the server. New events + * are added to this queue and then flushed on demand (via `flushToClearcut`) + */ private readonly events: FixedDeque; - private last_flush_time: number = Date.now(); - private flush_interval_ms: number = 1000 * 60; // Wait at least a minute before flushing events. - private readonly max_events: number = 1000; // Maximum events to keep in memory - private readonly max_retry_events: number = 100; // Maximum failed events to retry - private flushing: boolean = false; // Prevent concurrent flush operations - private pendingFlush: boolean = false; // Track if a flush was requested during an ongoing flush + + /** + * The last time that the events were successfully flushed to the server. + */ + private lastFlushTime: number = Date.now(); + + /** + * the value is true when there is a pending flush happening. This prevents + * concurrent flush operations. + */ + private flushing: boolean = false; + + /** + * This value is true when a flush was requested during an ongoing flush. + */ + private pendingFlush: boolean = false; private constructor(config?: Config) { this.config = config; - this.events = new FixedDeque(Array, this.max_events); + this.events = new FixedDeque(Array, MAX_EVENTS); } static getInstance(config?: Config): ClearcutLogger | undefined { @@ -125,7 +165,7 @@ export class ClearcutLogger { enqueueLogEvent(event: object): void { try { // Manually handle overflow for FixedDeque, which throws when full. - const wasAtCapacity = this.events.size >= this.max_events; + const wasAtCapacity = this.events.size >= MAX_EVENTS; if (wasAtCapacity) { this.events.shift(); // Evict oldest element to make space. 
@@ -150,31 +190,14 @@ export class ClearcutLogger { } } - addDefaultFields(data: EventValue[]): void { - const totalAccounts = getLifetimeGoogleAccounts(); - const surface = determineSurface(); - const defaultLogMetadata = [ - { - gemini_cli_key: EventMetadataKey.GEMINI_CLI_GOOGLE_ACCOUNTS_COUNT, - value: totalAccounts.toString(), - }, - { - gemini_cli_key: EventMetadataKey.GEMINI_CLI_SURFACE, - value: surface, - }, - ]; - data.push(...defaultLogMetadata); - } - createLogEvent(name: string, data: EventValue[]): LogEvent { const email = getCachedGoogleAccount(); - // Add default fields that should exist for all logs - this.addDefaultFields(data); + data = addDefaultFields(data); const logEvent: LogEvent = { console_type: 'GEMINI_CLI', - application: 102, + application: 102, // GEMINI_CLI event_name: name, event_metadata: [data], }; @@ -190,7 +213,7 @@ export class ClearcutLogger { } flushIfNeeded(): void { - if (Date.now() - this.last_flush_time < this.flush_interval_ms) { + if (Date.now() - this.lastFlushTime < FLUSH_INTERVAL_MS) { return; } @@ -217,140 +240,67 @@ export class ClearcutLogger { const eventsToSend = this.events.toArray() as LogEventEntry[][]; this.events.clear(); - return new Promise<{ buffer: Buffer; statusCode?: number }>( - (resolve, reject) => { - const request = [ - { - log_source_name: 'CONCORD', - request_time_ms: Date.now(), - log_event: eventsToSend, - }, - ]; - const body = safeJsonStringify(request); - const options = { - hostname: 'play.googleapis.com', - path: '/log', - method: 'POST', - headers: { 'Content-Length': Buffer.byteLength(body) }, - timeout: 30000, // 30-second timeout - }; - const bufs: Buffer[] = []; - const req = https.request( - { - ...options, - agent: this.getProxyAgent(), - }, - (res) => { - res.on('error', reject); // Handle stream errors - res.on('data', (buf) => bufs.push(buf)); - res.on('end', () => { - try { - const buffer = Buffer.concat(bufs); - // Check if we got a successful response - if ( - res.statusCode && 
- res.statusCode >= 200 && - res.statusCode < 300 - ) { - resolve({ buffer, statusCode: res.statusCode }); - } else { - // HTTP error - reject with status code for retry handling - reject( - new Error(`HTTP ${res.statusCode}: ${res.statusMessage}`), - ); - } - } catch (e) { - reject(e); - } - }); - }, - ); - req.on('error', (e) => { - // Network-level error - reject(e); - }); - req.on('timeout', () => { - if (!req.destroyed) { - req.destroy(new Error('Request timeout after 30 seconds')); - } - }); - req.end(body); + const request: LogRequest[] = [ + { + log_source_name: 'CONCORD', + request_time_ms: Date.now(), + log_event: eventsToSend, }, - ) - .then(({ buffer }) => { - try { - this.last_flush_time = Date.now(); - return this.decodeLogResponse(buffer) || {}; - } catch (error: unknown) { - console.error('Error decoding log response:', error); - return {}; - } - }) - .catch((error: unknown) => { - // Handle both network-level and HTTP-level errors + ]; + + let result: LogResponse = {}; + + try { + const response = await fetch(CLEARCUT_URL, { + method: 'POST', + body: safeJsonStringify(request), + headers: { + 'Content-Type': 'application/json', + }, + }); + + const responseBody = await response.text(); + + if (response.status >= 200 && response.status < 300) { + this.lastFlushTime = Date.now(); + const nextRequestWaitMs = Number(JSON.parse(responseBody)[0]); + result = { + ...result, + nextRequestWaitMs, + }; + } else { if (this.config?.getDebugMode()) { - console.error('Error flushing log events:', error); + console.error( + `Error flushing log events: HTTP ${response.status}: ${response.statusText}`, + ); } // Re-queue failed events for retry this.requeueFailedEvents(eventsToSend); + } + } catch (e: unknown) { + if (this.config?.getDebugMode()) { + console.error('Error flushing log events:', e as Error); + } - // Return empty response to maintain the Promise contract - return {}; - }) - .finally(() => { - this.flushing = false; + // Re-queue failed events for 
retry + this.requeueFailedEvents(eventsToSend); + } - // If a flush was requested while we were flushing, flush again - if (this.pendingFlush) { - this.pendingFlush = false; - // Fire and forget the pending flush - this.flushToClearcut().catch((error) => { - if (this.config?.getDebugMode()) { - console.debug('Error in pending flush to Clearcut:', error); - } - }); + this.flushing = false; + + // If a flush was requested while we were flushing, flush again + if (this.pendingFlush) { + this.pendingFlush = false; + // Fire and forget the pending flush + this.flushToClearcut().catch((error) => { + if (this.config?.getDebugMode()) { + console.debug('Error in pending flush to Clearcut:', error); } }); - } - - // Visible for testing. Decodes protobuf-encoded response from Clearcut server. - decodeLogResponse(buf: Buffer): LogResponse | undefined { - // TODO(obrienowen): return specific errors to facilitate debugging. - if (buf.length < 1) { - return undefined; } - // The first byte of the buffer is `field<<3 | type`. We're looking for field - // 1, with type varint, represented by type=0. If the first byte isn't 8, that - // means field 1 is missing or the message is corrupted. Either way, we return - // undefined. - if (buf.readUInt8(0) !== 8) { - return undefined; - } - - let ms = BigInt(0); - let cont = true; - - // In each byte, the most significant bit is the continuation bit. If it's - // set, we keep going. The lowest 7 bits, are data bits. They are concatenated - // in reverse order to form the final number. - for (let i = 1; cont && i < buf.length; i++) { - const byte = buf.readUInt8(i); - ms |= BigInt(byte & 0x7f) << BigInt(7 * (i - 1)); - cont = (byte & 0x80) !== 0; - } - - if (cont) { - // We have fallen off the buffer without seeing a terminating byte. The - // message is corrupted. 
- return undefined; - } - - const returnVal = { - nextRequestWaitMs: Number(ms), - }; - return returnVal; + return result; } logStartSessionEvent(event: StartSessionEvent): void { @@ -752,24 +702,21 @@ export class ClearcutLogger { private requeueFailedEvents(eventsToSend: LogEventEntry[][]): void { // Add the events back to the front of the queue to be retried, but limit retry queue size - const eventsToRetry = eventsToSend.slice(-this.max_retry_events); // Keep only the most recent events + const eventsToRetry = eventsToSend.slice(-MAX_RETRY_EVENTS); // Keep only the most recent events // Log a warning if we're dropping events - if ( - eventsToSend.length > this.max_retry_events && - this.config?.getDebugMode() - ) { + if (eventsToSend.length > MAX_RETRY_EVENTS && this.config?.getDebugMode()) { console.warn( `ClearcutLogger: Dropping ${ - eventsToSend.length - this.max_retry_events + eventsToSend.length - MAX_RETRY_EVENTS } events due to retry queue limit. Total events: ${ eventsToSend.length - }, keeping: ${this.max_retry_events}`, + }, keeping: ${MAX_RETRY_EVENTS}`, ); } // Determine how many events can be re-queued - const availableSpace = this.max_events - this.events.size; + const availableSpace = MAX_EVENTS - this.events.size; const numEventsToRequeue = Math.min(eventsToRetry.length, availableSpace); if (numEventsToRequeue === 0) { @@ -792,7 +739,7 @@ export class ClearcutLogger { this.events.unshift(eventsToRequeue[i]); } // Clear any potential overflow - while (this.events.size > this.max_events) { + while (this.events.size > MAX_EVENTS) { this.events.pop(); } @@ -803,3 +750,28 @@ export class ClearcutLogger { } } } + +/** + * Adds default fields to data, and returns a new data array. This fields + * should exist on all log events. 
+ */ +function addDefaultFields(data: EventValue[]): EventValue[] { + const totalAccounts = getLifetimeGoogleAccounts(); + const surface = determineSurface(); + const defaultLogMetadata: EventValue[] = [ + { + gemini_cli_key: EventMetadataKey.GEMINI_CLI_GOOGLE_ACCOUNTS_COUNT, + value: `${totalAccounts}`, + }, + { + gemini_cli_key: EventMetadataKey.GEMINI_CLI_SURFACE, + value: surface, + }, + ]; + return [...data, ...defaultLogMetadata]; +} + +export const TEST_ONLY = { + MAX_RETRY_EVENTS, + MAX_EVENTS, +}; diff --git a/packages/core/src/test-utils/config.ts b/packages/core/src/test-utils/config.ts new file mode 100644 index 00000000..08faf8c3 --- /dev/null +++ b/packages/core/src/test-utils/config.ts @@ -0,0 +1,36 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { Config, ConfigParameters } from '../config/config.js'; + +/** + * Default parameters used for {@link FAKE_CONFIG} + */ +export const DEFAULT_CONFIG_PARAMETERS: ConfigParameters = { + usageStatisticsEnabled: true, + debugMode: false, + sessionId: 'test-session-id', + proxy: undefined, + model: 'gemini-9001-super-duper', + targetDir: '/', + cwd: '/', +}; + +/** + * Produces a config. Default paramters are set to + * {@link DEFAULT_CONFIG_PARAMETERS}, optionally, fields can be specified to + * override those defaults. 
+ */ +export function makeFakeConfig( + config: Partial = { + ...DEFAULT_CONFIG_PARAMETERS, + }, +): Config { + return new Config({ + ...DEFAULT_CONFIG_PARAMETERS, + ...config, + }); +} From 4ecfb478b066ba8448eeef579a874ffedfa910ee Mon Sep 17 00:00:00 2001 From: Shreya Keshive Date: Tue, 12 Aug 2025 14:37:49 -0400 Subject: [PATCH 03/45] chore(release): v0.1.19 (#6069) Co-authored-by: gemini-cli-robot --- package-lock.json | 10 +++++----- package.json | 4 ++-- packages/cli/package.json | 4 ++-- packages/core/package.json | 2 +- packages/test-utils/package.json | 2 +- 5 files changed, 11 insertions(+), 11 deletions(-) diff --git a/package-lock.json b/package-lock.json index acdd67f5..92b08b02 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "@google/gemini-cli", - "version": "0.1.18", + "version": "0.1.19", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "@google/gemini-cli", - "version": "0.1.18", + "version": "0.1.19", "workspaces": [ "packages/*" ], @@ -12286,7 +12286,7 @@ }, "packages/cli": { "name": "@google/gemini-cli", - "version": "0.1.18", + "version": "0.1.19", "dependencies": { "@google/gemini-cli-core": "file:../core", "@google/genai": "1.9.0", @@ -12490,7 +12490,7 @@ }, "packages/core": { "name": "@google/gemini-cli-core", - "version": "0.1.18", + "version": "0.1.19", "dependencies": { "@google/genai": "1.9.0", "@modelcontextprotocol/sdk": "^1.11.0", @@ -12617,7 +12617,7 @@ }, "packages/test-utils": { "name": "@google/gemini-cli-test-utils", - "version": "0.1.18", + "version": "0.1.19", "license": "Apache-2.0", "devDependencies": { "typescript": "^5.3.3" diff --git a/package.json b/package.json index 0e27676f..ffbd7628 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "@google/gemini-cli", - "version": "0.1.18", + "version": "0.1.19", "engines": { "node": ">=20.0.0" }, @@ -14,7 +14,7 @@ "url": "git+https://github.com/google-gemini/gemini-cli.git" }, "config": { - 
"sandboxImageUri": "us-docker.pkg.dev/gemini-code-dev/gemini-cli/sandbox:0.1.18" + "sandboxImageUri": "us-docker.pkg.dev/gemini-code-dev/gemini-cli/sandbox:0.1.19" }, "scripts": { "start": "node scripts/start.js", diff --git a/packages/cli/package.json b/packages/cli/package.json index 582705f7..22a3853e 100644 --- a/packages/cli/package.json +++ b/packages/cli/package.json @@ -1,6 +1,6 @@ { "name": "@google/gemini-cli", - "version": "0.1.18", + "version": "0.1.19", "description": "Gemini CLI", "repository": { "type": "git", @@ -25,7 +25,7 @@ "dist" ], "config": { - "sandboxImageUri": "us-docker.pkg.dev/gemini-code-dev/gemini-cli/sandbox:0.1.18" + "sandboxImageUri": "us-docker.pkg.dev/gemini-code-dev/gemini-cli/sandbox:0.1.19" }, "dependencies": { "@google/gemini-cli-core": "file:../core", diff --git a/packages/core/package.json b/packages/core/package.json index e78a33cf..e3fb4078 100644 --- a/packages/core/package.json +++ b/packages/core/package.json @@ -1,6 +1,6 @@ { "name": "@google/gemini-cli-core", - "version": "0.1.18", + "version": "0.1.19", "description": "Gemini CLI Core", "repository": { "type": "git", diff --git a/packages/test-utils/package.json b/packages/test-utils/package.json index fe401c37..cb93c941 100644 --- a/packages/test-utils/package.json +++ b/packages/test-utils/package.json @@ -1,6 +1,6 @@ { "name": "@google/gemini-cli-test-utils", - "version": "0.1.18", + "version": "0.1.19", "private": true, "main": "src/index.ts", "license": "Apache-2.0", From f34012034ce5ae86c3b7fd7c902efdefed1062c4 Mon Sep 17 00:00:00 2001 From: Richie Foreman Date: Tue, 12 Aug 2025 14:53:11 -0400 Subject: [PATCH 04/45] chore(vscode): Add `Build & Launch CLI` option to vscode project (#6027) --- .vscode/launch.json | 4 ++-- package.json | 1 + 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/.vscode/launch.json b/.vscode/launch.json index 9b9d150d..97c9eba5 100644 --- a/.vscode/launch.json +++ b/.vscode/launch.json @@ -7,9 +7,9 @@ { "type": "node", 
"request": "launch", - "name": "Launch CLI", + "name": "Build & Launch CLI", "runtimeExecutable": "npm", - "runtimeArgs": ["run", "start"], + "runtimeArgs": ["run", "build-and-start"], "skipFiles": ["/**"], "cwd": "${workspaceFolder}", "console": "integratedTerminal", diff --git a/package.json b/package.json index ffbd7628..e5a14de5 100644 --- a/package.json +++ b/package.json @@ -24,6 +24,7 @@ "auth": "npm run auth:npm && npm run auth:docker", "generate": "node scripts/generate-git-commit-info.js", "build": "node scripts/build.js", + "build-and-start": "npm run build && npm run start", "build:vscode": "node scripts/build_vscode_companion.js", "build:all": "npm run build && npm run build:sandbox && npm run build:vscode", "build:packages": "npm run build --workspaces", From 5ee5cf17ba952ad6bfb8a671c617aa1a2122cc6b Mon Sep 17 00:00:00 2001 From: Seth Vargo Date: Tue, 12 Aug 2025 15:18:54 -0400 Subject: [PATCH 05/45] chore(ci): do not "fail fast" on Node tests (#6053) --- .github/workflows/ci.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index d34f39a3..5569b2f9 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -59,6 +59,7 @@ jobs: checks: write pull-requests: write strategy: + fail-fast: false # So we can see all test failures matrix: os: [ubuntu-latest, windows-latest, macos-latest] node-version: [20.x, 22.x, 24.x] From 8524cce7b9b31e852b2bbb4d5bf9a4843beec2b1 Mon Sep 17 00:00:00 2001 From: Seth Vargo Date: Tue, 12 Aug 2025 15:51:50 -0400 Subject: [PATCH 06/45] chore(ci): Ensure release workflow is consistent and not vulnerable to injection attacks (#6059) --- .github/workflows/release.yml | 218 ++++++++++++++++++++-------------- 1 file changed, 128 insertions(+), 90 deletions(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index a011f776..8079e5c3 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -1,4 +1,4 @@ -name: Release 
+name: 'Release' on: schedule: @@ -9,165 +9,203 @@ on: version: description: 'The version to release (e.g., v0.1.11). Required for manual patch releases.' required: false # Not required for scheduled runs - type: string + type: 'string' ref: description: 'The branch or ref (full git sha) to release from.' required: true - type: string + type: 'string' default: 'main' dry_run: description: 'Run a dry-run of the release process; no branches, npm packages or GitHub releases will be created.' required: true - type: boolean + type: 'boolean' default: true create_nightly_release: description: 'Auto apply the nightly release tag, input version is ignored.' required: false - type: boolean + type: 'boolean' default: false force_skip_tests: description: 'Select to skip the "Run Tests" step in testing. Prod releases should run tests' required: false - type: boolean + type: 'boolean' default: false jobs: release: - runs-on: ubuntu-latest + runs-on: 'ubuntu-latest' environment: - name: production-release - url: ${{ github.server_url }}/${{ github.repository }}/releases/tag/${{ steps.version.outputs.RELEASE_TAG }} - if: github.repository == 'google-gemini/gemini-cli' + name: 'production-release' + url: '${{ github.server_url }}/${{ github.repository }}/releases/tag/${{ steps.version.outputs.RELEASE_TAG }}' + if: |- + ${{ github.repository == 'google-gemini/gemini-cli' }} permissions: - contents: write - packages: write - id-token: write - issues: write # For creating issues on failure + contents: 'write' + packages: 'write' + id-token: 'write' + issues: 'write' # For creating issues on failure outputs: - RELEASE_TAG: ${{ steps.version.outputs.RELEASE_TAG }} + RELEASE_TAG: '${{ steps.version.outputs.RELEASE_TAG }}' steps: - - name: Checkout code - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 + - name: 'Checkout' + uses: 'actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8' # ratchet:actions/checkout@v5 with: - ref: ${{ github.sha }} + ref: '${{ 
github.sha }}' fetch-depth: 0 - - name: Set booleans for simplified logic - id: vars - run: | + - name: 'Set booleans for simplified logic' + env: + CREATE_NIGHTLY_RELEASE: '${{ github.event.inputs.create_nightly_release }}' + EVENT_NAME: '${{ github.event_name }}' + DRY_RUN_INPUT: '${{ github.event.inputs.dry_run }}' + id: 'vars' + run: |- is_nightly="false" - if [[ "${{ github.event_name }}" == "schedule" || "${{ github.event.inputs.create_nightly_release }}" == "true" ]]; then + if [[ "${EVENT_NAME}" == "schedule" || "${CREATE_NIGHTLY_RELEASE}" == "true" ]]; then is_nightly="true" fi - echo "is_nightly=${is_nightly}" >> $GITHUB_OUTPUT + echo "is_nightly=${is_nightly}" >> "${GITHUB_OUTPUT}" is_dry_run="false" - if [[ "${{ github.event.inputs.dry_run }}" == "true" ]]; then + if [[ "${DRY_RUN_INPUT}" == "true" ]]; then is_dry_run="true" fi - echo "is_dry_run=${is_dry_run}" >> $GITHUB_OUTPUT + echo "is_dry_run=${is_dry_run}" >> "${GITHUB_OUTPUT}" - - name: Setup Node.js - uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # v4 + - name: 'Setup Node.js' + uses: 'actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020' # ratchet:actions/setup-node@v4 with: - node-version: '20' + node-version-file: '.nvmrc' cache: 'npm' - - name: Install Dependencies - run: npm ci + - name: 'Install Dependencies' + run: |- + npm ci - - name: Get the version - id: version - run: | - VERSION_JSON=$(node scripts/get-release-version.js) - echo "RELEASE_TAG=$(echo $VERSION_JSON | jq -r .releaseTag)" >> $GITHUB_OUTPUT - echo "RELEASE_VERSION=$(echo $VERSION_JSON | jq -r .releaseVersion)" >> $GITHUB_OUTPUT - echo "NPM_TAG=$(echo $VERSION_JSON | jq -r .npmTag)" >> $GITHUB_OUTPUT + - name: 'Get the version' + id: 'version' env: - IS_NIGHTLY: ${{ steps.vars.outputs.is_nightly }} - MANUAL_VERSION: ${{ inputs.version }} + IS_NIGHTLY: '${{ steps.vars.outputs.is_nightly }}' + MANUAL_VERSION: '${{ inputs.version }}' + run: |- + VERSION_JSON="$(node scripts/get-release-version.js)" 
+ echo "RELEASE_TAG=$(echo "${VERSION_JSON}" | jq -r .releaseTag)" >> "${GITHUB_OUTPUT}" + echo "RELEASE_VERSION=$(echo "${VERSION_JSON}" | jq -r .releaseVersion)" >> "${GITHUB_OUTPUT}" + echo "NPM_TAG=$(echo "${VERSION_JSON}" | jq -r .npmTag)" >> "${GITHUB_OUTPUT}" - - name: Run Tests - if: github.event.inputs.force_skip_tests != 'true' - run: | + - name: 'Run Tests' + if: |- + ${{ github.event.inputs.force_skip_tests != 'true' }} + env: + GEMINI_API_KEY: '${{ secrets.GEMINI_API_KEY }}' + run: |- npm run preflight npm run test:integration:sandbox:none npm run test:integration:sandbox:docker - env: - GEMINI_API_KEY: ${{ secrets.GEMINI_API_KEY }} - - name: Configure Git User - run: | + - name: 'Configure Git User' + run: |- git config user.name "gemini-cli-robot" git config user.email "gemini-cli-robot@google.com" - - name: Create and switch to a release branch - id: release_branch - run: | - BRANCH_NAME="release/${{ steps.version.outputs.RELEASE_TAG }}" - git switch -c $BRANCH_NAME - echo "BRANCH_NAME=${BRANCH_NAME}" >> $GITHUB_OUTPUT + - name: 'Create and switch to a release branch' + id: 'release_branch' + env: + RELEASE_TAG: '${{ steps.version.outputs.RELEASE_TAG }}' + run: |- + BRANCH_NAME="release/${RELEASE_TAG}" + git switch -c "${BRANCH_NAME}" + echo "BRANCH_NAME=${BRANCH_NAME}" >> "${GITHUB_OUTPUT}" - - name: Update package versions - run: | - npm run release:version ${{ steps.version.outputs.RELEASE_VERSION }} + - name: 'Update package versions' + env: + RELEASE_VERSION: '${{ steps.version.outputs.RELEASE_VERSION }}' + run: |- + npm run release:version "${RELEASE_VERSION}" - - name: Commit and Conditionally Push package versions - run: | + - name: 'Commit and Conditionally Push package versions' + env: + BRANCH_NAME: '${{ steps.release_branch.outputs.BRANCH_NAME }}' + IS_DRY_RUN: '${{ steps.vars.outputs.is_dry_run }}' + RELEASE_TAG: '${{ steps.version.outputs.RELEASE_TAG }}' + run: |- git add package.json package-lock.json packages/*/package.json - git 
commit -m "chore(release): ${{ steps.version.outputs.RELEASE_TAG }}" - if [[ "${{ steps.vars.outputs.is_dry_run }}" == "false" ]]; then + git commit -m "chore(release): ${RELEASE_TAG}" + if [[ "${IS_DRY_RUN}" == "false" ]]; then echo "Pushing release branch to remote..." - git push --set-upstream origin ${{ steps.release_branch.outputs.BRANCH_NAME }} --follow-tags + git push --set-upstream origin "${BRANCH_NAME}" --follow-tags else echo "Dry run enabled. Skipping push." fi - - name: Build and Prepare Packages - run: | + - name: 'Build and Prepare Packages' + run: |- npm run build:packages npm run prepare:package - - name: Configure npm for publishing - uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # v4 + - name: 'Configure npm for publishing' + uses: 'actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020' # ratchet:actions/setup-node@v4 with: - node-version: '20' + node-version-file: '.nvmrc' registry-url: 'https://wombat-dressing-room.appspot.com' scope: '@google' - - name: Publish @google/gemini-cli-core - run: npm publish --workspace=@google/gemini-cli-core --tag=${{ steps.version.outputs.NPM_TAG }} ${{ steps.vars.outputs.is_dry_run == 'true' && '--dry-run' || '' }} + - name: 'Publish @google/gemini-cli-core' env: - NODE_AUTH_TOKEN: ${{ secrets.WOMBAT_TOKEN_CORE }} + IS_DRY_RUN: '${{ steps.vars.outputs.is_dry_run }}' + NODE_AUTH_TOKEN: '${{ secrets.WOMBAT_TOKEN_CORE }}' + NPM_TAG: '${{ steps.version.outputs.NPM_TAG }}' + run: |- + npm publish \ + --dry-run="${IS_DRY_RUN}" \ + --workspace="@google/gemini-cli-core" \ + --tag="${NPM_TAG}" - - name: Install latest core package - if: steps.vars.outputs.is_dry_run == 'false' - run: npm install @google/gemini-cli-core@${{ steps.version.outputs.RELEASE_VERSION }} --workspace=@google/gemini-cli --save-exact - - - name: Publish @google/gemini-cli - run: npm publish --workspace=@google/gemini-cli --tag=${{ steps.version.outputs.NPM_TAG }} ${{ steps.vars.outputs.is_dry_run == 'true' && '--dry-run' 
|| '' }} + - name: 'Install latest core package' + if: |- + ${{ steps.vars.outputs.is_dry_run == 'false' }} env: - NODE_AUTH_TOKEN: ${{ secrets.WOMBAT_TOKEN_CLI }} + RELEASE_VERSION: '${{ steps.version.outputs.RELEASE_VERSION }}' + run: |- + npm install "@google/gemini-cli-core@${RELEASE_VERSION}" \ + --workspace="@google/gemini-cli" \ + --save-exact - - name: Create GitHub Release and Tag - if: ${{ steps.vars.outputs.is_dry_run == 'false' }} + - name: 'Publish @google/gemini-cli' env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - RELEASE_BRANCH: ${{ steps.release_branch.outputs.BRANCH_NAME }} - run: | - gh release create ${{ steps.version.outputs.RELEASE_TAG }} \ + IS_DRY_RUN: '${{ steps.vars.outputs.is_dry_run }}' + NODE_AUTH_TOKEN: '${{ secrets.WOMBAT_TOKEN_CLI }}' + NPM_TAG: '${{ steps.version.outputs.NPM_TAG }}' + run: |- + npm publish \ + --dry-run="${IS_DRY_RUN}" \ + --workspace="@google/gemini-cli" \ + --tag="${NPM_TAG}" + + - name: 'Create GitHub Release and Tag' + if: |- + ${{ steps.vars.outputs.is_dry_run == 'false' }} + env: + GITHUB_TOKEN: '${{ secrets.GITHUB_TOKEN }}' + RELEASE_BRANCH: '${{ steps.release_branch.outputs.BRANCH_NAME }}' + RELEASE_TAG: '${{ steps.version.outputs.RELEASE_TAG }}' + run: |- + gh release create "${RELEASE_TAG}" \ bundle/gemini.js \ --target "$RELEASE_BRANCH" \ - --title "Release ${{ steps.version.outputs.RELEASE_TAG }}" \ + --title "Release ${RELEASE_TAG}" \ --generate-notes - - name: Create Issue on Failure - if: failure() - run: | - gh issue create \ - --title "Release Failed for ${{ steps.version.outputs.RELEASE_TAG || 'N/A' }} on $(date +'%Y-%m-%d')" \ - --body "The release workflow failed. 
See the full run for details: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}" \ - --label "kind/bug,release-failure" + - name: 'Create Issue on Failure' + if: |- + ${{ failure() }} env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + GITHUB_TOKEN: '${{ secrets.GITHUB_TOKEN }}' + RELEASE_TAG: '${{ steps.version.outputs.RELEASE_TAG }} || "N/A"' + DETAILS_URL: '${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}' + run: |- + gh issue create \ + --title "Release Failed for ${RELEASE_TAG} on $(date +'%Y-%m-%d')" \ + --body "The release workflow failed. See the full run for details: ${DETAILS_URL}" \ + --label "kind/bug,release-failure" From 74fd0841d0d7148127e586fce4c550a01ff40e90 Mon Sep 17 00:00:00 2001 From: christine betts Date: Tue, 12 Aug 2025 20:08:47 +0000 Subject: [PATCH 07/45] [ide-mode] Update installation logic and nudge (#6068) --- packages/cli/src/ui/App.tsx | 14 ++++-- packages/cli/src/ui/IdeIntegrationNudge.tsx | 50 ++++++++++++++----- packages/cli/src/ui/commands/ideCommand.ts | 4 +- packages/core/src/ide/detect-ide.ts | 36 +++++++++----- packages/core/src/ide/ide-client.ts | 10 ++-- packages/core/src/ide/ide-installer.test.ts | 53 --------------------- packages/core/src/ide/ide-installer.ts | 22 +-------- packages/core/src/index.ts | 2 +- 8 files changed, 78 insertions(+), 113 deletions(-) diff --git a/packages/cli/src/ui/App.tsx b/packages/cli/src/ui/App.tsx index e952d6b2..ab30b730 100644 --- a/packages/cli/src/ui/App.tsx +++ b/packages/cli/src/ui/App.tsx @@ -576,14 +576,18 @@ const App = ({ config, settings, startupWarnings = [], version }: AppProps) => { const handleIdePromptComplete = useCallback( (result: IdeIntegrationNudgeResult) => { - if (result === 'yes') { - handleSlashCommand('/ide install'); + if (result.userSelection === 'yes') { + if (result.isExtensionPreInstalled) { + handleSlashCommand('/ide enable'); + } else { + handleSlashCommand('/ide install'); + } 
settings.setValue( SettingScope.User, 'hasSeenIdeIntegrationNudge', true, ); - } else if (result === 'dismiss') { + } else if (result.userSelection === 'dismiss') { settings.setValue( SettingScope.User, 'hasSeenIdeIntegrationNudge', @@ -942,9 +946,9 @@ const App = ({ config, settings, startupWarnings = [], version }: AppProps) => { )} - {shouldShowIdePrompt ? ( + {shouldShowIdePrompt && currentIDE ? ( ) : isFolderTrustDialogOpen ? ( diff --git a/packages/cli/src/ui/IdeIntegrationNudge.tsx b/packages/cli/src/ui/IdeIntegrationNudge.tsx index f0c6172d..13f70a75 100644 --- a/packages/cli/src/ui/IdeIntegrationNudge.tsx +++ b/packages/cli/src/ui/IdeIntegrationNudge.tsx @@ -4,44 +4,74 @@ * SPDX-License-Identifier: Apache-2.0 */ +import { DetectedIde, getIdeInfo } from '@google/gemini-cli-core'; import { Box, Text, useInput } from 'ink'; import { RadioButtonSelect, RadioSelectItem, } from './components/shared/RadioButtonSelect.js'; -export type IdeIntegrationNudgeResult = 'yes' | 'no' | 'dismiss'; +export type IdeIntegrationNudgeResult = { + userSelection: 'yes' | 'no' | 'dismiss'; + isExtensionPreInstalled: boolean; +}; interface IdeIntegrationNudgeProps { - ideName?: string; + ide: DetectedIde; onComplete: (result: IdeIntegrationNudgeResult) => void; } export function IdeIntegrationNudge({ - ideName, + ide, onComplete, }: IdeIntegrationNudgeProps) { useInput((_input, key) => { if (key.escape) { - onComplete('no'); + onComplete({ + userSelection: 'no', + isExtensionPreInstalled: false, + }); } }); + const { displayName: ideName } = getIdeInfo(ide); + // Assume extension is already installed if the env variables are set. 
+ const isExtensionPreInstalled = + !!process.env.GEMINI_CLI_IDE_SERVER_PORT && + !!process.env.GEMINI_CLI_IDE_WORKSPACE_PATH; + const OPTIONS: Array> = [ { label: 'Yes', - value: 'yes', + value: { + userSelection: 'yes', + isExtensionPreInstalled, + }, }, { label: 'No (esc)', - value: 'no', + value: { + userSelection: 'no', + isExtensionPreInstalled, + }, }, { label: "No, don't ask again", - value: 'dismiss', + value: { + userSelection: 'dismiss', + isExtensionPreInstalled, + }, }, ]; + const installText = isExtensionPreInstalled + ? `If you select Yes, the CLI will have access to your open files and display diffs directly in ${ + ideName ?? 'your editor' + }.` + : `If you select Yes, we'll install an extension that allows the CLI to access your open files and display diffs directly in ${ + ideName ?? 'your editor' + }.`; + return ( {'> '} - {`Do you want to connect your ${ideName ?? 'your'} editor to Gemini CLI?`} + {`Do you want to connect ${ideName ?? 'your'} editor to Gemini CLI?`} - {`If you select Yes, we'll install an extension that allows the CLI to access your open files and display diffs directly in ${ideName ?? 'your editor'}.`} + {installText} { content: `IDE integration is not supported in your current environment. 
To use this feature, run Gemini CLI in one of these supported IDEs: ${Object.values( DetectedIde, ) - .map((ide) => getIdeDisplayName(ide)) + .map((ide) => getIdeInfo(ide).displayName) .join(', ')}`, }) as const, }; diff --git a/packages/core/src/ide/detect-ide.ts b/packages/core/src/ide/detect-ide.ts index 759c4103..ef07994c 100644 --- a/packages/core/src/ide/detect-ide.ts +++ b/packages/core/src/ide/detect-ide.ts @@ -6,33 +6,43 @@ export enum DetectedIde { VSCode = 'vscode', - VSCodium = 'vscodium', Cursor = 'cursor', CloudShell = 'cloudshell', Codespaces = 'codespaces', - Windsurf = 'windsurf', FirebaseStudio = 'firebasestudio', Trae = 'trae', } -export function getIdeDisplayName(ide: DetectedIde): string { +export interface IdeInfo { + displayName: string; +} + +export function getIdeInfo(ide: DetectedIde): IdeInfo { switch (ide) { case DetectedIde.VSCode: - return 'VS Code'; - case DetectedIde.VSCodium: - return 'VSCodium'; + return { + displayName: 'VS Code', + }; case DetectedIde.Cursor: - return 'Cursor'; + return { + displayName: 'Cursor', + }; case DetectedIde.CloudShell: - return 'Cloud Shell'; + return { + displayName: 'Cloud Shell', + }; case DetectedIde.Codespaces: - return 'GitHub Codespaces'; - case DetectedIde.Windsurf: - return 'Windsurf'; + return { + displayName: 'GitHub Codespaces', + }; case DetectedIde.FirebaseStudio: - return 'Firebase Studio'; + return { + displayName: 'Firebase Studio', + }; case DetectedIde.Trae: - return 'Trae'; + return { + displayName: 'Trae', + }; default: { // This ensures that if a new IDE is added to the enum, we get a compile-time error. 
const exhaustiveCheck: never = ide; diff --git a/packages/core/src/ide/ide-client.ts b/packages/core/src/ide/ide-client.ts index 74d0df74..fe605eb2 100644 --- a/packages/core/src/ide/ide-client.ts +++ b/packages/core/src/ide/ide-client.ts @@ -6,11 +6,7 @@ import * as fs from 'node:fs'; import * as path from 'node:path'; -import { - detectIde, - DetectedIde, - getIdeDisplayName, -} from '../ide/detect-ide.js'; +import { detectIde, DetectedIde, getIdeInfo } from '../ide/detect-ide.js'; import { ideContext, IdeContextNotificationSchema, @@ -68,7 +64,7 @@ export class IdeClient { private constructor() { this.currentIde = detectIde(); if (this.currentIde) { - this.currentIdeDisplayName = getIdeDisplayName(this.currentIde); + this.currentIdeDisplayName = getIdeInfo(this.currentIde).displayName; } } @@ -86,7 +82,7 @@ export class IdeClient { `IDE integration is not supported in your current environment. To use this feature, run Gemini CLI in one of these supported IDEs: ${Object.values( DetectedIde, ) - .map((ide) => getIdeDisplayName(ide)) + .map((ide) => getIdeInfo(ide).displayName) .join(', ')}`, false, ); diff --git a/packages/core/src/ide/ide-installer.test.ts b/packages/core/src/ide/ide-installer.test.ts index 1afd7a36..e43e1b34 100644 --- a/packages/core/src/ide/ide-installer.test.ts +++ b/packages/core/src/ide/ide-installer.test.ts @@ -23,19 +23,6 @@ describe('ide-installer', () => { // A more specific check might be needed if we export the class expect(installer).toBeInstanceOf(Object); }); - - it('should return an OpenVSXInstaller for "vscodium"', () => { - const installer = getIdeInstaller(DetectedIde.VSCodium); - expect(installer).not.toBeNull(); - expect(installer).toBeInstanceOf(Object); - }); - - it('should return a DefaultIDEInstaller for an unknown IDE', () => { - const installer = getIdeInstaller('unknown' as DetectedIde); - // Assuming DefaultIDEInstaller is the fallback - expect(installer).not.toBeNull(); - expect(installer).toBeInstanceOf(Object); - 
}); }); describe('VsCodeInstaller', () => { @@ -67,44 +54,4 @@ describe('ide-installer', () => { }); }); }); - - describe('OpenVSXInstaller', () => { - let installer: IdeInstaller; - - beforeEach(() => { - installer = getIdeInstaller(DetectedIde.VSCodium)!; - }); - - afterEach(() => { - vi.restoreAllMocks(); - }); - - describe('install', () => { - it('should call execSync with the correct command and return success', async () => { - const execSyncSpy = vi - .spyOn(child_process, 'execSync') - .mockImplementation(() => ''); - const result = await installer.install(); - expect(execSyncSpy).toHaveBeenCalledWith( - 'npx ovsx get google.gemini-cli-vscode-ide-companion', - { stdio: 'pipe' }, - ); - expect(result.success).toBe(true); - expect(result.message).toContain( - 'VS Code companion extension was installed successfully from OpenVSX', - ); - }); - - it('should return a failure message on failed installation', async () => { - vi.spyOn(child_process, 'execSync').mockImplementation(() => { - throw new Error('Command failed'); - }); - const result = await installer.install(); - expect(result.success).toBe(false); - expect(result.message).toContain( - 'Failed to install VS Code companion extension from OpenVSX', - ); - }); - }); - }); }); diff --git a/packages/core/src/ide/ide-installer.ts b/packages/core/src/ide/ide-installer.ts index e6192bfa..7db8e2d2 100644 --- a/packages/core/src/ide/ide-installer.ts +++ b/packages/core/src/ide/ide-installer.ts @@ -147,31 +147,11 @@ class VsCodeInstaller implements IdeInstaller { } } -class OpenVSXInstaller implements IdeInstaller { - async install(): Promise { - // TODO: Use the correct extension path. - const command = `npx ovsx get google.gemini-cli-vscode-ide-companion`; - try { - child_process.execSync(command, { stdio: 'pipe' }); - return { - success: true, - message: - 'VS Code companion extension was installed successfully from OpenVSX. 
Please restart your terminal to complete the setup.', - }; - } catch (_error) { - return { - success: false, - message: `Failed to install VS Code companion extension from OpenVSX. Please try installing it manually.`, - }; - } - } -} - export function getIdeInstaller(ide: DetectedIde): IdeInstaller | null { switch (ide) { case DetectedIde.VSCode: return new VsCodeInstaller(); default: - return new OpenVSXInstaller(); + return null; } } diff --git a/packages/core/src/index.ts b/packages/core/src/index.ts index e60bd048..791446e3 100644 --- a/packages/core/src/index.ts +++ b/packages/core/src/index.ts @@ -50,7 +50,7 @@ export * from './services/gitService.js'; export * from './ide/ide-client.js'; export * from './ide/ideContext.js'; export * from './ide/ide-installer.js'; -export { getIdeDisplayName, DetectedIde } from './ide/detect-ide.js'; +export { getIdeInfo, DetectedIde, IdeInfo } from './ide/detect-ide.js'; // Export Shell Execution Service export * from './services/shellExecutionService.js'; From d219f9013206aad5a1361e436ad4a45114e9cd49 Mon Sep 17 00:00:00 2001 From: Jacob Richman Date: Tue, 12 Aug 2025 14:05:49 -0700 Subject: [PATCH 08/45] Switch from useInput to useKeypress. 
(#6056) --- packages/cli/src/ui/IdeIntegrationNudge.tsx | 22 +- packages/cli/src/ui/components/AuthDialog.tsx | 38 ++-- .../cli/src/ui/components/AuthInProgress.tsx | 16 +- .../cli/src/ui/components/DebugProfiler.tsx | 16 +- .../ui/components/EditorSettingsDialog.tsx | 22 +- .../ui/components/FolderTrustDialog.test.tsx | 9 +- .../src/ui/components/FolderTrustDialog.tsx | 16 +- .../cli/src/ui/components/SettingsDialog.tsx | 193 +++++++++--------- .../ui/components/ShellConfirmationDialog.tsx | 16 +- .../cli/src/ui/components/ThemeDialog.tsx | 22 +- .../messages/ToolConfirmationMessage.tsx | 18 +- .../components/shared/RadioButtonSelect.tsx | 20 +- .../ui/hooks/useAutoAcceptIndicator.test.ts | 70 ++++--- .../src/ui/hooks/useAutoAcceptIndicator.ts | 43 ++-- .../cli/src/ui/hooks/useGeminiStream.test.tsx | 25 ++- packages/cli/src/ui/hooks/useGeminiStream.ts | 15 +- .../src/ui/privacy/CloudFreePrivacyNotice.tsx | 16 +- .../src/ui/privacy/CloudPaidPrivacyNotice.tsx | 16 +- .../src/ui/privacy/GeminiPrivacyNotice.tsx | 16 +- 19 files changed, 350 insertions(+), 259 deletions(-) diff --git a/packages/cli/src/ui/IdeIntegrationNudge.tsx b/packages/cli/src/ui/IdeIntegrationNudge.tsx index 13f70a75..2be69ad7 100644 --- a/packages/cli/src/ui/IdeIntegrationNudge.tsx +++ b/packages/cli/src/ui/IdeIntegrationNudge.tsx @@ -5,11 +5,12 @@ */ import { DetectedIde, getIdeInfo } from '@google/gemini-cli-core'; -import { Box, Text, useInput } from 'ink'; +import { Box, Text } from 'ink'; import { RadioButtonSelect, RadioSelectItem, } from './components/shared/RadioButtonSelect.js'; +import { useKeypress } from './hooks/useKeypress.js'; export type IdeIntegrationNudgeResult = { userSelection: 'yes' | 'no' | 'dismiss'; @@ -25,14 +26,17 @@ export function IdeIntegrationNudge({ ide, onComplete, }: IdeIntegrationNudgeProps) { - useInput((_input, key) => { - if (key.escape) { - onComplete({ - userSelection: 'no', - isExtensionPreInstalled: false, - }); - } - }); + useKeypress( + (key) => { + if 
(key.name === 'escape') { + onComplete({ + userSelection: 'no', + isExtensionPreInstalled: false, + }); + } + }, + { isActive: true }, + ); const { displayName: ideName } = getIdeInfo(ide); // Assume extension is already installed if the env variables are set. diff --git a/packages/cli/src/ui/components/AuthDialog.tsx b/packages/cli/src/ui/components/AuthDialog.tsx index ae076ee7..1262f894 100644 --- a/packages/cli/src/ui/components/AuthDialog.tsx +++ b/packages/cli/src/ui/components/AuthDialog.tsx @@ -5,12 +5,13 @@ */ import React, { useState } from 'react'; -import { Box, Text, useInput } from 'ink'; +import { Box, Text } from 'ink'; import { Colors } from '../colors.js'; import { RadioButtonSelect } from './shared/RadioButtonSelect.js'; import { LoadedSettings, SettingScope } from '../../config/settings.js'; import { AuthType } from '@google/gemini-cli-core'; import { validateAuthMethod } from '../../config/auth.js'; +import { useKeypress } from '../hooks/useKeypress.js'; interface AuthDialogProps { onSelect: (authMethod: AuthType | undefined, scope: SettingScope) => void; @@ -108,23 +109,26 @@ export function AuthDialog({ } }; - useInput((_input, key) => { - if (key.escape) { - // Prevent exit if there is an error message. - // This means they user is not authenticated yet. - if (errorMessage) { - return; + useKeypress( + (key) => { + if (key.name === 'escape') { + // Prevent exit if there is an error message. + // This means they user is not authenticated yet. + if (errorMessage) { + return; + } + if (settings.merged.selectedAuthType === undefined) { + // Prevent exiting if no auth method is set + setErrorMessage( + 'You must select an auth method to proceed. Press Ctrl+C twice to exit.', + ); + return; + } + onSelect(undefined, SettingScope.User); } - if (settings.merged.selectedAuthType === undefined) { - // Prevent exiting if no auth method is set - setErrorMessage( - 'You must select an auth method to proceed. 
Press Ctrl+C twice to exit.', - ); - return; - } - onSelect(undefined, SettingScope.User); - } - }); + }, + { isActive: true }, + ); return ( void; @@ -18,11 +19,14 @@ export function AuthInProgress({ }: AuthInProgressProps): React.JSX.Element { const [timedOut, setTimedOut] = useState(false); - useInput((input, key) => { - if (key.escape || (key.ctrl && (input === 'c' || input === 'C'))) { - onTimeout(); - } - }); + useKeypress( + (key) => { + if (key.name === 'escape' || (key.ctrl && key.name === 'c')) { + onTimeout(); + } + }, + { isActive: true }, + ); useEffect(() => { const timer = setTimeout(() => { diff --git a/packages/cli/src/ui/components/DebugProfiler.tsx b/packages/cli/src/ui/components/DebugProfiler.tsx index 89c40a91..22c16cfb 100644 --- a/packages/cli/src/ui/components/DebugProfiler.tsx +++ b/packages/cli/src/ui/components/DebugProfiler.tsx @@ -4,9 +4,10 @@ * SPDX-License-Identifier: Apache-2.0 */ -import { Text, useInput } from 'ink'; +import { Text } from 'ink'; import { useEffect, useRef, useState } from 'react'; import { Colors } from '../colors.js'; +import { useKeypress } from '../hooks/useKeypress.js'; export const DebugProfiler = () => { const numRenders = useRef(0); @@ -16,11 +17,14 @@ export const DebugProfiler = () => { numRenders.current++; }); - useInput((input, key) => { - if (key.ctrl && input === 'b') { - setShowNumRenders((prev) => !prev); - } - }); + useKeypress( + (key) => { + if (key.ctrl && key.name === 'b') { + setShowNumRenders((prev) => !prev); + } + }, + { isActive: true }, + ); if (!showNumRenders) { return null; diff --git a/packages/cli/src/ui/components/EditorSettingsDialog.tsx b/packages/cli/src/ui/components/EditorSettingsDialog.tsx index 0b45d7f4..3c4c518b 100644 --- a/packages/cli/src/ui/components/EditorSettingsDialog.tsx +++ b/packages/cli/src/ui/components/EditorSettingsDialog.tsx @@ -5,7 +5,7 @@ */ import React, { useState } from 'react'; -import { Box, Text, useInput } from 'ink'; +import { Box, Text } from 
'ink'; import { Colors } from '../colors.js'; import { EDITOR_DISPLAY_NAMES, @@ -15,6 +15,7 @@ import { import { RadioButtonSelect } from './shared/RadioButtonSelect.js'; import { LoadedSettings, SettingScope } from '../../config/settings.js'; import { EditorType, isEditorAvailable } from '@google/gemini-cli-core'; +import { useKeypress } from '../hooks/useKeypress.js'; interface EditorDialogProps { onSelect: (editorType: EditorType | undefined, scope: SettingScope) => void; @@ -33,14 +34,17 @@ export function EditorSettingsDialog({ const [focusedSection, setFocusedSection] = useState<'editor' | 'scope'>( 'editor', ); - useInput((_, key) => { - if (key.tab) { - setFocusedSection((prev) => (prev === 'editor' ? 'scope' : 'editor')); - } - if (key.escape) { - onExit(); - } - }); + useKeypress( + (key) => { + if (key.name === 'tab') { + setFocusedSection((prev) => (prev === 'editor' ? 'scope' : 'editor')); + } + if (key.name === 'escape') { + onExit(); + } + }, + { isActive: true }, + ); const editorItems: EditorDisplay[] = editorSettingsManager.getAvailableEditorDisplays(); diff --git a/packages/cli/src/ui/components/FolderTrustDialog.test.tsx b/packages/cli/src/ui/components/FolderTrustDialog.test.tsx index 01394d0f..d1be0b61 100644 --- a/packages/cli/src/ui/components/FolderTrustDialog.test.tsx +++ b/packages/cli/src/ui/components/FolderTrustDialog.test.tsx @@ -5,6 +5,7 @@ */ import { render } from 'ink-testing-library'; +import { waitFor } from '@testing-library/react'; import { vi } from 'vitest'; import { FolderTrustDialog, FolderTrustChoice } from './FolderTrustDialog.js'; @@ -18,12 +19,14 @@ describe('FolderTrustDialog', () => { ); }); - it('should call onSelect with DO_NOT_TRUST when escape is pressed', () => { + it('should call onSelect with DO_NOT_TRUST when escape is pressed', async () => { const onSelect = vi.fn(); const { stdin } = render(); - stdin.write('\u001B'); // Simulate escape key + stdin.write('\x1b'); - 
expect(onSelect).toHaveBeenCalledWith(FolderTrustChoice.DO_NOT_TRUST); + await waitFor(() => { + expect(onSelect).toHaveBeenCalledWith(FolderTrustChoice.DO_NOT_TRUST); + }); }); }); diff --git a/packages/cli/src/ui/components/FolderTrustDialog.tsx b/packages/cli/src/ui/components/FolderTrustDialog.tsx index 1918998c..30f3ff52 100644 --- a/packages/cli/src/ui/components/FolderTrustDialog.tsx +++ b/packages/cli/src/ui/components/FolderTrustDialog.tsx @@ -4,13 +4,14 @@ * SPDX-License-Identifier: Apache-2.0 */ -import { Box, Text, useInput } from 'ink'; +import { Box, Text } from 'ink'; import React from 'react'; import { Colors } from '../colors.js'; import { RadioButtonSelect, RadioSelectItem, } from './shared/RadioButtonSelect.js'; +import { useKeypress } from '../hooks/useKeypress.js'; export enum FolderTrustChoice { TRUST_FOLDER = 'trust_folder', @@ -25,11 +26,14 @@ interface FolderTrustDialogProps { export const FolderTrustDialog: React.FC = ({ onSelect, }) => { - useInput((_, key) => { - if (key.escape) { - onSelect(FolderTrustChoice.DO_NOT_TRUST); - } - }); + useKeypress( + (key) => { + if (key.name === 'escape') { + onSelect(FolderTrustChoice.DO_NOT_TRUST); + } + }, + { isActive: true }, + ); const options: Array> = [ { diff --git a/packages/cli/src/ui/components/SettingsDialog.tsx b/packages/cli/src/ui/components/SettingsDialog.tsx index 80e2339f..a09cd76a 100644 --- a/packages/cli/src/ui/components/SettingsDialog.tsx +++ b/packages/cli/src/ui/components/SettingsDialog.tsx @@ -5,7 +5,7 @@ */ import React, { useState, useEffect } from 'react'; -import { Box, Text, useInput } from 'ink'; +import { Box, Text } from 'ink'; import { Colors } from '../colors.js'; import { LoadedSettings, @@ -31,6 +31,7 @@ import { getDefaultValue, } from '../../utils/settingsUtils.js'; import { useVimMode } from '../contexts/VimModeContext.js'; +import { useKeypress } from '../hooks/useKeypress.js'; interface SettingsDialogProps { settings: LoadedSettings; @@ -256,107 +257,111 @@ 
export function SettingsDialog({ const showScrollUp = true; const showScrollDown = true; - useInput((input, key) => { - if (key.tab) { - setFocusSection((prev) => (prev === 'settings' ? 'scope' : 'settings')); - } - if (focusSection === 'settings') { - if (key.upArrow || input === 'k') { - const newIndex = - activeSettingIndex > 0 ? activeSettingIndex - 1 : items.length - 1; - setActiveSettingIndex(newIndex); - // Adjust scroll offset for wrap-around - if (newIndex === items.length - 1) { - setScrollOffset(Math.max(0, items.length - maxItemsToShow)); - } else if (newIndex < scrollOffset) { - setScrollOffset(newIndex); - } - } else if (key.downArrow || input === 'j') { - const newIndex = - activeSettingIndex < items.length - 1 ? activeSettingIndex + 1 : 0; - setActiveSettingIndex(newIndex); - // Adjust scroll offset for wrap-around - if (newIndex === 0) { - setScrollOffset(0); - } else if (newIndex >= scrollOffset + maxItemsToShow) { - setScrollOffset(newIndex - maxItemsToShow + 1); - } - } else if (key.return || input === ' ') { - items[activeSettingIndex]?.toggle(); - } else if ((key.ctrl && input === 'c') || (key.ctrl && input === 'l')) { - // Ctrl+C or Ctrl+L: Clear current setting and reset to default - const currentSetting = items[activeSettingIndex]; - if (currentSetting) { - const defaultValue = getDefaultValue(currentSetting.value); - // Ensure defaultValue is a boolean for setPendingSettingValue - const booleanDefaultValue = - typeof defaultValue === 'boolean' ? defaultValue : false; + useKeypress( + (key) => { + const { name, ctrl } = key; + if (name === 'tab') { + setFocusSection((prev) => (prev === 'settings' ? 'scope' : 'settings')); + } + if (focusSection === 'settings') { + if (name === 'up' || name === 'k') { + const newIndex = + activeSettingIndex > 0 ? 
activeSettingIndex - 1 : items.length - 1; + setActiveSettingIndex(newIndex); + // Adjust scroll offset for wrap-around + if (newIndex === items.length - 1) { + setScrollOffset(Math.max(0, items.length - maxItemsToShow)); + } else if (newIndex < scrollOffset) { + setScrollOffset(newIndex); + } + } else if (name === 'down' || name === 'j') { + const newIndex = + activeSettingIndex < items.length - 1 ? activeSettingIndex + 1 : 0; + setActiveSettingIndex(newIndex); + // Adjust scroll offset for wrap-around + if (newIndex === 0) { + setScrollOffset(0); + } else if (newIndex >= scrollOffset + maxItemsToShow) { + setScrollOffset(newIndex - maxItemsToShow + 1); + } + } else if (name === 'return' || name === 'space') { + items[activeSettingIndex]?.toggle(); + } else if (ctrl && (name === 'c' || name === 'l')) { + // Ctrl+C or Ctrl+L: Clear current setting and reset to default + const currentSetting = items[activeSettingIndex]; + if (currentSetting) { + const defaultValue = getDefaultValue(currentSetting.value); + // Ensure defaultValue is a boolean for setPendingSettingValue + const booleanDefaultValue = + typeof defaultValue === 'boolean' ? 
defaultValue : false; - // Update pending settings to default value - setPendingSettings((prev) => - setPendingSettingValue( - currentSetting.value, - booleanDefaultValue, - prev, - ), - ); - - // Remove from modified settings since it's now at default - setModifiedSettings((prev) => { - const updated = new Set(prev); - updated.delete(currentSetting.value); - return updated; - }); - - // Remove from restart-required settings if it was there - setRestartRequiredSettings((prev) => { - const updated = new Set(prev); - updated.delete(currentSetting.value); - return updated; - }); - - // If this setting doesn't require restart, save it immediately - if (!requiresRestart(currentSetting.value)) { - const immediateSettings = new Set([currentSetting.value]); - const immediateSettingsObject = setPendingSettingValue( - currentSetting.value, - booleanDefaultValue, - {}, + // Update pending settings to default value + setPendingSettings((prev) => + setPendingSettingValue( + currentSetting.value, + booleanDefaultValue, + prev, + ), ); - saveModifiedSettings( - immediateSettings, - immediateSettingsObject, - settings, - selectedScope, - ); + // Remove from modified settings since it's now at default + setModifiedSettings((prev) => { + const updated = new Set(prev); + updated.delete(currentSetting.value); + return updated; + }); + + // Remove from restart-required settings if it was there + setRestartRequiredSettings((prev) => { + const updated = new Set(prev); + updated.delete(currentSetting.value); + return updated; + }); + + // If this setting doesn't require restart, save it immediately + if (!requiresRestart(currentSetting.value)) { + const immediateSettings = new Set([currentSetting.value]); + const immediateSettingsObject = setPendingSettingValue( + currentSetting.value, + booleanDefaultValue, + {}, + ); + + saveModifiedSettings( + immediateSettings, + immediateSettingsObject, + settings, + selectedScope, + ); + } } } } - } - if (showRestartPrompt && input === 'r') { - // 
Only save settings that require restart (non-restart settings were already saved immediately) - const restartRequiredSettings = - getRestartRequiredFromModified(modifiedSettings); - const restartRequiredSet = new Set(restartRequiredSettings); + if (showRestartPrompt && name === 'r') { + // Only save settings that require restart (non-restart settings were already saved immediately) + const restartRequiredSettings = + getRestartRequiredFromModified(modifiedSettings); + const restartRequiredSet = new Set(restartRequiredSettings); - if (restartRequiredSet.size > 0) { - saveModifiedSettings( - restartRequiredSet, - pendingSettings, - settings, - selectedScope, - ); + if (restartRequiredSet.size > 0) { + saveModifiedSettings( + restartRequiredSet, + pendingSettings, + settings, + selectedScope, + ); + } + + setShowRestartPrompt(false); + setRestartRequiredSettings(new Set()); // Clear restart-required settings + if (onRestartRequest) onRestartRequest(); } - - setShowRestartPrompt(false); - setRestartRequiredSettings(new Set()); // Clear restart-required settings - if (onRestartRequest) onRestartRequest(); - } - if (key.escape) { - onSelect(undefined, selectedScope); - } - }); + if (name === 'escape') { + onSelect(undefined, selectedScope); + } + }, + { isActive: true }, + ); return ( = ({ request }) => { const { commands, onConfirm } = request; - useInput((_, key) => { - if (key.escape) { - onConfirm(ToolConfirmationOutcome.Cancel); - } - }); + useKeypress( + (key) => { + if (key.name === 'escape') { + onConfirm(ToolConfirmationOutcome.Cancel); + } + }, + { isActive: true }, + ); const handleSelect = (item: ToolConfirmationOutcome) => { if (item === ToolConfirmationOutcome.Cancel) { diff --git a/packages/cli/src/ui/components/ThemeDialog.tsx b/packages/cli/src/ui/components/ThemeDialog.tsx index 37663447..16ecfc8f 100644 --- a/packages/cli/src/ui/components/ThemeDialog.tsx +++ b/packages/cli/src/ui/components/ThemeDialog.tsx @@ -5,7 +5,7 @@ */ import React, { 
useCallback, useState } from 'react'; -import { Box, Text, useInput } from 'ink'; +import { Box, Text } from 'ink'; import { Colors } from '../colors.js'; import { themeManager, DEFAULT_THEME } from '../themes/theme-manager.js'; import { RadioButtonSelect } from './shared/RadioButtonSelect.js'; @@ -16,6 +16,7 @@ import { getScopeItems, getScopeMessageForSetting, } from '../../utils/dialogScopeUtils.js'; +import { useKeypress } from '../hooks/useKeypress.js'; interface ThemeDialogProps { /** Callback function when a theme is selected */ @@ -111,14 +112,17 @@ export function ThemeDialog({ 'theme', ); - useInput((input, key) => { - if (key.tab) { - setFocusedSection((prev) => (prev === 'theme' ? 'scope' : 'theme')); - } - if (key.escape) { - onSelect(undefined, selectedScope); - } - }); + useKeypress( + (key) => { + if (key.name === 'tab') { + setFocusedSection((prev) => (prev === 'theme' ? 'scope' : 'theme')); + } + if (key.name === 'escape') { + onSelect(undefined, selectedScope); + } + }, + { isActive: true }, + ); // Generate scope message for theme setting const otherScopeModifiedMessage = getScopeMessageForSetting( diff --git a/packages/cli/src/ui/components/messages/ToolConfirmationMessage.tsx b/packages/cli/src/ui/components/messages/ToolConfirmationMessage.tsx index 88b25b86..a8813491 100644 --- a/packages/cli/src/ui/components/messages/ToolConfirmationMessage.tsx +++ b/packages/cli/src/ui/components/messages/ToolConfirmationMessage.tsx @@ -5,7 +5,7 @@ */ import React from 'react'; -import { Box, Text, useInput } from 'ink'; +import { Box, Text } from 'ink'; import { DiffRenderer } from './DiffRenderer.js'; import { Colors } from '../../colors.js'; import { @@ -20,6 +20,7 @@ import { RadioSelectItem, } from '../shared/RadioButtonSelect.js'; import { MaxSizedBox } from '../shared/MaxSizedBox.js'; +import { useKeypress } from '../../hooks/useKeypress.js'; export interface ToolConfirmationMessageProps { confirmationDetails: ToolCallConfirmationDetails; @@ -56,12 
+57,15 @@ export const ToolConfirmationMessage: React.FC< onConfirm(outcome); }; - useInput((input, key) => { - if (!isFocused) return; - if (key.escape || (key.ctrl && (input === 'c' || input === 'C'))) { - handleConfirm(ToolConfirmationOutcome.Cancel); - } - }); + useKeypress( + (key) => { + if (!isFocused) return; + if (key.name === 'escape' || (key.ctrl && key.name === 'c')) { + handleConfirm(ToolConfirmationOutcome.Cancel); + } + }, + { isActive: isFocused }, + ); const handleSelect = (item: ToolConfirmationOutcome) => handleConfirm(item); diff --git a/packages/cli/src/ui/components/shared/RadioButtonSelect.tsx b/packages/cli/src/ui/components/shared/RadioButtonSelect.tsx index 8b0057ca..511d3847 100644 --- a/packages/cli/src/ui/components/shared/RadioButtonSelect.tsx +++ b/packages/cli/src/ui/components/shared/RadioButtonSelect.tsx @@ -5,8 +5,9 @@ */ import React, { useEffect, useState, useRef } from 'react'; -import { Text, Box, useInput } from 'ink'; +import { Text, Box } from 'ink'; import { Colors } from '../../colors.js'; +import { useKeypress } from '../../hooks/useKeypress.js'; /** * Represents a single option for the RadioButtonSelect. @@ -85,9 +86,10 @@ export function RadioButtonSelect({ [], ); - useInput( - (input, key) => { - const isNumeric = showNumbers && /^[0-9]$/.test(input); + useKeypress( + (key) => { + const { sequence, name } = key; + const isNumeric = showNumbers && /^[0-9]$/.test(sequence); // Any key press that is not a digit should clear the number input buffer. if (!isNumeric && numberInputTimer.current) { @@ -95,21 +97,21 @@ export function RadioButtonSelect({ setNumberInput(''); } - if (input === 'k' || key.upArrow) { + if (name === 'k' || name === 'up') { const newIndex = activeIndex > 0 ? activeIndex - 1 : items.length - 1; setActiveIndex(newIndex); onHighlight?.(items[newIndex]!.value); return; } - if (input === 'j' || key.downArrow) { + if (name === 'j' || name === 'down') { const newIndex = activeIndex < items.length - 1 ? 
activeIndex + 1 : 0; setActiveIndex(newIndex); onHighlight?.(items[newIndex]!.value); return; } - if (key.return) { + if (name === 'return') { onSelect(items[activeIndex]!.value); return; } @@ -120,7 +122,7 @@ export function RadioButtonSelect({ clearTimeout(numberInputTimer.current); } - const newNumberInput = numberInput + input; + const newNumberInput = numberInput + sequence; setNumberInput(newNumberInput); const targetIndex = Number.parseInt(newNumberInput, 10) - 1; @@ -154,7 +156,7 @@ export function RadioButtonSelect({ } } }, - { isActive: isFocused && items.length > 0 }, + { isActive: !!(isFocused && items.length > 0) }, ); const visibleItems = items.slice(scrollOffset, scrollOffset + maxItemsToShow); diff --git a/packages/cli/src/ui/hooks/useAutoAcceptIndicator.test.ts b/packages/cli/src/ui/hooks/useAutoAcceptIndicator.test.ts index bda6c259..657d792b 100644 --- a/packages/cli/src/ui/hooks/useAutoAcceptIndicator.test.ts +++ b/packages/cli/src/ui/hooks/useAutoAcceptIndicator.test.ts @@ -21,9 +21,9 @@ import { Config as ActualConfigType, ApprovalMode, } from '@google/gemini-cli-core'; -import { useInput, type Key as InkKey } from 'ink'; +import { useKeypress, Key } from './useKeypress.js'; -vi.mock('ink'); +vi.mock('./useKeypress.js'); vi.mock('@google/gemini-cli-core', async () => { const actualServerModule = (await vi.importActual( @@ -53,13 +53,12 @@ interface MockConfigInstanceShape { getToolRegistry: Mock<() => { discoverTools: Mock<() => void> }>; } -type UseInputKey = InkKey; -type UseInputHandler = (input: string, key: UseInputKey) => void; +type UseKeypressHandler = (key: Key) => void; describe('useAutoAcceptIndicator', () => { let mockConfigInstance: MockConfigInstanceShape; - let capturedUseInputHandler: UseInputHandler; - let mockedInkUseInput: MockedFunction; + let capturedUseKeypressHandler: UseKeypressHandler; + let mockedUseKeypress: MockedFunction; beforeEach(() => { vi.resetAllMocks(); @@ -111,10 +110,12 @@ 
describe('useAutoAcceptIndicator', () => { return instance; }); - mockedInkUseInput = useInput as MockedFunction; - mockedInkUseInput.mockImplementation((handler: UseInputHandler) => { - capturedUseInputHandler = handler; - }); + mockedUseKeypress = useKeypress as MockedFunction; + mockedUseKeypress.mockImplementation( + (handler: UseKeypressHandler, _options) => { + capturedUseKeypressHandler = handler; + }, + ); // eslint-disable-next-line @typescript-eslint/no-explicit-any mockConfigInstance = new (Config as any)() as MockConfigInstanceShape; @@ -163,7 +164,10 @@ describe('useAutoAcceptIndicator', () => { expect(result.current).toBe(ApprovalMode.DEFAULT); act(() => { - capturedUseInputHandler('', { tab: true, shift: true } as InkKey); + capturedUseKeypressHandler({ + name: 'tab', + shift: true, + } as Key); }); expect(mockConfigInstance.setApprovalMode).toHaveBeenCalledWith( ApprovalMode.AUTO_EDIT, @@ -171,7 +175,7 @@ describe('useAutoAcceptIndicator', () => { expect(result.current).toBe(ApprovalMode.AUTO_EDIT); act(() => { - capturedUseInputHandler('y', { ctrl: true } as InkKey); + capturedUseKeypressHandler({ name: 'y', ctrl: true } as Key); }); expect(mockConfigInstance.setApprovalMode).toHaveBeenCalledWith( ApprovalMode.YOLO, @@ -179,7 +183,7 @@ describe('useAutoAcceptIndicator', () => { expect(result.current).toBe(ApprovalMode.YOLO); act(() => { - capturedUseInputHandler('y', { ctrl: true } as InkKey); + capturedUseKeypressHandler({ name: 'y', ctrl: true } as Key); }); expect(mockConfigInstance.setApprovalMode).toHaveBeenCalledWith( ApprovalMode.DEFAULT, @@ -187,7 +191,7 @@ describe('useAutoAcceptIndicator', () => { expect(result.current).toBe(ApprovalMode.DEFAULT); act(() => { - capturedUseInputHandler('y', { ctrl: true } as InkKey); + capturedUseKeypressHandler({ name: 'y', ctrl: true } as Key); }); expect(mockConfigInstance.setApprovalMode).toHaveBeenCalledWith( ApprovalMode.YOLO, @@ -195,7 +199,10 @@ describe('useAutoAcceptIndicator', () => { 
expect(result.current).toBe(ApprovalMode.YOLO); act(() => { - capturedUseInputHandler('', { tab: true, shift: true } as InkKey); + capturedUseKeypressHandler({ + name: 'tab', + shift: true, + } as Key); }); expect(mockConfigInstance.setApprovalMode).toHaveBeenCalledWith( ApprovalMode.AUTO_EDIT, @@ -203,7 +210,10 @@ describe('useAutoAcceptIndicator', () => { expect(result.current).toBe(ApprovalMode.AUTO_EDIT); act(() => { - capturedUseInputHandler('', { tab: true, shift: true } as InkKey); + capturedUseKeypressHandler({ + name: 'tab', + shift: true, + } as Key); }); expect(mockConfigInstance.setApprovalMode).toHaveBeenCalledWith( ApprovalMode.DEFAULT, @@ -220,37 +230,51 @@ describe('useAutoAcceptIndicator', () => { ); act(() => { - capturedUseInputHandler('', { tab: true, shift: false } as InkKey); + capturedUseKeypressHandler({ + name: 'tab', + shift: false, + } as Key); }); expect(mockConfigInstance.setApprovalMode).not.toHaveBeenCalled(); act(() => { - capturedUseInputHandler('', { tab: false, shift: true } as InkKey); + capturedUseKeypressHandler({ + name: 'unknown', + shift: true, + } as Key); }); expect(mockConfigInstance.setApprovalMode).not.toHaveBeenCalled(); act(() => { - capturedUseInputHandler('a', { tab: false, shift: false } as InkKey); + capturedUseKeypressHandler({ + name: 'a', + shift: false, + ctrl: false, + } as Key); }); expect(mockConfigInstance.setApprovalMode).not.toHaveBeenCalled(); act(() => { - capturedUseInputHandler('y', { tab: true } as InkKey); + capturedUseKeypressHandler({ name: 'y', ctrl: false } as Key); }); expect(mockConfigInstance.setApprovalMode).not.toHaveBeenCalled(); act(() => { - capturedUseInputHandler('a', { ctrl: true } as InkKey); + capturedUseKeypressHandler({ name: 'a', ctrl: true } as Key); }); expect(mockConfigInstance.setApprovalMode).not.toHaveBeenCalled(); act(() => { - capturedUseInputHandler('y', { shift: true } as InkKey); + capturedUseKeypressHandler({ name: 'y', shift: true } as Key); }); 
expect(mockConfigInstance.setApprovalMode).not.toHaveBeenCalled(); act(() => { - capturedUseInputHandler('a', { ctrl: true, shift: true } as InkKey); + capturedUseKeypressHandler({ + name: 'a', + ctrl: true, + shift: true, + } as Key); }); expect(mockConfigInstance.setApprovalMode).not.toHaveBeenCalled(); }); diff --git a/packages/cli/src/ui/hooks/useAutoAcceptIndicator.ts b/packages/cli/src/ui/hooks/useAutoAcceptIndicator.ts index 8af3cea1..2cc16077 100644 --- a/packages/cli/src/ui/hooks/useAutoAcceptIndicator.ts +++ b/packages/cli/src/ui/hooks/useAutoAcceptIndicator.ts @@ -5,8 +5,8 @@ */ import { useState, useEffect } from 'react'; -import { useInput } from 'ink'; import { ApprovalMode, type Config } from '@google/gemini-cli-core'; +import { useKeypress } from './useKeypress.js'; export interface UseAutoAcceptIndicatorArgs { config: Config; @@ -23,27 +23,30 @@ export function useAutoAcceptIndicator({ setShowAutoAcceptIndicator(currentConfigValue); }, [currentConfigValue]); - useInput((input, key) => { - let nextApprovalMode: ApprovalMode | undefined; + useKeypress( + (key) => { + let nextApprovalMode: ApprovalMode | undefined; - if (key.ctrl && input === 'y') { - nextApprovalMode = - config.getApprovalMode() === ApprovalMode.YOLO - ? ApprovalMode.DEFAULT - : ApprovalMode.YOLO; - } else if (key.tab && key.shift) { - nextApprovalMode = - config.getApprovalMode() === ApprovalMode.AUTO_EDIT - ? ApprovalMode.DEFAULT - : ApprovalMode.AUTO_EDIT; - } + if (key.ctrl && key.name === 'y') { + nextApprovalMode = + config.getApprovalMode() === ApprovalMode.YOLO + ? ApprovalMode.DEFAULT + : ApprovalMode.YOLO; + } else if (key.shift && key.name === 'tab') { + nextApprovalMode = + config.getApprovalMode() === ApprovalMode.AUTO_EDIT + ? 
ApprovalMode.DEFAULT + : ApprovalMode.AUTO_EDIT; + } - if (nextApprovalMode) { - config.setApprovalMode(nextApprovalMode); - // Update local state immediately for responsiveness - setShowAutoAcceptIndicator(nextApprovalMode); - } - }); + if (nextApprovalMode) { + config.setApprovalMode(nextApprovalMode); + // Update local state immediately for responsiveness + setShowAutoAcceptIndicator(nextApprovalMode); + } + }, + { isActive: true }, + ); return showAutoAcceptIndicator; } diff --git a/packages/cli/src/ui/hooks/useGeminiStream.test.tsx b/packages/cli/src/ui/hooks/useGeminiStream.test.tsx index 751b869e..37d63e9a 100644 --- a/packages/cli/src/ui/hooks/useGeminiStream.test.tsx +++ b/packages/cli/src/ui/hooks/useGeminiStream.test.tsx @@ -8,7 +8,7 @@ import { describe, it, expect, vi, beforeEach, Mock } from 'vitest'; import { renderHook, act, waitFor } from '@testing-library/react'; import { useGeminiStream, mergePartListUnions } from './useGeminiStream.js'; -import { useInput } from 'ink'; +import { useKeypress } from './useKeypress.js'; import { useReactToolScheduler, TrackedToolCall, @@ -71,10 +71,9 @@ vi.mock('./useReactToolScheduler.js', async (importOriginal) => { }; }); -vi.mock('ink', async (importOriginal) => { - const actualInkModule = (await importOriginal()) as any; - return { ...(actualInkModule || {}), useInput: vi.fn() }; -}); +vi.mock('./useKeypress.js', () => ({ + useKeypress: vi.fn(), +})); vi.mock('./shellCommandProcessor.js', () => ({ useShellCommandProcessor: vi.fn().mockReturnValue({ @@ -899,19 +898,23 @@ describe('useGeminiStream', () => { }); describe('User Cancellation', () => { - let useInputCallback: (input: string, key: any) => void; - const mockUseInput = useInput as Mock; + let keypressCallback: (key: any) => void; + const mockUseKeypress = useKeypress as Mock; beforeEach(() => { - // Capture the callback passed to useInput - mockUseInput.mockImplementation((callback) => { - useInputCallback = callback; + // Capture the callback passed 
to useKeypress + mockUseKeypress.mockImplementation((callback, options) => { + if (options.isActive) { + keypressCallback = callback; + } else { + keypressCallback = () => {}; + } }); }); const simulateEscapeKeyPress = () => { act(() => { - useInputCallback('', { escape: true }); + keypressCallback({ name: 'escape' }); }); }; diff --git a/packages/cli/src/ui/hooks/useGeminiStream.ts b/packages/cli/src/ui/hooks/useGeminiStream.ts index 6385d267..6f3cb4fd 100644 --- a/packages/cli/src/ui/hooks/useGeminiStream.ts +++ b/packages/cli/src/ui/hooks/useGeminiStream.ts @@ -5,7 +5,6 @@ */ import { useState, useRef, useCallback, useEffect, useMemo } from 'react'; -import { useInput } from 'ink'; import { Config, GeminiClient, @@ -55,6 +54,7 @@ import { TrackedCancelledToolCall, } from './useReactToolScheduler.js'; import { useSessionStats } from '../contexts/SessionContext.js'; +import { useKeypress } from './useKeypress.js'; export function mergePartListUnions(list: PartListUnion[]): PartListUnion { const resultParts: PartListUnion = []; @@ -213,11 +213,14 @@ export const useGeminiStream = ( pendingHistoryItemRef, ]); - useInput((_input, key) => { - if (key.escape) { - cancelOngoingRequest(); - } - }); + useKeypress( + (key) => { + if (key.name === 'escape') { + cancelOngoingRequest(); + } + }, + { isActive: streamingState === StreamingState.Responding }, + ); const prepareQueryForGemini = useCallback( async ( diff --git a/packages/cli/src/ui/privacy/CloudFreePrivacyNotice.tsx b/packages/cli/src/ui/privacy/CloudFreePrivacyNotice.tsx index 25e14281..d4c13097 100644 --- a/packages/cli/src/ui/privacy/CloudFreePrivacyNotice.tsx +++ b/packages/cli/src/ui/privacy/CloudFreePrivacyNotice.tsx @@ -4,12 +4,13 @@ * SPDX-License-Identifier: Apache-2.0 */ -import { Box, Newline, Text, useInput } from 'ink'; +import { Box, Newline, Text } from 'ink'; import { RadioButtonSelect } from '../components/shared/RadioButtonSelect.js'; import { usePrivacySettings } from 
'../hooks/usePrivacySettings.js'; import { CloudPaidPrivacyNotice } from './CloudPaidPrivacyNotice.js'; import { Config } from '@google/gemini-cli-core'; import { Colors } from '../colors.js'; +import { useKeypress } from '../hooks/useKeypress.js'; interface CloudFreePrivacyNoticeProps { config: Config; @@ -23,11 +24,14 @@ export const CloudFreePrivacyNotice = ({ const { privacyState, updateDataCollectionOptIn } = usePrivacySettings(config); - useInput((input, key) => { - if (privacyState.error && key.escape) { - onExit(); - } - }); + useKeypress( + (key) => { + if (privacyState.error && key.name === 'escape') { + onExit(); + } + }, + { isActive: true }, + ); if (privacyState.isLoading) { return Loading...; diff --git a/packages/cli/src/ui/privacy/CloudPaidPrivacyNotice.tsx b/packages/cli/src/ui/privacy/CloudPaidPrivacyNotice.tsx index e50dcd4b..f0adbb68 100644 --- a/packages/cli/src/ui/privacy/CloudPaidPrivacyNotice.tsx +++ b/packages/cli/src/ui/privacy/CloudPaidPrivacyNotice.tsx @@ -4,8 +4,9 @@ * SPDX-License-Identifier: Apache-2.0 */ -import { Box, Newline, Text, useInput } from 'ink'; +import { Box, Newline, Text } from 'ink'; import { Colors } from '../colors.js'; +import { useKeypress } from '../hooks/useKeypress.js'; interface CloudPaidPrivacyNoticeProps { onExit: () => void; @@ -14,11 +15,14 @@ interface CloudPaidPrivacyNoticeProps { export const CloudPaidPrivacyNotice = ({ onExit, }: CloudPaidPrivacyNoticeProps) => { - useInput((input, key) => { - if (key.escape) { - onExit(); - } - }); + useKeypress( + (key) => { + if (key.name === 'escape') { + onExit(); + } + }, + { isActive: true }, + ); return ( diff --git a/packages/cli/src/ui/privacy/GeminiPrivacyNotice.tsx b/packages/cli/src/ui/privacy/GeminiPrivacyNotice.tsx index 57030ac3..c0eaa74f 100644 --- a/packages/cli/src/ui/privacy/GeminiPrivacyNotice.tsx +++ b/packages/cli/src/ui/privacy/GeminiPrivacyNotice.tsx @@ -4,19 +4,23 @@ * SPDX-License-Identifier: Apache-2.0 */ -import { Box, Newline, Text, 
useInput } from 'ink'; +import { Box, Newline, Text } from 'ink'; import { Colors } from '../colors.js'; +import { useKeypress } from '../hooks/useKeypress.js'; interface GeminiPrivacyNoticeProps { onExit: () => void; } export const GeminiPrivacyNotice = ({ onExit }: GeminiPrivacyNoticeProps) => { - useInput((input, key) => { - if (key.escape) { - onExit(); - } - }); + useKeypress( + (key) => { + if (key.name === 'escape') { + onExit(); + } + }, + { isActive: true }, + ); return ( From 3a87712c1a15dac9f0a717b40fbf9e59398177ca Mon Sep 17 00:00:00 2001 From: Shreya Keshive Date: Tue, 12 Aug 2025 17:08:07 -0400 Subject: [PATCH 09/45] Launch VS Code IDE Integration (#6063) --- packages/cli/src/config/config.test.ts | 27 ---------------- packages/cli/src/config/config.ts | 8 ----- .../cli/src/config/settingsSchema.test.ts | 1 - packages/cli/src/config/settingsSchema.ts | 10 +----- packages/cli/src/gemini.tsx | 2 +- packages/cli/src/ui/App.test.tsx | 4 +-- packages/cli/src/ui/App.tsx | 1 - .../cli/src/ui/commands/ideCommand.test.ts | 11 ++----- packages/cli/src/ui/commands/ideCommand.ts | 2 +- .../messages/ToolConfirmationMessage.tsx | 4 +-- packages/core/src/config/config.ts | 7 ---- packages/core/src/core/client.test.ts | 18 +++++------ packages/core/src/core/client.ts | 2 +- packages/core/src/ide/ide-installer.ts | 32 +------------------ packages/core/src/tools/edit.test.ts | 2 -- packages/core/src/tools/edit.ts | 1 - packages/core/src/tools/write-file.test.ts | 1 - packages/core/src/tools/write-file.ts | 1 - scripts/copy_bundle_assets.js | 8 ----- 19 files changed, 21 insertions(+), 121 deletions(-) diff --git a/packages/cli/src/config/config.test.ts b/packages/cli/src/config/config.test.ts index 701ae267..178980eb 100644 --- a/packages/cli/src/config/config.test.ts +++ b/packages/cli/src/config/config.test.ts @@ -1010,33 +1010,6 @@ describe('loadCliConfig model selection', () => { }); }); -describe('loadCliConfig ideModeFeature', () => { - const originalArgv = 
process.argv; - const originalEnv = { ...process.env }; - - beforeEach(() => { - vi.resetAllMocks(); - vi.mocked(os.homedir).mockReturnValue('/mock/home/user'); - process.env.GEMINI_API_KEY = 'test-api-key'; - delete process.env.SANDBOX; - delete process.env.GEMINI_CLI_IDE_SERVER_PORT; - }); - - afterEach(() => { - process.argv = originalArgv; - process.env = originalEnv; - vi.restoreAllMocks(); - }); - - it('should be false by default', async () => { - process.argv = ['node', 'script.js']; - const settings: Settings = {}; - const argv = await parseArguments(); - const config = await loadCliConfig(settings, [], 'test-session', argv); - expect(config.getIdeModeFeature()).toBe(false); - }); -}); - describe('loadCliConfig folderTrustFeature', () => { const originalArgv = process.argv; const originalEnv = { ...process.env }; diff --git a/packages/cli/src/config/config.ts b/packages/cli/src/config/config.ts index 52600e42..d0658e75 100644 --- a/packages/cli/src/config/config.ts +++ b/packages/cli/src/config/config.ts @@ -67,7 +67,6 @@ export interface CliArgs { experimentalAcp: boolean | undefined; extensions: string[] | undefined; listExtensions: boolean | undefined; - ideModeFeature: boolean | undefined; proxy: string | undefined; includeDirectories: string[] | undefined; } @@ -200,10 +199,6 @@ export async function parseArguments(): Promise { type: 'boolean', description: 'List all available extensions and exit.', }) - .option('ide-mode-feature', { - type: 'boolean', - description: 'Run in IDE mode?', - }) .option('proxy', { type: 'string', description: @@ -307,8 +302,6 @@ export async function loadCliConfig( const memoryImportFormat = settings.memoryImportFormat || 'tree'; const ideMode = settings.ideMode ?? false; - const ideModeFeature = - argv.ideModeFeature ?? settings.ideModeFeature ?? false; const folderTrustFeature = settings.folderTrustFeature ?? false; const folderTrustSetting = settings.folderTrust ?? 
false; @@ -474,7 +467,6 @@ export async function loadCliConfig( noBrowser: !!process.env.NO_BROWSER, summarizeToolOutput: settings.summarizeToolOutput, ideMode, - ideModeFeature, chatCompression: settings.chatCompression, folderTrustFeature, folderTrust, diff --git a/packages/cli/src/config/settingsSchema.test.ts b/packages/cli/src/config/settingsSchema.test.ts index ab820ee1..118b1823 100644 --- a/packages/cli/src/config/settingsSchema.test.ts +++ b/packages/cli/src/config/settingsSchema.test.ts @@ -44,7 +44,6 @@ describe('SettingsSchema', () => { 'telemetry', 'bugCommand', 'summarizeToolOutput', - 'ideModeFeature', 'dnsResolutionOrder', 'excludedProjectEnvVars', 'disableUpdateNag', diff --git a/packages/cli/src/config/settingsSchema.ts b/packages/cli/src/config/settingsSchema.ts index cd8c61fb..f061b16a 100644 --- a/packages/cli/src/config/settingsSchema.ts +++ b/packages/cli/src/config/settingsSchema.ts @@ -395,15 +395,7 @@ export const SETTINGS_SCHEMA = { description: 'Settings for summarizing tool output.', showInDialog: false, }, - ideModeFeature: { - type: 'boolean', - label: 'IDE Mode Feature Flag', - category: 'Advanced', - requiresRestart: true, - default: undefined as boolean | undefined, - description: 'Internal feature flag for IDE mode.', - showInDialog: false, - }, + dnsResolutionOrder: { type: 'string', label: 'DNS Resolution Order', diff --git a/packages/cli/src/gemini.tsx b/packages/cli/src/gemini.tsx index a0cf352e..acc9c4b2 100644 --- a/packages/cli/src/gemini.tsx +++ b/packages/cli/src/gemini.tsx @@ -191,7 +191,7 @@ export async function main() { await config.initialize(); - if (config.getIdeMode() && config.getIdeModeFeature()) { + if (config.getIdeMode()) { await config.getIdeClient().connect(); logIdeConnection(config, new IdeConnectionEvent(IdeConnectionType.START)); } diff --git a/packages/cli/src/ui/App.test.tsx b/packages/cli/src/ui/App.test.tsx index 82ba4fe1..3636823b 100644 --- a/packages/cli/src/ui/App.test.tsx +++ 
b/packages/cli/src/ui/App.test.tsx @@ -155,13 +155,13 @@ vi.mock('@google/gemini-cli-core', async (importOriginal) => { setFlashFallbackHandler: vi.fn(), getSessionId: vi.fn(() => 'test-session-id'), getUserTier: vi.fn().mockResolvedValue(undefined), - getIdeModeFeature: vi.fn(() => false), - getIdeMode: vi.fn(() => false), + getIdeMode: vi.fn(() => true), getWorkspaceContext: vi.fn(() => ({ getDirectories: vi.fn(() => []), })), getIdeClient: vi.fn(() => ({ getCurrentIde: vi.fn(() => 'vscode'), + getDetectedIdeDisplayName: vi.fn(() => 'VSCode'), })), }; }); diff --git a/packages/cli/src/ui/App.tsx b/packages/cli/src/ui/App.tsx index ab30b730..1caabbe0 100644 --- a/packages/cli/src/ui/App.tsx +++ b/packages/cli/src/ui/App.tsx @@ -130,7 +130,6 @@ const App = ({ config, settings, startupWarnings = [], version }: AppProps) => { registerCleanup(() => config.getIdeClient().disconnect()); }, [config]); const shouldShowIdePrompt = - config.getIdeModeFeature() && currentIDE && !config.getIdeMode() && !settings.merged.hasSeenIdeIntegrationNudge && diff --git a/packages/cli/src/ui/commands/ideCommand.test.ts b/packages/cli/src/ui/commands/ideCommand.test.ts index 10a97e2a..8576320b 100644 --- a/packages/cli/src/ui/commands/ideCommand.test.ts +++ b/packages/cli/src/ui/commands/ideCommand.test.ts @@ -40,7 +40,6 @@ describe('ideCommand', () => { } as unknown as CommandContext; mockConfig = { - getIdeModeFeature: vi.fn(), getIdeMode: vi.fn(), getIdeClient: vi.fn(() => ({ reconnect: vi.fn(), @@ -60,14 +59,12 @@ describe('ideCommand', () => { vi.restoreAllMocks(); }); - it('should return null if ideModeFeature is not enabled', () => { - vi.mocked(mockConfig.getIdeModeFeature).mockReturnValue(false); - const command = ideCommand(mockConfig); + it('should return null if config is not provided', () => { + const command = ideCommand(null); expect(command).toBeNull(); }); - it('should return the ide command if ideModeFeature is enabled', () => { - 
vi.mocked(mockConfig.getIdeModeFeature).mockReturnValue(true); + it('should return the ide command', () => { vi.mocked(mockConfig.getIdeMode).mockReturnValue(true); vi.mocked(mockConfig.getIdeClient).mockReturnValue({ getCurrentIde: () => DetectedIde.VSCode, @@ -85,7 +82,6 @@ describe('ideCommand', () => { describe('status subcommand', () => { const mockGetConnectionStatus = vi.fn(); beforeEach(() => { - vi.mocked(mockConfig.getIdeModeFeature).mockReturnValue(true); vi.mocked(mockConfig.getIdeClient).mockReturnValue({ getConnectionStatus: mockGetConnectionStatus, getCurrentIde: () => DetectedIde.VSCode, @@ -162,7 +158,6 @@ describe('ideCommand', () => { describe('install subcommand', () => { const mockInstall = vi.fn(); beforeEach(() => { - vi.mocked(mockConfig.getIdeModeFeature).mockReturnValue(true); vi.mocked(mockConfig.getIdeMode).mockReturnValue(true); vi.mocked(mockConfig.getIdeClient).mockReturnValue({ getCurrentIde: () => DetectedIde.VSCode, diff --git a/packages/cli/src/ui/commands/ideCommand.ts b/packages/cli/src/ui/commands/ideCommand.ts index 23af2e48..2dfad33c 100644 --- a/packages/cli/src/ui/commands/ideCommand.ts +++ b/packages/cli/src/ui/commands/ideCommand.ts @@ -115,7 +115,7 @@ async function getIdeStatusMessageWithFiles(ideClient: IdeClient): Promise<{ } export const ideCommand = (config: Config | null): SlashCommand | null => { - if (!config || !config.getIdeModeFeature()) { + if (!config) { return null; } const ideClient = config.getIdeClient(); diff --git a/packages/cli/src/ui/components/messages/ToolConfirmationMessage.tsx b/packages/cli/src/ui/components/messages/ToolConfirmationMessage.tsx index a8813491..2f93609e 100644 --- a/packages/cli/src/ui/components/messages/ToolConfirmationMessage.tsx +++ b/packages/cli/src/ui/components/messages/ToolConfirmationMessage.tsx @@ -45,7 +45,7 @@ export const ToolConfirmationMessage: React.FC< const handleConfirm = async (outcome: ToolConfirmationOutcome) => { if (confirmationDetails.type === 'edit') { 
const ideClient = config?.getIdeClient(); - if (config?.getIdeMode() && config?.getIdeModeFeature()) { + if (config?.getIdeMode()) { const cliOutcome = outcome === ToolConfirmationOutcome.Cancel ? 'rejected' : 'accepted'; await ideClient?.resolveDiffFromCli( @@ -136,7 +136,7 @@ export const ToolConfirmationMessage: React.FC< value: ToolConfirmationOutcome.ProceedAlways, }, ); - if (config?.getIdeMode() && config?.getIdeModeFeature()) { + if (config?.getIdeMode()) { options.push({ label: 'No (esc)', value: ToolConfirmationOutcome.Cancel, diff --git a/packages/core/src/config/config.ts b/packages/core/src/config/config.ts index d02e4153..9231f427 100644 --- a/packages/core/src/config/config.ts +++ b/packages/core/src/config/config.ts @@ -191,7 +191,6 @@ export interface ConfigParameters { blockedMcpServers?: Array<{ name: string; extensionName: string }>; noBrowser?: boolean; summarizeToolOutput?: Record; - ideModeFeature?: boolean; folderTrustFeature?: boolean; folderTrust?: boolean; ideMode?: boolean; @@ -240,7 +239,6 @@ export class Config { private readonly model: string; private readonly extensionContextFilePaths: string[]; private readonly noBrowser: boolean; - private readonly ideModeFeature: boolean; private readonly folderTrustFeature: boolean; private readonly folderTrust: boolean; private ideMode: boolean; @@ -317,7 +315,6 @@ export class Config { this._blockedMcpServers = params.blockedMcpServers ?? []; this.noBrowser = params.noBrowser ?? false; this.summarizeToolOutput = params.summarizeToolOutput; - this.ideModeFeature = params.ideModeFeature ?? false; this.folderTrustFeature = params.folderTrustFeature ?? false; this.folderTrust = params.folderTrust ?? false; this.ideMode = params.ideMode ?? 
false; @@ -654,10 +651,6 @@ export class Config { return this.summarizeToolOutput; } - getIdeModeFeature(): boolean { - return this.ideModeFeature; - } - getIdeMode(): boolean { return this.ideMode; } diff --git a/packages/core/src/core/client.test.ts b/packages/core/src/core/client.test.ts index 4c6f6dbb..5e68cfb6 100644 --- a/packages/core/src/core/client.test.ts +++ b/packages/core/src/core/client.test.ts @@ -667,7 +667,7 @@ describe('Gemini Client (client.ts)', () => { }); describe('sendMessageStream', () => { - it('should include editor context when ideModeFeature is enabled', async () => { + it('should include editor context when ideMode is enabled', async () => { // Arrange vi.mocked(ideContext.getIdeContext).mockReturnValue({ workspaceState: { @@ -691,7 +691,7 @@ describe('Gemini Client (client.ts)', () => { }, }); - vi.spyOn(client['config'], 'getIdeModeFeature').mockReturnValue(true); + vi.spyOn(client['config'], 'getIdeMode').mockReturnValue(true); const mockStream = (async function* () { yield { type: 'content', value: 'Hello' }; @@ -751,7 +751,7 @@ ${JSON.stringify( }); }); - it('should not add context if ideModeFeature is enabled but no open files', async () => { + it('should not add context if ideMode is enabled but no open files', async () => { // Arrange vi.mocked(ideContext.getIdeContext).mockReturnValue({ workspaceState: { @@ -759,7 +759,7 @@ ${JSON.stringify( }, }); - vi.spyOn(client['config'], 'getIdeModeFeature').mockReturnValue(true); + vi.spyOn(client['config'], 'getIdeMode').mockReturnValue(true); const mockStream = (async function* () { yield { type: 'content', value: 'Hello' }; @@ -798,7 +798,7 @@ ${JSON.stringify( ); }); - it('should add context if ideModeFeature is enabled and there is one active file', async () => { + it('should add context if ideMode is enabled and there is one active file', async () => { // Arrange vi.mocked(ideContext.getIdeContext).mockReturnValue({ workspaceState: { @@ -814,7 +814,7 @@ ${JSON.stringify( }, }); - 
vi.spyOn(client['config'], 'getIdeModeFeature').mockReturnValue(true); + vi.spyOn(client['config'], 'getIdeMode').mockReturnValue(true); const mockStream = (async function* () { yield { type: 'content', value: 'Hello' }; @@ -873,7 +873,7 @@ ${JSON.stringify( }); }); - it('should add context if ideModeFeature is enabled and there are open files but no active file', async () => { + it('should add context if ideMode is enabled and there are open files but no active file', async () => { // Arrange vi.mocked(ideContext.getIdeContext).mockReturnValue({ workspaceState: { @@ -890,7 +890,7 @@ ${JSON.stringify( }, }); - vi.spyOn(client['config'], 'getIdeModeFeature').mockReturnValue(true); + vi.spyOn(client['config'], 'getIdeMode').mockReturnValue(true); const mockStream = (async function* () { yield { type: 'content', value: 'Hello' }; @@ -1226,7 +1226,7 @@ ${JSON.stringify( beforeEach(() => { client['forceFullIdeContext'] = false; // Reset before each delta test vi.spyOn(client, 'tryCompressChat').mockResolvedValue(null); - vi.spyOn(client['config'], 'getIdeModeFeature').mockReturnValue(true); + vi.spyOn(client['config'], 'getIdeMode').mockReturnValue(true); mockTurnRunFn.mockReturnValue(mockStream); const mockChat: Partial = { diff --git a/packages/core/src/core/client.ts b/packages/core/src/core/client.ts index cc492472..96be4111 100644 --- a/packages/core/src/core/client.ts +++ b/packages/core/src/core/client.ts @@ -444,7 +444,7 @@ export class GeminiClient { yield { type: GeminiEventType.ChatCompressed, value: compressed }; } - if (this.config.getIdeModeFeature() && this.config.getIdeMode()) { + if (this.config.getIdeMode()) { const { contextParts, newIdeContext } = this.getIdeContextParts( this.forceFullIdeContext || this.getHistory().length === 0, ); diff --git a/packages/core/src/ide/ide-installer.ts b/packages/core/src/ide/ide-installer.ts index 7db8e2d2..121e0089 100644 --- a/packages/core/src/ide/ide-installer.ts +++ b/packages/core/src/ide/ide-installer.ts @@ 
-6,15 +6,12 @@ import * as child_process from 'child_process'; import * as process from 'process'; -import { glob } from 'glob'; import * as path from 'path'; import * as fs from 'fs'; import * as os from 'os'; -import { fileURLToPath } from 'url'; import { DetectedIde } from './detect-ide.js'; const VSCODE_COMMAND = process.platform === 'win32' ? 'code.cmd' : 'code'; -const VSCODE_COMPANION_EXTENSION_FOLDER = 'vscode-ide-companion'; export interface IdeInstaller { install(): Promise; @@ -103,34 +100,7 @@ class VsCodeInstaller implements IdeInstaller { }; } - const bundleDir = path.dirname(fileURLToPath(import.meta.url)); - // The VSIX file is copied to the bundle directory as part of the build. - let vsixFiles = glob.sync(path.join(bundleDir, '*.vsix')); - if (vsixFiles.length === 0) { - // If the VSIX file is not in the bundle, it might be a dev - // environment running with `npm start`. Look for it in the original - // package location, relative to the bundle dir. - const devPath = path.join( - bundleDir, // .../packages/core/dist/src/ide - '..', // .../packages/core/dist/src - '..', // .../packages/core/dist - '..', // .../packages/core - '..', // .../packages - VSCODE_COMPANION_EXTENSION_FOLDER, - '*.vsix', - ); - vsixFiles = glob.sync(devPath); - } - if (vsixFiles.length === 0) { - return { - success: false, - message: - 'Could not find the required VS Code companion extension. 
Please file a bug via /bug.', - }; - } - - const vsixPath = vsixFiles[0]; - const command = `"${commandPath}" --install-extension "${vsixPath}" --force`; + const command = `"${commandPath}" --install-extension google.gemini-cli-vscode-ide-companion --force`; try { child_process.execSync(command, { stdio: 'pipe' }); return { diff --git a/packages/core/src/tools/edit.test.ts b/packages/core/src/tools/edit.test.ts index 3e0dba61..b2e31fdd 100644 --- a/packages/core/src/tools/edit.test.ts +++ b/packages/core/src/tools/edit.test.ts @@ -62,7 +62,6 @@ describe('EditTool', () => { getWorkspaceContext: () => createMockWorkspaceContext(rootDir), getIdeClient: () => undefined, getIdeMode: () => false, - getIdeModeFeature: () => false, // getGeminiConfig: () => ({ apiKey: 'test-api-key' }), // This was not a real Config method // Add other properties/methods of Config if EditTool uses them // Minimal other methods to satisfy Config type if needed by EditTool constructor or other direct uses: @@ -810,7 +809,6 @@ describe('EditTool', () => { }), }; (mockConfig as any).getIdeMode = () => true; - (mockConfig as any).getIdeModeFeature = () => true; (mockConfig as any).getIdeClient = () => ideClient; }); diff --git a/packages/core/src/tools/edit.ts b/packages/core/src/tools/edit.ts index 86641300..e2b517cf 100644 --- a/packages/core/src/tools/edit.ts +++ b/packages/core/src/tools/edit.ts @@ -250,7 +250,6 @@ class EditToolInvocation implements ToolInvocation { ); const ideClient = this.config.getIdeClient(); const ideConfirmation = - this.config.getIdeModeFeature() && this.config.getIdeMode() && ideClient?.getConnectionStatus().status === IDEConnectionStatus.Connected ? 
ideClient.openDiff(this.params.file_path, editData.newContent) diff --git a/packages/core/src/tools/write-file.test.ts b/packages/core/src/tools/write-file.test.ts index 1967b99b..06561602 100644 --- a/packages/core/src/tools/write-file.test.ts +++ b/packages/core/src/tools/write-file.test.ts @@ -58,7 +58,6 @@ const mockConfigInternal = { getGeminiClient: vi.fn(), // Initialize as a plain mock function getIdeClient: vi.fn(), getIdeMode: vi.fn(() => false), - getIdeModeFeature: vi.fn(() => false), getWorkspaceContext: () => createMockWorkspaceContext(rootDir), getApiKey: () => 'test-key', getModel: () => 'test-model', diff --git a/packages/core/src/tools/write-file.ts b/packages/core/src/tools/write-file.ts index 5cdba419..72aeba6d 100644 --- a/packages/core/src/tools/write-file.ts +++ b/packages/core/src/tools/write-file.ts @@ -195,7 +195,6 @@ export class WriteFileTool const ideClient = this.config.getIdeClient(); const ideConfirmation = - this.config.getIdeModeFeature() && this.config.getIdeMode() && ideClient.getConnectionStatus().status === IDEConnectionStatus.Connected ? 
ideClient.openDiff(params.file_path, correctedContent) diff --git a/scripts/copy_bundle_assets.js b/scripts/copy_bundle_assets.js index 79d2a080..5a3af3e9 100644 --- a/scripts/copy_bundle_assets.js +++ b/scripts/copy_bundle_assets.js @@ -37,12 +37,4 @@ for (const file of sbFiles) { copyFileSync(join(root, file), join(bundleDir, basename(file))); } -// Find and copy all .vsix files from packages to the root of the bundle directory -const vsixFiles = glob.sync('packages/vscode-ide-companion/*.vsix', { - cwd: root, -}); -for (const file of vsixFiles) { - copyFileSync(join(root, file), join(bundleDir, basename(file))); -} - console.log('Assets copied to bundle/'); From 11377915dbf42064ed9a8d9e31b46adc565ae021 Mon Sep 17 00:00:00 2001 From: Srinath Padmanabhan <17151014+srithreepo@users.noreply.github.com> Date: Tue, 12 Aug 2025 15:09:13 -0700 Subject: [PATCH 10/45] Create Docs Pages based on github documentation (#6083) --- .github/workflows/docs-page-action.yml | 54 ++++++++++++++++++++++++++ 1 file changed, 54 insertions(+) create mode 100644 .github/workflows/docs-page-action.yml diff --git a/.github/workflows/docs-page-action.yml b/.github/workflows/docs-page-action.yml new file mode 100644 index 00000000..0c28dca6 --- /dev/null +++ b/.github/workflows/docs-page-action.yml @@ -0,0 +1,54 @@ +# Sample workflow for building and deploying a Jekyll site to GitHub Pages +name: Deploy Jekyll with GitHub Pages dependencies preinstalled + +on: + # Runs on pushes targeting the default branch + push: + tags: 'v*' + + # Allows you to run this workflow manually from the Actions tab + workflow_dispatch: + +# Sets permissions of the GITHUB_TOKEN to allow deployment to GitHub Pages +permissions: + contents: read + pages: write + id-token: write + +# Allow only one concurrent deployment, skipping runs queued between the run in-progress and latest queued. +# However, do NOT cancel in-progress runs as we want to allow these production deployments to complete. 
+concurrency: + group: 'pages' + cancel-in-progress: false + +jobs: + build: + # This 'if' condition is the key. It ensures the job only runs if the + # tag name does NOT contain the substring 'nightly'. + if: "contains(github.ref_name, 'nightly') == false" + # Build job + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v4 + - name: Setup Pages + uses: actions/configure-pages@v5 + - name: Build with Jekyll + uses: actions/jekyll-build-pages@v1 + with: + source: ./ + destination: ./_site + - name: Upload artifact + uses: actions/upload-pages-artifact@v3 + + # Deployment job + deploy: + environment: + name: github-pages + url: ${{ steps.deployment.outputs.page_url }} + runs-on: ubuntu-latest + needs: build + steps: + - name: Deploy to GitHub Pages + id: deployment + uses: actions/deploy-pages@v4 From 8d6eb8c322890b5cdf20d4a30dd17afb1541f5aa Mon Sep 17 00:00:00 2001 From: Arya Gummadi Date: Tue, 12 Aug 2025 15:10:22 -0700 Subject: [PATCH 11/45] feat: add --approval-mode parameter (#6024) Co-authored-by: Jacob Richman --- docs/cli/configuration.md | 9 +- .../cli/src/config/config.integration.test.ts | 145 +++++++ packages/cli/src/config/config.test.ts | 369 ++++++++++++++++++ packages/cli/src/config/config.ts | 67 +++- 4 files changed, 581 insertions(+), 9 deletions(-) diff --git a/docs/cli/configuration.md b/docs/cli/configuration.md index 6ebcd630..d95793f1 100644 --- a/docs/cli/configuration.md +++ b/docs/cli/configuration.md @@ -422,6 +422,13 @@ Arguments passed directly when running the CLI can override other configurations - Displays the current memory usage. - **`--yolo`**: - Enables YOLO mode, which automatically approves all tool calls. +- **`--approval-mode `**: + - Sets the approval mode for tool calls. 
Available modes: + - `default`: Prompt for approval on each tool call (default behavior) + - `auto_edit`: Automatically approve edit tools (replace, write_file) while prompting for others + - `yolo`: Automatically approve all tool calls (equivalent to `--yolo`) + - Cannot be used together with `--yolo`. Use `--approval-mode=yolo` instead of `--yolo` for the new unified approach. + - Example: `gemini --approval-mode auto_edit` - **`--telemetry`**: - Enables [telemetry](../telemetry.md). - **`--telemetry-target`**: @@ -517,7 +524,7 @@ Sandboxing is disabled by default, but you can enable it in a few ways: - Using `--sandbox` or `-s` flag. - Setting `GEMINI_SANDBOX` environment variable. -- Sandbox is enabled in `--yolo` mode by default. +- Sandbox is enabled when using `--yolo` or `--approval-mode=yolo` by default. By default, it uses a pre-built `gemini-cli-sandbox` Docker image. diff --git a/packages/cli/src/config/config.integration.test.ts b/packages/cli/src/config/config.integration.test.ts index 87a74578..45ed6d82 100644 --- a/packages/cli/src/config/config.integration.test.ts +++ b/packages/cli/src/config/config.integration.test.ts @@ -261,4 +261,149 @@ describe('Configuration Integration Tests', () => { expect(config.getExtensionContextFilePaths()).toEqual(contextFiles); }); }); + + describe('Approval Mode Integration Tests', () => { + let parseArguments: typeof import('./config').parseArguments; + + beforeEach(async () => { + // Import the argument parsing function for integration testing + const { parseArguments: parseArgs } = await import('./config'); + parseArguments = parseArgs; + }); + + it('should parse --approval-mode=auto_edit correctly through the full argument parsing flow', async () => { + const originalArgv = process.argv; + + try { + process.argv = [ + 'node', + 'script.js', + '--approval-mode', + 'auto_edit', + '-p', + 'test', + ]; + + const argv = await parseArguments(); + + // Verify that the argument was parsed correctly + 
expect(argv.approvalMode).toBe('auto_edit'); + expect(argv.prompt).toBe('test'); + expect(argv.yolo).toBe(false); + } finally { + process.argv = originalArgv; + } + }); + + it('should parse --approval-mode=yolo correctly through the full argument parsing flow', async () => { + const originalArgv = process.argv; + + try { + process.argv = [ + 'node', + 'script.js', + '--approval-mode', + 'yolo', + '-p', + 'test', + ]; + + const argv = await parseArguments(); + + expect(argv.approvalMode).toBe('yolo'); + expect(argv.prompt).toBe('test'); + expect(argv.yolo).toBe(false); // Should NOT be set when using --approval-mode + } finally { + process.argv = originalArgv; + } + }); + + it('should parse --approval-mode=default correctly through the full argument parsing flow', async () => { + const originalArgv = process.argv; + + try { + process.argv = [ + 'node', + 'script.js', + '--approval-mode', + 'default', + '-p', + 'test', + ]; + + const argv = await parseArguments(); + + expect(argv.approvalMode).toBe('default'); + expect(argv.prompt).toBe('test'); + expect(argv.yolo).toBe(false); + } finally { + process.argv = originalArgv; + } + }); + + it('should parse legacy --yolo flag correctly', async () => { + const originalArgv = process.argv; + + try { + process.argv = ['node', 'script.js', '--yolo', '-p', 'test']; + + const argv = await parseArguments(); + + expect(argv.yolo).toBe(true); + expect(argv.approvalMode).toBeUndefined(); // Should NOT be set when using --yolo + expect(argv.prompt).toBe('test'); + } finally { + process.argv = originalArgv; + } + }); + + it('should reject invalid approval mode values during argument parsing', async () => { + const originalArgv = process.argv; + + try { + process.argv = ['node', 'script.js', '--approval-mode', 'invalid_mode']; + + // Should throw during argument parsing due to yargs validation + await expect(parseArguments()).rejects.toThrow(); + } finally { + process.argv = originalArgv; + } + }); + + it('should reject conflicting 
--yolo and --approval-mode flags', async () => { + const originalArgv = process.argv; + + try { + process.argv = [ + 'node', + 'script.js', + '--yolo', + '--approval-mode', + 'default', + ]; + + // Should throw during argument parsing due to conflict validation + await expect(parseArguments()).rejects.toThrow(); + } finally { + process.argv = originalArgv; + } + }); + + it('should handle backward compatibility with mixed scenarios', async () => { + const originalArgv = process.argv; + + try { + // Test that no approval mode arguments defaults to no flags set + process.argv = ['node', 'script.js', '-p', 'test']; + + const argv = await parseArguments(); + + expect(argv.approvalMode).toBeUndefined(); + expect(argv.yolo).toBe(false); + expect(argv.prompt).toBe('test'); + } finally { + process.argv = originalArgv; + } + }); + }); }); diff --git a/packages/cli/src/config/config.test.ts b/packages/cli/src/config/config.test.ts index 178980eb..fc4d24bd 100644 --- a/packages/cli/src/config/config.test.ts +++ b/packages/cli/src/config/config.test.ts @@ -156,6 +156,93 @@ describe('parseArguments', () => { expect(argv.promptInteractive).toBe('interactive prompt'); expect(argv.prompt).toBeUndefined(); }); + + it('should throw an error when both --yolo and --approval-mode are used together', async () => { + process.argv = [ + 'node', + 'script.js', + '--yolo', + '--approval-mode', + 'default', + ]; + + const mockExit = vi.spyOn(process, 'exit').mockImplementation(() => { + throw new Error('process.exit called'); + }); + + const mockConsoleError = vi + .spyOn(console, 'error') + .mockImplementation(() => {}); + + await expect(parseArguments()).rejects.toThrow('process.exit called'); + + expect(mockConsoleError).toHaveBeenCalledWith( + expect.stringContaining( + 'Cannot use both --yolo (-y) and --approval-mode together. 
Use --approval-mode=yolo instead.', + ), + ); + + mockExit.mockRestore(); + mockConsoleError.mockRestore(); + }); + + it('should throw an error when using short flags -y and --approval-mode together', async () => { + process.argv = ['node', 'script.js', '-y', '--approval-mode', 'yolo']; + + const mockExit = vi.spyOn(process, 'exit').mockImplementation(() => { + throw new Error('process.exit called'); + }); + + const mockConsoleError = vi + .spyOn(console, 'error') + .mockImplementation(() => {}); + + await expect(parseArguments()).rejects.toThrow('process.exit called'); + + expect(mockConsoleError).toHaveBeenCalledWith( + expect.stringContaining( + 'Cannot use both --yolo (-y) and --approval-mode together. Use --approval-mode=yolo instead.', + ), + ); + + mockExit.mockRestore(); + mockConsoleError.mockRestore(); + }); + + it('should allow --approval-mode without --yolo', async () => { + process.argv = ['node', 'script.js', '--approval-mode', 'auto_edit']; + const argv = await parseArguments(); + expect(argv.approvalMode).toBe('auto_edit'); + expect(argv.yolo).toBe(false); + }); + + it('should allow --yolo without --approval-mode', async () => { + process.argv = ['node', 'script.js', '--yolo']; + const argv = await parseArguments(); + expect(argv.yolo).toBe(true); + expect(argv.approvalMode).toBeUndefined(); + }); + + it('should reject invalid --approval-mode values', async () => { + process.argv = ['node', 'script.js', '--approval-mode', 'invalid']; + + const mockExit = vi.spyOn(process, 'exit').mockImplementation(() => { + throw new Error('process.exit called'); + }); + + const mockConsoleError = vi + .spyOn(console, 'error') + .mockImplementation(() => {}); + + await expect(parseArguments()).rejects.toThrow('process.exit called'); + + expect(mockConsoleError).toHaveBeenCalledWith( + expect.stringContaining('Invalid values:'), + ); + + mockExit.mockRestore(); + mockConsoleError.mockRestore(); + }); }); describe('loadCliConfig', () => { @@ -760,6 +847,211 @@ 
describe('mergeExcludeTools', () => { }); }); +describe('Approval mode tool exclusion logic', () => { + const originalIsTTY = process.stdin.isTTY; + + beforeEach(() => { + process.stdin.isTTY = false; // Ensure non-interactive mode + }); + + afterEach(() => { + process.stdin.isTTY = originalIsTTY; + }); + + it('should exclude all interactive tools in non-interactive mode with default approval mode', async () => { + process.argv = ['node', 'script.js', '-p', 'test']; + const argv = await parseArguments(); + const settings: Settings = {}; + const extensions: Extension[] = []; + + const config = await loadCliConfig( + settings, + extensions, + 'test-session', + argv, + ); + + const excludedTools = config.getExcludeTools(); + expect(excludedTools).toContain(ShellTool.Name); + expect(excludedTools).toContain(EditTool.Name); + expect(excludedTools).toContain(WriteFileTool.Name); + }); + + it('should exclude all interactive tools in non-interactive mode with explicit default approval mode', async () => { + process.argv = [ + 'node', + 'script.js', + '--approval-mode', + 'default', + '-p', + 'test', + ]; + const argv = await parseArguments(); + const settings: Settings = {}; + const extensions: Extension[] = []; + + const config = await loadCliConfig( + settings, + extensions, + 'test-session', + argv, + ); + + const excludedTools = config.getExcludeTools(); + expect(excludedTools).toContain(ShellTool.Name); + expect(excludedTools).toContain(EditTool.Name); + expect(excludedTools).toContain(WriteFileTool.Name); + }); + + it('should exclude only shell tools in non-interactive mode with auto_edit approval mode', async () => { + process.argv = [ + 'node', + 'script.js', + '--approval-mode', + 'auto_edit', + '-p', + 'test', + ]; + const argv = await parseArguments(); + const settings: Settings = {}; + const extensions: Extension[] = []; + + const config = await loadCliConfig( + settings, + extensions, + 'test-session', + argv, + ); + + const excludedTools = 
config.getExcludeTools(); + expect(excludedTools).toContain(ShellTool.Name); + expect(excludedTools).not.toContain(EditTool.Name); + expect(excludedTools).not.toContain(WriteFileTool.Name); + }); + + it('should exclude no interactive tools in non-interactive mode with yolo approval mode', async () => { + process.argv = [ + 'node', + 'script.js', + '--approval-mode', + 'yolo', + '-p', + 'test', + ]; + const argv = await parseArguments(); + const settings: Settings = {}; + const extensions: Extension[] = []; + + const config = await loadCliConfig( + settings, + extensions, + 'test-session', + argv, + ); + + const excludedTools = config.getExcludeTools(); + expect(excludedTools).not.toContain(ShellTool.Name); + expect(excludedTools).not.toContain(EditTool.Name); + expect(excludedTools).not.toContain(WriteFileTool.Name); + }); + + it('should exclude no interactive tools in non-interactive mode with legacy yolo flag', async () => { + process.argv = ['node', 'script.js', '--yolo', '-p', 'test']; + const argv = await parseArguments(); + const settings: Settings = {}; + const extensions: Extension[] = []; + + const config = await loadCliConfig( + settings, + extensions, + 'test-session', + argv, + ); + + const excludedTools = config.getExcludeTools(); + expect(excludedTools).not.toContain(ShellTool.Name); + expect(excludedTools).not.toContain(EditTool.Name); + expect(excludedTools).not.toContain(WriteFileTool.Name); + }); + + it('should not exclude interactive tools in interactive mode regardless of approval mode', async () => { + process.stdin.isTTY = true; // Interactive mode + + const testCases = [ + { args: ['node', 'script.js'] }, // default + { args: ['node', 'script.js', '--approval-mode', 'default'] }, + { args: ['node', 'script.js', '--approval-mode', 'auto_edit'] }, + { args: ['node', 'script.js', '--approval-mode', 'yolo'] }, + { args: ['node', 'script.js', '--yolo'] }, + ]; + + for (const testCase of testCases) { + process.argv = testCase.args; + const argv = 
await parseArguments(); + const settings: Settings = {}; + const extensions: Extension[] = []; + + const config = await loadCliConfig( + settings, + extensions, + 'test-session', + argv, + ); + + const excludedTools = config.getExcludeTools(); + expect(excludedTools).not.toContain(ShellTool.Name); + expect(excludedTools).not.toContain(EditTool.Name); + expect(excludedTools).not.toContain(WriteFileTool.Name); + } + }); + + it('should merge approval mode exclusions with settings exclusions in auto_edit mode', async () => { + process.argv = [ + 'node', + 'script.js', + '--approval-mode', + 'auto_edit', + '-p', + 'test', + ]; + const argv = await parseArguments(); + const settings: Settings = { excludeTools: ['custom_tool'] }; + const extensions: Extension[] = []; + + const config = await loadCliConfig( + settings, + extensions, + 'test-session', + argv, + ); + + const excludedTools = config.getExcludeTools(); + expect(excludedTools).toContain('custom_tool'); // From settings + expect(excludedTools).toContain(ShellTool.Name); // From approval mode + expect(excludedTools).not.toContain(EditTool.Name); // Should be allowed in auto_edit + expect(excludedTools).not.toContain(WriteFileTool.Name); // Should be allowed in auto_edit + }); + + it('should throw an error for invalid approval mode values in loadCliConfig', async () => { + // Create a mock argv with an invalid approval mode that bypasses argument parsing validation + const invalidArgv: Partial & { approvalMode: string } = { + approvalMode: 'invalid_mode', + promptInteractive: '', + prompt: '', + yolo: false, + }; + + const settings: Settings = {}; + const extensions: Extension[] = []; + + await expect( + loadCliConfig(settings, extensions, 'test-session', invalidArgv), + ).rejects.toThrow( + 'Invalid approval mode: invalid_mode. 
Valid values are: yolo, auto_edit, default', + ); + }); +}); + describe('loadCliConfig with allowed-mcp-server-names', () => { const originalArgv = process.argv; const originalEnv = { ...process.env }; @@ -1327,3 +1619,80 @@ describe('loadCliConfig interactive', () => { expect(config.isInteractive()).toBe(false); }); }); + +describe('loadCliConfig approval mode', () => { + const originalArgv = process.argv; + const originalEnv = { ...process.env }; + + beforeEach(() => { + vi.resetAllMocks(); + vi.mocked(os.homedir).mockReturnValue('/mock/home/user'); + process.env.GEMINI_API_KEY = 'test-api-key'; + }); + + afterEach(() => { + process.argv = originalArgv; + process.env = originalEnv; + vi.restoreAllMocks(); + }); + + it('should default to DEFAULT approval mode when no flags are set', async () => { + process.argv = ['node', 'script.js']; + const argv = await parseArguments(); + const config = await loadCliConfig({}, [], 'test-session', argv); + expect(config.getApprovalMode()).toBe(ServerConfig.ApprovalMode.DEFAULT); + }); + + it('should set YOLO approval mode when --yolo flag is used', async () => { + process.argv = ['node', 'script.js', '--yolo']; + const argv = await parseArguments(); + const config = await loadCliConfig({}, [], 'test-session', argv); + expect(config.getApprovalMode()).toBe(ServerConfig.ApprovalMode.YOLO); + }); + + it('should set YOLO approval mode when -y flag is used', async () => { + process.argv = ['node', 'script.js', '-y']; + const argv = await parseArguments(); + const config = await loadCliConfig({}, [], 'test-session', argv); + expect(config.getApprovalMode()).toBe(ServerConfig.ApprovalMode.YOLO); + }); + + it('should set DEFAULT approval mode when --approval-mode=default', async () => { + process.argv = ['node', 'script.js', '--approval-mode', 'default']; + const argv = await parseArguments(); + const config = await loadCliConfig({}, [], 'test-session', argv); + expect(config.getApprovalMode()).toBe(ServerConfig.ApprovalMode.DEFAULT); 
+ }); + + it('should set AUTO_EDIT approval mode when --approval-mode=auto_edit', async () => { + process.argv = ['node', 'script.js', '--approval-mode', 'auto_edit']; + const argv = await parseArguments(); + const config = await loadCliConfig({}, [], 'test-session', argv); + expect(config.getApprovalMode()).toBe(ServerConfig.ApprovalMode.AUTO_EDIT); + }); + + it('should set YOLO approval mode when --approval-mode=yolo', async () => { + process.argv = ['node', 'script.js', '--approval-mode', 'yolo']; + const argv = await parseArguments(); + const config = await loadCliConfig({}, [], 'test-session', argv); + expect(config.getApprovalMode()).toBe(ServerConfig.ApprovalMode.YOLO); + }); + + it('should prioritize --approval-mode over --yolo when both would be valid (but validation prevents this)', async () => { + // Note: This test documents the intended behavior, but in practice the validation + // prevents both flags from being used together + process.argv = ['node', 'script.js', '--approval-mode', 'default']; + const argv = await parseArguments(); + // Manually set yolo to true to simulate what would happen if validation didn't prevent it + argv.yolo = true; + const config = await loadCliConfig({}, [], 'test-session', argv); + expect(config.getApprovalMode()).toBe(ServerConfig.ApprovalMode.DEFAULT); + }); + + it('should fall back to --yolo behavior when --approval-mode is not set', async () => { + process.argv = ['node', 'script.js', '--yolo']; + const argv = await parseArguments(); + const config = await loadCliConfig({}, [], 'test-session', argv); + expect(config.getApprovalMode()).toBe(ServerConfig.ApprovalMode.YOLO); + }); +}); diff --git a/packages/cli/src/config/config.ts b/packages/cli/src/config/config.ts index d0658e75..dd207ff2 100644 --- a/packages/cli/src/config/config.ts +++ b/packages/cli/src/config/config.ts @@ -57,6 +57,7 @@ export interface CliArgs { showMemoryUsage: boolean | undefined; show_memory_usage: boolean | undefined; yolo: boolean | 
undefined; + approvalMode: string | undefined; telemetry: boolean | undefined; checkpointing: boolean | undefined; telemetryTarget: string | undefined; @@ -147,6 +148,12 @@ export async function parseArguments(): Promise { 'Automatically accept all actions (aka YOLO mode, see https://www.youtube.com/watch?v=xvFZjo5PgG0 for more details)?', default: false, }) + .option('approval-mode', { + type: 'string', + choices: ['default', 'auto_edit', 'yolo'], + description: + 'Set the approval mode: default (prompt for approval), auto_edit (auto-approve edit tools), yolo (auto-approve all tools)', + }) .option('telemetry', { type: 'boolean', description: @@ -219,6 +226,11 @@ export async function parseArguments(): Promise { 'Cannot use both --prompt (-p) and --prompt-interactive (-i) together', ); } + if (argv.yolo && argv.approvalMode) { + throw new Error( + 'Cannot use both --yolo (-y) and --approval-mode together. Use --approval-mode=yolo instead.', + ); + } return true; }), ) @@ -356,20 +368,59 @@ export async function loadCliConfig( let mcpServers = mergeMcpServers(settings, activeExtensions); const question = argv.promptInteractive || argv.prompt || ''; - const approvalMode = - argv.yolo || false ? ApprovalMode.YOLO : ApprovalMode.DEFAULT; + + // Determine approval mode with backward compatibility + let approvalMode: ApprovalMode; + if (argv.approvalMode) { + // New --approval-mode flag takes precedence + switch (argv.approvalMode) { + case 'yolo': + approvalMode = ApprovalMode.YOLO; + break; + case 'auto_edit': + approvalMode = ApprovalMode.AUTO_EDIT; + break; + case 'default': + approvalMode = ApprovalMode.DEFAULT; + break; + default: + throw new Error( + `Invalid approval mode: ${argv.approvalMode}. Valid values are: yolo, auto_edit, default`, + ); + } + } else { + // Fallback to legacy --yolo flag behavior + approvalMode = + argv.yolo || false ? 
ApprovalMode.YOLO : ApprovalMode.DEFAULT; + } + const interactive = !!argv.promptInteractive || (process.stdin.isTTY && question.length === 0); - // In non-interactive and non-yolo mode, exclude interactive built in tools. - const extraExcludes = - !interactive && approvalMode !== ApprovalMode.YOLO - ? [ShellTool.Name, EditTool.Name, WriteFileTool.Name] - : undefined; + // In non-interactive mode, exclude tools that require a prompt. + const extraExcludes: string[] = []; + if (!interactive) { + switch (approvalMode) { + case ApprovalMode.DEFAULT: + // In default non-interactive mode, all tools that require approval are excluded. + extraExcludes.push(ShellTool.Name, EditTool.Name, WriteFileTool.Name); + break; + case ApprovalMode.AUTO_EDIT: + // In auto-edit non-interactive mode, only tools that still require a prompt are excluded. + extraExcludes.push(ShellTool.Name); + break; + case ApprovalMode.YOLO: + // No extra excludes for YOLO mode. + break; + default: + // This should never happen due to validation earlier, but satisfies the linter + break; + } + } const excludeTools = mergeExcludeTools( settings, activeExtensions, - extraExcludes, + extraExcludes.length > 0 ? 
extraExcludes : undefined, ); const blockedMcpServers: Array<{ name: string; extensionName: string }> = []; From 9d023be1d16a6bf7427569f863e6cfd2c3442d8b Mon Sep 17 00:00:00 2001 From: Tommaso Sciortino Date: Tue, 12 Aug 2025 15:57:27 -0700 Subject: [PATCH 12/45] Upgrade integration tests to use Vitest (#6021) --- .github/workflows/e2e.yml | 4 +- .vscode/launch.json | 13 -- docs/integration-tests.md | 16 +- integration-tests/file-system.test.ts | 154 +++++++-------- integration-tests/globalSetup.ts | 55 ++++++ integration-tests/google_web_search.test.ts | 126 ++++++------ integration-tests/list_directory.test.ts | 98 +++++----- ...st.js => mcp_server_cyclic_schema.test.ts} | 15 +- integration-tests/read_many_files.test.ts | 69 +++---- integration-tests/replace.test.ts | 91 +++++---- integration-tests/run-tests.js | 182 ------------------ integration-tests/run_shell_command.test.ts | 103 +++++----- integration-tests/save_memory.test.ts | 52 ++--- integration-tests/simple-mcp-server.test.ts | 14 +- integration-tests/test-helper.ts | 19 +- integration-tests/vitest.config.ts | 18 ++ integration-tests/write_file.test.ts | 94 ++++----- package.json | 8 +- 18 files changed, 511 insertions(+), 620 deletions(-) create mode 100644 integration-tests/globalSetup.ts rename integration-tests/{mcp_server_cyclic_schema.test.js => mcp_server_cyclic_schema.test.ts} (92%) delete mode 100644 integration-tests/run-tests.js create mode 100644 integration-tests/vitest.config.ts diff --git a/.github/workflows/e2e.yml b/.github/workflows/e2e.yml index f9f82422..a6f6ad5f 100644 --- a/.github/workflows/e2e.yml +++ b/.github/workflows/e2e.yml @@ -46,7 +46,9 @@ jobs: - name: Run E2E tests env: GEMINI_API_KEY: ${{ secrets.GEMINI_API_KEY }} - run: npm run test:integration:${{ matrix.sandbox }} -- --verbose --keep-output + VERBOSE: true + KEEP_OUTPUT: true + run: npm run test:integration:${{ matrix.sandbox }} e2e-test-macos: name: E2E Test - macOS diff --git a/.vscode/launch.json 
b/.vscode/launch.json index 97c9eba5..6e4a7605 100644 --- a/.vscode/launch.json +++ b/.vscode/launch.json @@ -17,19 +17,6 @@ "GEMINI_SANDBOX": "false" } }, - { - "type": "node", - "request": "launch", - "name": "Launch E2E", - "program": "${workspaceFolder}/integration-tests/run-tests.js", - "args": ["--verbose", "--keep-output", "list_directory"], - "skipFiles": ["/**"], - "cwd": "${workspaceFolder}", - "console": "integratedTerminal", - "env": { - "GEMINI_SANDBOX": "false" - } - }, { "name": "Launch Companion VS Code Extension", "type": "extensionHost", diff --git a/docs/integration-tests.md b/docs/integration-tests.md index 7a4c8489..6289b7a7 100644 --- a/docs/integration-tests.md +++ b/docs/integration-tests.md @@ -67,13 +67,9 @@ The integration test runner provides several options for diagnostics to help tra You can preserve the temporary files created during a test run for inspection. This is useful for debugging issues with file system operations. -To keep the test output, you can either use the `--keep-output` flag or set the `KEEP_OUTPUT` environment variable to `true`. +To keep the test output set the `KEEP_OUTPUT` environment variable to `true`. ```bash -# Using the flag -npm run test:integration:sandbox:none -- --keep-output - -# Using the environment variable KEEP_OUTPUT=true npm run test:integration:sandbox:none ``` @@ -81,20 +77,20 @@ When output is kept, the test runner will print the path to the unique directory ### Verbose output -For more detailed debugging, the `--verbose` flag streams the real-time output from the `gemini` command to the console. +For more detailed debugging, set the `VERBOSE` environment variable to `true`. ```bash -npm run test:integration:sandbox:none -- --verbose +VERBOSE=true npm run test:integration:sandbox:none ``` -When using `--verbose` and `--keep-output` in the same command, the output is streamed to the console and also saved to a log file within the test's temporary directory. 
+When using `VERBOSE=true` and `KEEP_OUTPUT=true` in the same command, the output is streamed to the console and also saved to a log file within the test's temporary directory. The verbose output is formatted to clearly identify the source of the logs: ``` ---- TEST: : --- +--- TEST: : --- ... output from the gemini command ... ---- END TEST: : --- +--- END TEST: : --- ``` ## Linting and formatting diff --git a/integration-tests/file-system.test.ts b/integration-tests/file-system.test.ts index d43f047f..5a7028e0 100644 --- a/integration-tests/file-system.test.ts +++ b/integration-tests/file-system.test.ts @@ -4,86 +4,90 @@ * SPDX-License-Identifier: Apache-2.0 */ -import { strict as assert } from 'assert'; -import { test } from 'node:test'; +import { describe, it, expect } from 'vitest'; import { TestRig, printDebugInfo, validateModelOutput } from './test-helper.js'; -test('should be able to read a file', async () => { - const rig = new TestRig(); - await rig.setup('should be able to read a file'); - rig.createFile('test.txt', 'hello world'); +describe('file-system', () => { + it('should be able to read a file', async () => { + const rig = new TestRig(); + await rig.setup('should be able to read a file'); + rig.createFile('test.txt', 'hello world'); - const result = await rig.run( - `read the file test.txt and show me its contents`, - ); + const result = await rig.run( + `read the file test.txt and show me its contents`, + ); - const foundToolCall = await rig.waitForToolCall('read_file'); + const foundToolCall = await rig.waitForToolCall('read_file'); - // Add debugging information - if (!foundToolCall || !result.includes('hello world')) { - printDebugInfo(rig, result, { - 'Found tool call': foundToolCall, - 'Contains hello world': result.includes('hello world'), - }); - } + // Add debugging information + if (!foundToolCall || !result.includes('hello world')) { + printDebugInfo(rig, result, { + 'Found tool call': foundToolCall, + 'Contains hello world': 
result.includes('hello world'), + }); + } - assert.ok(foundToolCall, 'Expected to find a read_file tool call'); + expect( + foundToolCall, + 'Expected to find a read_file tool call', + ).toBeTruthy(); - // Validate model output - will throw if no output, warn if missing expected content - validateModelOutput(result, 'hello world', 'File read test'); -}); - -test('should be able to write a file', async () => { - const rig = new TestRig(); - await rig.setup('should be able to write a file'); - rig.createFile('test.txt', ''); - - const result = await rig.run(`edit test.txt to have a hello world message`); - - // Accept multiple valid tools for editing files - const foundToolCall = await rig.waitForAnyToolCall([ - 'write_file', - 'edit', - 'replace', - ]); - - // Add debugging information - if (!foundToolCall) { - printDebugInfo(rig, result); - } - - assert.ok( - foundToolCall, - 'Expected to find a write_file, edit, or replace tool call', - ); - - // Validate model output - will throw if no output - validateModelOutput(result, null, 'File write test'); - - const fileContent = rig.readFile('test.txt'); - - // Add debugging for file content - if (!fileContent.toLowerCase().includes('hello')) { - const writeCalls = rig - .readToolLogs() - .filter((t) => t.toolRequest.name === 'write_file') - .map((t) => t.toolRequest.args); - - printDebugInfo(rig, result, { - 'File content mismatch': true, - 'Expected to contain': 'hello', - 'Actual content': fileContent, - 'Write tool calls': JSON.stringify(writeCalls), - }); - } - - assert.ok( - fileContent.toLowerCase().includes('hello'), - 'Expected file to contain hello', - ); - - // Log success info if verbose - if (process.env.VERBOSE === 'true') { - console.log('File written successfully with hello message.'); - } + // Validate model output - will throw if no output, warn if missing expected content + validateModelOutput(result, 'hello world', 'File read test'); + }); + + it('should be able to write a file', async () => { + const 
rig = new TestRig(); + await rig.setup('should be able to write a file'); + rig.createFile('test.txt', ''); + + const result = await rig.run(`edit test.txt to have a hello world message`); + + // Accept multiple valid tools for editing files + const foundToolCall = await rig.waitForAnyToolCall([ + 'write_file', + 'edit', + 'replace', + ]); + + // Add debugging information + if (!foundToolCall) { + printDebugInfo(rig, result); + } + + expect( + foundToolCall, + 'Expected to find a write_file, edit, or replace tool call', + ).toBeTruthy(); + + // Validate model output - will throw if no output + validateModelOutput(result, null, 'File write test'); + + const fileContent = rig.readFile('test.txt'); + + // Add debugging for file content + if (!fileContent.toLowerCase().includes('hello')) { + const writeCalls = rig + .readToolLogs() + .filter((t) => t.toolRequest.name === 'write_file') + .map((t) => t.toolRequest.args); + + printDebugInfo(rig, result, { + 'File content mismatch': true, + 'Expected to contain': 'hello', + 'Actual content': fileContent, + 'Write tool calls': JSON.stringify(writeCalls), + }); + } + + expect( + fileContent.toLowerCase().includes('hello'), + 'Expected file to contain hello', + ).toBeTruthy(); + + // Log success info if verbose + if (process.env.VERBOSE === 'true') { + console.log('File written successfully with hello message.'); + } + }); }); diff --git a/integration-tests/globalSetup.ts b/integration-tests/globalSetup.ts new file mode 100644 index 00000000..89ca203f --- /dev/null +++ b/integration-tests/globalSetup.ts @@ -0,0 +1,55 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { mkdir, readdir, rm } from 'fs/promises'; +import { join, dirname } from 'path'; +import { fileURLToPath } from 'url'; + +const __dirname = dirname(fileURLToPath(import.meta.url)); +const rootDir = join(__dirname, '..'); +const integrationTestsDir = join(rootDir, '.integration-tests'); +let runDir = ''; // 
Make runDir accessible in teardown + +export async function setup() { + runDir = join(integrationTestsDir, `${Date.now()}`); + await mkdir(runDir, { recursive: true }); + + // Clean up old test runs, but keep the latest few for debugging + try { + const testRuns = await readdir(integrationTestsDir); + if (testRuns.length > 5) { + const oldRuns = testRuns.sort().slice(0, testRuns.length - 5); + await Promise.all( + oldRuns.map((oldRun) => + rm(join(integrationTestsDir, oldRun), { + recursive: true, + force: true, + }), + ), + ); + } + } catch (e) { + console.error('Error cleaning up old test runs:', e); + } + + process.env.INTEGRATION_TEST_FILE_DIR = runDir; + process.env.GEMINI_CLI_INTEGRATION_TEST = 'true'; + process.env.TELEMETRY_LOG_FILE = join(runDir, 'telemetry.log'); + + if (process.env.KEEP_OUTPUT) { + console.log(`Keeping output for test run in: ${runDir}`); + } + process.env.VERBOSE = process.env.VERBOSE ?? 'false'; + + console.log(`\nIntegration test output directory: ${runDir}`); +} + +export async function teardown() { + // Cleanup the test run directory unless KEEP_OUTPUT is set + if (process.env.KEEP_OUTPUT !== 'true' && runDir) { + await rm(runDir, { recursive: true, force: true }); + } +} diff --git a/integration-tests/google_web_search.test.ts b/integration-tests/google_web_search.test.ts index 6fb365a0..698edfe5 100644 --- a/integration-tests/google_web_search.test.ts +++ b/integration-tests/google_web_search.test.ts @@ -4,74 +4,78 @@ * SPDX-License-Identifier: Apache-2.0 */ -import { test } from 'node:test'; -import { strict as assert } from 'assert'; +import { describe, it, expect } from 'vitest'; import { TestRig, printDebugInfo, validateModelOutput } from './test-helper.js'; -test('should be able to search the web', async () => { - const rig = new TestRig(); - await rig.setup('should be able to search the web'); +describe('google_web_search', () => { + it('should be able to search the web', async () => { + const rig = new TestRig(); + await 
rig.setup('should be able to search the web'); - let result; - try { - result = await rig.run(`what is the weather in London`); - } catch (error) { - // Network errors can occur in CI environments - if ( - error instanceof Error && - (error.message.includes('network') || error.message.includes('timeout')) - ) { - console.warn( - 'Skipping test due to network error:', - (error as Error).message, - ); - return; // Skip the test + let result; + try { + result = await rig.run(`what is the weather in London`); + } catch (error) { + // Network errors can occur in CI environments + if ( + error instanceof Error && + (error.message.includes('network') || error.message.includes('timeout')) + ) { + console.warn( + 'Skipping test due to network error:', + (error as Error).message, + ); + return; // Skip the test + } + throw error; // Re-throw if not a network error } - throw error; // Re-throw if not a network error - } - const foundToolCall = await rig.waitForToolCall('google_web_search'); + const foundToolCall = await rig.waitForToolCall('google_web_search'); - // Add debugging information - if (!foundToolCall) { - const allTools = printDebugInfo(rig, result); + // Add debugging information + if (!foundToolCall) { + const allTools = printDebugInfo(rig, result); - // Check if the tool call failed due to network issues - const failedSearchCalls = allTools.filter( - (t) => - t.toolRequest.name === 'google_web_search' && !t.toolRequest.success, + // Check if the tool call failed due to network issues + const failedSearchCalls = allTools.filter( + (t) => + t.toolRequest.name === 'google_web_search' && !t.toolRequest.success, + ); + if (failedSearchCalls.length > 0) { + console.warn( + 'google_web_search tool was called but failed, possibly due to network issues', + ); + console.warn( + 'Failed calls:', + failedSearchCalls.map((t) => t.toolRequest.args), + ); + return; // Skip the test if network issues + } + } + + expect( + foundToolCall, + 'Expected to find a call to 
google_web_search', + ).toBeTruthy(); + + // Validate model output - will throw if no output, warn if missing expected content + const hasExpectedContent = validateModelOutput( + result, + ['weather', 'london'], + 'Google web search test', ); - if (failedSearchCalls.length > 0) { - console.warn( - 'google_web_search tool was called but failed, possibly due to network issues', - ); - console.warn( - 'Failed calls:', - failedSearchCalls.map((t) => t.toolRequest.args), - ); - return; // Skip the test if network issues + + // If content was missing, log the search queries used + if (!hasExpectedContent) { + const searchCalls = rig + .readToolLogs() + .filter((t) => t.toolRequest.name === 'google_web_search'); + if (searchCalls.length > 0) { + console.warn( + 'Search queries used:', + searchCalls.map((t) => t.toolRequest.args), + ); + } } - } - - assert.ok(foundToolCall, 'Expected to find a call to google_web_search'); - - // Validate model output - will throw if no output, warn if missing expected content - const hasExpectedContent = validateModelOutput( - result, - ['weather', 'london'], - 'Google web search test', - ); - - // If content was missing, log the search queries used - if (!hasExpectedContent) { - const searchCalls = rig - .readToolLogs() - .filter((t) => t.toolRequest.name === 'google_web_search'); - if (searchCalls.length > 0) { - console.warn( - 'Search queries used:', - searchCalls.map((t) => t.toolRequest.args), - ); - } - } + }); }); diff --git a/integration-tests/list_directory.test.ts b/integration-tests/list_directory.test.ts index 023eca12..38416f4f 100644 --- a/integration-tests/list_directory.test.ts +++ b/integration-tests/list_directory.test.ts @@ -4,59 +4,63 @@ * SPDX-License-Identifier: Apache-2.0 */ -import { test } from 'node:test'; -import { strict as assert } from 'assert'; +import { describe, it, expect } from 'vitest'; import { TestRig, printDebugInfo, validateModelOutput } from './test-helper.js'; import { existsSync } from 'fs'; 
import { join } from 'path'; -test('should be able to list a directory', async () => { - const rig = new TestRig(); - await rig.setup('should be able to list a directory'); - rig.createFile('file1.txt', 'file 1 content'); - rig.mkdir('subdir'); - rig.sync(); +describe('list_directory', () => { + it('should be able to list a directory', async () => { + const rig = new TestRig(); + await rig.setup('should be able to list a directory'); + rig.createFile('file1.txt', 'file 1 content'); + rig.mkdir('subdir'); + rig.sync(); - // Poll for filesystem changes to propagate in containers - await rig.poll( - () => { - // Check if the files exist in the test directory - const file1Path = join(rig.testDir!, 'file1.txt'); - const subdirPath = join(rig.testDir!, 'subdir'); - return existsSync(file1Path) && existsSync(subdirPath); - }, - 1000, // 1 second max wait - 50, // check every 50ms - ); - - const prompt = `Can you list the files in the current directory. Display them in the style of 'ls'`; - - const result = await rig.run(prompt); - - const foundToolCall = await rig.waitForToolCall('list_directory'); - - // Add debugging information - if ( - !foundToolCall || - !result.includes('file1.txt') || - !result.includes('subdir') - ) { - const allTools = printDebugInfo(rig, result, { - 'Found tool call': foundToolCall, - 'Contains file1.txt': result.includes('file1.txt'), - 'Contains subdir': result.includes('subdir'), - }); - - console.error( - 'List directory calls:', - allTools - .filter((t) => t.toolRequest.name === 'list_directory') - .map((t) => t.toolRequest.args), + // Poll for filesystem changes to propagate in containers + await rig.poll( + () => { + // Check if the files exist in the test directory + const file1Path = join(rig.testDir!, 'file1.txt'); + const subdirPath = join(rig.testDir!, 'subdir'); + return existsSync(file1Path) && existsSync(subdirPath); + }, + 1000, // 1 second max wait + 50, // check every 50ms ); - } - assert.ok(foundToolCall, 'Expected to find a 
list_directory tool call'); + const prompt = `Can you list the files in the current directory. Display them in the style of 'ls'`; - // Validate model output - will throw if no output, warn if missing expected content - validateModelOutput(result, ['file1.txt', 'subdir'], 'List directory test'); + const result = await rig.run(prompt); + + const foundToolCall = await rig.waitForToolCall('list_directory'); + + // Add debugging information + if ( + !foundToolCall || + !result.includes('file1.txt') || + !result.includes('subdir') + ) { + const allTools = printDebugInfo(rig, result, { + 'Found tool call': foundToolCall, + 'Contains file1.txt': result.includes('file1.txt'), + 'Contains subdir': result.includes('subdir'), + }); + + console.error( + 'List directory calls:', + allTools + .filter((t) => t.toolRequest.name === 'list_directory') + .map((t) => t.toolRequest.args), + ); + } + + expect( + foundToolCall, + 'Expected to find a list_directory tool call', + ).toBeTruthy(); + + // Validate model output - will throw if no output, warn if missing expected content + validateModelOutput(result, ['file1.txt', 'subdir'], 'List directory test'); + }); }); diff --git a/integration-tests/mcp_server_cyclic_schema.test.js b/integration-tests/mcp_server_cyclic_schema.test.ts similarity index 92% rename from integration-tests/mcp_server_cyclic_schema.test.js rename to integration-tests/mcp_server_cyclic_schema.test.ts index 1ace98f1..18c1bcde 100644 --- a/integration-tests/mcp_server_cyclic_schema.test.js +++ b/integration-tests/mcp_server_cyclic_schema.test.ts @@ -9,15 +9,11 @@ * and then detect and warn about the potential tools that caused the error. 
*/ -import { test, describe, before } from 'node:test'; -import { strict as assert } from 'node:assert'; +import { describe, it, beforeAll, expect } from 'vitest'; import { TestRig } from './test-helper.js'; import { join } from 'path'; -import { fileURLToPath } from 'url'; import { writeFileSync } from 'fs'; -const __dirname = fileURLToPath(new URL('.', import.meta.url)); - // Create a minimal MCP server that doesn't require external dependencies // This implements the MCP protocol directly using Node.js built-ins const serverScript = `#!/usr/bin/env node @@ -160,7 +156,7 @@ rpc.send({ describe('mcp server with cyclic tool schema is detected', () => { const rig = new TestRig(); - before(async () => { + beforeAll(async () => { // Setup test directory with MCP server configuration await rig.setup('cyclic-schema-mcp-server', { settings: { @@ -174,7 +170,7 @@ describe('mcp server with cyclic tool schema is detected', () => { }); // Create server script in the test directory - const testServerPath = join(rig.testDir, 'mcp-server.cjs'); + const testServerPath = join(rig.testDir!, 'mcp-server.cjs'); writeFileSync(testServerPath, serverScript); // Make the script executable (though running with 'node' should work anyway) @@ -184,15 +180,14 @@ describe('mcp server with cyclic tool schema is detected', () => { } }); - test('should error and suggest disabling the cyclic tool', async () => { + it('should error and suggest disabling the cyclic tool', async () => { // Just run any command to trigger the schema depth error. // If this test starts failing, check `isSchemaDepthError` from // geminiChat.ts to see if it needs to be updated. // Or, possibly it could mean that gemini has fixed the issue. 
const output = await rig.run('hello'); - assert.match( - output, + expect(output).toMatch( /Skipping tool 'tool_with_cyclic_schema' from MCP server 'cyclic-schema-server' because it has missing types in its parameter schema/, ); }); diff --git a/integration-tests/read_many_files.test.ts b/integration-tests/read_many_files.test.ts index 74d2f358..8e839a6a 100644 --- a/integration-tests/read_many_files.test.ts +++ b/integration-tests/read_many_files.test.ts @@ -4,47 +4,48 @@ * SPDX-License-Identifier: Apache-2.0 */ -import { test } from 'node:test'; -import { strict as assert } from 'assert'; +import { describe, it, expect } from 'vitest'; import { TestRig, printDebugInfo, validateModelOutput } from './test-helper.js'; -test('should be able to read multiple files', async () => { - const rig = new TestRig(); - await rig.setup('should be able to read multiple files'); - rig.createFile('file1.txt', 'file 1 content'); - rig.createFile('file2.txt', 'file 2 content'); +describe('read_many_files', () => { + it('should be able to read multiple files', async () => { + const rig = new TestRig(); + await rig.setup('should be able to read multiple files'); + rig.createFile('file1.txt', 'file 1 content'); + rig.createFile('file2.txt', 'file 2 content'); - const prompt = `Please use read_many_files to read file1.txt and file2.txt and show me what's in them`; + const prompt = `Please use read_many_files to read file1.txt and file2.txt and show me what's in them`; - const result = await rig.run(prompt); + const result = await rig.run(prompt); - // Check for either read_many_files or multiple read_file calls - const allTools = rig.readToolLogs(); - const readManyFilesCall = await rig.waitForToolCall('read_many_files'); - const readFileCalls = allTools.filter( - (t) => t.toolRequest.name === 'read_file', - ); + // Check for either read_many_files or multiple read_file calls + const allTools = rig.readToolLogs(); + const readManyFilesCall = await rig.waitForToolCall('read_many_files'); 
+ const readFileCalls = allTools.filter( + (t) => t.toolRequest.name === 'read_file', + ); - // Accept either read_many_files OR at least 2 read_file calls - const foundValidPattern = readManyFilesCall || readFileCalls.length >= 2; + // Accept either read_many_files OR at least 2 read_file calls + const foundValidPattern = readManyFilesCall || readFileCalls.length >= 2; - // Add debugging information - if (!foundValidPattern) { - printDebugInfo(rig, result, { - 'read_many_files called': readManyFilesCall, - 'read_file calls': readFileCalls.length, - }); - } + // Add debugging information + if (!foundValidPattern) { + printDebugInfo(rig, result, { + 'read_many_files called': readManyFilesCall, + 'read_file calls': readFileCalls.length, + }); + } - assert.ok( - foundValidPattern, - 'Expected to find either read_many_files or multiple read_file tool calls', - ); + expect( + foundValidPattern, + 'Expected to find either read_many_files or multiple read_file tool calls', + ).toBeTruthy(); - // Validate model output - will throw if no output, warn if missing expected content - validateModelOutput( - result, - ['file 1 content', 'file 2 content'], - 'Read many files test', - ); + // Validate model output - will throw if no output, warn if missing expected content + validateModelOutput( + result, + ['file 1 content', 'file 2 content'], + 'Read many files test', + ); + }); }); diff --git a/integration-tests/replace.test.ts b/integration-tests/replace.test.ts index 1ac6f5a4..3a2d979b 100644 --- a/integration-tests/replace.test.ts +++ b/integration-tests/replace.test.ts @@ -4,63 +4,60 @@ * SPDX-License-Identifier: Apache-2.0 */ -import { test } from 'node:test'; -import { strict as assert } from 'assert'; +import { describe, it, expect } from 'vitest'; import { TestRig, printDebugInfo, validateModelOutput } from './test-helper.js'; -test('should be able to replace content in a file', async () => { - const rig = new TestRig(); - await rig.setup('should be able to replace 
content in a file'); +describe('replace', () => { + it('should be able to replace content in a file', async () => { + const rig = new TestRig(); + await rig.setup('should be able to replace content in a file'); - const fileName = 'file_to_replace.txt'; - const originalContent = 'original content'; - const expectedContent = 'replaced content'; + const fileName = 'file_to_replace.txt'; + const originalContent = 'original content'; + const expectedContent = 'replaced content'; - rig.createFile(fileName, originalContent); - const prompt = `Can you replace 'original' with 'replaced' in the file 'file_to_replace.txt'`; + rig.createFile(fileName, originalContent); + const prompt = `Can you replace 'original' with 'replaced' in the file 'file_to_replace.txt'`; - const result = await rig.run(prompt); + const result = await rig.run(prompt); - const foundToolCall = await rig.waitForToolCall('replace'); + const foundToolCall = await rig.waitForToolCall('replace'); - // Add debugging information - if (!foundToolCall) { - printDebugInfo(rig, result); - } + // Add debugging information + if (!foundToolCall) { + printDebugInfo(rig, result); + } - assert.ok(foundToolCall, 'Expected to find a replace tool call'); + expect(foundToolCall, 'Expected to find a replace tool call').toBeTruthy(); - // Validate model output - will throw if no output, warn if missing expected content - validateModelOutput( - result, - ['replaced', 'file_to_replace.txt'], - 'Replace content test', - ); - - const newFileContent = rig.readFile(fileName); - - // Add debugging for file content - if (newFileContent !== expectedContent) { - console.error('File content mismatch - Debug info:'); - console.error('Expected:', expectedContent); - console.error('Actual:', newFileContent); - console.error( - 'Tool calls:', - rig.readToolLogs().map((t) => ({ - name: t.toolRequest.name, - args: t.toolRequest.args, - })), + // Validate model output - will throw if no output, warn if missing expected content + 
validateModelOutput( + result, + ['replaced', 'file_to_replace.txt'], + 'Replace content test', ); - } - assert.strictEqual( - newFileContent, - expectedContent, - 'File content should be updated correctly', - ); + const newFileContent = rig.readFile(fileName); - // Log success info if verbose - if (process.env.VERBOSE === 'true') { - console.log('File replaced successfully. New content:', newFileContent); - } + // Add debugging for file content + if (newFileContent !== expectedContent) { + console.error('File content mismatch - Debug info:'); + console.error('Expected:', expectedContent); + console.error('Actual:', newFileContent); + console.error( + 'Tool calls:', + rig.readToolLogs().map((t) => ({ + name: t.toolRequest.name, + args: t.toolRequest.args, + })), + ); + } + + expect(newFileContent).toBe(expectedContent); + + // Log success info if verbose + if (process.env.VERBOSE === 'true') { + console.log('File replaced successfully. New content:', newFileContent); + } + }); }); diff --git a/integration-tests/run-tests.js b/integration-tests/run-tests.js deleted file mode 100644 index b33e1afa..00000000 --- a/integration-tests/run-tests.js +++ /dev/null @@ -1,182 +0,0 @@ -/** - * @license - * Copyright 2025 Google LLC - * SPDX-License-Identifier: Apache-2.0 - */ - -import { spawnSync, spawn } from 'child_process'; -import { mkdirSync, rmSync, createWriteStream } from 'fs'; -import { join, dirname, basename } from 'path'; -import { fileURLToPath } from 'url'; -import { glob } from 'glob'; - -async function main() { - const __dirname = dirname(fileURLToPath(import.meta.url)); - const rootDir = join(__dirname, '..'); - const integrationTestsDir = join(rootDir, '.integration-tests'); - - if (process.env.GEMINI_SANDBOX === 'docker' && !process.env.IS_DOCKER) { - console.log('Building sandbox for Docker...'); - const buildResult = spawnSync('npm', ['run', 'build:all'], { - stdio: 'inherit', - }); - if (buildResult.status !== 0) { - console.error('Sandbox build 
failed.'); - process.exit(1); - } - } - - const runId = `${Date.now()}`; - const runDir = join(integrationTestsDir, runId); - - mkdirSync(runDir, { recursive: true }); - - const args = process.argv.slice(2); - const keepOutput = - process.env.KEEP_OUTPUT === 'true' || args.includes('--keep-output'); - if (keepOutput) { - const keepOutputIndex = args.indexOf('--keep-output'); - if (keepOutputIndex > -1) { - args.splice(keepOutputIndex, 1); - } - console.log(`Keeping output for test run in: ${runDir}`); - } - - const verbose = args.includes('--verbose'); - if (verbose) { - const verboseIndex = args.indexOf('--verbose'); - if (verboseIndex > -1) { - args.splice(verboseIndex, 1); - } - } - - const testPatterns = - args.length > 0 - ? args.map((arg) => `integration-tests/${arg}.test.ts`) - : ['integration-tests/*.test.ts']; - const testFiles = glob.sync(testPatterns, { cwd: rootDir, absolute: true }); - - for (const testFile of testFiles) { - const testFileName = basename(testFile); - console.log(` Found test file: ${testFileName}`); - } - - const MAX_RETRIES = 3; - let allTestsPassed = true; - - for (const testFile of testFiles) { - const testFileName = basename(testFile); - const testFileDir = join(runDir, testFileName); - mkdirSync(testFileDir, { recursive: true }); - - console.log( - `------------- Running test file: ${testFileName} ------------------------------`, - ); - - let attempt = 0; - let testFilePassed = false; - let lastStdout = []; - let lastStderr = []; - - while (attempt < MAX_RETRIES && !testFilePassed) { - attempt++; - if (attempt > 1) { - console.log( - `--- Retrying ${testFileName} (attempt ${attempt} of ${MAX_RETRIES}) ---`, - ); - } - - const nodeArgs = ['--test']; - if (verbose) { - nodeArgs.push('--test-reporter=spec'); - } - nodeArgs.push(testFile); - - const child = spawn('npx', ['tsx', ...nodeArgs], { - stdio: 'pipe', - env: { - ...process.env, - GEMINI_CLI_INTEGRATION_TEST: 'true', - INTEGRATION_TEST_FILE_DIR: testFileDir, - KEEP_OUTPUT: 
keepOutput.toString(), - VERBOSE: verbose.toString(), - TEST_FILE_NAME: testFileName, - TELEMETRY_LOG_FILE: join(testFileDir, 'telemetry.log'), - }, - }); - - let outputStream; - if (keepOutput) { - const outputFile = join(testFileDir, `output-attempt-${attempt}.log`); - outputStream = createWriteStream(outputFile); - console.log(`Output for ${testFileName} written to: ${outputFile}`); - } - - const stdout = []; - const stderr = []; - - child.stdout.on('data', (data) => { - if (verbose) { - process.stdout.write(data); - } else { - stdout.push(data); - } - if (outputStream) { - outputStream.write(data); - } - }); - - child.stderr.on('data', (data) => { - if (verbose) { - process.stderr.write(data); - } else { - stderr.push(data); - } - if (outputStream) { - outputStream.write(data); - } - }); - - const exitCode = await new Promise((resolve) => { - child.on('close', (code) => { - if (outputStream) { - outputStream.end(() => { - resolve(code); - }); - } else { - resolve(code); - } - }); - }); - - if (exitCode === 0) { - testFilePassed = true; - } else { - lastStdout = stdout; - lastStderr = stderr; - } - } - - if (!testFilePassed) { - console.error( - `Test file failed after ${MAX_RETRIES} attempts: ${testFileName}`, - ); - if (!verbose) { - process.stdout.write(Buffer.concat(lastStdout).toString('utf8')); - process.stderr.write(Buffer.concat(lastStderr).toString('utf8')); - } - allTestsPassed = false; - } - } - - if (!keepOutput) { - rmSync(runDir, { recursive: true, force: true }); - } - - if (!allTestsPassed) { - console.error('One or more test files failed.'); - process.exit(1); - } -} - -main(); diff --git a/integration-tests/run_shell_command.test.ts b/integration-tests/run_shell_command.test.ts index 2a5f9ed4..a1aa08ae 100644 --- a/integration-tests/run_shell_command.test.ts +++ b/integration-tests/run_shell_command.test.ts @@ -4,60 +4,67 @@ * SPDX-License-Identifier: Apache-2.0 */ -import { test } from 'node:test'; -import { strict as assert } from 'assert'; 
+import { describe, it, expect } from 'vitest'; import { TestRig, printDebugInfo, validateModelOutput } from './test-helper.js'; -test('should be able to run a shell command', async () => { - const rig = new TestRig(); - await rig.setup('should be able to run a shell command'); +describe('run_shell_command', () => { + it('should be able to run a shell command', async () => { + const rig = new TestRig(); + await rig.setup('should be able to run a shell command'); - const prompt = `Please run the command "echo hello-world" and show me the output`; + const prompt = `Please run the command "echo hello-world" and show me the output`; - const result = await rig.run(prompt); + const result = await rig.run(prompt); - const foundToolCall = await rig.waitForToolCall('run_shell_command'); + const foundToolCall = await rig.waitForToolCall('run_shell_command'); - // Add debugging information - if (!foundToolCall || !result.includes('hello-world')) { - printDebugInfo(rig, result, { - 'Found tool call': foundToolCall, - 'Contains hello-world': result.includes('hello-world'), - }); - } + // Add debugging information + if (!foundToolCall || !result.includes('hello-world')) { + printDebugInfo(rig, result, { + 'Found tool call': foundToolCall, + 'Contains hello-world': result.includes('hello-world'), + }); + } - assert.ok(foundToolCall, 'Expected to find a run_shell_command tool call'); + expect( + foundToolCall, + 'Expected to find a run_shell_command tool call', + ).toBeTruthy(); - // Validate model output - will throw if no output, warn if missing expected content - // Model often reports exit code instead of showing output - validateModelOutput( - result, - ['hello-world', 'exit code 0'], - 'Shell command test', - ); -}); - -test('should be able to run a shell command via stdin', async () => { - const rig = new TestRig(); - await rig.setup('should be able to run a shell command via stdin'); - - const prompt = `Please run the command "echo test-stdin" and show me what it outputs`; 
- - const result = await rig.run({ stdin: prompt }); - - const foundToolCall = await rig.waitForToolCall('run_shell_command'); - - // Add debugging information - if (!foundToolCall || !result.includes('test-stdin')) { - printDebugInfo(rig, result, { - 'Test type': 'Stdin test', - 'Found tool call': foundToolCall, - 'Contains test-stdin': result.includes('test-stdin'), - }); - } - - assert.ok(foundToolCall, 'Expected to find a run_shell_command tool call'); - - // Validate model output - will throw if no output, warn if missing expected content - validateModelOutput(result, 'test-stdin', 'Shell command stdin test'); + // Validate model output - will throw if no output, warn if missing expected content + // Model often reports exit code instead of showing output + validateModelOutput( + result, + ['hello-world', 'exit code 0'], + 'Shell command test', + ); + }); + + it('should be able to run a shell command via stdin', async () => { + const rig = new TestRig(); + await rig.setup('should be able to run a shell command via stdin'); + + const prompt = `Please run the command "echo test-stdin" and show me what it outputs`; + + const result = await rig.run({ stdin: prompt }); + + const foundToolCall = await rig.waitForToolCall('run_shell_command'); + + // Add debugging information + if (!foundToolCall || !result.includes('test-stdin')) { + printDebugInfo(rig, result, { + 'Test type': 'Stdin test', + 'Found tool call': foundToolCall, + 'Contains test-stdin': result.includes('test-stdin'), + }); + } + + expect( + foundToolCall, + 'Expected to find a run_shell_command tool call', + ).toBeTruthy(); + + // Validate model output - will throw if no output, warn if missing expected content + validateModelOutput(result, 'test-stdin', 'Shell command stdin test'); + }); }); diff --git a/integration-tests/save_memory.test.ts b/integration-tests/save_memory.test.ts index 3ec641d4..15b062e9 100644 --- a/integration-tests/save_memory.test.ts +++ b/integration-tests/save_memory.test.ts 
@@ -4,38 +4,42 @@ * SPDX-License-Identifier: Apache-2.0 */ -import { test } from 'node:test'; -import { strict as assert } from 'assert'; +import { describe, it, expect } from 'vitest'; import { TestRig, printDebugInfo, validateModelOutput } from './test-helper.js'; -test('should be able to save to memory', async () => { - const rig = new TestRig(); - await rig.setup('should be able to save to memory'); +describe('save_memory', () => { + it('should be able to save to memory', async () => { + const rig = new TestRig(); + await rig.setup('should be able to save to memory'); - const prompt = `remember that my favorite color is blue. + const prompt = `remember that my favorite color is blue. what is my favorite color? tell me that and surround it with $ symbol`; - const result = await rig.run(prompt); + const result = await rig.run(prompt); - const foundToolCall = await rig.waitForToolCall('save_memory'); + const foundToolCall = await rig.waitForToolCall('save_memory'); - // Add debugging information - if (!foundToolCall || !result.toLowerCase().includes('blue')) { - const allTools = printDebugInfo(rig, result, { - 'Found tool call': foundToolCall, - 'Contains blue': result.toLowerCase().includes('blue'), - }); + // Add debugging information + if (!foundToolCall || !result.toLowerCase().includes('blue')) { + const allTools = printDebugInfo(rig, result, { + 'Found tool call': foundToolCall, + 'Contains blue': result.toLowerCase().includes('blue'), + }); - console.error( - 'Memory tool calls:', - allTools - .filter((t) => t.toolRequest.name === 'save_memory') - .map((t) => t.toolRequest.args), - ); - } + console.error( + 'Memory tool calls:', + allTools + .filter((t) => t.toolRequest.name === 'save_memory') + .map((t) => t.toolRequest.args), + ); + } - assert.ok(foundToolCall, 'Expected to find a save_memory tool call'); + expect( + foundToolCall, + 'Expected to find a save_memory tool call', + ).toBeTruthy(); - // Validate model output - will throw if no output, warn if 
missing expected content - validateModelOutput(result, 'blue', 'Save memory test'); + // Validate model output - will throw if no output, warn if missing expected content + validateModelOutput(result, 'blue', 'Save memory test'); + }); }); diff --git a/integration-tests/simple-mcp-server.test.ts b/integration-tests/simple-mcp-server.test.ts index c4191078..98c81f16 100644 --- a/integration-tests/simple-mcp-server.test.ts +++ b/integration-tests/simple-mcp-server.test.ts @@ -10,8 +10,7 @@ * external dependencies, making it compatible with Docker sandbox mode. */ -import { test, describe, before } from 'node:test'; -import { strict as assert } from 'node:assert'; +import { describe, it, beforeAll, expect } from 'vitest'; import { TestRig, validateModelOutput } from './test-helper.js'; import { join } from 'path'; import { writeFileSync } from 'fs'; @@ -168,7 +167,7 @@ rpc.send({ describe('simple-mcp-server', () => { const rig = new TestRig(); - before(async () => { + beforeAll(async () => { // Setup test directory with MCP server configuration await rig.setup('simple-mcp-server', { settings: { @@ -192,17 +191,20 @@ describe('simple-mcp-server', () => { } }); - test('should add two numbers', async () => { + it('should add two numbers', async () => { // Test directory is already set up in before hook // Just run the command - MCP server config is in settings.json const output = await rig.run('add 5 and 10'); const foundToolCall = await rig.waitForToolCall('add'); - assert.ok(foundToolCall, 'Expected to find an add tool call'); + expect(foundToolCall, 'Expected to find an add tool call').toBeTruthy(); // Validate model output - will throw if no output, fail if missing expected content validateModelOutput(output, '15', 'MCP server test'); - assert.ok(output.includes('15'), 'Expected output to contain the sum (15)'); + expect( + output.includes('15'), + 'Expected output to contain the sum (15)', + ).toBeTruthy(); }); }); diff --git a/integration-tests/test-helper.ts 
b/integration-tests/test-helper.ts index 33443aaf..ac7fec6f 100644 --- a/integration-tests/test-helper.ts +++ b/integration-tests/test-helper.ts @@ -10,7 +10,7 @@ import { mkdirSync, writeFileSync, readFileSync } from 'fs'; import { join, dirname } from 'path'; import { fileURLToPath } from 'url'; import { env } from 'process'; -import { fileExists } from '../scripts/telemetry_utils.js'; +import fs from 'fs'; const __dirname = dirname(fileURLToPath(import.meta.url)); @@ -297,15 +297,12 @@ export class TestRig { } readFile(fileName: string) { - const content = readFileSync(join(this.testDir!, fileName), 'utf-8'); + const filePath = join(this.testDir!, fileName); + const content = readFileSync(filePath, 'utf-8'); if (env.KEEP_OUTPUT === 'true' || env.VERBOSE === 'true') { - const testId = `${env.TEST_FILE_NAME!.replace( - '.test.js', - '', - )}:${this.testName!.replace(/ /g, '-')}`; - console.log(`--- FILE: ${testId}/${fileName} ---`); + console.log(`--- FILE: ${filePath} ---`); console.log(content); - console.log(`--- END FILE: ${testId}/${fileName} ---`); + console.log(`--- END FILE: ${filePath} ---`); } return content; } @@ -336,7 +333,7 @@ export class TestRig { // Wait for telemetry file to exist and have content await this.poll( () => { - if (!fileExists(logFilePath)) return false; + if (!fs.existsSync(logFilePath)) return false; try { const content = readFileSync(logFilePath, 'utf-8'); // Check if file has meaningful content (at least one complete JSON object) @@ -547,7 +544,7 @@ export class TestRig { // Try reading from file first const logFilePath = join(this.testDir!, 'telemetry.log'); - if (fileExists(logFilePath)) { + if (fs.existsSync(logFilePath)) { try { const content = readFileSync(logFilePath, 'utf-8'); if (content && content.includes('"event.name"')) { @@ -581,7 +578,7 @@ export class TestRig { } // Check if file exists, if not return empty array (file might not be created yet) - if (!fileExists(logFilePath)) { + if (!fs.existsSync(logFilePath)) { 
return []; } diff --git a/integration-tests/vitest.config.ts b/integration-tests/vitest.config.ts new file mode 100644 index 00000000..e0c6b848 --- /dev/null +++ b/integration-tests/vitest.config.ts @@ -0,0 +1,18 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { defineConfig } from 'vitest/config'; + +export default defineConfig({ + test: { + testTimeout: 300000, // 5 minutes + globalSetup: './globalSetup.ts', + reporters: ['default'], + include: ['**/*.test.ts'], + retry: 2, + fileParallelism: false, + }, +}); diff --git a/integration-tests/write_file.test.ts b/integration-tests/write_file.test.ts index 7809161e..3fe26af6 100644 --- a/integration-tests/write_file.test.ts +++ b/integration-tests/write_file.test.ts @@ -4,8 +4,7 @@ * SPDX-License-Identifier: Apache-2.0 */ -import { test } from 'node:test'; -import { strict as assert } from 'assert'; +import { describe, it, expect } from 'vitest'; import { TestRig, createToolCallErrorMessage, @@ -13,56 +12,57 @@ import { validateModelOutput, } from './test-helper.js'; -test('should be able to write a file', async () => { - const rig = new TestRig(); - await rig.setup('should be able to write a file'); - const prompt = `show me an example of using the write tool. put a dad joke in dad.txt`; +describe('write_file', () => { + it('should be able to write a file', async () => { + const rig = new TestRig(); + await rig.setup('should be able to write a file'); + const prompt = `show me an example of using the write tool. 
put a dad joke in dad.txt`; - const result = await rig.run(prompt); + const result = await rig.run(prompt); - const foundToolCall = await rig.waitForToolCall('write_file'); + const foundToolCall = await rig.waitForToolCall('write_file'); - // Add debugging information - if (!foundToolCall) { - printDebugInfo(rig, result); - } + // Add debugging information + if (!foundToolCall) { + printDebugInfo(rig, result); + } - const allTools = rig.readToolLogs(); - assert.ok( - foundToolCall, - createToolCallErrorMessage( - 'write_file', - allTools.map((t) => t.toolRequest.name), - result, - ), - ); - - // Validate model output - will throw if no output, warn if missing expected content - validateModelOutput(result, 'dad.txt', 'Write file test'); - - const newFilePath = 'dad.txt'; - - const newFileContent = rig.readFile(newFilePath); - - // Add debugging for file content - if (newFileContent === '') { - console.error('File was created but is empty'); - console.error( - 'Tool calls:', - rig.readToolLogs().map((t) => ({ - name: t.toolRequest.name, - args: t.toolRequest.args, - })), + const allTools = rig.readToolLogs(); + expect(foundToolCall, 'Expected to find a write_file tool call').toBeTruthy( + createToolCallErrorMessage( + 'write_file', + allTools.map((t) => t.toolRequest.name), + result, + ), ); - } - assert.notEqual(newFileContent, '', 'Expected file to have content'); + // Validate model output - will throw if no output, warn if missing expected content + validateModelOutput(result, 'dad.txt', 'Write file test'); - // Log success info if verbose - if (process.env.VERBOSE === 'true') { - console.log( - 'File created successfully with content:', - newFileContent.substring(0, 100) + '...', - ); - } + const newFilePath = 'dad.txt'; + + const newFileContent = rig.readFile(newFilePath); + + // Add debugging for file content + if (newFileContent === '') { + console.error('File was created but is empty'); + console.error( + 'Tool calls:', + rig.readToolLogs().map((t) => ({ + 
name: t.toolRequest.name, + args: t.toolRequest.args, + })), + ); + } + + expect(newFileContent).not.toBe(''); + + // Log success info if verbose + if (process.env.VERBOSE === 'true') { + console.log( + 'File created successfully with content:', + newFileContent.substring(0, 100) + '...', + ); + } + }); }); diff --git a/package.json b/package.json index e5a14de5..8b6d7295 100644 --- a/package.json +++ b/package.json @@ -33,11 +33,11 @@ "test": "npm run test --workspaces --if-present", "test:ci": "npm run test:ci --workspaces --if-present && npm run test:scripts", "test:scripts": "vitest run --config ./scripts/tests/vitest.config.ts", - "test:e2e": "npm run test:integration:sandbox:none -- --verbose --keep-output", + "test:e2e": "cross-env VERBOSE=true KEEP_OUTPUT=true npm run test:integration:sandbox:none", "test:integration:all": "npm run test:integration:sandbox:none && npm run test:integration:sandbox:docker && npm run test:integration:sandbox:podman", - "test:integration:sandbox:none": "GEMINI_SANDBOX=false node integration-tests/run-tests.js", - "test:integration:sandbox:docker": "GEMINI_SANDBOX=docker node integration-tests/run-tests.js", - "test:integration:sandbox:podman": "GEMINI_SANDBOX=podman node integration-tests/run-tests.js", + "test:integration:sandbox:none": "GEMINI_SANDBOX=false vitest run --root ./integration-tests", + "test:integration:sandbox:docker": "npm run build:sandbox && GEMINI_SANDBOX=docker vitest run --root ./integration-tests", + "test:integration:sandbox:podman": "GEMINI_SANDBOX=podman vitest run --root ./integration-tests", "lint": "eslint . --ext .ts,.tsx && eslint integration-tests", "lint:fix": "eslint . --fix && eslint integration-tests --fix", "lint:ci": "eslint . 
--ext .ts,.tsx --max-warnings 0 && eslint integration-tests --max-warnings 0", From 661ee0a79e9981846c307192d1ba9194f843b26a Mon Sep 17 00:00:00 2001 From: Seth Vargo Date: Tue, 12 Aug 2025 21:30:30 -0400 Subject: [PATCH 13/45] chore(ci): Ensure e2e workflow is consistent and not vulnerable to injection attacks (#6098) --- .github/workflows/e2e.yml | 103 +++++++++++++++++++++----------------- 1 file changed, 58 insertions(+), 45 deletions(-) diff --git a/.github/workflows/e2e.yml b/.github/workflows/e2e.yml index a6f6ad5f..3fa9bbc8 100644 --- a/.github/workflows/e2e.yml +++ b/.github/workflows/e2e.yml @@ -1,75 +1,88 @@ -# .github/workflows/e2e.yml - -name: E2E Tests +name: 'E2E Tests' on: push: - branches: [main] + branches: + - 'main' merge_group: jobs: e2e-test-linux: - name: E2E Test (Linux) - ${{ matrix.sandbox }} - runs-on: ubuntu-latest + name: 'E2E Test (Linux) - ${{ matrix.sandbox }}' + runs-on: 'ubuntu-latest' strategy: matrix: - sandbox: [sandbox:none, sandbox:docker] - node-version: [20.x, 22.x, 24.x] + sandbox: + - 'sandbox:none' + - 'sandbox:docker' + node-version: + - '20.x' + - '22.x' + - '24.x' steps: - - name: Checkout repository - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 + - name: 'Checkout' + uses: 'actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8' # ratchet:actions/checkout@v5 - - name: Set up Node.js ${{ matrix.node-version }} - uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # v4 + - name: 'Set up Node.js ${{ matrix.node-version }}' + uses: 'actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020' # ratchet:actions/setup-node@v4 with: - node-version: ${{ matrix.node-version }} + node-version: '${{ matrix.node-version }}' cache: 'npm' - - name: Install dependencies - run: npm ci + - name: 'Install dependencies' + run: |- + npm ci - - name: Build project - run: npm run build + - name: 'Build project' + run: |- + npm run build - - name: Set up Docker - if: matrix.sandbox == 
'sandbox:docker' - uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3 + - name: 'Set up Docker' + if: |- + ${{ matrix.sandbox == 'sandbox:docker' }} + uses: 'docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435' # ratchet:docker/setup-buildx-action@v3 - - name: Set up Podman - if: matrix.sandbox == 'sandbox:podman' - uses: redhat-actions/podman-login@4934294ad0449894bcd1e9f191899d7292469603 # v1 + - name: 'Set up Podman' + if: |- + ${{ matrix.sandbox == 'sandbox:podman' }} + uses: 'redhat-actions/podman-login@4934294ad0449894bcd1e9f191899d7292469603' # ratchet:redhat-actions/podman-login@v1 with: - registry: docker.io - username: ${{ secrets.DOCKERHUB_USERNAME }} - password: ${{ secrets.DOCKERHUB_TOKEN }} + registry: 'docker.io' + username: '${{ secrets.DOCKERHUB_USERNAME }}' + password: '${{ secrets.DOCKERHUB_TOKEN }}' - - name: Run E2E tests + - name: 'Run E2E tests' env: - GEMINI_API_KEY: ${{ secrets.GEMINI_API_KEY }} - VERBOSE: true - KEEP_OUTPUT: true - run: npm run test:integration:${{ matrix.sandbox }} + GEMINI_API_KEY: '${{ secrets.GEMINI_API_KEY }}' + KEEP_OUTPUT: 'true' + SANDBOX: '${{ matrix.sandbox }}' + VERBOSE: 'true' + run: |- + npm run "test:integration:${SANDBOX}" e2e-test-macos: - name: E2E Test - macOS - runs-on: macos-latest + name: 'E2E Test - macOS' + runs-on: 'macos-latest' steps: - - name: Checkout repository - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 + - name: 'Checkout' + uses: 'actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8' # ratchet:actions/checkout@v5 - - name: Set up Node.js - uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # v4 + - name: 'Set up Node.js' + uses: 'actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020' # ratchet:actions/setup-node@v4 with: - node-version: 20.x + node-version-file: '.nvmrc' cache: 'npm' - - name: Install dependencies - run: npm ci + - name: 'Install dependencies' + run: |- + npm ci - - name: Build 
project - run: npm run build + - name: 'Build project' + run: |- + npm run build - - name: Run E2E tests + - name: 'Run E2E tests' env: - GEMINI_API_KEY: ${{ secrets.GEMINI_API_KEY }} - run: npm run test:e2e + GEMINI_API_KEY: '${{ secrets.GEMINI_API_KEY }}' + run: |- + npm run test:e2e From 806af05b97f907b38731165d08486f3c00355bb4 Mon Sep 17 00:00:00 2001 From: Seth Vargo Date: Tue, 12 Aug 2025 21:34:57 -0400 Subject: [PATCH 14/45] chore(ci): Ensure stale and no-response workflows are consistent and not vulnerable to injection attacks (#6097) --- .github/workflows/no-response.yml | 23 ++++++++++++----------- .github/workflows/stale.yml | 31 ++++++++++++++++--------------- 2 files changed, 28 insertions(+), 26 deletions(-) diff --git a/.github/workflows/no-response.yml b/.github/workflows/no-response.yml index 3d3d8e7e..abaad9db 100644 --- a/.github/workflows/no-response.yml +++ b/.github/workflows/no-response.yml @@ -1,32 +1,33 @@ -name: No Response +name: 'No Response' # Run as a daily cron at 1:45 AM on: schedule: - cron: '45 1 * * *' - workflow_dispatch: {} + workflow_dispatch: jobs: no-response: - runs-on: ubuntu-latest - if: ${{ github.repository == 'google-gemini/gemini-cli' }} + runs-on: 'ubuntu-latest' + if: |- + ${{ github.repository == 'google-gemini/gemini-cli' }} permissions: - issues: write - pull-requests: write + issues: 'write' + pull-requests: 'write' concurrency: - group: ${{ github.workflow }}-no-response + group: '${{ github.workflow }}-no-response' cancel-in-progress: true steps: - - uses: actions/stale@5bef64f19d7facfb25b37b414482c7164d639639 + - uses: 'actions/stale@5bef64f19d7facfb25b37b414482c7164d639639' # ratchet:actions/stale@v9 with: - repo-token: ${{ secrets.GITHUB_TOKEN }} + repo-token: '${{ secrets.GITHUB_TOKEN }}' days-before-stale: -1 days-before-close: 14 stale-issue-label: 'status/need-information' - close-issue-message: > + close-issue-message: >- This issue was marked as needing more information and has not received a response 
in 14 days. Closing it for now. If you still face this problem, feel free to reopen with more details. Thank you! stale-pr-label: 'status/need-information' - close-pr-message: > + close-pr-message: >- This pull request was marked as needing more information and has had no updates in 14 days. Closing it for now. You are welcome to reopen with the required info. Thanks for contributing! diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml index 914e9d57..87354b57 100644 --- a/.github/workflows/stale.yml +++ b/.github/workflows/stale.yml @@ -1,38 +1,39 @@ -name: Mark stale issues and pull requests +name: 'Mark stale issues and pull requests' # Run as a daily cron at 1:30 AM on: schedule: - cron: '30 1 * * *' - workflow_dispatch: {} + workflow_dispatch: jobs: stale: - runs-on: ubuntu-latest - if: ${{ github.repository == 'google-gemini/gemini-cli' }} + runs-on: 'ubuntu-latest' + if: |- + ${{ github.repository == 'google-gemini/gemini-cli' }} permissions: - issues: write - pull-requests: write + issues: 'write' + pull-requests: 'write' concurrency: - group: ${{ github.workflow }}-stale + group: '${{ github.workflow }}-stale' cancel-in-progress: true steps: - - uses: actions/stale@5bef64f19d7facfb25b37b414482c7164d639639 + - uses: 'actions/stale@5bef64f19d7facfb25b37b414482c7164d639639' # ratchet:actions/stale@v9 with: - repo-token: ${{ secrets.GITHUB_TOKEN }} - stale-issue-message: > + repo-token: '${{ secrets.GITHUB_TOKEN }}' + stale-issue-message: >- This issue has been automatically marked as stale due to 60 days of inactivity. It will be closed in 14 days if no further activity occurs. - stale-pr-message: > + stale-pr-message: >- This pull request has been automatically marked as stale due to 60 days of inactivity. It will be closed in 14 days if no further activity occurs. - close-issue-message: > + close-issue-message: >- This issue has been closed due to 14 additional days of inactivity after being marked as stale. 
If you believe this is still relevant, feel free to comment or reopen the issue. Thank you! - close-pr-message: > + close-pr-message: >- This pull request has been closed due to 14 additional days of inactivity after being marked as stale. If this is still relevant, you are welcome to reopen or leave a comment. Thanks for contributing! days-before-stale: 60 days-before-close: 14 - exempt-issue-labels: pinned,security - exempt-pr-labels: pinned,security + exempt-issue-labels: 'pinned,security' + exempt-pr-labels: 'pinned,security' From 431a312d4d4d8686e1009eef3c9a02032ccfd356 Mon Sep 17 00:00:00 2001 From: Jerop Kipruto Date: Wed, 13 Aug 2025 10:38:45 +0900 Subject: [PATCH 15/45] Show OpenTelemetry SDK initialization & shutdown in debug mode only (#6096) --- packages/cli/src/nonInteractiveCli.ts | 2 +- packages/core/src/telemetry/sdk.ts | 18 +++++++++++++----- packages/core/src/telemetry/telemetry.test.ts | 4 ++-- 3 files changed, 16 insertions(+), 8 deletions(-) diff --git a/packages/cli/src/nonInteractiveCli.ts b/packages/cli/src/nonInteractiveCli.ts index 95ed70cf..c237e56b 100644 --- a/packages/cli/src/nonInteractiveCli.ts +++ b/packages/cli/src/nonInteractiveCli.ts @@ -143,7 +143,7 @@ export async function runNonInteractive( } finally { consolePatcher.cleanup(); if (isTelemetrySdkInitialized()) { - await shutdownTelemetry(); + await shutdownTelemetry(config); } } } diff --git a/packages/core/src/telemetry/sdk.ts b/packages/core/src/telemetry/sdk.ts index 1167750a..c6630236 100644 --- a/packages/core/src/telemetry/sdk.ts +++ b/packages/core/src/telemetry/sdk.ts @@ -125,25 +125,33 @@ export function initializeTelemetry(config: Config): void { try { sdk.start(); - console.log('OpenTelemetry SDK started successfully.'); + if (config.getDebugMode()) { + console.log('OpenTelemetry SDK started successfully.'); + } telemetryInitialized = true; initializeMetrics(config); } catch (error) { console.error('Error starting OpenTelemetry SDK:', error); } - 
process.on('SIGTERM', shutdownTelemetry); - process.on('SIGINT', shutdownTelemetry); + process.on('SIGTERM', () => { + shutdownTelemetry(config); + }); + process.on('SIGINT', () => { + shutdownTelemetry(config); + }); } -export async function shutdownTelemetry(): Promise { +export async function shutdownTelemetry(config: Config): Promise { if (!telemetryInitialized || !sdk) { return; } try { ClearcutLogger.getInstance()?.shutdown(); await sdk.shutdown(); - console.log('OpenTelemetry SDK shut down successfully.'); + if (config.getDebugMode()) { + console.log('OpenTelemetry SDK shut down successfully.'); + } } catch (error) { console.error('Error shutting down SDK:', error); } finally { diff --git a/packages/core/src/telemetry/telemetry.test.ts b/packages/core/src/telemetry/telemetry.test.ts index 9734e382..15bd2e95 100644 --- a/packages/core/src/telemetry/telemetry.test.ts +++ b/packages/core/src/telemetry/telemetry.test.ts @@ -45,7 +45,7 @@ describe('telemetry', () => { afterEach(async () => { // Ensure we shut down telemetry even if a test fails. 
if (isTelemetrySdkInitialized()) { - await shutdownTelemetry(); + await shutdownTelemetry(mockConfig); } }); @@ -57,7 +57,7 @@ describe('telemetry', () => { it('should shutdown the telemetry service', async () => { initializeTelemetry(mockConfig); - await shutdownTelemetry(); + await shutdownTelemetry(mockConfig); expect(mockNodeSdk.shutdown).toHaveBeenCalled(); }); From 9912577a2b425e3f1f5eb16a3ae3b9da0e49466d Mon Sep 17 00:00:00 2001 From: Seth Vargo Date: Tue, 12 Aug 2025 22:12:28 -0400 Subject: [PATCH 16/45] chore(ci): Ensure docs-gen workflow are not vulnerable to injection attacks (#6100) --- ...release-docker.yaml => release-docker.yml} | 0 .github/ISSUE_TEMPLATE/bug_report.yml | 6 +- .github/ISSUE_TEMPLATE/feature_request.yml | 4 +- .github/workflows/docs-page-action.yml | 68 +++++++++---------- 4 files changed, 37 insertions(+), 41 deletions(-) rename .gcp/{release-docker.yaml => release-docker.yml} (100%) diff --git a/.gcp/release-docker.yaml b/.gcp/release-docker.yml similarity index 100% rename from .gcp/release-docker.yaml rename to .gcp/release-docker.yml diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml index 9f78e2fe..cb6a456a 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.yml +++ b/.github/ISSUE_TEMPLATE/bug_report.yml @@ -4,8 +4,8 @@ labels: ['kind/bug', 'status/need-triage'] body: - type: markdown attributes: - value: | - > [!IMPORTANT] + value: |- + > [!IMPORTANT] > Thanks for taking the time to fill out this bug report! > > Please search **[existing issues](https://github.com/google-gemini/gemini-cli/issues)** to see if an issue already exists for the bug you encountered. @@ -30,7 +30,7 @@ body: attributes: label: Client information description: Please paste the full text from the `/about` command run from Gemini CLI. Also include which platform (macOS, Windows, Linux). - value: | + value: |-
```console diff --git a/.github/ISSUE_TEMPLATE/feature_request.yml b/.github/ISSUE_TEMPLATE/feature_request.yml index c08de46a..bf4d4871 100644 --- a/.github/ISSUE_TEMPLATE/feature_request.yml +++ b/.github/ISSUE_TEMPLATE/feature_request.yml @@ -4,8 +4,8 @@ labels: ['kind/enhancement', 'status/need-triage'] body: - type: markdown attributes: - value: | - > [!IMPORTANT] + value: |- + > [!IMPORTANT] > Thanks for taking the time to suggest an enhancement! > > Please search **[existing issues](https://github.com/google-gemini/gemini-cli/issues)** to see if a similar feature has already been requested. diff --git a/.github/workflows/docs-page-action.yml b/.github/workflows/docs-page-action.yml index 0c28dca6..59588109 100644 --- a/.github/workflows/docs-page-action.yml +++ b/.github/workflows/docs-page-action.yml @@ -1,54 +1,50 @@ -# Sample workflow for building and deploying a Jekyll site to GitHub Pages -name: Deploy Jekyll with GitHub Pages dependencies preinstalled +name: 'Deploy GitHub Pages' on: - # Runs on pushes targeting the default branch push: tags: 'v*' - - # Allows you to run this workflow manually from the Actions tab workflow_dispatch: -# Sets permissions of the GITHUB_TOKEN to allow deployment to GitHub Pages permissions: - contents: read - pages: write - id-token: write + contents: 'read' + pages: 'write' + id-token: 'write' -# Allow only one concurrent deployment, skipping runs queued between the run in-progress and latest queued. -# However, do NOT cancel in-progress runs as we want to allow these production deployments to complete. +# Allow only one concurrent deployment, skipping runs queued between the run +# in-progress and latest queued. However, do NOT cancel in-progress runs as we +# want to allow these production deployments to complete. concurrency: - group: 'pages' + group: '${{ github.workflow }}' cancel-in-progress: false jobs: build: - # This 'if' condition is the key. 
It ensures the job only runs if the - # tag name does NOT contain the substring 'nightly'. - if: "contains(github.ref_name, 'nightly') == false" - # Build job - runs-on: ubuntu-latest + if: |- + ${{ !contains(github.ref_name, 'nightly') }} + runs-on: 'ubuntu-latest' steps: - - name: Checkout - uses: actions/checkout@v4 - - name: Setup Pages - uses: actions/configure-pages@v5 - - name: Build with Jekyll - uses: actions/jekyll-build-pages@v1 - with: - source: ./ - destination: ./_site - - name: Upload artifact - uses: actions/upload-pages-artifact@v3 + - name: 'Checkout' + uses: 'actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8' # ratchet:actions/checkout@v5 + + - name: 'Setup Pages' + uses: 'actions/configure-pages@983d7736d9b0ae728b81ab479565c72886d7745b' # ratchet:actions/configure-pages@v5 + + - name: 'Build with Jekyll' + uses: 'actions/jekyll-build-pages@44a6e6beabd48582f863aeeb6cb2151cc1716697' # ratchet:actions/jekyll-build-pages@v1 + with: + source: './' + destination: './_site' + + - name: 'Upload artifact' + uses: 'actions/upload-pages-artifact@56afc609e74202658d3ffba0e8f6dda462b719fa' # ratchet:actions/upload-pages-artifact@v3 - # Deployment job deploy: environment: - name: github-pages - url: ${{ steps.deployment.outputs.page_url }} - runs-on: ubuntu-latest - needs: build + name: 'github-pages' + url: '${{ steps.deployment.outputs.page_url }}' + runs-on: 'ubuntu-latest' + needs: 'build' steps: - - name: Deploy to GitHub Pages - id: deployment - uses: actions/deploy-pages@v4 + - name: 'Deploy to GitHub Pages' + id: 'deployment' + uses: actions/deploy-pages@d6db90164ac5ed86f2b6aed7e0febac5b3c0c03e # ratchet:actions/deploy-pages@v4 From 0e8bbfb8ba2b02f8e80850c7f6b444c72fbd7340 Mon Sep 17 00:00:00 2001 From: Seth Vargo Date: Tue, 12 Aug 2025 22:17:04 -0400 Subject: [PATCH 17/45] chore: update bash to follow shellcheck recommendations (#6102) --- .github/scripts/pr-triage.sh | 128 +++++++++++++++++++---------------- scripts/create_alias.sh | 29 
++++---- 2 files changed, 84 insertions(+), 73 deletions(-) diff --git a/.github/scripts/pr-triage.sh b/.github/scripts/pr-triage.sh index 6b60432b..985b3ffc 100755 --- a/.github/scripts/pr-triage.sh +++ b/.github/scripts/pr-triage.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash set -euo pipefail # Initialize a comma-separated string to hold PR numbers that need a comment @@ -6,13 +6,23 @@ PRS_NEEDING_COMMENT="" # Function to process a single PR process_pr() { + if [[ -z "${GITHUB_REPOSITORY:-}" ]]; then + echo "‼️ Missing \$GITHUB_REPOSITORY - this must be run from GitHub Actions" + return 1 + fi + + if [[ -z "${GITHUB_OUTPUT:-}" ]]; then + echo "‼️ Missing \$GITHUB_OUTPUT - this must be run from GitHub Actions" + return 1 + fi + local PR_NUMBER=$1 - echo "🔄 Processing PR #$PR_NUMBER" + echo "🔄 Processing PR #${PR_NUMBER}" # Get PR body with error handling local PR_BODY - if ! PR_BODY=$(gh pr view "$PR_NUMBER" --repo "$GITHUB_REPOSITORY" --json body -q .body 2>/dev/null); then - echo " ⚠️ Could not fetch PR #$PR_NUMBER details" + if ! PR_BODY=$(gh pr view "${PR_NUMBER}" --repo "${GITHUB_REPOSITORY}" --json body -q .body 2>/dev/null); then + echo " ⚠️ Could not fetch PR #${PR_NUMBER} details" return 1 fi @@ -20,67 +30,67 @@ process_pr() { local ISSUE_NUMBER="" # Pattern 1: Direct reference like #123 - if [ -z "$ISSUE_NUMBER" ]; then - ISSUE_NUMBER=$(echo "$PR_BODY" | grep -oE '#[0-9]+' | head -1 | sed 's/#//' 2>/dev/null || echo "") + if [[ -z "${ISSUE_NUMBER}" ]]; then + ISSUE_NUMBER=$(echo "${PR_BODY}" | grep -oE '#[0-9]+' | head -1 | sed 's/#//' 2>/dev/null || echo "") fi # Pattern 2: Closes/Fixes/Resolves patterns (case-insensitive) - if [ -z "$ISSUE_NUMBER" ]; then - ISSUE_NUMBER=$(echo "$PR_BODY" | grep -iE '(closes?|fixes?|resolves?) #[0-9]+' | grep -oE '#[0-9]+' | head -1 | sed 's/#//' 2>/dev/null || echo "") + if [[ -z "${ISSUE_NUMBER}" ]]; then + ISSUE_NUMBER=$(echo "${PR_BODY}" | grep -iE '(closes?|fixes?|resolves?) 
#[0-9]+' | grep -oE '#[0-9]+' | head -1 | sed 's/#//' 2>/dev/null || echo "") fi - if [ -z "$ISSUE_NUMBER" ]; then - echo "⚠️ No linked issue found for PR #$PR_NUMBER, adding status/need-issue label" - if ! gh pr edit "$PR_NUMBER" --repo "$GITHUB_REPOSITORY" --add-label "status/need-issue" 2>/dev/null; then + if [[ -z "${ISSUE_NUMBER}" ]]; then + echo "⚠️ No linked issue found for PR #${PR_NUMBER}, adding status/need-issue label" + if ! gh pr edit "${PR_NUMBER}" --repo "${GITHUB_REPOSITORY}" --add-label "status/need-issue" 2>/dev/null; then echo " ⚠️ Failed to add label (may already exist or have permission issues)" fi # Add PR number to the list - if [ -z "$PRS_NEEDING_COMMENT" ]; then - PRS_NEEDING_COMMENT="$PR_NUMBER" + if [[ -z "${PRS_NEEDING_COMMENT}" ]]; then + PRS_NEEDING_COMMENT="${PR_NUMBER}" else - PRS_NEEDING_COMMENT="$PRS_NEEDING_COMMENT,$PR_NUMBER" + PRS_NEEDING_COMMENT="${PRS_NEEDING_COMMENT},${PR_NUMBER}" fi - echo "needs_comment=true" >> $GITHUB_OUTPUT + echo "needs_comment=true" >> "${GITHUB_OUTPUT}" else - echo "🔗 Found linked issue #$ISSUE_NUMBER" + echo "🔗 Found linked issue #${ISSUE_NUMBER}" # Remove status/need-issue label if present - if ! gh pr edit "$PR_NUMBER" --repo "$GITHUB_REPOSITORY" --remove-label "status/need-issue" 2>/dev/null; then + if ! gh pr edit "${PR_NUMBER}" --repo "${GITHUB_REPOSITORY}" --remove-label "status/need-issue" 2>/dev/null; then echo " status/need-issue label not present or could not be removed" fi # Get issue labels - echo "📥 Fetching labels from issue #$ISSUE_NUMBER" + echo "📥 Fetching labels from issue #${ISSUE_NUMBER}" local ISSUE_LABELS="" - if ! ISSUE_LABELS=$(gh issue view "$ISSUE_NUMBER" --repo "$GITHUB_REPOSITORY" --json labels -q '.labels[].name' 2>/dev/null | tr '\n' ',' | sed 's/,$//' || echo ""); then - echo " ⚠️ Could not fetch issue #$ISSUE_NUMBER (may not exist or be in different repo)" + if ! 
ISSUE_LABELS=$(gh issue view "${ISSUE_NUMBER}" --repo "${GITHUB_REPOSITORY}" --json labels -q '.labels[].name' 2>/dev/null | tr '\n' ',' | sed 's/,$//' || echo ""); then + echo " ⚠️ Could not fetch issue #${ISSUE_NUMBER} (may not exist or be in different repo)" ISSUE_LABELS="" fi # Get PR labels - echo "📥 Fetching labels from PR #$PR_NUMBER" + echo "📥 Fetching labels from PR #${PR_NUMBER}" local PR_LABELS="" - if ! PR_LABELS=$(gh pr view "$PR_NUMBER" --repo "$GITHUB_REPOSITORY" --json labels -q '.labels[].name' 2>/dev/null | tr '\n' ',' | sed 's/,$//' || echo ""); then + if ! PR_LABELS=$(gh pr view "${PR_NUMBER}" --repo "${GITHUB_REPOSITORY}" --json labels -q '.labels[].name' 2>/dev/null | tr '\n' ',' | sed 's/,$//' || echo ""); then echo " ⚠️ Could not fetch PR labels" PR_LABELS="" fi - echo " Issue labels: $ISSUE_LABELS" - echo " PR labels: $PR_LABELS" + echo " Issue labels: ${ISSUE_LABELS}" + echo " PR labels: ${PR_LABELS}" # Convert comma-separated strings to arrays local ISSUE_LABEL_ARRAY PR_LABEL_ARRAY - IFS=',' read -ra ISSUE_LABEL_ARRAY <<< "$ISSUE_LABELS" - IFS=',' read -ra PR_LABEL_ARRAY <<< "$PR_LABELS" + IFS=',' read -ra ISSUE_LABEL_ARRAY <<< "${ISSUE_LABELS}" + IFS=',' read -ra PR_LABEL_ARRAY <<< "${PR_LABELS}" # Find labels to add (on issue but not on PR) local LABELS_TO_ADD="" for label in "${ISSUE_LABEL_ARRAY[@]}"; do - if [ -n "$label" ] && [[ ! " ${PR_LABEL_ARRAY[*]} " =~ " ${label} " ]]; then - if [ -z "$LABELS_TO_ADD" ]; then - LABELS_TO_ADD="$label" + if [[ -n "${label}" ]] && [[ " ${PR_LABEL_ARRAY[*]} " != *" ${label} "* ]]; then + if [[ -z "${LABELS_TO_ADD}" ]]; then + LABELS_TO_ADD="${label}" else - LABELS_TO_ADD="$LABELS_TO_ADD,$label" + LABELS_TO_ADD="${LABELS_TO_ADD},${label}" fi fi done @@ -88,65 +98,65 @@ process_pr() { # Find labels to remove (on PR but not on issue) local LABELS_TO_REMOVE="" for label in "${PR_LABEL_ARRAY[@]}"; do - if [ -n "$label" ] && [[ ! 
" ${ISSUE_LABEL_ARRAY[*]} " =~ " ${label} " ]]; then + if [[ -n "${label}" ]] && [[ " ${ISSUE_LABEL_ARRAY[*]} " != *" ${label} "* ]]; then # Don't remove status/need-issue since we already handled it - if [ "$label" != "status/need-issue" ]; then - if [ -z "$LABELS_TO_REMOVE" ]; then - LABELS_TO_REMOVE="$label" + if [[ "${label}" != "status/need-issue" ]]; then + if [[ -z "${LABELS_TO_REMOVE}" ]]; then + LABELS_TO_REMOVE="${label}" else - LABELS_TO_REMOVE="$LABELS_TO_REMOVE,$label" + LABELS_TO_REMOVE="${LABELS_TO_REMOVE},${label}" fi fi fi done # Apply label changes - if [ -n "$LABELS_TO_ADD" ]; then - echo "➕ Adding labels: $LABELS_TO_ADD" - if ! gh pr edit "$PR_NUMBER" --repo "$GITHUB_REPOSITORY" --add-label "$LABELS_TO_ADD" 2>/dev/null; then + if [[ -n "${LABELS_TO_ADD}" ]]; then + echo "➕ Adding labels: ${LABELS_TO_ADD}" + if ! gh pr edit "${PR_NUMBER}" --repo "${GITHUB_REPOSITORY}" --add-label "${LABELS_TO_ADD}" 2>/dev/null; then echo " ⚠️ Failed to add some labels" fi fi - if [ -n "$LABELS_TO_REMOVE" ]; then - echo "➖ Removing labels: $LABELS_TO_REMOVE" - if ! gh pr edit "$PR_NUMBER" --repo "$GITHUB_REPOSITORY" --remove-label "$LABELS_TO_REMOVE" 2>/dev/null; then + if [[ -n "${LABELS_TO_REMOVE}" ]]; then + echo "➖ Removing labels: ${LABELS_TO_REMOVE}" + if ! gh pr edit "${PR_NUMBER}" --repo "${GITHUB_REPOSITORY}" --remove-label "${LABELS_TO_REMOVE}" 2>/dev/null; then echo " ⚠️ Failed to remove some labels" fi fi - if [ -z "$LABELS_TO_ADD" ] && [ -z "$LABELS_TO_REMOVE" ]; then + if [[ -z "${LABELS_TO_ADD}" ]] && [[ -z "${LABELS_TO_REMOVE}" ]]; then echo "✅ Labels already synchronized" fi - echo "needs_comment=false" >> $GITHUB_OUTPUT + echo "needs_comment=false" >> "${GITHUB_OUTPUT}" fi } # If PR_NUMBER is set, process only that PR -if [ -n "${PR_NUMBER:-}" ]; then - if ! process_pr "$PR_NUMBER"; then - echo "❌ Failed to process PR #$PR_NUMBER" +if [[ -n "${PR_NUMBER:-}" ]]; then + if ! 
process_pr "${PR_NUMBER}"; then + echo "❌ Failed to process PR #${PR_NUMBER}" exit 1 fi else # Otherwise, get all open PRs and process them # The script logic will determine which ones need issue linking or label sync echo "📥 Getting all open pull requests..." - if ! PR_NUMBERS=$(gh pr list --repo "$GITHUB_REPOSITORY" --state open --limit 1000 --json number -q '.[].number' 2>/dev/null); then + if ! PR_NUMBERS=$(gh pr list --repo "${GITHUB_REPOSITORY}" --state open --limit 1000 --json number -q '.[].number' 2>/dev/null); then echo "❌ Failed to fetch PR list" exit 1 fi - - if [ -z "$PR_NUMBERS" ]; then + + if [[ -z "${PR_NUMBERS}" ]]; then echo "✅ No open PRs found" else # Count the number of PRs - PR_COUNT=$(echo "$PR_NUMBERS" | wc -w | tr -d ' ') - echo "📊 Found $PR_COUNT open PRs to process" - - for pr_number in $PR_NUMBERS; do - if ! process_pr "$pr_number"; then - echo "⚠️ Failed to process PR #$pr_number, continuing with next PR..." + PR_COUNT=$(echo "${PR_NUMBERS}" | wc -w | tr -d ' ') + echo "📊 Found ${PR_COUNT} open PRs to process" + + for pr_number in ${PR_NUMBERS}; do + if ! process_pr "${pr_number}"; then + echo "⚠️ Failed to process PR #${pr_number}, continuing with next PR..." 
continue fi done @@ -154,10 +164,10 @@ else fi # Ensure output is always set, even if empty -if [ -z "$PRS_NEEDING_COMMENT" ]; then - echo "prs_needing_comment=[]" >> $GITHUB_OUTPUT +if [[ -z "${PRS_NEEDING_COMMENT}" ]]; then + echo "prs_needing_comment=[]" >> "${GITHUB_OUTPUT}" else - echo "prs_needing_comment=[$PRS_NEEDING_COMMENT]" >> $GITHUB_OUTPUT + echo "prs_needing_comment=[${PRS_NEEDING_COMMENT}]" >> "${GITHUB_OUTPUT}" fi -echo "✅ PR triage completed" \ No newline at end of file +echo "✅ PR triage completed" diff --git a/scripts/create_alias.sh b/scripts/create_alias.sh index ccaf3dd4..ecb01bb3 100755 --- a/scripts/create_alias.sh +++ b/scripts/create_alias.sh @@ -1,38 +1,39 @@ -#!/bin/bash +#!/usr/bin/env bash +set -euo pipefail # This script creates an alias for the Gemini CLI # Determine the project directory PROJECT_DIR=$(cd "$(dirname "$0")/.." && pwd) -ALIAS_COMMAND="alias gemini='node $PROJECT_DIR/scripts/start.js'" +ALIAS_COMMAND="alias gemini='node "${PROJECT_DIR}/scripts/start.js"'" # Detect shell and set config file path -if [[ "$SHELL" == *"/bash" ]]; then - CONFIG_FILE="$HOME/.bashrc" -elif [[ "$SHELL" == *"/zsh" ]]; then - CONFIG_FILE="$HOME/.zshrc" +if [[ "${SHELL}" == *"/bash" ]]; then + CONFIG_FILE="${HOME}/.bashrc" +elif [[ "${SHELL}" == *"/zsh" ]]; then + CONFIG_FILE="${HOME}/.zshrc" else echo "Unsupported shell. Only bash and zsh are supported." exit 1 fi -echo "This script will add the following alias to your shell configuration file ($CONFIG_FILE):" -echo " $ALIAS_COMMAND" +echo "This script will add the following alias to your shell configuration file (${CONFIG_FILE}):" +echo " ${ALIAS_COMMAND}" echo "" # Check if the alias already exists -if grep -q "alias gemini=" "$CONFIG_FILE"; then - echo "A 'gemini' alias already exists in $CONFIG_FILE. No changes were made." +if grep -q "alias gemini=" "${CONFIG_FILE}"; then + echo "A 'gemini' alias already exists in ${CONFIG_FILE}. No changes were made." 
exit 0 fi read -p "Do you want to proceed? (y/n) " -n 1 -r echo "" -if [[ $REPLY =~ ^[Yy]$ ]]; then - echo "$ALIAS_COMMAND" >> "$CONFIG_FILE" +if [[ "${REPLY}" =~ ^[Yy]$ ]]; then + echo "${ALIAS_COMMAND}" >> "${CONFIG_FILE}" echo "" - echo "Alias added to $CONFIG_FILE." - echo "Please run 'source $CONFIG_FILE' or open a new terminal to use the 'gemini' command." + echo "Alias added to ${CONFIG_FILE}." + echo "Please run 'source ${CONFIG_FILE}' or open a new terminal to use the 'gemini' command." else echo "Aborted. No changes were made." fi From b6da98e8e9ecaebc95bbc0cd0e2b93bd4a81b07d Mon Sep 17 00:00:00 2001 From: Seth Vargo Date: Tue, 12 Aug 2025 22:36:37 -0400 Subject: [PATCH 18/45] chore(ci): Ensure action and community workflows are consistent and not vulnerable to injection attacks (#6107) --- .../actions/post-coverage-comment/action.yml | 125 +++++++++-------- .github/workflows/community-report.yml | 131 +++++++++--------- docs/npm.md | 2 +- 3 files changed, 136 insertions(+), 122 deletions(-) diff --git a/.github/actions/post-coverage-comment/action.yml b/.github/actions/post-coverage-comment/action.yml index 10a4afeb..6862e6be 100644 --- a/.github/actions/post-coverage-comment/action.yml +++ b/.github/actions/post-coverage-comment/action.yml @@ -27,79 +27,88 @@ inputs: runs: using: 'composite' steps: - - name: Prepare Coverage Comment - id: prep_coverage_comment - shell: bash - run: | - cli_json_file="${{ inputs.cli_json_file }}" - core_json_file="${{ inputs.core_json_file }}" - cli_full_text_summary_file="${{ inputs.cli_full_text_summary_file }}" - core_full_text_summary_file="${{ inputs.core_full_text_summary_file }}" - comment_file="coverage-comment.md" - + - name: 'Prepare Coverage Comment' + id: 'prep_coverage_comment' + shell: 'bash' + env: + CLI_JSON_FILE: '${{ inputs.cli_json_file }}' + CORE_JSON_FILE: '${{ inputs.core_json_file }}' + CLI_FULL_TEXT_SUMMARY_FILE: '${{ inputs.cli_full_text_summary_file }}' + CORE_FULL_TEXT_SUMMARY_FILE: '${{ 
inputs.core_full_text_summary_file }}' + COMMENT_FILE: 'coverage-comment.md' + NODE_VERSION: '${{ inputs.node_version }}' + OS: '${{ inputs.os }}' + run: |- # Extract percentages using jq for the main table - if [ -f "$cli_json_file" ]; then - cli_lines_pct=$(jq -r '.total.lines.pct' "$cli_json_file") - cli_statements_pct=$(jq -r '.total.statements.pct' "$cli_json_file") - cli_functions_pct=$(jq -r '.total.functions.pct' "$cli_json_file") - cli_branches_pct=$(jq -r '.total.branches.pct' "$cli_json_file") + if [ -f "${CLI_JSON_FILE}" ]; then + cli_lines_pct="$(jq -r '.total.lines.pct' "${CLI_JSON_FILE}")" + cli_statements_pct="$(jq -r '.total.statements.pct' "${CLI_JSON_FILE}")" + cli_functions_pct="$(jq -r '.total.functions.pct' "${CLI_JSON_FILE}")" + cli_branches_pct="$(jq -r '.total.branches.pct' "${CLI_JSON_FILE}")" else - cli_lines_pct="N/A"; cli_statements_pct="N/A"; cli_functions_pct="N/A"; cli_branches_pct="N/A" - echo "CLI coverage-summary.json not found at: $cli_json_file" >&2 # Error to stderr + cli_lines_pct="N/A" + cli_statements_pct="N/A" + cli_functions_pct="N/A" + cli_branches_pct="N/A" + echo "CLI coverage-summary.json not found at: ${CLI_JSON_FILE}" >&2 # Error to stderr fi - if [ -f "$core_json_file" ]; then - core_lines_pct=$(jq -r '.total.lines.pct' "$core_json_file") - core_statements_pct=$(jq -r '.total.statements.pct' "$core_json_file") - core_functions_pct=$(jq -r '.total.functions.pct' "$core_json_file") - core_branches_pct=$(jq -r '.total.branches.pct' "$core_json_file") + if [ -f "${CORE_JSON_FILE}" ]; then + core_lines_pct="$(jq -r '.total.lines.pct' "${CORE_JSON_FILE}")" + core_statements_pct="$(jq -r '.total.statements.pct' "${CORE_JSON_FILE}")" + core_functions_pct="$(jq -r '.total.functions.pct' "${CORE_JSON_FILE}")" + core_branches_pct="$(jq -r '.total.branches.pct' "${CORE_JSON_FILE}")" else - core_lines_pct="N/A"; core_statements_pct="N/A"; core_functions_pct="N/A"; core_branches_pct="N/A" - echo "Core coverage-summary.json not 
found at: $core_json_file" >&2 # Error to stderr + core_lines_pct="N/A" + core_statements_pct="N/A" + core_functions_pct="N/A" + core_branches_pct="N/A" + echo "Core coverage-summary.json not found at: ${CORE_JSON_FILE}" >&2 # Error to stderr fi - echo "## Code Coverage Summary" > "$comment_file" - echo "" >> "$comment_file" - echo "| Package | Lines | Statements | Functions | Branches |" >> "$comment_file" - echo "|---|---|---|---|---|" >> "$comment_file" - echo "| CLI | ${cli_lines_pct}% | ${cli_statements_pct}% | ${cli_functions_pct}% | ${cli_branches_pct}% |" >> "$comment_file" - echo "| Core | ${core_lines_pct}% | ${core_statements_pct}% | ${core_functions_pct}% | ${core_branches_pct}% |" >> "$comment_file" - echo "" >> "$comment_file" + echo "## Code Coverage Summary" > "${COMMENT_FILE}" + echo "" >> "${COMMENT_FILE}" + echo "| Package | Lines | Statements | Functions | Branches |" >> "${COMMENT_FILE}" + echo "|---|---|---|---|---|" >> "${COMMENT_FILE}" + echo "| CLI | ${cli_lines_pct}% | ${cli_statements_pct}% | ${cli_functions_pct}% | ${cli_branches_pct}% |" >> "${COMMENT_FILE}" + echo "| Core | ${core_lines_pct}% | ${core_statements_pct}% | ${core_functions_pct}% | ${core_branches_pct}% |" >> "${COMMENT_FILE}" + echo "" >> "${COMMENT_FILE}" # CLI Package - Collapsible Section (with full text summary from file) - echo "
" >> "$comment_file" - echo "CLI Package - Full Text Report" >> "$comment_file" - echo "" >> "$comment_file" - echo '```text' >> "$comment_file" - if [ -f "$cli_full_text_summary_file" ]; then - cat "$cli_full_text_summary_file" >> "$comment_file" + echo "
" >> "${COMMENT_FILE}" + echo "CLI Package - Full Text Report" >> "${COMMENT_FILE}" + echo "" >> "${COMMENT_FILE}" + echo '```text' >> "${COMMENT_FILE}" + if [ -f "${CLI_FULL_TEXT_SUMMARY_FILE}" ]; then + cat "${CLI_FULL_TEXT_SUMMARY_FILE}" >> "${COMMENT_FILE}" else - echo "CLI full-text-summary.txt not found at: $cli_full_text_summary_file" >> "$comment_file" + echo "CLI full-text-summary.txt not found at: ${CLI_FULL_TEXT_SUMMARY_FILE}" >> "${COMMENT_FILE}" fi - echo '```' >> "$comment_file" - echo "
" >> "$comment_file" - echo "" >> "$comment_file" + echo '```' >> "${COMMENT_FILE}" + echo "
" >> "${COMMENT_FILE}" + echo "" >> "${COMMENT_FILE}" # Core Package - Collapsible Section (with full text summary from file) - echo "
" >> "$comment_file" - echo "Core Package - Full Text Report" >> "$comment_file" - echo "" >> "$comment_file" - echo '```text' >> "$comment_file" - if [ -f "$core_full_text_summary_file" ]; then - cat "$core_full_text_summary_file" >> "$comment_file" + echo "
" >> "${COMMENT_FILE}" + echo "Core Package - Full Text Report" >> "${COMMENT_FILE}" + echo "" >> "${COMMENT_FILE}" + echo '```text' >> "${COMMENT_FILE}" + if [ -f "${CORE_FULL_TEXT_SUMMARY_FILE}" ]; then + cat "${CORE_FULL_TEXT_SUMMARY_FILE}" >> "${COMMENT_FILE}" else - echo "Core full-text-summary.txt not found at: $core_full_text_summary_file" >> "$comment_file" + echo "Core full-text-summary.txt not found at: ${CORE_FULL_TEXT_SUMMARY_FILE}" >> "${COMMENT_FILE}" fi - echo '```' >> "$comment_file" - echo "
" >> "$comment_file" - echo "" >> "$comment_file" + echo '```' >> "${COMMENT_FILE}" + echo "
" >> "${COMMENT_FILE}" + echo "" >> "${COMMENT_FILE}" - echo "_For detailed HTML reports, please see the 'coverage-reports-${{ inputs.node_version }}-${{ inputs.os }}' artifact from the main CI run._" >> "$comment_file" + echo "_For detailed HTML reports, please see the 'coverage-reports-${NODE_VERSION}-${OS}' artifact from the main CI run._" >> "${COMMENT_FILE}" - - name: Post Coverage Comment - uses: thollander/actions-comment-pull-request@v3 - if: always() + - name: 'Post Coverage Comment' + uses: 'thollander/actions-comment-pull-request@65f9e5c9a1f2cd378bd74b2e057c9736982a8e74' # ratchet:thollander/actions-comment-pull-request@v3 + if: |- + ${{ always() }} with: - file-path: coverage-comment.md # Use the generated file directly - comment-tag: code-coverage-summary - github-token: ${{ inputs.github_token }} + file-path: 'coverage-comment.md' # Use the generated file directly + comment-tag: 'code-coverage-summary' + github-token: '${{ inputs.github_token }}' diff --git a/.github/workflows/community-report.yml b/.github/workflows/community-report.yml index 28aa2cba..59fd427f 100644 --- a/.github/workflows/community-report.yml +++ b/.github/workflows/community-report.yml @@ -1,4 +1,4 @@ -name: Generate Weekly Community Report 📊 +name: 'Generate Weekly Community Report 📊' on: schedule: @@ -12,56 +12,57 @@ on: jobs: generate-report: - name: Generate Report 📝 - if: ${{ github.repository == 'google-gemini/gemini-cli' }} - runs-on: ubuntu-latest + name: 'Generate Report 📝' + if: |- + ${{ github.repository == 'google-gemini/gemini-cli' }} + runs-on: 'ubuntu-latest' permissions: - issues: write - pull-requests: read - discussions: read - contents: read - id-token: write + issues: 'write' + pull-requests: 'read' + discussions: 'read' + contents: 'read' + id-token: 'write' steps: - - name: Generate GitHub App Token 🔑 - id: generate_token - uses: actions/create-github-app-token@df432ceedc7162793a195dd1713ff69aefc7379e # v2 + - name: 'Generate GitHub App Token 🔑' + id: 
'generate_token' + uses: 'actions/create-github-app-token@a8d616148505b5069dccd32f177bb87d7f39123b' # ratchet:actions/create-github-app-token@v2 with: - app-id: ${{ secrets.APP_ID }} - private-key: ${{ secrets.PRIVATE_KEY }} + app-id: '${{ secrets.APP_ID }}' + private-key: '${{ secrets.PRIVATE_KEY }}' - - name: Generate Report 📜 - id: report + - name: 'Generate Report 📜' + id: 'report' env: - GH_TOKEN: ${{ steps.generate_token.outputs.token }} - REPO: ${{ github.repository }} - DAYS: ${{ github.event.inputs.days || '7' }} - run: | + GH_TOKEN: '${{ steps.generate_token.outputs.token }}' + REPO: '${{ github.repository }}' + DAYS: '${{ github.event.inputs.days || 7 }}' + run: |- set -e - START_DATE=$(date -u -d "$DAYS days ago" +'%Y-%m-%d') - END_DATE=$(date -u +'%Y-%m-%d') - echo "⏳ Generating report for contributions from $START_DATE to $END_DATE..." + START_DATE="$(date -u -d "$DAYS days ago" +'%Y-%m-%d')" + END_DATE="$(date -u +'%Y-%m-%d')" + echo "⏳ Generating report for contributions from ${START_DATE} to ${END_DATE}..." declare -A author_is_googler check_googler_status() { - local author=$1 - if [[ "$author" == *"[bot]" ]]; then - author_is_googler[$author]=1 + local author="$1" + if [[ "${author}" == *"[bot]" ]]; then + author_is_googler[${author}]=1 return 1 fi - if [[ -v "author_is_googler[$author]" ]]; then - return ${author_is_googler[$author]} + if [[ -v "author_is_googler[${author}]" ]]; then + return "${author_is_googler[${author}]}" fi - if gh api "orgs/googlers/members/$author" --silent 2>/dev/null; then - echo "🧑‍💻 $author is a Googler." - author_is_googler[$author]=0 + if gh api "orgs/googlers/members/${author}" --silent 2>/dev/null; then + echo "🧑‍💻 ${author} is a Googler." + author_is_googler[${author}]=0 else - echo "🌍 $author is a community contributor." - author_is_googler[$author]=1 + echo "🌍 ${author} is a community contributor." 
+ author_is_googler[${author}]=1 fi - return ${author_is_googler[$author]} + return "${author_is_googler[${author}]}" } googler_issues=0 @@ -70,27 +71,27 @@ jobs: non_googler_prs=0 echo "🔎 Fetching issues and pull requests..." - ITEMS_JSON=$(gh search issues --repo "$REPO" "created:>$START_DATE" --json author,isPullRequest --limit 1000) + ITEMS_JSON="$(gh search issues --repo "${REPO}" "created:>${START_DATE}" --json author,isPullRequest --limit 1000)" for row in $(echo "${ITEMS_JSON}" | jq -r '.[] | @base64'); do _jq() { - echo ${row} | base64 --decode | jq -r ${1} + echo "${row}" | base64 --decode | jq -r "${1}" } - author=$(_jq '.author.login') - is_pr=$(_jq '.isPullRequest') + author="$(_jq '.author.login')" + is_pr="$(_jq '.isPullRequest')" - if [[ -z "$author" || "$author" == "null" ]]; then + if [[ -z "${author}" || "${author}" == "null" ]]; then continue fi - if check_googler_status "$author"; then - if [[ "$is_pr" == "true" ]]; then + if check_googler_status "${author}"; then + if [[ "${is_pr}" == "true" ]]; then ((googler_prs++)) else ((googler_issues++)) fi else - if [[ "$is_pr" == "true" ]]; then + if [[ "${is_pr}" == "true" ]]; then ((non_googler_prs++)) else ((non_googler_issues++)) @@ -114,19 +115,19 @@ jobs: } } }''' - DISCUSSIONS_JSON=$(gh api graphql -f q="repo:$REPO created:>$START_DATE" -f query="$DISCUSSION_QUERY") + DISCUSSIONS_JSON="$(gh api graphql -f q="repo:${REPO} created:>${START_DATE}" -f query="${DISCUSSION_QUERY}")" for row in $(echo "${DISCUSSIONS_JSON}" | jq -r '.data.search.nodes[] | @base64'); do _jq() { - echo ${row} | base64 --decode | jq -r ${1} + echo "${row}" | base64 --decode | jq -r "${1}" } - author=$(_jq '.author.login') + author="$(_jq '.author.login')" - if [[ -z "$author" || "$author" == "null" ]]; then + if [[ -z "${author}" || "${author}" == "null" ]]; then continue fi - if check_googler_status "$author"; then + if check_googler_status "${author}"; then ((googler_discussions++)) else ((non_googler_discussions++)) @@ 
-134,7 +135,6 @@ jobs: done echo "✍️ Generating report content..." - REPORT_TITLE="Community Contribution Report: $START_DATE to $END_DATE" TOTAL_ISSUES=$((googler_issues + non_googler_issues)) TOTAL_PRS=$((googler_prs + non_googler_prs)) TOTAL_DISCUSSIONS=$((googler_discussions + non_googler_discussions)) @@ -142,7 +142,7 @@ jobs: REPORT_BODY=$(cat <> $GITHUB_OUTPUT - echo "$REPORT_BODY" >> $GITHUB_OUTPUT - echo "EOF" >> $GITHUB_OUTPUT + echo "report_body<> "${GITHUB_OUTPUT}" + echo "${REPORT_BODY}" >> "${GITHUB_OUTPUT}" + echo "EOF" >> "${GITHUB_OUTPUT}" echo "📊 Community Contribution Report:" - echo "$REPORT_BODY" + echo "${REPORT_BODY}" - - name: 🤖 Get Insights from Report - if: steps.report.outputs.report_body != '' - uses: google-gemini/gemini-cli-action@df3f890f003d28c60a2a09d2c29e0126e4d1e2ff + - name: '🤖 Get Insights from Report' + if: |- + ${{ steps.report.outputs.report_body != '' }} + uses: 'google-github-actions/run-gemini-cli@06123c6a203eb7a964ce3be7c48479cc66059f23' # ratchet:google-github-actions/run-gemini-cli@v0 env: - GITHUB_TOKEN: ${{ steps.generate_token.outputs.token }} + GITHUB_TOKEN: '${{ steps.generate_token.outputs.token }}' + REPOSITORY: '${{ github.repository }}' with: - version: 0.1.8-rc.0 - GEMINI_API_KEY: ${{ secrets.GEMINI_API_KEY }} - OTLP_GCP_WIF_PROVIDER: ${{ secrets.OTLP_GCP_WIF_PROVIDER }} - OTLP_GOOGLE_CLOUD_PROJECT: ${{ secrets.OTLP_GOOGLE_CLOUD_PROJECT }} - settings_json: | + gcp_workload_identity_provider: '${{ vars.GCP_WIF_PROVIDER }}' + gcp_project_id: '${{ vars.GOOGLE_CLOUD_PROJECT }}' + gcp_location: '${{ vars.GOOGLE_CLOUD_LOCATION }}' + gcp_service_account: '${{ vars.SERVICE_ACCOUNT_EMAIL }}' + gemini_api_key: '${{ secrets.GEMINI_API_KEY }}' + use_vertex_ai: '${{ vars.GOOGLE_GENAI_USE_VERTEXAI }}' + use_gemini_code_assist: '${{ vars.GOOGLE_GENAI_USE_GCA }}' + settings: |- { "coreTools": [ "run_shell_command(gh issue list)", @@ -180,7 +185,7 @@ jobs: "run_shell_command(gh search prs)" ] } - prompt: | + prompt: |- You are 
a helpful assistant that analyzes community contribution reports. Based on the following report, please provide a brief summary and highlight any interesting trends or potential areas for improvement. diff --git a/docs/npm.md b/docs/npm.md index ed99f0b8..466e25d0 100644 --- a/docs/npm.md +++ b/docs/npm.md @@ -58,7 +58,7 @@ To install the latest nightly build, use the `@nightly` tag: npm install -g @google/gemini-cli@nightly ``` -We also run a Google cloud build called [release-docker.yml](../.gcp/release-docker.yaml). Which publishes the sandbox docker to match your release. This will also be moved to GH and combined with the main release file once service account permissions are sorted out. +We also run a Google cloud build called [release-docker.yml](../.gcp/release-docker.yml). Which publishes the sandbox docker to match your release. This will also be moved to GH and combined with the main release file once service account permissions are sorted out. ### After the Release From b655d8f06208e5989161f2d20f20c10a9ef7f36c Mon Sep 17 00:00:00 2001 From: Seth Vargo Date: Tue, 12 Aug 2025 22:50:18 -0400 Subject: [PATCH 19/45] chore(ci): Ensure triage workflows are consistent (#6108) --- .../gemini-automated-issue-triage.yml | 71 ++++++------ .../gemini-scheduled-issue-triage.yml | 101 +++++++++--------- .../workflows/gemini-scheduled-pr-triage.yml | 45 ++++---- 3 files changed, 111 insertions(+), 106 deletions(-) diff --git a/.github/workflows/gemini-automated-issue-triage.yml b/.github/workflows/gemini-automated-issue-triage.yml index 950d11e8..dcb33fbb 100644 --- a/.github/workflows/gemini-automated-issue-triage.yml +++ b/.github/workflows/gemini-automated-issue-triage.yml @@ -32,7 +32,7 @@ permissions: jobs: triage-issue: - if: > + if: |- github.repository == 'google-gemini/gemini-cli' && (github.event_name == 'issues' || github.event_name == 'workflow_dispatch' || @@ -45,18 +45,18 @@ jobs: runs-on: 'ubuntu-latest' steps: - - name: 'Checkout repository' - uses: 
'actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683' + - name: 'Checkout' + uses: 'actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8' # ratchet:actions/checkout@v5 - name: 'Generate GitHub App Token' id: 'generate_token' - uses: 'actions/create-github-app-token@df432ceedc7162793a195dd1713ff69aefc7379e' + uses: 'actions/create-github-app-token@a8d616148505b5069dccd32f177bb87d7f39123b' # ratchet:actions/create-github-app-token@v2 with: app-id: '${{ secrets.APP_ID }}' private-key: '${{ secrets.PRIVATE_KEY }}' - name: 'Run Gemini Issue Triage' - uses: 'google-github-actions/run-gemini-cli@20351b5ea2b4179431f1ae8918a246a0808f8747' + uses: 'google-github-actions/run-gemini-cli@06123c6a203eb7a964ce3be7c48479cc66059f23' # ratchet:google-github-actions/run-gemini-cli@v0 id: 'gemini_issue_triage' env: GITHUB_TOKEN: '${{ steps.generate_token.outputs.token }}' @@ -94,13 +94,13 @@ jobs: ## Steps - 1. Run: `gh label list --repo ${{ github.repository }} --limit 100` to get all available labels. + 1. Run: `gh label list --repo "${REPOSITORY}" --limit 100` to get all available labels. 2. Review the issue title and body provided in the environment variables: "${ISSUE_TITLE}" and "${ISSUE_BODY}". 3. Ignore any existing priorities or tags on the issue. Just report your findings. - 4. Select the most relevant labels from the existing labels, focusing on kind/*, area/*, sub-area/* and priority/*. For area/* and kind/* limit yourself to only the single most applicable label in each case. - 6. Apply the selected labels to this issue using: `gh issue edit ${{ github.event.issue.number }} --repo ${{ github.repository }} --add-label "label1,label2"`. + 4. Select the most relevant labels from the existing labels, focusing on kind/*, area/*, sub-area/* and priority/*. For area/* and kind/* limit yourself to only the single most applicable label in each case. + 6. 
Apply the selected labels to this issue using: `gh issue edit "${ISSUE_NUMBER}" --repo "${REPOSITORY}" --add-label "label1,label2"`. 7. For each issue please check if CLI version is present, this is usually in the output of the /about command and will look like 0.1.5 for anything more than 6 versions older than the most recent should add the status/need-retesting label. - 8. If you see that the issue doesn’t look like it has sufficient information recommend the status/need-information label. + 8. If you see that the issue doesn't look like it has sufficient information recommend the status/need-information label. 9. Use Area definitions mentioned below to help you narrow down issues. ## Guidelines @@ -112,7 +112,7 @@ jobs: - Apply only one kind/ label. - Apply all applicable sub-area/* and priority/* labels based on the issue content. It's ok to have multiple of these. - Once you categorize the issue if it needs information bump down the priority by 1 eg.. a p0 would become a p1 a p1 would become a p2. P2 and P3 can stay as is in this scenario. - Categorization Guidelines: + Categorization Guidelines: P0: Critical / Blocker - A P0 bug is a catastrophic failure that demands immediate attention. It represents a complete showstopper for a significant portion of users or for the development process itself. Impact: @@ -150,16 +150,16 @@ jobs: - If users are talking about issues where the model gets downgraded from pro to flash then i want you to categorize that as a performance issue - This product is designed to use different models eg.. using pro, downgrading to flash etc. when users report that they dont expect the model to change those would be categorized as feature requests. Definition of Areas - area/ux: + area/ux: - Issues concerning user-facing elements like command usability, interactive features, help docs, and perceived performance. 
- - I am seeing my screen flicker when using Gemini CLI - - I am seeing the output malformed - - Theme changes aren't taking effect + - I am seeing my screen flicker when using Gemini CLI + - I am seeing the output malformed + - Theme changes aren't taking effect - My keyboard inputs arent' being recognzied - area/platform: + area/platform: - Issues related to installation, packaging, OS compatibility (Windows, macOS, Linux), and the underlying CLI framework. area/background: Issues related to long-running background tasks, daemons, and autonomous or proactive agent features. - area/models: + area/models: - i am not getting a response that is reasonable or expected. this can include things like - I am calling a tool and the tool is not performing as expected. - i am expecting a tool to be called and it is not getting called , @@ -173,10 +173,10 @@ jobs: - Memory compression - unexpected responses, - poor quality of generated code - area/tools: - - These are primarily issues related to Model Context Protocol - - These are issues that mention MCP support - - feature requests asking for support for new tools. + area/tools: + - These are primarily issues related to Model Context Protocol + - These are issues that mention MCP support + - feature requests asking for support for new tools. area/core: Issues with fundamental components like command parsing, configuration management, session state, and the main API client logic. Introducing multi-modality area/contribution: Issues related to improving the developer contribution experience, such as CI/CD pipelines, build scripts, and test automation infrastructure. area/authentication: Issues related to user identity, login flows, API key handling, credential storage, and access token management, unable to sign in selecting wrong authentication path etc.. 
@@ -192,19 +192,22 @@ jobs: - name: 'Post Issue Triage Failure Comment' if: |- ${{ failure() && steps.gemini_issue_triage.outcome == 'failure' }} - uses: 'actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea' + uses: 'actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea' # ratchet:actions/github-script@v7 + env: + REPOSITORY: '${{ github.repository }}' + RUN_URL: '${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}' with: github-token: '${{ steps.generate_token.outputs.token }}' script: |- github.rest.issues.createComment({ - owner: '${{ github.repository }}'.split('/')[0], - repo: '${{ github.repository }}'.split('/')[1], + owner: process.env.REPOSITORY.split('/')[0], + repo: process.env.REPOSITORY.split('/')[1], issue_number: '${{ github.event.issue.number }}', - body: 'There is a problem with the Gemini CLI issue triaging. Please check the [action logs](${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}) for details.' + body: `There is a problem with the Gemini CLI issue triaging. 
Please check the [action logs](${process.env.RUN_URL}) for details.` }) deduplicate-issues: - if: > + if: |- github.repository == 'google-gemini/gemini-cli' && vars.TRIAGE_DEDUPLICATE_ISSUES != '' && (github.event_name == 'issues' || @@ -218,25 +221,25 @@ jobs: timeout-minutes: 20 runs-on: 'ubuntu-latest' steps: - - name: 'Checkout repository' - uses: 'actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683' + - name: 'Checkout' + uses: 'actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8' # ratchet:actions/checkout@v5 - name: 'Generate GitHub App Token' id: 'generate_token' - uses: 'actions/create-github-app-token@df432ceedc7162793a195dd1713ff69aefc7379e' + uses: 'actions/create-github-app-token@a8d616148505b5069dccd32f177bb87d7f39123b' # ratchet:actions/create-github-app-token@v2 with: app-id: '${{ secrets.APP_ID }}' private-key: '${{ secrets.PRIVATE_KEY }}' - - name: Log in to GitHub Container Registry - uses: docker/login-action@v3 + - name: 'Log in to GitHub Container Registry' + uses: 'docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1' # ratchet:docker/login-action@v3 with: - registry: ghcr.io - username: ${{ github.actor }} - password: ${{ secrets.GITHUB_TOKEN }} + registry: 'ghcr.io' + username: '${{ github.actor }}' + password: '${{ secrets.GITHUB_TOKEN }}' - name: 'Run Gemini Issue Deduplication' - uses: 'google-github-actions/run-gemini-cli@20351b5ea2b4179431f1ae8918a246a0808f8747' + uses: 'google-github-actions/run-gemini-cli@06123c6a203eb7a964ce3be7c48479cc66059f23' # ratchet:google-github-actions/run-gemini-cli@v0 id: 'gemini_issue_deduplication' env: GITHUB_TOKEN: '${{ steps.generate_token.outputs.token }}' diff --git a/.github/workflows/gemini-scheduled-issue-triage.yml b/.github/workflows/gemini-scheduled-issue-triage.yml index c6553706..3005c738 100644 --- a/.github/workflows/gemini-scheduled-issue-triage.yml +++ b/.github/workflows/gemini-scheduled-issue-triage.yml @@ -23,16 +23,16 @@ permissions: jobs: triage-issues: 
timeout-minutes: 10 - if: ${{ github.repository == 'google-gemini/gemini-cli' }} + if: |- + ${{ github.repository == 'google-gemini/gemini-cli' }} runs-on: 'ubuntu-latest' - steps: - - name: 'Checkout repository' - uses: 'actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683' # ratchet:actions/checkout@v4 + - name: 'Checkout' + uses: 'actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8' # ratchet:actions/checkout@v5 - name: 'Generate GitHub App Token' id: 'generate_token' - uses: 'actions/create-github-app-token@df432ceedc7162793a195dd1713ff69aefc7379e' # ratchet:actions/create-github-app-token@v2 + uses: 'actions/create-github-app-token@a8d616148505b5069dccd32f177bb87d7f39123b' # ratchet:actions/create-github-app-token@v2 with: app-id: '${{ secrets.APP_ID }}' private-key: '${{ secrets.PRIVATE_KEY }}' @@ -42,7 +42,6 @@ jobs: env: GITHUB_TOKEN: '${{ steps.generate_token.outputs.token }}' GITHUB_REPOSITORY: '${{ github.repository }}' - GITHUB_OUTPUT: '${{ github.output }}' run: |- set -euo pipefail @@ -66,7 +65,7 @@ jobs: - name: 'Run Gemini Issue Triage' if: |- ${{ steps.find_issues.outputs.issues_to_triage != '[]' }} - uses: 'google-github-actions/run-gemini-cli@20351b5ea2b4179431f1ae8918a246a0808f8747' + uses: 'google-github-actions/run-gemini-cli@06123c6a203eb7a964ce3be7c48479cc66059f23' # ratchet:google-github-actions/run-gemini-cli@v0 id: 'gemini_issue_triage' env: GITHUB_TOKEN: '${{ steps.generate_token.outputs.token }}' @@ -104,28 +103,28 @@ jobs: ## Steps - 1. Run: `gh label list --repo ${{ github.repository }} --limit 100` to get all available labels. + 1. Run: `gh label list --repo "${REPOSITORY}" --limit 100` to get all available labels. 2. Check environment variable for issues to triage: $ISSUES_TO_TRIAGE (JSON array of issues) 3. Review the issue title, body and any comments provided in the environment variables. 4. Ignore any existing priorities or tags on the issue. - 5. 
Select the most relevant labels from the existing labels, focusing on kind/*, area/*, sub-area/* and priority/*. - 6. Get the list of labels already on the issue using `gh issue view ISSUE_NUMBER --repo ${{ github.repository }} --json labels -t '{{range .labels}}{{.name}}{{"\n"}}{{end}}' - 7. For area/* and kind/* limit yourself to only the single most applicable label in each case. + 5. Select the most relevant labels from the existing labels, focusing on kind/*, area/*, sub-area/* and priority/*. + 6. Get the list of labels already on the issue using `gh issue view ISSUE_NUMBER --repo "${REPOSITORY}" --json labels -t '{{range .labels}}{{.name}}{{"\n"}}{{end}}' + 7. For area/* and kind/* limit yourself to only the single most applicable label in each case. 8. Give me a single short paragraph about why you are selecting each label in the process. use the format Issue ID: , Title, Label applied:, Label removed, ovearll explanation 9. Parse the JSON array from step 2 and for EACH INDIVIDUAL issue, apply appropriate labels using separate commands: - - `gh issue edit ISSUE_NUMBER --repo ${{ github.repository }} --add-label "label1"` - - `gh issue edit ISSUE_NUMBER --repo ${{ github.repository }} --add-label "label2"` + - `gh issue edit ISSUE_NUMBER --repo "${REPOSITORY}" --add-label "label1"` + - `gh issue edit ISSUE_NUMBER --repo "${REPOSITORY}" --add-label "label2"` - Continue for each label separately - IMPORTANT: Label each issue individually, one command per issue, one label at a time if needed. - - Make sure after you apply labels there is only one area/* and one kind/* label per issue. - - To do this look for labels found in step 6 that no longer apply remove them one at a time using - - `gh issue edit ISSUE_NUMBER --repo ${{ github.repository }} --remove-label "label-name1"` - - `gh issue edit ISSUE_NUMBER --repo ${{ github.repository }} --remove-label "label-name2"` + - Make sure after you apply labels there is only one area/* and one kind/* label per issue. 
+ - To do this look for labels found in step 6 that no longer apply remove them one at a time using + - `gh issue edit ISSUE_NUMBER --repo "${REPOSITORY}" --remove-label "label-name1"` + - `gh issue edit ISSUE_NUMBER --repo "${REPOSITORY}" --remove-label "label-name2"` - IMPORTANT: Remove each label one at a time, one command per issue if needed. - 10. For each issue please check if CLI version is present, this is usually in the output of the /about command and will look like 0.1.5 + 10. For each issue please check if CLI version is present, this is usually in the output of the /about command and will look like 0.1.5 - Anything more than 6 versions older than the most recent should add the status/need-retesting label - 11. If you see that the issue doesn’t look like it has sufficient information recommend the status/need-information label - - After applying appropriate labels to an issue, remove the "status/need-triage" label if present: `gh issue edit ISSUE_NUMBER --repo ${{ github.repository }} --remove-label "status/need-triage"` + 11. If you see that the issue doesn't look like it has sufficient information recommend the status/need-information label + - After applying appropriate labels to an issue, remove the "status/need-triage" label if present: `gh issue edit ISSUE_NUMBER --repo "${REPOSITORY}" --remove-label "status/need-triage"` - Execute one `gh issue edit` command per issue, wait for success before proceeding to the next Process each issue sequentially and confirm each labeling operation before moving to the next issue. @@ -136,22 +135,22 @@ jobs: - Do not remove labels titled help wanted or good first issue. - Triage only the current issue. - Apply only one area/ label - - Apply only one kind/ label (Do not apply kind/duplicate or kind/parent-issue) + - Apply only one kind/ label (Do not apply kind/duplicate or kind/parent-issue) - Apply all applicable sub-area/* and priority/* labels based on the issue content. It's ok to have multiple of these. 
- Once you categorize the issue if it needs information bump down the priority by 1 eg.. a p0 would become a p1 a p1 would become a p2. P2 and P3 can stay as is in this scenario. - Categorization Guidelines: + Categorization Guidelines: P0: Critical / Blocker - A P0 bug is a catastrophic failure that demands immediate attention. It represents a complete showstopper for a significant portion of users or for the development process itself. Impact: - Blocks development or testing for the entire team. - Major security vulnerability that could compromise user data or system integrity. - Causes data loss or corruption with no workaround. - - Crashes the application or makes a core feature completely unusable for all or most users in a production environment. Will it cause severe quality degration? + - Crashes the application or makes a core feature completely unusable for all or most users in a production environment. Will it cause severe quality degration? - Is it preventing contributors from contributing to the repository or is it a release blocker? Qualifier: Is the main function of the software broken? Example: The gemini auth login command fails with an unrecoverable error, preventing any user from authenticating and using the rest of the CLI. P1: High - - A P1 bug is a serious issue that significantly degrades the user experience or impacts a core feature. While not a complete blocker, it's a major problem that needs a fast resolution. + - A P1 bug is a serious issue that significantly degrades the user experience or impacts a core feature. While not a complete blocker, it's a major problem that needs a fast resolution. - Feature requests are almost never P1. Impact: - A core feature is broken or behaving incorrectly for a large number of users or large number of use cases. @@ -175,21 +174,21 @@ jobs: - An edge-case bug that is very difficult to reproduce and affects a tiny fraction of users. Qualifier: Is it a "nice-to-fix" issue? Example: Spelling mistakes etc. 
- Additional Context: + Additional Context: - If users are talking about issues where the model gets downgraded from pro to flash then i want you to categorize that as a performance issue - - This product is designed to use different models eg.. using pro, downgrading to flash etc. + - This product is designed to use different models eg.. using pro, downgrading to flash etc. - When users report that they dont expect the model to change those would be categorized as feature requests. Definition of Areas - area/ux: + area/ux: - Issues concerning user-facing elements like command usability, interactive features, help docs, and perceived performance. - - I am seeing my screen flicker when using Gemini CLI - - I am seeing the output malformed - - Theme changes aren't taking effect + - I am seeing my screen flicker when using Gemini CLI + - I am seeing the output malformed + - Theme changes aren't taking effect - My keyboard inputs arent' being recognzied - area/platform: + area/platform: - Issues related to installation, packaging, OS compatibility (Windows, macOS, Linux), and the underlying CLI framework. area/background: Issues related to long-running background tasks, daemons, and autonomous or proactive agent features. - area/models: + area/models: - i am not getting a response that is reasonable or expected. this can include things like - I am calling a tool and the tool is not performing as expected. - i am expecting a tool to be called and it is not getting called , @@ -203,21 +202,21 @@ jobs: - Memory compression - unexpected responses, - poor quality of generated code - area/tools: - - These are primarily issues related to Model Context Protocol - - These are issues that mention MCP support - - feature requests asking for support for new tools. - area/core: + area/tools: + - These are primarily issues related to Model Context Protocol + - These are issues that mention MCP support + - feature requests asking for support for new tools. 
+ area/core: - Issues with fundamental components like command parsing, configuration management, session state, and the main API client logic. Introducing multi-modality - area/contribution: + area/contribution: - Issues related to improving the developer contribution experience, such as CI/CD pipelines, build scripts, and test automation infrastructure. - area/authentication: + area/authentication: - Issues related to user identity, login flows, API key handling, credential storage, and access token management, unable to sign in selecting wrong authentication path etc.. - area/security-privacy: + area/security-privacy: - Issues concerning vulnerability patching, dependency security, data sanitization, privacy controls, and preventing unauthorized data access. - area/extensibility: + area/extensibility: - Issues related to the plugin system, extension APIs, or making the CLI's functionality available in other applications, github actions, ide support etc.. - area/performance: + area/performance: - Issues focused on model performance - Issues with running out of capacity, - 429 errors etc.. 
@@ -231,25 +230,25 @@ jobs: timeout-minutes: 20 runs-on: 'ubuntu-latest' steps: - - name: 'Checkout repository' - uses: 'actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683' + - name: 'Checkout' + uses: 'actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8' # ratchet:actions/checkout@v5 - name: 'Generate GitHub App Token' id: 'generate_token' - uses: 'actions/create-github-app-token@df432ceedc7162793a195dd1713ff69aefc7379e' + uses: 'actions/create-github-app-token@a8d616148505b5069dccd32f177bb87d7f39123b' # ratchet:actions/create-github-app-token@v2 with: app-id: '${{ secrets.APP_ID }}' private-key: '${{ secrets.PRIVATE_KEY }}' - - name: Log in to GitHub Container Registry - uses: docker/login-action@v3 + - name: 'Log in to GitHub Container Registry' + uses: 'docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1' # ratchet:docker/login-action@v3 with: - registry: ghcr.io - username: ${{ github.actor }} - password: ${{ secrets.GITHUB_TOKEN }} + registry: 'ghcr.io' + username: '${{ github.actor }}' + password: '${{ secrets.GITHUB_TOKEN }}' - name: 'Run Gemini Issue Deduplication Refresh' - uses: 'google-github-actions/run-gemini-cli@20351b5ea2b4179431f1ae8918a246a0808f8747' + uses: 'google-github-actions/run-gemini-cli@06123c6a203eb7a964ce3be7c48479cc66059f23' # ratchet:google-github-actions/run-gemini-cli@v0 id: 'gemini_refresh_embeddings' env: GITHUB_TOKEN: '${{ steps.generate_token.outputs.token }}' diff --git a/.github/workflows/gemini-scheduled-pr-triage.yml b/.github/workflows/gemini-scheduled-pr-triage.yml index dc2228bc..d47c9672 100644 --- a/.github/workflows/gemini-scheduled-pr-triage.yml +++ b/.github/workflows/gemini-scheduled-pr-triage.yml @@ -1,36 +1,39 @@ -name: Gemini Scheduled PR Triage 🚀 +name: 'Gemini Scheduled PR Triage 🚀' on: schedule: - cron: '*/15 * * * *' # Runs every 15 minutes - workflow_dispatch: {} + workflow_dispatch: jobs: audit-prs: timeout-minutes: 15 - if: ${{ github.repository == 'google-gemini/gemini-cli' }} + if: 
|- + ${{ github.repository == 'google-gemini/gemini-cli' }} permissions: - contents: read - id-token: write - issues: write - pull-requests: write - runs-on: ubuntu-latest + contents: 'read' + id-token: 'write' + issues: 'write' + pull-requests: 'write' + runs-on: 'ubuntu-latest' outputs: - prs_needing_comment: ${{ steps.run_triage.outputs.prs_needing_comment }} + prs_needing_comment: '${{ steps.run_triage.outputs.prs_needing_comment }}' steps: - - name: Checkout repository - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 + - name: 'Checkout' + uses: 'actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8' # ratchet:actions/checkout@v5 - - name: Generate GitHub App Token - id: generate_token - uses: actions/create-github-app-token@df432ceedc7162793a195dd1713ff69aefc7379e # v2 + - name: 'Generate GitHub App Token' + id: 'generate_token' + uses: 'actions/create-github-app-token@a8d616148505b5069dccd32f177bb87d7f39123b' # ratchet:actions/create-github-app-token@v2 with: - app-id: ${{ secrets.APP_ID }} - private-key: ${{ secrets.PRIVATE_KEY }} + app-id: '${{ secrets.APP_ID }}' + private-key: '${{ secrets.PRIVATE_KEY }}' - - name: Run PR Triage Script - id: run_triage + - name: 'Run PR Triage Script' + id: 'run_triage' + shell: 'bash' env: - GITHUB_TOKEN: ${{ steps.generate_token.outputs.token }} - GITHUB_REPOSITORY: ${{ github.repository }} - run: ./.github/scripts/pr-triage.sh + GITHUB_TOKEN: '${{ steps.generate_token.outputs.token }}' + GITHUB_REPOSITORY: '${{ github.repository }}' + run: |- + ./.github/scripts/pr-triage.sh From 214800cfc69082db7e28e8f8104b7bdec9a4c152 Mon Sep 17 00:00:00 2001 From: Seth Vargo Date: Tue, 12 Aug 2025 22:56:49 -0400 Subject: [PATCH 20/45] chore(ci): Ensure ci workflows is consistent and not vulnerable to injection attacks (#6109) --- .github/workflows/ci.yml | 170 ++++++++++++++++++++++----------------- 1 file changed, 95 insertions(+), 75 deletions(-) diff --git a/.github/workflows/ci.yml 
b/.github/workflows/ci.yml index 5569b2f9..1945fa4f 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -1,14 +1,20 @@ -# .github/workflows/ci.yml - -name: Gemini CLI CI +name: 'Gemini CLI CI' on: push: - branches: [main, release] + branches: + - 'main' + - 'release' pull_request: - branches: [main, release] + branches: + - 'main' + - 'release' merge_group: +concurrency: + group: '${{ github.workflow }}-${{ github.head_ref || github.ref }}' + cancel-in-progress: ${{ github.ref != 'refs/heads/main' && !startsWith(github.ref, 'refs/heads/release/') }} + jobs: lint: name: Lint @@ -51,112 +57,126 @@ jobs: run: npm run typecheck test: - name: Test - runs-on: ${{ matrix.os }} - needs: lint + name: 'Test' + runs-on: '${{ matrix.os }}' + needs: 'lint' permissions: - contents: read - checks: write - pull-requests: write + contents: 'read' + checks: 'write' + pull-requests: 'write' strategy: fail-fast: false # So we can see all test failures matrix: - os: [ubuntu-latest, windows-latest, macos-latest] - node-version: [20.x, 22.x, 24.x] + os: + - 'macos-latest' + - 'ubuntu-latest' + - 'windows-latest' + node-version: + - '20.x' + - '22.x' + - '24.x' steps: - - name: Checkout repository - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 + - name: 'Checkout' + uses: 'actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8' # ratchet:actions/checkout@v5 - - name: Set up Node.js ${{ matrix.node-version }} - uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # v4 + - name: 'Set up Node.js ${{ matrix.node-version }}' + uses: 'actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020' # ratchet:actions/setup-node@v4 with: - node-version: ${{ matrix.node-version }} + node-version: '${{ matrix.node-version }}' cache: 'npm' - - name: Build project - run: npm run build + - name: 'Build project' + run: |- + npm run build - - name: Install dependencies for testing - run: npm ci # Install fresh dependencies using the downloaded 
package-lock.json + - name: 'Install dependencies for testing' + run: |- + npm ci - - name: Run tests and generate reports - run: npm run test:ci + - name: 'Run tests and generate reports' env: NO_COLOR: true + run: 'npm run test:ci' - - name: Publish Test Report (for non-forks) - if: always() && (github.event.pull_request.head.repo.full_name == github.repository) - uses: dorny/test-reporter@dc3a92680fcc15842eef52e8c4606ea7ce6bd3f3 # v2 + - name: 'Publish Test Report (for non-forks)' + if: |- + ${{ always() && (github.event.pull_request.head.repo.full_name == github.repository) }} + uses: 'dorny/test-reporter@dc3a92680fcc15842eef52e8c4606ea7ce6bd3f3' # ratchet:dorny/test-reporter@v2 with: - name: Test Results (Node ${{ matrix.node-version }}) - path: packages/*/junit.xml - reporter: java-junit + name: 'Test Results (Node ${{ matrix.node-version }})' + path: 'packages/*/junit.xml' + reporter: 'java-junit' fail-on-error: 'false' - - name: Upload Test Results Artifact (for forks) - if: always() && (github.event_name == 'pull_request' && github.event.pull_request.head.repo.full_name != github.repository) - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4 + - name: 'Upload Test Results Artifact (for forks)' + if: |- + ${{ always() && (github.event_name == 'pull_request' && github.event.pull_request.head.repo.full_name != github.repository) }} + uses: 'actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02' # ratchet:actions/upload-artifact@v4 with: - name: test-results-fork-${{ matrix.node-version }}-${{ matrix.os }} - path: packages/*/junit.xml + name: 'test-results-fork-${{ matrix.node-version }}-${{ matrix.os }}' + path: 'packages/*/junit.xml' - - name: Upload coverage reports - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4 - if: always() + - name: 'Upload coverage reports' + if: |- + ${{ always() }} + uses: 'actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02' # 
ratchet:actions/upload-artifact@v4 with: - name: coverage-reports-${{ matrix.node-version }}-${{ matrix.os }} - path: packages/*/coverage + name: 'coverage-reports-${{ matrix.node-version }}-${{ matrix.os }}' + path: 'packages/*/coverage' post_coverage_comment: - name: Post Coverage Comment - runs-on: ubuntu-latest - needs: test - if: always() && github.event_name == 'pull_request' && (github.event.pull_request.head.repo.full_name == github.repository) + name: 'Post Coverage Comment' + runs-on: 'ubuntu-latest' + needs: 'test' + if: |- + ${{ always() && github.event_name == 'pull_request' && (github.event.pull_request.head.repo.full_name == github.repository) }} continue-on-error: true permissions: - contents: read # For checkout - pull-requests: write # For commenting + contents: 'read' # For checkout + pull-requests: 'write' # For commenting strategy: matrix: # Reduce noise by only posting the comment once - os: [ubuntu-latest] - node-version: [22.x] + os: + - 'ubuntu-latest' + node-version: + - '22.x' steps: - - name: Checkout repository - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 + - name: 'Checkout' + uses: 'actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8' # ratchet:actions/checkout@v5 - - name: Download coverage reports artifact - uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4 + - name: 'Download coverage reports artifact' + uses: 'actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0' # ratchet:actions/download-artifact@v5 with: - name: coverage-reports-${{ matrix.node-version }}-${{ matrix.os }} - path: coverage_artifact # Download to a specific directory + name: 'coverage-reports-${{ matrix.node-version }}-${{ matrix.os }}' + path: 'coverage_artifact' # Download to a specific directory - - name: Post Coverage Comment using Composite Action - uses: ./.github/actions/post-coverage-comment # Path to the composite action directory + - name: 'Post Coverage Comment using Composite 
Action' + uses: './.github/actions/post-coverage-comment' # Path to the composite action directory with: - cli_json_file: coverage_artifact/cli/coverage/coverage-summary.json - core_json_file: coverage_artifact/core/coverage/coverage-summary.json - cli_full_text_summary_file: coverage_artifact/cli/coverage/full-text-summary.txt - core_full_text_summary_file: coverage_artifact/core/coverage/full-text-summary.txt - node_version: ${{ matrix.node-version }} - os: ${{ matrix.os }} - github_token: ${{ secrets.GITHUB_TOKEN }} + cli_json_file: 'coverage_artifact/cli/coverage/coverage-summary.json' + core_json_file: 'coverage_artifact/core/coverage/coverage-summary.json' + cli_full_text_summary_file: 'coverage_artifact/cli/coverage/full-text-summary.txt' + core_full_text_summary_file: 'coverage_artifact/core/coverage/full-text-summary.txt' + node_version: '${{ matrix.node-version }}' + os: '${{ matrix.os }}' + github_token: '${{ secrets.GITHUB_TOKEN }}' codeql: - name: CodeQL - runs-on: ubuntu-latest + name: 'CodeQL' + runs-on: 'ubuntu-latest' permissions: - actions: read - contents: read - security-events: write + actions: 'read' + contents: 'read' + security-events: 'write' steps: - - name: Checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 + - name: 'Checkout' + uses: 'actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8' # ratchet:actions/checkout@v5 - - name: Initialize CodeQL - uses: github/codeql-action/init@181d5eefc20863364f96762470ba6f862bdef56b # v3 + - name: 'Initialize CodeQL' + uses: 'github/codeql-action/init@df559355d593797519d70b90fc8edd5db049e7a2' # ratchet:github/codeql-action/init@v3 with: - languages: javascript + languages: 'javascript' - - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@181d5eefc20863364f96762470ba6f862bdef56b # v3 + - name: 'Perform CodeQL Analysis' + uses: 'github/codeql-action/analyze@df559355d593797519d70b90fc8edd5db049e7a2' # ratchet:github/codeql-action/analyze@v3 From 
4074e8e6ec3985f3d9a7b53f3497d38a0ba6743e Mon Sep 17 00:00:00 2001 From: Seth Vargo Date: Tue, 12 Aug 2025 23:24:39 -0400 Subject: [PATCH 21/45] chore(ci): fix yamllint issues (#6112) --- .gcp/release-docker.yml | 14 +++---- .gemini/config.yaml | 2 +- .github/ISSUE_TEMPLATE/bug_report.yml | 48 +++++++++++----------- .github/ISSUE_TEMPLATE/feature_request.yml | 34 +++++++-------- .github/workflows/docs-page-action.yml | 2 +- 5 files changed, 52 insertions(+), 48 deletions(-) diff --git a/.gcp/release-docker.yml b/.gcp/release-docker.yml index 59220b8d..f413da5b 100644 --- a/.gcp/release-docker.yml +++ b/.gcp/release-docker.yml @@ -22,8 +22,8 @@ steps: id: 'Determine Docker Image Tag' entrypoint: 'bash' args: - - -c - - | + - '-c' + - |- SHELL_TAG_NAME="$TAG_NAME" FINAL_TAG="$SHORT_SHA" # Default to SHA if [[ "$$SHELL_TAG_NAME" == *"-nightly"* ]]; then @@ -44,8 +44,8 @@ steps: id: 'Build sandbox Docker image' entrypoint: 'bash' args: - - -c - - | + - '-c' + - |- export GEMINI_SANDBOX_IMAGE_TAG=$$(cat /workspace/image_tag.txt) echo "Using Docker image tag for build: $$GEMINI_SANDBOX_IMAGE_TAG" npm run build:sandbox -- --output-file /workspace/final_image_uri.txt @@ -57,8 +57,8 @@ steps: id: 'Publish sandbox Docker image' entrypoint: 'bash' args: - - -c - - | + - '-c' + - |- set -e FINAL_IMAGE_URI=$$(cat /workspace/final_image_uri.txt) @@ -68,7 +68,7 @@ steps: - 'GEMINI_SANDBOX=$_CONTAINER_TOOL' options: - defaultLogsBucketBehavior: REGIONAL_USER_OWNED_BUCKET + defaultLogsBucketBehavior: 'REGIONAL_USER_OWNED_BUCKET' dynamicSubstitutions: true substitutions: diff --git a/.gemini/config.yaml b/.gemini/config.yaml index 781e7690..cbfb0c80 100644 --- a/.gemini/config.yaml +++ b/.gemini/config.yaml @@ -3,7 +3,7 @@ have_fun: false code_review: disable: false - comment_severity_threshold: HIGH + comment_severity_threshold: 'HIGH' max_review_comments: -1 pull_request_opened: help: false diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml 
index cb6a456a..1780f945 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.yml +++ b/.github/ISSUE_TEMPLATE/bug_report.yml @@ -1,8 +1,10 @@ -name: Bug Report -description: Report a bug to help us improve Gemini CLI -labels: ['kind/bug', 'status/need-triage'] +name: 'Bug Report' +description: 'Report a bug to help us improve Gemini CLI' +labels: + - 'kind/bug' + - 'status/need-triage' body: - - type: markdown + - type: 'markdown' attributes: value: |- > [!IMPORTANT] @@ -10,26 +12,26 @@ body: > > Please search **[existing issues](https://github.com/google-gemini/gemini-cli/issues)** to see if an issue already exists for the bug you encountered. - - type: textarea - id: problem + - type: 'textarea' + id: 'problem' attributes: - label: What happened? - description: A clear and concise description of what the bug is. + label: 'What happened?' + description: 'A clear and concise description of what the bug is.' validations: required: true - - type: textarea - id: expected + - type: 'textarea' + id: 'expected' attributes: - label: What did you expect to happen? + label: 'What did you expect to happen?' validations: required: true - - type: textarea - id: info + - type: 'textarea' + id: 'info' attributes: - label: Client information - description: Please paste the full text from the `/about` command run from Gemini CLI. Also include which platform (macOS, Windows, Linux). + label: 'Client information' + description: 'Please paste the full text from the `/about` command run from Gemini CLI. Also include which platform (macOS, Windows, Linux).' value: |-
@@ -42,14 +44,14 @@ body: validations: required: true - - type: textarea - id: login-info + - type: 'textarea' + id: 'login-info' attributes: - label: Login information - description: Describe how you are logging in (e.g., Google Account, API key). + label: 'Login information' + description: 'Describe how you are logging in (e.g., Google Account, API key).' - - type: textarea - id: additional-context + - type: 'textarea' + id: 'additional-context' attributes: - label: Anything else we need to know? - description: Add any other context about the problem here. + label: 'Anything else we need to know?' + description: 'Add any other context about the problem here.' diff --git a/.github/ISSUE_TEMPLATE/feature_request.yml b/.github/ISSUE_TEMPLATE/feature_request.yml index bf4d4871..1f7e48d4 100644 --- a/.github/ISSUE_TEMPLATE/feature_request.yml +++ b/.github/ISSUE_TEMPLATE/feature_request.yml @@ -1,8 +1,10 @@ -name: Feature Request -description: Suggest an idea for this project -labels: ['kind/enhancement', 'status/need-triage'] +name: 'Feature Request' +description: 'Suggest an idea for this project' +labels: + - 'kind/enhancement' + - 'status/need-triage' body: - - type: markdown + - type: 'markdown' attributes: value: |- > [!IMPORTANT] @@ -10,24 +12,24 @@ body: > > Please search **[existing issues](https://github.com/google-gemini/gemini-cli/issues)** to see if a similar feature has already been requested. - - type: textarea - id: feature + - type: 'textarea' + id: 'feature' attributes: - label: What would you like to be added? - description: A clear and concise description of the enhancement. + label: 'What would you like to be added?' + description: 'A clear and concise description of the enhancement.' validations: required: true - - type: textarea - id: rationale + - type: 'textarea' + id: 'rationale' attributes: - label: Why is this needed? - description: A clear and concise description of why this enhancement is needed. + label: 'Why is this needed?' 
+ description: 'A clear and concise description of why this enhancement is needed.' validations: required: true - - type: textarea - id: additional-context + - type: 'textarea' + id: 'additional-context' attributes: - label: Additional context - description: Add any other context or screenshots about the feature request here. + label: 'Additional context' + description: 'Add any other context or screenshots about the feature request here.' diff --git a/.github/workflows/docs-page-action.yml b/.github/workflows/docs-page-action.yml index 59588109..2d485278 100644 --- a/.github/workflows/docs-page-action.yml +++ b/.github/workflows/docs-page-action.yml @@ -47,4 +47,4 @@ jobs: steps: - name: 'Deploy to GitHub Pages' id: 'deployment' - uses: actions/deploy-pages@d6db90164ac5ed86f2b6aed7e0febac5b3c0c03e # ratchet:actions/deploy-pages@v4 + uses: 'actions/deploy-pages@d6db90164ac5ed86f2b6aed7e0febac5b3c0c03e' # ratchet:actions/deploy-pages@v4 From 150103e5ddaa3d6790f7d64e86b0e0deed576ad8 Mon Sep 17 00:00:00 2001 From: Richie Foreman Date: Wed, 13 Aug 2025 11:42:38 -0400 Subject: [PATCH 22/45] chore(cli/slashcommands): Add status enum to SlashCommandEvent telemetry (#6023) --- .../ui/hooks/slashCommandProcessor.test.ts | 110 ++++++++++---- .../cli/src/ui/hooks/slashCommandProcessor.ts | 136 ++++++++++-------- packages/core/index.ts | 1 + .../clearcut-logger/clearcut-logger.ts | 7 + .../clearcut-logger/event-metadata-key.ts | 3 + packages/core/src/telemetry/index.ts | 2 + packages/core/src/telemetry/types.ts | 86 ++++++----- 7 files changed, 220 insertions(+), 125 deletions(-) diff --git a/packages/cli/src/ui/hooks/slashCommandProcessor.test.ts b/packages/cli/src/ui/hooks/slashCommandProcessor.test.ts index 66c1b883..24880fc1 100644 --- a/packages/cli/src/ui/hooks/slashCommandProcessor.test.ts +++ b/packages/cli/src/ui/hooks/slashCommandProcessor.test.ts @@ -4,18 +4,17 @@ * SPDX-License-Identifier: Apache-2.0 */ -const { logSlashCommand, SlashCommandEvent } = vi.hoisted(() 
=> ({ +const { logSlashCommand } = vi.hoisted(() => ({ logSlashCommand: vi.fn(), - SlashCommandEvent: vi.fn((command, subCommand) => ({ command, subCommand })), })); vi.mock('@google/gemini-cli-core', async (importOriginal) => { const original = await importOriginal(); + return { ...original, logSlashCommand, - SlashCommandEvent, getIdeInstaller: vi.fn().mockReturnValue(null), }; }); @@ -25,10 +24,10 @@ const { mockProcessExit } = vi.hoisted(() => ({ })); vi.mock('node:process', () => { - const mockProcess = { + const mockProcess: Partial = { exit: mockProcessExit, - platform: 'test-platform', - }; + platform: 'sunos', + } as unknown as NodeJS.Process; return { ...mockProcess, default: mockProcess, @@ -77,22 +76,28 @@ import { ConfirmShellCommandsActionReturn, SlashCommand, } from '../commands/types.js'; -import { Config, ToolConfirmationOutcome } from '@google/gemini-cli-core'; +import { ToolConfirmationOutcome } from '@google/gemini-cli-core'; import { LoadedSettings } from '../../config/settings.js'; import { MessageType } from '../types.js'; import { BuiltinCommandLoader } from '../../services/BuiltinCommandLoader.js'; import { FileCommandLoader } from '../../services/FileCommandLoader.js'; import { McpPromptLoader } from '../../services/McpPromptLoader.js'; +import { + SlashCommandStatus, + makeFakeConfig, +} from '@google/gemini-cli-core/index.js'; -const createTestCommand = ( +function createTestCommand( overrides: Partial, kind: CommandKind = CommandKind.BUILT_IN, -): SlashCommand => ({ - name: 'test', - description: 'a test command', - kind, - ...overrides, -}); +): SlashCommand { + return { + name: 'test', + description: 'a test command', + kind, + ...overrides, + }; +} describe('useSlashCommandProcessor', () => { const mockAddItem = vi.fn(); @@ -102,15 +107,7 @@ describe('useSlashCommandProcessor', () => { const mockOpenAuthDialog = vi.fn(); const mockSetQuittingMessages = vi.fn(); - const mockConfig = { - getProjectRoot: vi.fn(() => '/mock/cwd'), - 
getSessionId: vi.fn(() => 'test-session'), - getGeminiClient: vi.fn(() => ({ - setHistory: vi.fn().mockResolvedValue(undefined), - })), - getExtensions: vi.fn(() => []), - getIdeMode: vi.fn(() => false), - } as unknown as Config; + const mockConfig = makeFakeConfig({}); const mockSettings = {} as LoadedSettings; @@ -884,7 +881,9 @@ describe('useSlashCommandProcessor', () => { const loggingTestCommands: SlashCommand[] = [ createTestCommand({ name: 'logtest', - action: mockCommandAction, + action: vi + .fn() + .mockResolvedValue({ type: 'message', content: 'hello world' }), }), createTestCommand({ name: 'logwithsub', @@ -895,6 +894,10 @@ describe('useSlashCommandProcessor', () => { }), ], }), + createTestCommand({ + name: 'fail', + action: vi.fn().mockRejectedValue(new Error('oh no!')), + }), createTestCommand({ name: 'logalias', altNames: ['la'], @@ -905,7 +908,6 @@ describe('useSlashCommandProcessor', () => { beforeEach(() => { mockCommandAction.mockClear(); vi.mocked(logSlashCommand).mockClear(); - vi.mocked(SlashCommandEvent).mockClear(); }); it('should log a simple slash command', async () => { @@ -917,8 +919,45 @@ describe('useSlashCommandProcessor', () => { await result.current.handleSlashCommand('/logtest'); }); - expect(logSlashCommand).toHaveBeenCalledTimes(1); - expect(SlashCommandEvent).toHaveBeenCalledWith('logtest', undefined); + expect(logSlashCommand).toHaveBeenCalledWith( + mockConfig, + expect.objectContaining({ + command: 'logtest', + subcommand: undefined, + status: SlashCommandStatus.SUCCESS, + }), + ); + }); + + it('logs nothing for a bogus command', async () => { + const result = setupProcessorHook(loggingTestCommands); + await waitFor(() => + expect(result.current.slashCommands.length).toBeGreaterThan(0), + ); + await act(async () => { + await result.current.handleSlashCommand('/bogusbogusbogus'); + }); + + expect(logSlashCommand).not.toHaveBeenCalled(); + }); + + it('logs a failure event for a failed command', async () => { + const result = 
setupProcessorHook(loggingTestCommands); + await waitFor(() => + expect(result.current.slashCommands.length).toBeGreaterThan(0), + ); + await act(async () => { + await result.current.handleSlashCommand('/fail'); + }); + + expect(logSlashCommand).toHaveBeenCalledWith( + mockConfig, + expect.objectContaining({ + command: 'fail', + status: 'error', + subcommand: undefined, + }), + ); }); it('should log a slash command with a subcommand', async () => { @@ -930,8 +969,13 @@ describe('useSlashCommandProcessor', () => { await result.current.handleSlashCommand('/logwithsub sub'); }); - expect(logSlashCommand).toHaveBeenCalledTimes(1); - expect(SlashCommandEvent).toHaveBeenCalledWith('logwithsub', 'sub'); + expect(logSlashCommand).toHaveBeenCalledWith( + mockConfig, + expect.objectContaining({ + command: 'logwithsub', + subcommand: 'sub', + }), + ); }); it('should log the command path when an alias is used', async () => { @@ -942,8 +986,12 @@ describe('useSlashCommandProcessor', () => { await act(async () => { await result.current.handleSlashCommand('/la'); }); - expect(logSlashCommand).toHaveBeenCalledTimes(1); - expect(SlashCommandEvent).toHaveBeenCalledWith('logalias', undefined); + expect(logSlashCommand).toHaveBeenCalledWith( + mockConfig, + expect.objectContaining({ + command: 'logalias', + }), + ); }); it('should not log for unknown commands', async () => { diff --git a/packages/cli/src/ui/hooks/slashCommandProcessor.ts b/packages/cli/src/ui/hooks/slashCommandProcessor.ts index b4ce0d4d..aaa2fbff 100644 --- a/packages/cli/src/ui/hooks/slashCommandProcessor.ts +++ b/packages/cli/src/ui/hooks/slashCommandProcessor.ts @@ -14,7 +14,8 @@ import { GitService, Logger, logSlashCommand, - SlashCommandEvent, + makeSlashCommandEvent, + SlashCommandStatus, ToolConfirmationOutcome, } from '@google/gemini-cli-core'; import { useSessionStats } from '../contexts/SessionContext.js'; @@ -229,76 +230,70 @@ export const useSlashCommandProcessor = ( overwriteConfirmed?: boolean, ): 
Promise => { setIsProcessing(true); - try { - if (typeof rawQuery !== 'string') { - return false; + + if (typeof rawQuery !== 'string') { + return false; + } + + const trimmed = rawQuery.trim(); + if (!trimmed.startsWith('/') && !trimmed.startsWith('?')) { + return false; + } + + const userMessageTimestamp = Date.now(); + addItem({ type: MessageType.USER, text: trimmed }, userMessageTimestamp); + + const parts = trimmed.substring(1).trim().split(/\s+/); + const commandPath = parts.filter((p) => p); // The parts of the command, e.g., ['memory', 'add'] + + let currentCommands = commands; + let commandToExecute: SlashCommand | undefined; + let pathIndex = 0; + let hasError = false; + const canonicalPath: string[] = []; + + for (const part of commandPath) { + // TODO: For better performance and architectural clarity, this two-pass + // search could be replaced. A more optimal approach would be to + // pre-compute a single lookup map in `CommandService.ts` that resolves + // all name and alias conflicts during the initial loading phase. The + // processor would then perform a single, fast lookup on that map. + + // First pass: check for an exact match on the primary command name. + let foundCommand = currentCommands.find((cmd) => cmd.name === part); + + // Second pass: if no primary name matches, check for an alias. 
+ if (!foundCommand) { + foundCommand = currentCommands.find((cmd) => + cmd.altNames?.includes(part), + ); } - const trimmed = rawQuery.trim(); - if (!trimmed.startsWith('/') && !trimmed.startsWith('?')) { - return false; - } - - const userMessageTimestamp = Date.now(); - addItem( - { type: MessageType.USER, text: trimmed }, - userMessageTimestamp, - ); - - const parts = trimmed.substring(1).trim().split(/\s+/); - const commandPath = parts.filter((p) => p); // The parts of the command, e.g., ['memory', 'add'] - - let currentCommands = commands; - let commandToExecute: SlashCommand | undefined; - let pathIndex = 0; - const canonicalPath: string[] = []; - - for (const part of commandPath) { - // TODO: For better performance and architectural clarity, this two-pass - // search could be replaced. A more optimal approach would be to - // pre-compute a single lookup map in `CommandService.ts` that resolves - // all name and alias conflicts during the initial loading phase. The - // processor would then perform a single, fast lookup on that map. - - // First pass: check for an exact match on the primary command name. - let foundCommand = currentCommands.find((cmd) => cmd.name === part); - - // Second pass: if no primary name matches, check for an alias. - if (!foundCommand) { - foundCommand = currentCommands.find((cmd) => - cmd.altNames?.includes(part), - ); - } - - if (foundCommand) { - commandToExecute = foundCommand; - canonicalPath.push(foundCommand.name); - pathIndex++; - if (foundCommand.subCommands) { - currentCommands = foundCommand.subCommands; - } else { - break; - } + if (foundCommand) { + commandToExecute = foundCommand; + canonicalPath.push(foundCommand.name); + pathIndex++; + if (foundCommand.subCommands) { + currentCommands = foundCommand.subCommands; } else { break; } + } else { + break; } + } + const resolvedCommandPath = canonicalPath; + const subcommand = + resolvedCommandPath.length > 1 + ? 
resolvedCommandPath.slice(1).join(' ') + : undefined; + + try { if (commandToExecute) { const args = parts.slice(pathIndex).join(' '); if (commandToExecute.action) { - if (config) { - const resolvedCommandPath = canonicalPath; - const event = new SlashCommandEvent( - resolvedCommandPath[0], - resolvedCommandPath.length > 1 - ? resolvedCommandPath.slice(1).join(' ') - : undefined, - ); - logSlashCommand(config, event); - } - const fullCommandContext: CommandContext = { ...commandContext, invocation: { @@ -320,7 +315,6 @@ export const useSlashCommandProcessor = ( ]), }; } - const result = await commandToExecute.action( fullCommandContext, args, @@ -493,8 +487,18 @@ export const useSlashCommandProcessor = ( content: `Unknown command: ${trimmed}`, timestamp: new Date(), }); + return { type: 'handled' }; - } catch (e) { + } catch (e: unknown) { + hasError = true; + if (config) { + const event = makeSlashCommandEvent({ + command: resolvedCommandPath[0], + subcommand, + status: SlashCommandStatus.ERROR, + }); + logSlashCommand(config, event); + } addItem( { type: MessageType.ERROR, @@ -504,6 +508,14 @@ export const useSlashCommandProcessor = ( ); return { type: 'handled' }; } finally { + if (config && resolvedCommandPath[0] && !hasError) { + const event = makeSlashCommandEvent({ + command: resolvedCommandPath[0], + subcommand, + status: SlashCommandStatus.SUCCESS, + }); + logSlashCommand(config, event); + } setIsProcessing(false); } }, diff --git a/packages/core/index.ts b/packages/core/index.ts index 65a214ae..7b75b365 100644 --- a/packages/core/index.ts +++ b/packages/core/index.ts @@ -15,3 +15,4 @@ export { IdeConnectionEvent, IdeConnectionType, } from './src/telemetry/types.js'; +export { makeFakeConfig } from './src/test-utils/config.js'; diff --git a/packages/core/src/telemetry/clearcut-logger/clearcut-logger.ts b/packages/core/src/telemetry/clearcut-logger/clearcut-logger.ts index a41f832d..0c13e864 100644 --- 
a/packages/core/src/telemetry/clearcut-logger/clearcut-logger.ts +++ b/packages/core/src/telemetry/clearcut-logger/clearcut-logger.ts @@ -637,6 +637,13 @@ export class ClearcutLogger { }); } + if (event.status) { + data.push({ + gemini_cli_key: EventMetadataKey.GEMINI_CLI_SLASH_COMMAND_STATUS, + value: JSON.stringify(event.status), + }); + } + this.enqueueLogEvent(this.createLogEvent(slash_command_event_name, data)); this.flushIfNeeded(); } diff --git a/packages/core/src/telemetry/clearcut-logger/event-metadata-key.ts b/packages/core/src/telemetry/clearcut-logger/event-metadata-key.ts index 81f41603..9dae3e0d 100644 --- a/packages/core/src/telemetry/clearcut-logger/event-metadata-key.ts +++ b/packages/core/src/telemetry/clearcut-logger/event-metadata-key.ts @@ -174,6 +174,9 @@ export enum EventMetadataKey { // Logs the subcommand of the slash command. GEMINI_CLI_SLASH_COMMAND_SUBCOMMAND = 42, + // Logs the status of the slash command (e.g. 'success', 'error') + GEMINI_CLI_SLASH_COMMAND_STATUS = 51, + // ========================================================================== // Next Speaker Check Event Keys // =========================================================================== diff --git a/packages/core/src/telemetry/index.ts b/packages/core/src/telemetry/index.ts index 6648b229..1663abdf 100644 --- a/packages/core/src/telemetry/index.ts +++ b/packages/core/src/telemetry/index.ts @@ -39,6 +39,8 @@ export { TelemetryEvent, FlashFallbackEvent, SlashCommandEvent, + makeSlashCommandEvent, + SlashCommandStatus, } from './types.js'; export { SpanStatusCode, ValueType } from '@opentelemetry/api'; export { SemanticAttributes } from '@opentelemetry/semantic-conventions'; diff --git a/packages/core/src/telemetry/types.ts b/packages/core/src/telemetry/types.ts index edcd8b1b..d590699c 100644 --- a/packages/core/src/telemetry/types.ts +++ b/packages/core/src/telemetry/types.ts @@ -14,9 +14,17 @@ import { ToolCallDecision, } from './tool-call-decision.js'; -export 
class StartSessionEvent { +interface BaseTelemetryEvent { + 'event.name': string; + /** Current timestamp in ISO 8601 format */ + 'event.timestamp': string; +} + +type CommonFields = keyof BaseTelemetryEvent; + +export class StartSessionEvent implements BaseTelemetryEvent { 'event.name': 'cli_config'; - 'event.timestamp': string; // ISO 8601 + 'event.timestamp': string; model: string; embedding_model: string; sandbox_enabled: boolean; @@ -60,9 +68,9 @@ export class StartSessionEvent { } } -export class EndSessionEvent { +export class EndSessionEvent implements BaseTelemetryEvent { 'event.name': 'end_session'; - 'event.timestamp': string; // ISO 8601 + 'event.timestamp': string; session_id?: string; constructor(config?: Config) { @@ -72,9 +80,9 @@ export class EndSessionEvent { } } -export class UserPromptEvent { +export class UserPromptEvent implements BaseTelemetryEvent { 'event.name': 'user_prompt'; - 'event.timestamp': string; // ISO 8601 + 'event.timestamp': string; prompt_length: number; prompt_id: string; auth_type?: string; @@ -95,9 +103,9 @@ export class UserPromptEvent { } } -export class ToolCallEvent { +export class ToolCallEvent implements BaseTelemetryEvent { 'event.name': 'tool_call'; - 'event.timestamp': string; // ISO 8601 + 'event.timestamp': string; function_name: string; function_args: Record; duration_ms: number; @@ -142,9 +150,9 @@ export class ToolCallEvent { } } -export class ApiRequestEvent { +export class ApiRequestEvent implements BaseTelemetryEvent { 'event.name': 'api_request'; - 'event.timestamp': string; // ISO 8601 + 'event.timestamp': string; model: string; prompt_id: string; request_text?: string; @@ -158,9 +166,9 @@ export class ApiRequestEvent { } } -export class ApiErrorEvent { +export class ApiErrorEvent implements BaseTelemetryEvent { 'event.name': 'api_error'; - 'event.timestamp': string; // ISO 8601 + 'event.timestamp': string; model: string; error: string; error_type?: string; @@ -190,9 +198,9 @@ export class ApiErrorEvent { 
} } -export class ApiResponseEvent { +export class ApiResponseEvent implements BaseTelemetryEvent { 'event.name': 'api_response'; - 'event.timestamp': string; // ISO 8601 + 'event.timestamp': string; model: string; status_code?: number | string; duration_ms: number; @@ -234,9 +242,9 @@ export class ApiResponseEvent { } } -export class FlashFallbackEvent { +export class FlashFallbackEvent implements BaseTelemetryEvent { 'event.name': 'flash_fallback'; - 'event.timestamp': string; // ISO 8601 + 'event.timestamp': string; auth_type: string; constructor(auth_type: string) { @@ -252,9 +260,9 @@ export enum LoopType { LLM_DETECTED_LOOP = 'llm_detected_loop', } -export class LoopDetectedEvent { +export class LoopDetectedEvent implements BaseTelemetryEvent { 'event.name': 'loop_detected'; - 'event.timestamp': string; // ISO 8601 + 'event.timestamp': string; loop_type: LoopType; prompt_id: string; @@ -266,9 +274,9 @@ export class LoopDetectedEvent { } } -export class NextSpeakerCheckEvent { +export class NextSpeakerCheckEvent implements BaseTelemetryEvent { 'event.name': 'next_speaker_check'; - 'event.timestamp': string; // ISO 8601 + 'event.timestamp': string; prompt_id: string; finish_reason: string; result: string; @@ -282,23 +290,36 @@ export class NextSpeakerCheckEvent { } } -export class SlashCommandEvent { +export interface SlashCommandEvent extends BaseTelemetryEvent { 'event.name': 'slash_command'; 'event.timestamp': string; // ISO 8106 command: string; subcommand?: string; - - constructor(command: string, subcommand?: string) { - this['event.name'] = 'slash_command'; - this['event.timestamp'] = new Date().toISOString(); - this.command = command; - this.subcommand = subcommand; - } + status?: SlashCommandStatus; } -export class MalformedJsonResponseEvent { +export function makeSlashCommandEvent({ + command, + subcommand, + status, +}: Omit): SlashCommandEvent { + return { + 'event.name': 'slash_command', + 'event.timestamp': new Date().toISOString(), + command, + 
subcommand, + status, + }; +} + +export enum SlashCommandStatus { + SUCCESS = 'success', + ERROR = 'error', +} + +export class MalformedJsonResponseEvent implements BaseTelemetryEvent { 'event.name': 'malformed_json_response'; - 'event.timestamp': string; // ISO 8601 + 'event.timestamp': string; model: string; constructor(model: string) { @@ -315,7 +336,7 @@ export enum IdeConnectionType { export class IdeConnectionEvent { 'event.name': 'ide_connection'; - 'event.timestamp': string; // ISO 8601 + 'event.timestamp': string; connection_type: IdeConnectionType; constructor(connection_type: IdeConnectionType) { @@ -338,4 +359,5 @@ export type TelemetryEvent = | NextSpeakerCheckEvent | SlashCommandEvent | MalformedJsonResponseEvent - | IdeConnectionEvent; + | IdeConnectionEvent + | SlashCommandEvent; From d3fda9dafb3921c9edd5cf4fc166dedecd91d84f Mon Sep 17 00:00:00 2001 From: Agus Zubiaga Date: Wed, 13 Aug 2025 12:58:26 -0300 Subject: [PATCH 23/45] Zed integration schema upgrade (#5536) Co-authored-by: Conrad Irwin Co-authored-by: Ben Brandt --- packages/cli/src/acp/acp.ts | 464 ------------- packages/cli/src/config/config.ts | 13 +- packages/cli/src/gemini.tsx | 6 +- .../cli/src/ui/hooks/useToolScheduler.test.ts | 4 +- packages/cli/src/zed-integration/acp.ts | 366 ++++++++++ packages/cli/src/zed-integration/schema.ts | 457 +++++++++++++ .../zedIntegration.ts} | 632 +++++++++++------- packages/core/src/config/config.ts | 11 +- .../core/src/core/coreToolScheduler.test.ts | 4 +- packages/core/src/test-utils/tools.ts | 4 +- packages/core/src/tools/edit.ts | 4 +- packages/core/src/tools/glob.ts | 4 +- packages/core/src/tools/grep.ts | 4 +- packages/core/src/tools/ls.ts | 4 +- packages/core/src/tools/mcp-tool.ts | 4 +- packages/core/src/tools/memoryTool.ts | 4 +- packages/core/src/tools/read-file.ts | 4 +- packages/core/src/tools/read-many-files.ts | 4 +- packages/core/src/tools/shell.ts | 4 +- packages/core/src/tools/tool-registry.ts | 4 +- packages/core/src/tools/tools.ts 
| 29 +- packages/core/src/tools/web-fetch.ts | 4 +- packages/core/src/tools/web-search.ts | 4 +- packages/core/src/tools/write-file.ts | 9 +- 24 files changed, 1293 insertions(+), 754 deletions(-) delete mode 100644 packages/cli/src/acp/acp.ts create mode 100644 packages/cli/src/zed-integration/acp.ts create mode 100644 packages/cli/src/zed-integration/schema.ts rename packages/cli/src/{acp/acpPeer.ts => zed-integration/zedIntegration.ts} (54%) diff --git a/packages/cli/src/acp/acp.ts b/packages/cli/src/acp/acp.ts deleted file mode 100644 index 1fbdf7a8..00000000 --- a/packages/cli/src/acp/acp.ts +++ /dev/null @@ -1,464 +0,0 @@ -/** - * @license - * Copyright 2025 Google LLC - * SPDX-License-Identifier: Apache-2.0 - */ - -/* ACP defines a schema for a simple (experimental) JSON-RPC protocol that allows GUI applications to interact with agents. */ - -import { Icon } from '@google/gemini-cli-core'; -import { WritableStream, ReadableStream } from 'node:stream/web'; - -export class ClientConnection implements Client { - #connection: Connection; - - constructor( - agent: (client: Client) => Agent, - input: WritableStream, - output: ReadableStream, - ) { - this.#connection = new Connection(agent(this), input, output); - } - - /** - * Streams part of an assistant response to the client - */ - async streamAssistantMessageChunk( - params: StreamAssistantMessageChunkParams, - ): Promise { - await this.#connection.sendRequest('streamAssistantMessageChunk', params); - } - - /** - * Request confirmation before running a tool - * - * When allowed, the client returns a [`ToolCallId`] which can be used - * to update the tool call's `status` and `content` as it runs. - */ - requestToolCallConfirmation( - params: RequestToolCallConfirmationParams, - ): Promise { - return this.#connection.sendRequest('requestToolCallConfirmation', params); - } - - /** - * pushToolCall allows the agent to start a tool call - * when it does not need to request permission to do so. 
- * - * The returned id can be used to update the UI for the tool - * call as needed. - */ - pushToolCall(params: PushToolCallParams): Promise { - return this.#connection.sendRequest('pushToolCall', params); - } - - /** - * updateToolCall allows the agent to update the content and status of the tool call. - * - * The new content replaces what is currently displayed in the UI. - * - * The [`ToolCallId`] is included in the response of - * `pushToolCall` or `requestToolCallConfirmation` respectively. - */ - async updateToolCall(params: UpdateToolCallParams): Promise { - await this.#connection.sendRequest('updateToolCall', params); - } -} - -type AnyMessage = AnyRequest | AnyResponse; - -type AnyRequest = { - id: number; - method: string; - params?: unknown; -}; - -type AnyResponse = { jsonrpc: '2.0'; id: number } & Result; - -type Result = - | { - result: T; - } - | { - error: ErrorResponse; - }; - -type ErrorResponse = { - code: number; - message: string; - data?: { details?: string }; -}; - -type PendingResponse = { - resolve: (response: unknown) => void; - reject: (error: ErrorResponse) => void; -}; - -class Connection { - #pendingResponses: Map = new Map(); - #nextRequestId: number = 0; - #delegate: D; - #peerInput: WritableStream; - #writeQueue: Promise = Promise.resolve(); - #textEncoder: TextEncoder; - - constructor( - delegate: D, - peerInput: WritableStream, - peerOutput: ReadableStream, - ) { - this.#peerInput = peerInput; - this.#textEncoder = new TextEncoder(); - - this.#delegate = delegate; - this.#receive(peerOutput); - } - - async #receive(output: ReadableStream) { - let content = ''; - const decoder = new TextDecoder(); - for await (const chunk of output) { - content += decoder.decode(chunk, { stream: true }); - const lines = content.split('\n'); - content = lines.pop() || ''; - - for (const line of lines) { - const trimmedLine = line.trim(); - - if (trimmedLine) { - const message = JSON.parse(trimmedLine); - this.#processMessage(message); - } - } - } 
- } - - async #processMessage(message: AnyMessage) { - if ('method' in message) { - const response = await this.#tryCallDelegateMethod( - message.method, - message.params, - ); - - await this.#sendMessage({ - jsonrpc: '2.0', - id: message.id, - ...response, - }); - } else { - this.#handleResponse(message); - } - } - - async #tryCallDelegateMethod( - method: string, - params?: unknown, - ): Promise> { - const methodName = method as keyof D; - if (typeof this.#delegate[methodName] !== 'function') { - return RequestError.methodNotFound(method).toResult(); - } - - try { - const result = await this.#delegate[methodName](params); - return { result: result ?? null }; - } catch (error: unknown) { - if (error instanceof RequestError) { - return error.toResult(); - } - - let details; - - if (error instanceof Error) { - details = error.message; - } else if ( - typeof error === 'object' && - error != null && - 'message' in error && - typeof error.message === 'string' - ) { - details = error.message; - } - - return RequestError.internalError(details).toResult(); - } - } - - #handleResponse(response: AnyResponse) { - const pendingResponse = this.#pendingResponses.get(response.id); - if (pendingResponse) { - if ('result' in response) { - pendingResponse.resolve(response.result); - } else if ('error' in response) { - pendingResponse.reject(response.error); - } - this.#pendingResponses.delete(response.id); - } - } - - async sendRequest(method: string, params?: Req): Promise { - const id = this.#nextRequestId++; - const responsePromise = new Promise((resolve, reject) => { - this.#pendingResponses.set(id, { resolve, reject }); - }); - await this.#sendMessage({ jsonrpc: '2.0', id, method, params }); - return responsePromise as Promise; - } - - async #sendMessage(json: AnyMessage) { - const content = JSON.stringify(json) + '\n'; - this.#writeQueue = this.#writeQueue - .then(async () => { - const writer = this.#peerInput.getWriter(); - try { - await 
writer.write(this.#textEncoder.encode(content)); - } finally { - writer.releaseLock(); - } - }) - .catch((error) => { - // Continue processing writes on error - console.error('ACP write error:', error); - }); - return this.#writeQueue; - } -} - -export class RequestError extends Error { - data?: { details?: string }; - - constructor( - public code: number, - message: string, - details?: string, - ) { - super(message); - this.name = 'RequestError'; - if (details) { - this.data = { details }; - } - } - - static parseError(details?: string): RequestError { - return new RequestError(-32700, 'Parse error', details); - } - - static invalidRequest(details?: string): RequestError { - return new RequestError(-32600, 'Invalid request', details); - } - - static methodNotFound(details?: string): RequestError { - return new RequestError(-32601, 'Method not found', details); - } - - static invalidParams(details?: string): RequestError { - return new RequestError(-32602, 'Invalid params', details); - } - - static internalError(details?: string): RequestError { - return new RequestError(-32603, 'Internal error', details); - } - - toResult(): Result { - return { - error: { - code: this.code, - message: this.message, - data: this.data, - }, - }; - } -} - -// Protocol types - -export const LATEST_PROTOCOL_VERSION = '0.0.9'; - -export type AssistantMessageChunk = - | { - text: string; - } - | { - thought: string; - }; - -export type ToolCallConfirmation = - | { - description?: string | null; - type: 'edit'; - } - | { - description?: string | null; - type: 'execute'; - command: string; - rootCommand: string; - } - | { - description?: string | null; - type: 'mcp'; - serverName: string; - toolDisplayName: string; - toolName: string; - } - | { - description?: string | null; - type: 'fetch'; - urls: string[]; - } - | { - description: string; - type: 'other'; - }; - -export type ToolCallContent = - | { - type: 'markdown'; - markdown: string; - } - | { - type: 'diff'; - newText: string; - 
oldText: string | null; - path: string; - }; - -export type ToolCallStatus = 'running' | 'finished' | 'error'; - -export type ToolCallId = number; - -export type ToolCallConfirmationOutcome = - | 'allow' - | 'alwaysAllow' - | 'alwaysAllowMcpServer' - | 'alwaysAllowTool' - | 'reject' - | 'cancel'; - -/** - * A part in a user message - */ -export type UserMessageChunk = - | { - text: string; - } - | { - path: string; - }; - -export interface StreamAssistantMessageChunkParams { - chunk: AssistantMessageChunk; -} - -export interface RequestToolCallConfirmationParams { - confirmation: ToolCallConfirmation; - content?: ToolCallContent | null; - icon: Icon; - label: string; - locations?: ToolCallLocation[]; -} - -export interface ToolCallLocation { - line?: number | null; - path: string; -} - -export interface PushToolCallParams { - content?: ToolCallContent | null; - icon: Icon; - label: string; - locations?: ToolCallLocation[]; -} - -export interface UpdateToolCallParams { - content: ToolCallContent | null; - status: ToolCallStatus; - toolCallId: ToolCallId; -} - -export interface RequestToolCallConfirmationResponse { - id: ToolCallId; - outcome: ToolCallConfirmationOutcome; -} - -export interface PushToolCallResponse { - id: ToolCallId; -} - -export interface InitializeParams { - /** - * The version of the protocol that the client supports. - * This should be the latest version supported by the client. - */ - protocolVersion: string; -} - -export interface SendUserMessageParams { - chunks: UserMessageChunk[]; -} - -export interface InitializeResponse { - /** - * Indicates whether the agent is authenticated and - * ready to handle requests. - */ - isAuthenticated: boolean; - /** - * The version of the protocol that the agent supports. - * If the agent supports the requested version, it should respond with the same version. - * Otherwise, the agent should respond with the latest version it supports. 
- */ - protocolVersion: string; -} - -export interface Error { - code: number; - data?: unknown; - message: string; -} - -export interface Client { - streamAssistantMessageChunk( - params: StreamAssistantMessageChunkParams, - ): Promise; - - requestToolCallConfirmation( - params: RequestToolCallConfirmationParams, - ): Promise; - - pushToolCall(params: PushToolCallParams): Promise; - - updateToolCall(params: UpdateToolCallParams): Promise; -} - -export interface Agent { - /** - * Initializes the agent's state. It should be called before any other method, - * and no other methods should be called until it has completed. - * - * If the agent is not authenticated, then the client should prompt the user to authenticate, - * and then call the `authenticate` method. - * Otherwise the client can send other messages to the agent. - */ - initialize(params: InitializeParams): Promise; - - /** - * Begins the authentication process. - * - * This method should only be called if `initialize` indicates the user isn't already authenticated. - * The Promise MUST not resolve until authentication is complete. - */ - authenticate(): Promise; - - /** - * Allows the user to send a message to the agent. - * This method should complete after the agent is finished, during - * which time the agent may update the client by calling - * streamAssistantMessageChunk and other methods. - */ - sendUserMessage(params: SendUserMessageParams): Promise; - - /** - * Cancels the current generation. 
- */ - cancelSendMessage(): Promise; -} diff --git a/packages/cli/src/config/config.ts b/packages/cli/src/config/config.ts index dd207ff2..636696fa 100644 --- a/packages/cli/src/config/config.ts +++ b/packages/cli/src/config/config.ts @@ -304,6 +304,7 @@ export async function loadCliConfig( extensions: Extension[], sessionId: string, argv: CliArgs, + cwd: string = process.cwd(), ): Promise { const debugMode = argv.debug || @@ -343,7 +344,7 @@ export async function loadCliConfig( (e) => e.contextFiles, ); - const fileService = new FileDiscoveryService(process.cwd()); + const fileService = new FileDiscoveryService(cwd); const fileFiltering = { ...DEFAULT_MEMORY_FILE_FILTERING_OPTIONS, @@ -356,7 +357,7 @@ export async function loadCliConfig( // Call the (now wrapper) loadHierarchicalGeminiMemory which calls the server's version const { memoryContent, fileCount } = await loadHierarchicalGeminiMemory( - process.cwd(), + cwd, settings.loadMemoryFromIncludeDirectories ? includeDirectories : [], debugMode, fileService, @@ -398,7 +399,7 @@ export async function loadCliConfig( !!argv.promptInteractive || (process.stdin.isTTY && question.length === 0); // In non-interactive mode, exclude tools that require a prompt. const extraExcludes: string[] = []; - if (!interactive) { + if (!interactive && !argv.experimentalAcp) { switch (approvalMode) { case ApprovalMode.DEFAULT: // In default non-interactive mode, all tools that require approval are excluded. 
@@ -457,7 +458,7 @@ export async function loadCliConfig( sessionId, embeddingModel: DEFAULT_GEMINI_EMBEDDING_MODEL, sandbox: sandboxConfig, - targetDir: process.cwd(), + targetDir: cwd, includeDirectories, loadMemoryFromIncludeDirectories: settings.loadMemoryFromIncludeDirectories || false, @@ -505,13 +506,13 @@ export async function loadCliConfig( process.env.https_proxy || process.env.HTTP_PROXY || process.env.http_proxy, - cwd: process.cwd(), + cwd, fileDiscoveryService: fileService, bugCommand: settings.bugCommand, model: argv.model || settings.model || DEFAULT_GEMINI_MODEL, extensionContextFilePaths, maxSessionTurns: settings.maxSessionTurns ?? -1, - experimentalAcp: argv.experimentalAcp || false, + experimentalZedIntegration: argv.experimentalAcp || false, listExtensions: argv.listExtensions || false, extensions: allExtensions, blockedMcpServers, diff --git a/packages/cli/src/gemini.tsx b/packages/cli/src/gemini.tsx index acc9c4b2..68f948da 100644 --- a/packages/cli/src/gemini.tsx +++ b/packages/cli/src/gemini.tsx @@ -106,7 +106,7 @@ async function relaunchWithAdditionalArgs(additionalArgs: string[]) { await new Promise((resolve) => child.on('close', resolve)); process.exit(0); } -import { runAcpPeer } from './acp/acpPeer.js'; +import { runZedIntegration } from './zed-integration/zedIntegration.js'; export function setupUnhandledRejectionHandler() { let unhandledRejectionOccurred = false; @@ -250,8 +250,8 @@ export async function main() { await getOauthClient(settings.merged.selectedAuthType, config); } - if (config.getExperimentalAcp()) { - return runAcpPeer(config, settings); + if (config.getExperimentalZedIntegration()) { + return runZedIntegration(config, settings, extensions, argv); } let input = config.getQuestion(); diff --git a/packages/cli/src/ui/hooks/useToolScheduler.test.ts b/packages/cli/src/ui/hooks/useToolScheduler.test.ts index ee5251d3..64b064e2 100644 --- a/packages/cli/src/ui/hooks/useToolScheduler.test.ts +++ 
b/packages/cli/src/ui/hooks/useToolScheduler.test.ts @@ -23,7 +23,7 @@ import { ToolCall, // Import from core Status as ToolCallStatusType, ApprovalMode, - Icon, + Kind, BaseTool, AnyDeclarativeTool, AnyToolInvocation, @@ -67,7 +67,7 @@ class MockTool extends BaseTool { name, displayName, 'A mock tool for testing', - Icon.Hammer, + Kind.Other, {}, isOutputMarkdown, canUpdateOutput, diff --git a/packages/cli/src/zed-integration/acp.ts b/packages/cli/src/zed-integration/acp.ts new file mode 100644 index 00000000..eef4e1ee --- /dev/null +++ b/packages/cli/src/zed-integration/acp.ts @@ -0,0 +1,366 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +/* ACP defines a schema for a simple (experimental) JSON-RPC protocol that allows GUI applications to interact with agents. */ + +import { z } from 'zod'; +import * as schema from './schema.js'; +export * from './schema.js'; + +import { WritableStream, ReadableStream } from 'node:stream/web'; + +export class AgentSideConnection implements Client { + #connection: Connection; + + constructor( + toAgent: (conn: Client) => Agent, + input: WritableStream, + output: ReadableStream, + ) { + const agent = toAgent(this); + + const handler = async ( + method: string, + params: unknown, + ): Promise => { + switch (method) { + case schema.AGENT_METHODS.initialize: { + const validatedParams = schema.initializeRequestSchema.parse(params); + return agent.initialize(validatedParams); + } + case schema.AGENT_METHODS.session_new: { + const validatedParams = schema.newSessionRequestSchema.parse(params); + return agent.newSession(validatedParams); + } + case schema.AGENT_METHODS.session_load: { + if (!agent.loadSession) { + throw RequestError.methodNotFound(); + } + const validatedParams = schema.loadSessionRequestSchema.parse(params); + return agent.loadSession(validatedParams); + } + case schema.AGENT_METHODS.authenticate: { + const validatedParams = + 
schema.authenticateRequestSchema.parse(params); + return agent.authenticate(validatedParams); + } + case schema.AGENT_METHODS.session_prompt: { + const validatedParams = schema.promptRequestSchema.parse(params); + return agent.prompt(validatedParams); + } + case schema.AGENT_METHODS.session_cancel: { + const validatedParams = schema.cancelNotificationSchema.parse(params); + return agent.cancel(validatedParams); + } + default: + throw RequestError.methodNotFound(method); + } + }; + + this.#connection = new Connection(handler, input, output); + } + + /** + * Streams new content to the client including text, tool calls, etc. + */ + async sessionUpdate(params: schema.SessionNotification): Promise { + return await this.#connection.sendNotification( + schema.CLIENT_METHODS.session_update, + params, + ); + } + + /** + * Request permission before running a tool + * + * The agent specifies a series of permission options with different granularity, + * and the client returns the chosen one. + */ + async requestPermission( + params: schema.RequestPermissionRequest, + ): Promise { + return await this.#connection.sendRequest( + schema.CLIENT_METHODS.session_request_permission, + params, + ); + } + + async readTextFile( + params: schema.ReadTextFileRequest, + ): Promise { + return await this.#connection.sendRequest( + schema.CLIENT_METHODS.fs_read_text_file, + params, + ); + } + + async writeTextFile( + params: schema.WriteTextFileRequest, + ): Promise { + return await this.#connection.sendRequest( + schema.CLIENT_METHODS.fs_write_text_file, + params, + ); + } +} + +type AnyMessage = AnyRequest | AnyResponse | AnyNotification; + +type AnyRequest = { + jsonrpc: '2.0'; + id: string | number; + method: string; + params?: unknown; +}; + +type AnyResponse = { + jsonrpc: '2.0'; + id: string | number; +} & Result; + +type AnyNotification = { + jsonrpc: '2.0'; + method: string; + params?: unknown; +}; + +type Result = + | { + result: T; + } + | { + error: ErrorResponse; + }; + +type 
ErrorResponse = { + code: number; + message: string; + data?: unknown; +}; + +type PendingResponse = { + resolve: (response: unknown) => void; + reject: (error: ErrorResponse) => void; +}; + +type MethodHandler = (method: string, params: unknown) => Promise; + +class Connection { + #pendingResponses: Map = new Map(); + #nextRequestId: number = 0; + #handler: MethodHandler; + #peerInput: WritableStream; + #writeQueue: Promise = Promise.resolve(); + #textEncoder: TextEncoder; + + constructor( + handler: MethodHandler, + peerInput: WritableStream, + peerOutput: ReadableStream, + ) { + this.#handler = handler; + this.#peerInput = peerInput; + this.#textEncoder = new TextEncoder(); + this.#receive(peerOutput); + } + + async #receive(output: ReadableStream) { + let content = ''; + const decoder = new TextDecoder(); + for await (const chunk of output) { + content += decoder.decode(chunk, { stream: true }); + const lines = content.split('\n'); + content = lines.pop() || ''; + + for (const line of lines) { + const trimmedLine = line.trim(); + + if (trimmedLine) { + const message = JSON.parse(trimmedLine); + this.#processMessage(message); + } + } + } + } + + async #processMessage(message: AnyMessage) { + if ('method' in message && 'id' in message) { + // It's a request + const response = await this.#tryCallHandler( + message.method, + message.params, + ); + + await this.#sendMessage({ + jsonrpc: '2.0', + id: message.id, + ...response, + }); + } else if ('method' in message) { + // It's a notification + await this.#tryCallHandler(message.method, message.params); + } else if ('id' in message) { + // It's a response + this.#handleResponse(message as AnyResponse); + } + } + + async #tryCallHandler( + method: string, + params?: unknown, + ): Promise> { + try { + const result = await this.#handler(method, params); + return { result: result ?? 
null }; + } catch (error: unknown) { + if (error instanceof RequestError) { + return error.toResult(); + } + + if (error instanceof z.ZodError) { + return RequestError.invalidParams( + JSON.stringify(error.format(), undefined, 2), + ).toResult(); + } + + let details; + + if (error instanceof Error) { + details = error.message; + } else if ( + typeof error === 'object' && + error != null && + 'message' in error && + typeof error.message === 'string' + ) { + details = error.message; + } + + return RequestError.internalError(details).toResult(); + } + } + + #handleResponse(response: AnyResponse) { + const pendingResponse = this.#pendingResponses.get(response.id); + if (pendingResponse) { + if ('result' in response) { + pendingResponse.resolve(response.result); + } else if ('error' in response) { + pendingResponse.reject(response.error); + } + this.#pendingResponses.delete(response.id); + } + } + + async sendRequest(method: string, params?: Req): Promise { + const id = this.#nextRequestId++; + const responsePromise = new Promise((resolve, reject) => { + this.#pendingResponses.set(id, { resolve, reject }); + }); + await this.#sendMessage({ jsonrpc: '2.0', id, method, params }); + return responsePromise as Promise; + } + + async sendNotification(method: string, params?: N): Promise { + await this.#sendMessage({ jsonrpc: '2.0', method, params }); + } + + async #sendMessage(json: AnyMessage) { + const content = JSON.stringify(json) + '\n'; + this.#writeQueue = this.#writeQueue + .then(async () => { + const writer = this.#peerInput.getWriter(); + try { + await writer.write(this.#textEncoder.encode(content)); + } finally { + writer.releaseLock(); + } + }) + .catch((error) => { + // Continue processing writes on error + console.error('ACP write error:', error); + }); + return this.#writeQueue; + } +} + +export class RequestError extends Error { + data?: { details?: string }; + + constructor( + public code: number, + message: string, + details?: string, + ) { + super(message); 
+ this.name = 'RequestError'; + if (details) { + this.data = { details }; + } + } + + static parseError(details?: string): RequestError { + return new RequestError(-32700, 'Parse error', details); + } + + static invalidRequest(details?: string): RequestError { + return new RequestError(-32600, 'Invalid request', details); + } + + static methodNotFound(details?: string): RequestError { + return new RequestError(-32601, 'Method not found', details); + } + + static invalidParams(details?: string): RequestError { + return new RequestError(-32602, 'Invalid params', details); + } + + static internalError(details?: string): RequestError { + return new RequestError(-32603, 'Internal error', details); + } + + static authRequired(details?: string): RequestError { + return new RequestError(-32000, 'Authentication required', details); + } + + toResult(): Result { + return { + error: { + code: this.code, + message: this.message, + data: this.data, + }, + }; + } +} + +export interface Client { + requestPermission( + params: schema.RequestPermissionRequest, + ): Promise; + sessionUpdate(params: schema.SessionNotification): Promise; + writeTextFile( + params: schema.WriteTextFileRequest, + ): Promise; + readTextFile( + params: schema.ReadTextFileRequest, + ): Promise; +} + +export interface Agent { + initialize( + params: schema.InitializeRequest, + ): Promise; + newSession( + params: schema.NewSessionRequest, + ): Promise; + loadSession?( + params: schema.LoadSessionRequest, + ): Promise; + authenticate(params: schema.AuthenticateRequest): Promise; + prompt(params: schema.PromptRequest): Promise; + cancel(params: schema.CancelNotification): Promise; +} diff --git a/packages/cli/src/zed-integration/schema.ts b/packages/cli/src/zed-integration/schema.ts new file mode 100644 index 00000000..4c962131 --- /dev/null +++ b/packages/cli/src/zed-integration/schema.ts @@ -0,0 +1,457 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { 
z } from 'zod'; + +export const AGENT_METHODS = { + authenticate: 'authenticate', + initialize: 'initialize', + session_cancel: 'session/cancel', + session_load: 'session/load', + session_new: 'session/new', + session_prompt: 'session/prompt', +}; + +export const CLIENT_METHODS = { + fs_read_text_file: 'fs/read_text_file', + fs_write_text_file: 'fs/write_text_file', + session_request_permission: 'session/request_permission', + session_update: 'session/update', +}; + +export const PROTOCOL_VERSION = 1; + +export type WriteTextFileRequest = z.infer; + +export type ReadTextFileRequest = z.infer; + +export type PermissionOptionKind = z.infer; + +export type Role = z.infer; + +export type TextResourceContents = z.infer; + +export type BlobResourceContents = z.infer; + +export type ToolKind = z.infer; + +export type ToolCallStatus = z.infer; + +export type WriteTextFileResponse = z.infer; + +export type ReadTextFileResponse = z.infer; + +export type RequestPermissionOutcome = z.infer< + typeof requestPermissionOutcomeSchema +>; + +export type CancelNotification = z.infer; + +export type AuthenticateRequest = z.infer; + +export type AuthenticateResponse = z.infer; + +export type NewSessionResponse = z.infer; + +export type LoadSessionResponse = z.infer; + +export type StopReason = z.infer; + +export type PromptResponse = z.infer; + +export type ToolCallLocation = z.infer; + +export type PlanEntry = z.infer; + +export type PermissionOption = z.infer; + +export type Annotations = z.infer; + +export type RequestPermissionResponse = z.infer< + typeof requestPermissionResponseSchema +>; + +export type FileSystemCapability = z.infer; + +export type EnvVariable = z.infer; + +export type McpServer = z.infer; + +export type AgentCapabilities = z.infer; + +export type AuthMethod = z.infer; + +export type ClientResponse = z.infer; + +export type ClientNotification = z.infer; + +export type EmbeddedResourceResource = z.infer< + typeof embeddedResourceResourceSchema +>; + +export type 
NewSessionRequest = z.infer; + +export type LoadSessionRequest = z.infer; + +export type InitializeResponse = z.infer; + +export type ContentBlock = z.infer; + +export type ToolCallContent = z.infer; + +export type ToolCall = z.infer; + +export type ClientCapabilities = z.infer; + +export type PromptRequest = z.infer; + +export type SessionUpdate = z.infer; + +export type AgentResponse = z.infer; + +export type RequestPermissionRequest = z.infer< + typeof requestPermissionRequestSchema +>; + +export type InitializeRequest = z.infer; + +export type SessionNotification = z.infer; + +export type ClientRequest = z.infer; + +export type AgentRequest = z.infer; + +export type AgentNotification = z.infer; + +export const writeTextFileRequestSchema = z.object({ + content: z.string(), + path: z.string(), + sessionId: z.string(), +}); + +export const readTextFileRequestSchema = z.object({ + limit: z.number().optional().nullable(), + line: z.number().optional().nullable(), + path: z.string(), + sessionId: z.string(), +}); + +export const permissionOptionKindSchema = z.union([ + z.literal('allow_once'), + z.literal('allow_always'), + z.literal('reject_once'), + z.literal('reject_always'), +]); + +export const roleSchema = z.union([z.literal('assistant'), z.literal('user')]); + +export const textResourceContentsSchema = z.object({ + mimeType: z.string().optional().nullable(), + text: z.string(), + uri: z.string(), +}); + +export const blobResourceContentsSchema = z.object({ + blob: z.string(), + mimeType: z.string().optional().nullable(), + uri: z.string(), +}); + +export const toolKindSchema = z.union([ + z.literal('read'), + z.literal('edit'), + z.literal('delete'), + z.literal('move'), + z.literal('search'), + z.literal('execute'), + z.literal('think'), + z.literal('fetch'), + z.literal('other'), +]); + +export const toolCallStatusSchema = z.union([ + z.literal('pending'), + z.literal('in_progress'), + z.literal('completed'), + z.literal('failed'), +]); + +export const 
writeTextFileResponseSchema = z.null(); + +export const readTextFileResponseSchema = z.object({ + content: z.string(), +}); + +export const requestPermissionOutcomeSchema = z.union([ + z.object({ + outcome: z.literal('cancelled'), + }), + z.object({ + optionId: z.string(), + outcome: z.literal('selected'), + }), +]); + +export const cancelNotificationSchema = z.object({ + sessionId: z.string(), +}); + +export const authenticateRequestSchema = z.object({ + methodId: z.string(), +}); + +export const authenticateResponseSchema = z.null(); + +export const newSessionResponseSchema = z.object({ + sessionId: z.string(), +}); + +export const loadSessionResponseSchema = z.null(); + +export const stopReasonSchema = z.union([ + z.literal('end_turn'), + z.literal('max_tokens'), + z.literal('refusal'), + z.literal('cancelled'), +]); + +export const promptResponseSchema = z.object({ + stopReason: stopReasonSchema, +}); + +export const toolCallLocationSchema = z.object({ + line: z.number().optional().nullable(), + path: z.string(), +}); + +export const planEntrySchema = z.object({ + content: z.string(), + priority: z.union([z.literal('high'), z.literal('medium'), z.literal('low')]), + status: z.union([ + z.literal('pending'), + z.literal('in_progress'), + z.literal('completed'), + ]), +}); + +export const permissionOptionSchema = z.object({ + kind: permissionOptionKindSchema, + name: z.string(), + optionId: z.string(), +}); + +export const annotationsSchema = z.object({ + audience: z.array(roleSchema).optional().nullable(), + lastModified: z.string().optional().nullable(), + priority: z.number().optional().nullable(), +}); + +export const requestPermissionResponseSchema = z.object({ + outcome: requestPermissionOutcomeSchema, +}); + +export const fileSystemCapabilitySchema = z.object({ + readTextFile: z.boolean(), + writeTextFile: z.boolean(), +}); + +export const envVariableSchema = z.object({ + name: z.string(), + value: z.string(), +}); + +export const mcpServerSchema = 
z.object({ + args: z.array(z.string()), + command: z.string(), + env: z.array(envVariableSchema), + name: z.string(), +}); + +export const agentCapabilitiesSchema = z.object({ + loadSession: z.boolean(), +}); + +export const authMethodSchema = z.object({ + description: z.string().nullable(), + id: z.string(), + name: z.string(), +}); + +export const clientResponseSchema = z.union([ + writeTextFileResponseSchema, + readTextFileResponseSchema, + requestPermissionResponseSchema, +]); + +export const clientNotificationSchema = cancelNotificationSchema; + +export const embeddedResourceResourceSchema = z.union([ + textResourceContentsSchema, + blobResourceContentsSchema, +]); + +export const newSessionRequestSchema = z.object({ + cwd: z.string(), + mcpServers: z.array(mcpServerSchema), +}); + +export const loadSessionRequestSchema = z.object({ + cwd: z.string(), + mcpServers: z.array(mcpServerSchema), + sessionId: z.string(), +}); + +export const initializeResponseSchema = z.object({ + agentCapabilities: agentCapabilitiesSchema, + authMethods: z.array(authMethodSchema), + protocolVersion: z.number(), +}); + +export const contentBlockSchema = z.union([ + z.object({ + annotations: annotationsSchema.optional().nullable(), + text: z.string(), + type: z.literal('text'), + }), + z.object({ + annotations: annotationsSchema.optional().nullable(), + data: z.string(), + mimeType: z.string(), + type: z.literal('image'), + }), + z.object({ + annotations: annotationsSchema.optional().nullable(), + data: z.string(), + mimeType: z.string(), + type: z.literal('audio'), + }), + z.object({ + annotations: annotationsSchema.optional().nullable(), + description: z.string().optional().nullable(), + mimeType: z.string().optional().nullable(), + name: z.string(), + size: z.number().optional().nullable(), + title: z.string().optional().nullable(), + type: z.literal('resource_link'), + uri: z.string(), + }), + z.object({ + annotations: annotationsSchema.optional().nullable(), + resource: 
embeddedResourceResourceSchema, + type: z.literal('resource'), + }), +]); + +export const toolCallContentSchema = z.union([ + z.object({ + content: contentBlockSchema, + type: z.literal('content'), + }), + z.object({ + newText: z.string(), + oldText: z.string().nullable(), + path: z.string(), + type: z.literal('diff'), + }), +]); + +export const toolCallSchema = z.object({ + content: z.array(toolCallContentSchema).optional(), + kind: toolKindSchema, + locations: z.array(toolCallLocationSchema).optional(), + rawInput: z.unknown().optional(), + status: toolCallStatusSchema, + title: z.string(), + toolCallId: z.string(), +}); + +export const clientCapabilitiesSchema = z.object({ + fs: fileSystemCapabilitySchema, +}); + +export const promptRequestSchema = z.object({ + prompt: z.array(contentBlockSchema), + sessionId: z.string(), +}); + +export const sessionUpdateSchema = z.union([ + z.object({ + content: contentBlockSchema, + sessionUpdate: z.literal('user_message_chunk'), + }), + z.object({ + content: contentBlockSchema, + sessionUpdate: z.literal('agent_message_chunk'), + }), + z.object({ + content: contentBlockSchema, + sessionUpdate: z.literal('agent_thought_chunk'), + }), + z.object({ + content: z.array(toolCallContentSchema).optional(), + kind: toolKindSchema, + locations: z.array(toolCallLocationSchema).optional(), + rawInput: z.unknown().optional(), + sessionUpdate: z.literal('tool_call'), + status: toolCallStatusSchema, + title: z.string(), + toolCallId: z.string(), + }), + z.object({ + content: z.array(toolCallContentSchema).optional().nullable(), + kind: toolKindSchema.optional().nullable(), + locations: z.array(toolCallLocationSchema).optional().nullable(), + rawInput: z.unknown().optional(), + sessionUpdate: z.literal('tool_call_update'), + status: toolCallStatusSchema.optional().nullable(), + title: z.string().optional().nullable(), + toolCallId: z.string(), + }), + z.object({ + entries: z.array(planEntrySchema), + sessionUpdate: z.literal('plan'), + }), 
+]); + +export const agentResponseSchema = z.union([ + initializeResponseSchema, + authenticateResponseSchema, + newSessionResponseSchema, + loadSessionResponseSchema, + promptResponseSchema, +]); + +export const requestPermissionRequestSchema = z.object({ + options: z.array(permissionOptionSchema), + sessionId: z.string(), + toolCall: toolCallSchema, +}); + +export const initializeRequestSchema = z.object({ + clientCapabilities: clientCapabilitiesSchema, + protocolVersion: z.number(), +}); + +export const sessionNotificationSchema = z.object({ + sessionId: z.string(), + update: sessionUpdateSchema, +}); + +export const clientRequestSchema = z.union([ + writeTextFileRequestSchema, + readTextFileRequestSchema, + requestPermissionRequestSchema, +]); + +export const agentRequestSchema = z.union([ + initializeRequestSchema, + authenticateRequestSchema, + newSessionRequestSchema, + loadSessionRequestSchema, + promptRequestSchema, +]); + +export const agentNotificationSchema = sessionNotificationSchema; diff --git a/packages/cli/src/acp/acpPeer.ts b/packages/cli/src/zed-integration/zedIntegration.ts similarity index 54% rename from packages/cli/src/acp/acpPeer.ts rename to packages/cli/src/zed-integration/zedIntegration.ts index 40d8753f..1b5baa8a 100644 --- a/packages/cli/src/acp/acpPeer.ts +++ b/packages/cli/src/zed-integration/zedIntegration.ts @@ -21,16 +21,26 @@ import { getErrorMessage, isWithinRoot, getErrorStatus, + MCPServerConfig, } from '@google/gemini-cli-core'; import * as acp from './acp.js'; -import { Agent } from './acp.js'; import { Readable, Writable } from 'node:stream'; import { Content, Part, FunctionCall, PartListUnion } from '@google/genai'; import { LoadedSettings, SettingScope } from '../config/settings.js'; import * as fs from 'fs/promises'; import * as path from 'path'; +import { z } from 'zod'; -export async function runAcpPeer(config: Config, settings: LoadedSettings) { +import { randomUUID } from 'crypto'; +import { Extension } from 
'../config/extension.js'; +import { CliArgs, loadCliConfig } from '../config/config.js'; + +export async function runZedIntegration( + config: Config, + settings: LoadedSettings, + extensions: Extension[], + argv: CliArgs, +) { const stdout = Writable.toWeb(process.stdout) as WritableStream; const stdin = Readable.toWeb(process.stdin) as ReadableStream; @@ -40,76 +50,176 @@ export async function runAcpPeer(config: Config, settings: LoadedSettings) { console.info = console.error; console.debug = console.error; - new acp.ClientConnection( - (client: acp.Client) => new GeminiAgent(config, settings, client), + new acp.AgentSideConnection( + (client: acp.Client) => + new GeminiAgent(config, settings, extensions, argv, client), stdout, stdin, ); } -class GeminiAgent implements Agent { - chat?: GeminiChat; - pendingSend?: AbortController; +class GeminiAgent { + private sessions: Map = new Map(); constructor( private config: Config, private settings: LoadedSettings, + private extensions: Extension[], + private argv: CliArgs, private client: acp.Client, ) {} - async initialize(_: acp.InitializeParams): Promise { + async initialize( + _args: acp.InitializeRequest, + ): Promise { + const authMethods = [ + { + id: AuthType.LOGIN_WITH_GOOGLE, + name: 'Log in with Google', + description: null, + }, + { + id: AuthType.USE_GEMINI, + name: 'Use Gemini API key', + description: + 'Requires setting the `GEMINI_API_KEY` environment variable', + }, + { + id: AuthType.USE_VERTEX_AI, + name: 'Vertex AI', + description: null, + }, + ]; + + return { + protocolVersion: acp.PROTOCOL_VERSION, + authMethods, + agentCapabilities: { + loadSession: false, + }, + }; + } + + async authenticate({ methodId }: acp.AuthenticateRequest): Promise { + const method = z.nativeEnum(AuthType).parse(methodId); + + await clearCachedCredentialFile(); + await this.config.refreshAuth(method); + this.settings.setValue(SettingScope.User, 'selectedAuthType', method); + } + + async newSession({ + cwd, + mcpServers, + 
}: acp.NewSessionRequest): Promise { + const sessionId = randomUUID(); + const config = await this.newSessionConfig(sessionId, cwd, mcpServers); + let isAuthenticated = false; if (this.settings.merged.selectedAuthType) { try { - await this.config.refreshAuth(this.settings.merged.selectedAuthType); + await config.refreshAuth(this.settings.merged.selectedAuthType); isAuthenticated = true; - } catch (error) { - console.error('Failed to refresh auth:', error); + } catch (e) { + console.error(`Authentication failed: ${e}`); } } - return { protocolVersion: acp.LATEST_PROTOCOL_VERSION, isAuthenticated }; + + if (!isAuthenticated) { + throw acp.RequestError.authRequired(); + } + + const geminiClient = config.getGeminiClient(); + const chat = await geminiClient.startChat(); + const session = new Session(sessionId, chat, config, this.client); + this.sessions.set(sessionId, session); + + return { + sessionId, + }; } - async authenticate(): Promise { - await clearCachedCredentialFile(); - await this.config.refreshAuth(AuthType.LOGIN_WITH_GOOGLE); - this.settings.setValue( - SettingScope.User, - 'selectedAuthType', - AuthType.LOGIN_WITH_GOOGLE, + async newSessionConfig( + sessionId: string, + cwd: string, + mcpServers: acp.McpServer[], + ): Promise { + const mergedMcpServers = { ...this.settings.merged.mcpServers }; + + for (const { command, args, env: rawEnv, name } of mcpServers) { + const env: Record = {}; + for (const { name: envName, value } of rawEnv) { + env[envName] = value; + } + mergedMcpServers[name] = new MCPServerConfig(command, args, env, cwd); + } + + const settings = { ...this.settings.merged, mcpServers: mergedMcpServers }; + + const config = await loadCliConfig( + settings, + this.extensions, + sessionId, + this.argv, + cwd, ); + + await config.initialize(); + return config; } - async cancelSendMessage(): Promise { - if (!this.pendingSend) { + async cancel(params: acp.CancelNotification): Promise { + const session = this.sessions.get(params.sessionId); + if 
(!session) { + throw new Error(`Session not found: ${params.sessionId}`); + } + await session.cancelPendingPrompt(); + } + + async prompt(params: acp.PromptRequest): Promise { + const session = this.sessions.get(params.sessionId); + if (!session) { + throw new Error(`Session not found: ${params.sessionId}`); + } + return session.prompt(params); + } +} + +class Session { + private pendingPrompt: AbortController | null = null; + + constructor( + private readonly id: string, + private readonly chat: GeminiChat, + private readonly config: Config, + private readonly client: acp.Client, + ) {} + + async cancelPendingPrompt(): Promise { + if (!this.pendingPrompt) { throw new Error('Not currently generating'); } - this.pendingSend.abort(); - delete this.pendingSend; + this.pendingPrompt.abort(); + this.pendingPrompt = null; } - async sendUserMessage(params: acp.SendUserMessageParams): Promise { - this.pendingSend?.abort(); + async prompt(params: acp.PromptRequest): Promise { + this.pendingPrompt?.abort(); const pendingSend = new AbortController(); - this.pendingSend = pendingSend; - - if (!this.chat) { - const geminiClient = this.config.getGeminiClient(); - this.chat = await geminiClient.startChat(); - } + this.pendingPrompt = pendingSend; const promptId = Math.random().toString(16).slice(2); - const chat = this.chat!; - const toolRegistry: ToolRegistry = await this.config.getToolRegistry(); - const parts = await this.#resolveUserMessage(params, pendingSend.signal); + const chat = this.chat; + + const parts = await this.#resolvePrompt(params.prompt, pendingSend.signal); let nextMessage: Content | null = { role: 'user', parts }; while (nextMessage !== null) { if (pendingSend.signal.aborted) { chat.addHistory(nextMessage); - return; + return { stopReason: 'cancelled' }; } const functionCalls: FunctionCall[] = []; @@ -120,11 +230,6 @@ class GeminiAgent implements Agent { message: nextMessage?.parts ?? 
[], config: { abortSignal: pendingSend.signal, - tools: [ - { - functionDeclarations: toolRegistry.getFunctionDeclarations(), - }, - ], }, }, promptId, @@ -133,7 +238,7 @@ class GeminiAgent implements Agent { for await (const resp of responseStream) { if (pendingSend.signal.aborted) { - return; + return { stopReason: 'cancelled' }; } if (resp.candidates && resp.candidates.length > 0) { @@ -143,10 +248,16 @@ class GeminiAgent implements Agent { continue; } - this.client.streamAssistantMessageChunk({ - chunk: part.thought - ? { thought: part.text } - : { text: part.text }, + const content: acp.ContentBlock = { + type: 'text', + text: part.text, + }; + + this.sendUpdate({ + sessionUpdate: part.thought + ? 'agent_thought_chunk' + : 'agent_message_chunk', + content, }); } } @@ -170,11 +281,7 @@ class GeminiAgent implements Agent { const toolResponseParts: Part[] = []; for (const fc of functionCalls) { - const response = await this.#runTool( - pendingSend.signal, - promptId, - fc, - ); + const response = await this.runTool(pendingSend.signal, promptId, fc); const parts = Array.isArray(response) ? 
response : [response]; @@ -190,9 +297,20 @@ class GeminiAgent implements Agent { nextMessage = { role: 'user', parts: toolResponseParts }; } } + + return { stopReason: 'end_turn' }; } - async #runTool( + private async sendUpdate(update: acp.SessionUpdate): Promise { + const params: acp.SessionNotification = { + sessionId: this.id, + update, + }; + + await this.client.sessionUpdate(params); + } + + private async runTool( abortSignal: AbortSignal, promptId: string, fc: FunctionCall, @@ -239,68 +357,82 @@ class GeminiAgent implements Agent { ); } - let toolCallId: number | undefined = undefined; - try { - const invocation = tool.build(args); - const confirmationDetails = - await invocation.shouldConfirmExecute(abortSignal); - if (confirmationDetails) { - let content: acp.ToolCallContent | null = null; - if (confirmationDetails.type === 'edit') { - content = { - type: 'diff', - path: confirmationDetails.fileName, - oldText: confirmationDetails.originalContent, - newText: confirmationDetails.newContent, - }; - } + const invocation = tool.build(args); + const confirmationDetails = + await invocation.shouldConfirmExecute(abortSignal); - const result = await this.client.requestToolCallConfirmation({ - label: invocation.getDescription(), - icon: tool.icon, - content, - confirmation: toAcpToolCallConfirmation(confirmationDetails), - locations: invocation.toolLocations(), + if (confirmationDetails) { + const content: acp.ToolCallContent[] = []; + + if (confirmationDetails.type === 'edit') { + content.push({ + type: 'diff', + path: confirmationDetails.fileName, + oldText: confirmationDetails.originalContent, + newText: confirmationDetails.newContent, }); - - await confirmationDetails.onConfirm(toToolCallOutcome(result.outcome)); - switch (result.outcome) { - case 'reject': - return errorResponse( - new Error(`Tool "${fc.name}" not allowed to run by the user.`), - ); - - case 'cancel': - return errorResponse( - new Error(`Tool "${fc.name}" was canceled by the user.`), - ); - 
case 'allow': - case 'alwaysAllow': - case 'alwaysAllowMcpServer': - case 'alwaysAllowTool': - break; - default: { - const resultOutcome: never = result.outcome; - throw new Error(`Unexpected: ${resultOutcome}`); - } - } - toolCallId = result.id; - } else { - const result = await this.client.pushToolCall({ - icon: tool.icon, - label: invocation.getDescription(), - locations: invocation.toolLocations(), - }); - toolCallId = result.id; } - const toolResult: ToolResult = await invocation.execute(abortSignal); - const toolCallContent = toToolCallContent(toolResult); + const params: acp.RequestPermissionRequest = { + sessionId: this.id, + options: toPermissionOptions(confirmationDetails), + toolCall: { + toolCallId: callId, + status: 'pending', + title: invocation.getDescription(), + content, + locations: invocation.toolLocations(), + kind: tool.kind, + }, + }; - await this.client.updateToolCall({ - toolCallId, - status: 'finished', - content: toolCallContent, + const output = await this.client.requestPermission(params); + const outcome = + output.outcome.outcome === 'cancelled' + ? 
ToolConfirmationOutcome.Cancel + : z + .nativeEnum(ToolConfirmationOutcome) + .parse(output.outcome.optionId); + + await confirmationDetails.onConfirm(outcome); + + switch (outcome) { + case ToolConfirmationOutcome.Cancel: + return errorResponse( + new Error(`Tool "${fc.name}" was canceled by the user.`), + ); + case ToolConfirmationOutcome.ProceedOnce: + case ToolConfirmationOutcome.ProceedAlways: + case ToolConfirmationOutcome.ProceedAlwaysServer: + case ToolConfirmationOutcome.ProceedAlwaysTool: + case ToolConfirmationOutcome.ModifyWithEditor: + break; + default: { + const resultOutcome: never = outcome; + throw new Error(`Unexpected: ${resultOutcome}`); + } + } + } else { + await this.sendUpdate({ + sessionUpdate: 'tool_call', + toolCallId: callId, + status: 'in_progress', + title: invocation.getDescription(), + content: [], + locations: invocation.toolLocations(), + kind: tool.kind, + }); + } + + try { + const toolResult: ToolResult = await invocation.execute(abortSignal); + const content = toToolCallContent(toolResult); + + await this.sendUpdate({ + sessionUpdate: 'tool_call_update', + toolCallId: callId, + status: 'completed', + content: content ? [content] : [], }); const durationMs = Date.now() - startTime; @@ -317,31 +449,55 @@ class GeminiAgent implements Agent { return convertToFunctionResponse(fc.name, callId, toolResult.llmContent); } catch (e) { const error = e instanceof Error ? 
e : new Error(String(e)); - if (toolCallId) { - await this.client.updateToolCall({ - toolCallId, - status: 'error', - content: { type: 'markdown', markdown: error.message }, - }); - } + + await this.sendUpdate({ + sessionUpdate: 'tool_call_update', + toolCallId: callId, + status: 'failed', + content: [ + { type: 'content', content: { type: 'text', text: error.message } }, + ], + }); + return errorResponse(error); } } - async #resolveUserMessage( - message: acp.SendUserMessageParams, + async #resolvePrompt( + message: acp.ContentBlock[], abortSignal: AbortSignal, ): Promise { - const atPathCommandParts = message.chunks.filter((part) => 'path' in part); + const parts = message.map((part) => { + switch (part.type) { + case 'text': + return { text: part.text }; + case 'resource_link': + return { + fileData: { + mimeData: part.mimeType, + name: part.name, + fileUri: part.uri, + }, + }; + case 'resource': { + return { + fileData: { + mimeData: part.resource.mimeType, + name: part.resource.uri, + fileUri: part.resource.uri, + }, + }; + } + default: { + throw new Error(`Unexpected chunk type: '${part.type}'`); + } + } + }); + + const atPathCommandParts = parts.filter((part) => 'fileData' in part); if (atPathCommandParts.length === 0) { - return message.chunks.map((chunk) => { - if ('text' in chunk) { - return { text: chunk.text }; - } else { - throw new Error('Unexpected chunk type'); - } - }); + return parts; } // Get centralized file discovery service @@ -362,8 +518,7 @@ class GeminiAgent implements Agent { } for (const atPathPart of atPathCommandParts) { - const pathName = atPathPart.path; - + const pathName = atPathPart.fileData!.fileUri; // Check if path should be ignored by git if (fileDiscovery.shouldGitIgnoreFile(pathName)) { ignoredPaths.push(pathName); @@ -373,10 +528,8 @@ class GeminiAgent implements Agent { console.warn(`Path ${pathName} is ${reason}.`); continue; } - let currentPathSpec = pathName; let resolvedSuccessfully = false; - try { const absolutePath = 
path.resolve(this.config.getTargetDir(), pathName); if (isWithinRoot(absolutePath, this.config.getTargetDir())) { @@ -385,24 +538,22 @@ class GeminiAgent implements Agent { currentPathSpec = pathName.endsWith('/') ? `${pathName}**` : `${pathName}/**`; - this.#debug( + this.debug( `Path ${pathName} resolved to directory, using glob: ${currentPathSpec}`, ); } else { - this.#debug( - `Path ${pathName} resolved to file: ${currentPathSpec}`, - ); + this.debug(`Path ${pathName} resolved to file: ${currentPathSpec}`); } resolvedSuccessfully = true; } else { - this.#debug( + this.debug( `Path ${pathName} is outside the project directory. Skipping.`, ); } } catch (error) { if (isNodeError(error) && error.code === 'ENOENT') { if (this.config.getEnableRecursiveFileSearch() && globTool) { - this.#debug( + this.debug( `Path ${pathName} not found directly, attempting glob search.`, ); try { @@ -426,17 +577,17 @@ class GeminiAgent implements Agent { this.config.getTargetDir(), firstMatchAbsolute, ); - this.#debug( + this.debug( `Glob search for ${pathName} found ${firstMatchAbsolute}, using relative path: ${currentPathSpec}`, ); resolvedSuccessfully = true; } else { - this.#debug( + this.debug( `Glob search for '**/*${pathName}*' did not return a usable path. Path ${pathName} will be skipped.`, ); } } else { - this.#debug( + this.debug( `Glob search for '**/*${pathName}*' found no files or an error. Path ${pathName} will be skipped.`, ); } @@ -446,7 +597,7 @@ class GeminiAgent implements Agent { ); } } else { - this.#debug( + this.debug( `Glob tool not found. 
Path ${pathName} will be skipped.`, ); } @@ -456,23 +607,22 @@ class GeminiAgent implements Agent { ); } } - if (resolvedSuccessfully) { pathSpecsToRead.push(currentPathSpec); atPathToResolvedSpecMap.set(pathName, currentPathSpec); contentLabelsForDisplay.push(pathName); } } - // Construct the initial part of the query for the LLM let initialQueryText = ''; - for (let i = 0; i < message.chunks.length; i++) { - const chunk = message.chunks[i]; + for (let i = 0; i < parts.length; i++) { + const chunk = parts[i]; if ('text' in chunk) { initialQueryText += chunk.text; } else { // type === 'atPath' - const resolvedSpec = atPathToResolvedSpecMap.get(chunk.path); + const resolvedSpec = + chunk.fileData && atPathToResolvedSpecMap.get(chunk.fileData.fileUri); if ( i > 0 && initialQueryText.length > 0 && @@ -480,10 +630,11 @@ class GeminiAgent implements Agent { resolvedSpec ) { // Add space if previous part was text and didn't end with space, or if previous was @path - const prevPart = message.chunks[i - 1]; + const prevPart = parts[i - 1]; if ( 'text' in prevPart || - ('path' in prevPart && atPathToResolvedSpecMap.has(prevPart.path)) + ('fileData' in prevPart && + atPathToResolvedSpecMap.has(prevPart.fileData!.fileUri)) ) { initialQueryText += ' '; } @@ -497,56 +648,64 @@ class GeminiAgent implements Agent { i > 0 && initialQueryText.length > 0 && !initialQueryText.endsWith(' ') && - !chunk.path.startsWith(' ') + !chunk.fileData?.fileUri.startsWith(' ') ) { initialQueryText += ' '; } - initialQueryText += `@${chunk.path}`; + if (chunk.fileData?.fileUri) { + initialQueryText += `@${chunk.fileData.fileUri}`; + } } } } initialQueryText = initialQueryText.trim(); - // Inform user about ignored paths if (ignoredPaths.length > 0) { const ignoreType = respectGitIgnore ? 
'git-ignored' : 'custom-ignored'; - this.#debug( + this.debug( `Ignored ${ignoredPaths.length} ${ignoreType} files: ${ignoredPaths.join(', ')}`, ); } - // Fallback for lone "@" or completely invalid @-commands resulting in empty initialQueryText if (pathSpecsToRead.length === 0) { console.warn('No valid file paths found in @ commands to read.'); return [{ text: initialQueryText }]; } - const processedQueryParts: Part[] = [{ text: initialQueryText }]; - const toolArgs = { paths: pathSpecsToRead, respectGitIgnore, // Use configuration setting }; - let toolCallId: number | undefined = undefined; + const callId = `${readManyFilesTool.name}-${Date.now()}`; + try { const invocation = readManyFilesTool.build(toolArgs); - const toolCall = await this.client.pushToolCall({ - icon: readManyFilesTool.icon, - label: invocation.getDescription(), - }); - toolCallId = toolCall.id; - const result = await invocation.execute(abortSignal); - const content = toToolCallContent(result) || { - type: 'markdown', - markdown: `Successfully read: ${contentLabelsForDisplay.join(', ')}`, - }; - await this.client.updateToolCall({ - toolCallId: toolCall.id, - status: 'finished', - content, + + await this.sendUpdate({ + sessionUpdate: 'tool_call', + toolCallId: callId, + status: 'in_progress', + title: invocation.getDescription(), + content: [], + locations: invocation.toolLocations(), + kind: readManyFilesTool.kind, }); + const result = await invocation.execute(abortSignal); + const content = toToolCallContent(result) || { + type: 'content', + content: { + type: 'text', + text: `Successfully read: ${contentLabelsForDisplay.join(', ')}`, + }, + }; + await this.sendUpdate({ + sessionUpdate: 'tool_call_update', + toolCallId: callId, + status: 'completed', + content: content ? [content] : [], + }); if (Array.isArray(result.llmContent)) { const fileContentRegex = /^--- (.*?) 
---\n\n([\s\S]*?)\n\n$/; processedQueryParts.push({ @@ -576,24 +735,28 @@ class GeminiAgent implements Agent { 'read_many_files tool returned no content or empty content.', ); } - return processedQueryParts; } catch (error: unknown) { - if (toolCallId) { - await this.client.updateToolCall({ - toolCallId, - status: 'error', - content: { - type: 'markdown', - markdown: `Error reading files (${contentLabelsForDisplay.join(', ')}): ${getErrorMessage(error)}`, + await this.sendUpdate({ + sessionUpdate: 'tool_call_update', + toolCallId: callId, + status: 'failed', + content: [ + { + type: 'content', + content: { + type: 'text', + text: `Error reading files (${contentLabelsForDisplay.join(', ')}): ${getErrorMessage(error)}`, + }, }, - }); - } + ], + }); + throw error; } } - #debug(msg: string) { + debug(msg: string) { if (this.config.getDebugMode()) { console.warn(msg); } @@ -604,8 +767,8 @@ function toToolCallContent(toolResult: ToolResult): acp.ToolCallContent | null { if (toolResult.returnDisplay) { if (typeof toolResult.returnDisplay === 'string') { return { - type: 'markdown', - markdown: toolResult.returnDisplay, + type: 'content', + content: { type: 'text', text: toolResult.returnDisplay }, }; } else { return { @@ -620,57 +783,66 @@ function toToolCallContent(toolResult: ToolResult): acp.ToolCallContent | null { } } -function toAcpToolCallConfirmation( - confirmationDetails: ToolCallConfirmationDetails, -): acp.ToolCallConfirmation { - switch (confirmationDetails.type) { - case 'edit': - return { type: 'edit' }; - case 'exec': - return { - type: 'execute', - rootCommand: confirmationDetails.rootCommand, - command: confirmationDetails.command, - }; - case 'mcp': - return { - type: 'mcp', - serverName: confirmationDetails.serverName, - toolName: confirmationDetails.toolName, - toolDisplayName: confirmationDetails.toolDisplayName, - }; - case 'info': - return { - type: 'fetch', - urls: confirmationDetails.urls || [], - description: confirmationDetails.urls?.length - ? 
null - : confirmationDetails.prompt, - }; - default: { - const unreachable: never = confirmationDetails; - throw new Error(`Unexpected: ${unreachable}`); - } - } -} +const basicPermissionOptions = [ + { + optionId: ToolConfirmationOutcome.ProceedOnce, + name: 'Allow', + kind: 'allow_once', + }, + { + optionId: ToolConfirmationOutcome.Cancel, + name: 'Reject', + kind: 'reject_once', + }, +] as const; -function toToolCallOutcome( - outcome: acp.ToolCallConfirmationOutcome, -): ToolConfirmationOutcome { - switch (outcome) { - case 'allow': - return ToolConfirmationOutcome.ProceedOnce; - case 'alwaysAllow': - return ToolConfirmationOutcome.ProceedAlways; - case 'alwaysAllowMcpServer': - return ToolConfirmationOutcome.ProceedAlwaysServer; - case 'alwaysAllowTool': - return ToolConfirmationOutcome.ProceedAlwaysTool; - case 'reject': - case 'cancel': - return ToolConfirmationOutcome.Cancel; +function toPermissionOptions( + confirmation: ToolCallConfirmationDetails, +): acp.PermissionOption[] { + switch (confirmation.type) { + case 'edit': + return [ + { + optionId: ToolConfirmationOutcome.ProceedAlways, + name: 'Allow All Edits', + kind: 'allow_always', + }, + ...basicPermissionOptions, + ]; + case 'exec': + return [ + { + optionId: ToolConfirmationOutcome.ProceedAlways, + name: `Always Allow ${confirmation.rootCommand}`, + kind: 'allow_always', + }, + ...basicPermissionOptions, + ]; + case 'mcp': + return [ + { + optionId: ToolConfirmationOutcome.ProceedAlwaysServer, + name: `Always Allow ${confirmation.serverName}`, + kind: 'allow_always', + }, + { + optionId: ToolConfirmationOutcome.ProceedAlwaysTool, + name: `Always Allow ${confirmation.toolName}`, + kind: 'allow_always', + }, + ...basicPermissionOptions, + ]; + case 'info': + return [ + { + optionId: ToolConfirmationOutcome.ProceedAlways, + name: `Always Allow`, + kind: 'allow_always', + }, + ...basicPermissionOptions, + ]; default: { - const unreachable: never = outcome; + const unreachable: never = confirmation; 
throw new Error(`Unexpected: ${unreachable}`); } } diff --git a/packages/core/src/config/config.ts b/packages/core/src/config/config.ts index 9231f427..069a486d 100644 --- a/packages/core/src/config/config.ts +++ b/packages/core/src/config/config.ts @@ -185,7 +185,7 @@ export interface ConfigParameters { model: string; extensionContextFilePaths?: string[]; maxSessionTurns?: number; - experimentalAcp?: boolean; + experimentalZedIntegration?: boolean; listExtensions?: boolean; extensions?: GeminiCLIExtension[]; blockedMcpServers?: Array<{ name: string; extensionName: string }>; @@ -256,7 +256,7 @@ export class Config { private readonly summarizeToolOutput: | Record | undefined; - private readonly experimentalAcp: boolean = false; + private readonly experimentalZedIntegration: boolean = false; private readonly loadMemoryFromIncludeDirectories: boolean = false; private readonly chatCompression: ChatCompressionSettings | undefined; private readonly interactive: boolean; @@ -309,7 +309,8 @@ export class Config { this.model = params.model; this.extensionContextFilePaths = params.extensionContextFilePaths ?? []; this.maxSessionTurns = params.maxSessionTurns ?? -1; - this.experimentalAcp = params.experimentalAcp ?? false; + this.experimentalZedIntegration = + params.experimentalZedIntegration ?? false; this.listExtensions = params.listExtensions ?? false; this._extensions = params.extensions ?? []; this._blockedMcpServers = params.blockedMcpServers ?? 
[]; @@ -621,8 +622,8 @@ export class Config { return this.extensionContextFilePaths; } - getExperimentalAcp(): boolean { - return this.experimentalAcp; + getExperimentalZedIntegration(): boolean { + return this.experimentalZedIntegration; } getListExtensions(): boolean { diff --git a/packages/core/src/core/coreToolScheduler.test.ts b/packages/core/src/core/coreToolScheduler.test.ts index a3a25707..df39c1dc 100644 --- a/packages/core/src/core/coreToolScheduler.test.ts +++ b/packages/core/src/core/coreToolScheduler.test.ts @@ -19,7 +19,7 @@ import { ToolConfirmationPayload, ToolResult, Config, - Icon, + Kind, ApprovalMode, } from '../index.js'; import { Part, PartListUnion } from '@google/genai'; @@ -389,7 +389,7 @@ describe('CoreToolScheduler edit cancellation', () => { 'mockEditTool', 'mockEditTool', 'A mock edit tool', - Icon.Pencil, + Kind.Edit, {}, ); } diff --git a/packages/core/src/test-utils/tools.ts b/packages/core/src/test-utils/tools.ts index b168db9c..7d917b6c 100644 --- a/packages/core/src/test-utils/tools.ts +++ b/packages/core/src/test-utils/tools.ts @@ -7,9 +7,9 @@ import { vi } from 'vitest'; import { BaseTool, - Icon, ToolCallConfirmationDetails, ToolResult, + Kind, } from '../tools/tools.js'; import { Schema, Type } from '@google/genai'; @@ -29,7 +29,7 @@ export class MockTool extends BaseTool<{ [key: string]: unknown }, ToolResult> { properties: { param: { type: Type.STRING } }, }, ) { - super(name, displayName ?? name, description, Icon.Hammer, params); + super(name, displayName ?? 
name, description, Kind.Other, params); } async execute( diff --git a/packages/core/src/tools/edit.ts b/packages/core/src/tools/edit.ts index e2b517cf..733c1bf8 100644 --- a/packages/core/src/tools/edit.ts +++ b/packages/core/src/tools/edit.ts @@ -9,7 +9,7 @@ import * as path from 'path'; import * as Diff from 'diff'; import { BaseDeclarativeTool, - Icon, + Kind, ToolCallConfirmationDetails, ToolConfirmationOutcome, ToolEditConfirmationDetails, @@ -435,7 +435,7 @@ Expectation for required parameters: 4. NEVER escape \`old_string\` or \`new_string\`, that would break the exact literal text requirement. **Important:** If ANY of the above are not satisfied, the tool will fail. CRITICAL for \`old_string\`: Must uniquely identify the single instance to change. Include at least 3 lines of context BEFORE and AFTER the target text, matching whitespace and indentation precisely. If this string matches multiple locations, or does not match exactly, the tool will fail. **Multiple replacements:** Set \`expected_replacements\` to the number of occurrences you want to replace. The tool will replace ALL occurrences that match \`old_string\` exactly. Ensure the number of replacements matches your expectation.`, - Icon.Pencil, + Kind.Edit, { properties: { file_path: { diff --git a/packages/core/src/tools/glob.ts b/packages/core/src/tools/glob.ts index eaedc20f..77a7241f 100644 --- a/packages/core/src/tools/glob.ts +++ b/packages/core/src/tools/glob.ts @@ -11,7 +11,7 @@ import { SchemaValidator } from '../utils/schemaValidator.js'; import { BaseDeclarativeTool, BaseToolInvocation, - Icon, + Kind, ToolInvocation, ToolResult, } from './tools.js'; @@ -248,7 +248,7 @@ export class GlobTool extends BaseDeclarativeTool { GlobTool.Name, 'FindFiles', 'Efficiently finds files matching specific glob patterns (e.g., `src/**/*.ts`, `**/*.md`), returning absolute paths sorted by modification time (newest first). 
Ideal for quickly locating files based on their name or path structure, especially in large codebases.', - Icon.FileSearch, + Kind.Search, { properties: { pattern: { diff --git a/packages/core/src/tools/grep.ts b/packages/core/src/tools/grep.ts index f8ecdc9c..9d3d638a 100644 --- a/packages/core/src/tools/grep.ts +++ b/packages/core/src/tools/grep.ts @@ -13,7 +13,7 @@ import { globStream } from 'glob'; import { BaseDeclarativeTool, BaseToolInvocation, - Icon, + Kind, ToolInvocation, ToolResult, } from './tools.js'; @@ -543,7 +543,7 @@ export class GrepTool extends BaseDeclarativeTool { GrepTool.Name, 'SearchText', 'Searches for a regular expression pattern within the content of files in a specified directory (or current working directory). Can filter files by a glob pattern. Returns the lines containing matches, along with their file paths and line numbers.', - Icon.Regex, + Kind.Search, { properties: { pattern: { diff --git a/packages/core/src/tools/ls.ts b/packages/core/src/tools/ls.ts index 79820246..7a4445a5 100644 --- a/packages/core/src/tools/ls.ts +++ b/packages/core/src/tools/ls.ts @@ -6,7 +6,7 @@ import fs from 'fs'; import path from 'path'; -import { BaseTool, Icon, ToolResult } from './tools.js'; +import { BaseTool, Kind, ToolResult } from './tools.js'; import { SchemaValidator } from '../utils/schemaValidator.js'; import { makeRelative, shortenPath } from '../utils/paths.js'; import { Config, DEFAULT_FILE_FILTERING_OPTIONS } from '../config/config.js'; @@ -75,7 +75,7 @@ export class LSTool extends BaseTool { LSTool.Name, 'ReadFolder', 'Lists the names of files and subdirectories directly within a specified directory path. 
Can optionally ignore entries matching provided glob patterns.', - Icon.Folder, + Kind.Search, { properties: { path: { diff --git a/packages/core/src/tools/mcp-tool.ts b/packages/core/src/tools/mcp-tool.ts index 4b9a9818..59f83db3 100644 --- a/packages/core/src/tools/mcp-tool.ts +++ b/packages/core/src/tools/mcp-tool.ts @@ -10,7 +10,7 @@ import { ToolCallConfirmationDetails, ToolConfirmationOutcome, ToolMcpConfirmationDetails, - Icon, + Kind, } from './tools.js'; import { CallableTool, Part, FunctionCall } from '@google/genai'; @@ -67,7 +67,7 @@ export class DiscoveredMCPTool extends BaseTool { nameOverride ?? generateValidName(serverToolName), `${serverToolName} (${serverName} MCP Server)`, description, - Icon.Hammer, + Kind.Other, parameterSchema, true, // isOutputMarkdown false, // canUpdateOutput diff --git a/packages/core/src/tools/memoryTool.ts b/packages/core/src/tools/memoryTool.ts index f0c95b6a..c8e88c97 100644 --- a/packages/core/src/tools/memoryTool.ts +++ b/packages/core/src/tools/memoryTool.ts @@ -6,10 +6,10 @@ import { BaseTool, + Kind, ToolResult, ToolEditConfirmationDetails, ToolConfirmationOutcome, - Icon, } from './tools.js'; import { FunctionDeclaration } from '@google/genai'; import * as fs from 'fs/promises'; @@ -122,7 +122,7 @@ export class MemoryTool MemoryTool.Name, 'Save Memory', memoryToolDescription, - Icon.LightBulb, + Kind.Think, memoryToolSchemaData.parametersJsonSchema as Record, ); } diff --git a/packages/core/src/tools/read-file.ts b/packages/core/src/tools/read-file.ts index 0c040b66..d10c73d1 100644 --- a/packages/core/src/tools/read-file.ts +++ b/packages/core/src/tools/read-file.ts @@ -10,7 +10,7 @@ import { makeRelative, shortenPath } from '../utils/paths.js'; import { BaseDeclarativeTool, BaseToolInvocation, - Icon, + Kind, ToolInvocation, ToolLocation, ToolResult, @@ -173,7 +173,7 @@ export class ReadFileTool extends BaseDeclarativeTool< ReadFileTool.Name, 'ReadFile', `Reads and returns the content of a specified file. 
If the file is large, the content will be truncated. The tool's response will clearly indicate if truncation has occurred and will provide details on how to read more of the file using the 'offset' and 'limit' parameters. Handles text, images (PNG, JPG, GIF, WEBP, SVG, BMP), and PDF files. For text files, it can read specific line ranges.`, - Icon.FileSearch, + Kind.Read, { properties: { absolute_path: { diff --git a/packages/core/src/tools/read-many-files.ts b/packages/core/src/tools/read-many-files.ts index 1c92b4f3..5a0799bb 100644 --- a/packages/core/src/tools/read-many-files.ts +++ b/packages/core/src/tools/read-many-files.ts @@ -4,7 +4,7 @@ * SPDX-License-Identifier: Apache-2.0 */ -import { BaseTool, Icon, ToolResult } from './tools.js'; +import { BaseTool, Kind, ToolResult } from './tools.js'; import { SchemaValidator } from '../utils/schemaValidator.js'; import { getErrorMessage } from '../utils/errors.js'; import * as path from 'path'; @@ -229,7 +229,7 @@ This tool is useful when you need to understand or analyze a collection of files - When the user asks to "read all files in X directory" or "show me the content of all Y files". Use this tool when the user's query implies needing the content of several files simultaneously for context, analysis, or summarization. For text files, it uses default UTF-8 encoding and a '--- {filePath} ---' separator between file contents. Ensure paths are relative to the target directory. Glob patterns like 'src/**/*.js' are supported. Avoid using for single files if a more specific single-file reading tool is available, unless the user specifically requests to process a list containing just one file via this tool. Other binary files (not explicitly requested as image/PDF) are generally skipped. 
Default excludes apply to common non-text files (except for explicitly requested images/PDFs) and large dependency directories unless 'useDefaultExcludes' is false.`, - Icon.FileSearch, + Kind.Read, parameterSchema, ); } diff --git a/packages/core/src/tools/shell.ts b/packages/core/src/tools/shell.ts index de9b7c2f..4fa08297 100644 --- a/packages/core/src/tools/shell.ts +++ b/packages/core/src/tools/shell.ts @@ -15,7 +15,7 @@ import { ToolCallConfirmationDetails, ToolExecuteConfirmationDetails, ToolConfirmationOutcome, - Icon, + Kind, } from './tools.js'; import { ToolErrorType } from './tool-error.js'; import { SchemaValidator } from '../utils/schemaValidator.js'; @@ -61,7 +61,7 @@ export class ShellTool extends BaseTool { Signal: Signal number or \`(none)\` if no signal was received. Background PIDs: List of background processes started or \`(none)\`. Process Group PGID: Process group started or \`(none)\``, - Icon.Terminal, + Kind.Execute, { type: 'object', properties: { diff --git a/packages/core/src/tools/tool-registry.ts b/packages/core/src/tools/tool-registry.ts index 17d324b3..02f77727 100644 --- a/packages/core/src/tools/tool-registry.ts +++ b/packages/core/src/tools/tool-registry.ts @@ -5,7 +5,7 @@ */ import { FunctionDeclaration } from '@google/genai'; -import { AnyDeclarativeTool, Icon, ToolResult, BaseTool } from './tools.js'; +import { AnyDeclarativeTool, Kind, ToolResult, BaseTool } from './tools.js'; import { Config } from '../config/config.js'; import { spawn } from 'node:child_process'; import { StringDecoder } from 'node:string_decoder'; @@ -44,7 +44,7 @@ Signal: Signal number or \`(none)\` if no signal was received. 
name, name, description, - Icon.Hammer, + Kind.Other, parameterSchema, false, // isOutputMarkdown false, // canUpdateOutput diff --git a/packages/core/src/tools/tools.ts b/packages/core/src/tools/tools.ts index 4b13174c..3e7d0647 100644 --- a/packages/core/src/tools/tools.ts +++ b/packages/core/src/tools/tools.ts @@ -145,9 +145,9 @@ export interface ToolBuilder< description: string; /** - * The icon to display when interacting via ACP. + * The kind of tool for categorization and permissions */ - icon: Icon; + kind: Kind; /** * Function declaration schema from @google/genai. @@ -185,7 +185,7 @@ export abstract class DeclarativeTool< readonly name: string, readonly displayName: string, readonly description: string, - readonly icon: Icon, + readonly kind: Kind, readonly parameterSchema: unknown, readonly isOutputMarkdown: boolean = true, readonly canUpdateOutput: boolean = false, @@ -287,7 +287,7 @@ export abstract class BaseTool< readonly name: string, readonly displayName: string, readonly description: string, - readonly icon: Icon, + readonly kind: Kind, readonly parameterSchema: unknown, readonly isOutputMarkdown: boolean = true, readonly canUpdateOutput: boolean = false, @@ -296,7 +296,7 @@ export abstract class BaseTool< name, displayName, description, - icon, + kind, parameterSchema, isOutputMarkdown, canUpdateOutput, @@ -570,15 +570,16 @@ export enum ToolConfirmationOutcome { Cancel = 'cancel', } -export enum Icon { - FileSearch = 'fileSearch', - Folder = 'folder', - Globe = 'globe', - Hammer = 'hammer', - LightBulb = 'lightBulb', - Pencil = 'pencil', - Regex = 'regex', - Terminal = 'terminal', +export enum Kind { + Read = 'read', + Edit = 'edit', + Delete = 'delete', + Move = 'move', + Search = 'search', + Execute = 'execute', + Think = 'think', + Fetch = 'fetch', + Other = 'other', } export interface ToolLocation { diff --git a/packages/core/src/tools/web-fetch.ts b/packages/core/src/tools/web-fetch.ts index 6733c38d..bf8d1968 100644 --- 
a/packages/core/src/tools/web-fetch.ts +++ b/packages/core/src/tools/web-fetch.ts @@ -10,7 +10,7 @@ import { ToolResult, ToolCallConfirmationDetails, ToolConfirmationOutcome, - Icon, + Kind, } from './tools.js'; import { getErrorMessage } from '../utils/errors.js'; import { Config, ApprovalMode } from '../config/config.js'; @@ -70,7 +70,7 @@ export class WebFetchTool extends BaseTool { WebFetchTool.Name, 'WebFetch', "Processes content from URL(s), including local and private network addresses (e.g., localhost), embedded in a prompt. Include up to 20 URLs and instructions (e.g., summarize, extract specific data) directly in the 'prompt' parameter.", - Icon.Globe, + Kind.Fetch, { properties: { prompt: { diff --git a/packages/core/src/tools/web-search.ts b/packages/core/src/tools/web-search.ts index 8fe29967..54679452 100644 --- a/packages/core/src/tools/web-search.ts +++ b/packages/core/src/tools/web-search.ts @@ -5,7 +5,7 @@ */ import { GroundingMetadata } from '@google/genai'; -import { BaseTool, Icon, ToolResult } from './tools.js'; +import { BaseTool, Kind, ToolResult } from './tools.js'; import { Type } from '@google/genai'; import { SchemaValidator } from '../utils/schemaValidator.js'; @@ -69,7 +69,7 @@ export class WebSearchTool extends BaseTool< WebSearchTool.Name, 'GoogleSearch', 'Performs a web search using Google Search (via the Gemini API) and returns the results. 
This tool is useful for finding information on the internet based on a query.', - Icon.Globe, + Kind.Search, { type: Type.OBJECT, properties: { diff --git a/packages/core/src/tools/write-file.ts b/packages/core/src/tools/write-file.ts index 72aeba6d..fa1e1301 100644 --- a/packages/core/src/tools/write-file.ts +++ b/packages/core/src/tools/write-file.ts @@ -15,7 +15,8 @@ import { ToolEditConfirmationDetails, ToolConfirmationOutcome, ToolCallConfirmationDetails, - Icon, + Kind, + ToolLocation, } from './tools.js'; import { ToolErrorType } from './tool-error.js'; import { SchemaValidator } from '../utils/schemaValidator.js'; @@ -82,7 +83,7 @@ export class WriteFileTool `Writes content to a specified file in the local filesystem. The user has the ability to modify \`content\`. If modified, this will be stated in the response.`, - Icon.Pencil, + Kind.Edit, { properties: { file_path: { @@ -101,6 +102,10 @@ export class WriteFileTool ); } + toolLocations(params: WriteFileToolParams): ToolLocation[] { + return [{ path: params.file_path }]; + } + validateToolParams(params: WriteFileToolParams): string | null { const errors = SchemaValidator.validate( this.schema.parametersJsonSchema, From f81ccd8d7a996dc3f8b9dbc2fab1e428d482d6a5 Mon Sep 17 00:00:00 2001 From: Shreya Keshive Date: Wed, 13 Aug 2025 12:31:20 -0400 Subject: [PATCH 24/45] Add usage documentation for IDE integration (#6154) --- README.md | 2 +- docs/ide-integration.md | 141 ++++++++++++++++++++++++++++++++++++++++ docs/index.md | 1 + 3 files changed, 143 insertions(+), 1 deletion(-) create mode 100644 docs/ide-integration.md diff --git a/README.md b/README.md index 697142d7..3fb86d12 100644 --- a/README.md +++ b/README.md @@ -214,7 +214,7 @@ Integrate Gemini CLI directly into your GitHub workflows with the [**Gemini CLI ### Advanced Topics - [**Architecture Overview**](./docs/architecture.md) - How Gemini CLI works -- [**IDE Integration**](./docs/extension.md) - VS Code companion +- [**IDE 
Integration**](./docs/ide-integration.md) - VS Code companion - [**Sandboxing & Security**](./docs/sandbox.md) - Safe execution environments - [**Enterprise Deployment**](./docs/deployment.md) - Docker, system-wide config - [**Telemetry & Monitoring**](./docs/telemetry.md) - Usage tracking diff --git a/docs/ide-integration.md b/docs/ide-integration.md new file mode 100644 index 00000000..a0bd4976 --- /dev/null +++ b/docs/ide-integration.md @@ -0,0 +1,141 @@ +# IDE Integration + +Gemini CLI can integrate with your IDE to provide a more seamless and context-aware experience. This integration allows the CLI to understand your workspace better and enables powerful features like native in-editor diffing. + +Currently, the only supported IDE is [Visual Studio Code](https://code.visualstudio.com/) and other editors that support VS Code extensions. + +## Features + +- **Workspace Context:** The CLI automatically gains awareness of your workspace to provide more relevant and accurate responses. This context includes: + - The **10 most recently accessed files** in your workspace. + - Your active cursor position. + - Any text you have selected (up to a 16KB limit; longer selections will be truncated). + +- **Native Diffing:** When Gemini suggests code modifications, you can view the changes directly within your IDE's native diff viewer. This allows you to review, edit, and accept or reject the suggested changes seamlessly. + +- **VS Code Commands:** You can access Gemini CLI features directly from the VS Code Command Palette (`Cmd+Shift+P` or `Ctrl+Shift+P`): + - `Gemini CLI: Run`: Starts a new Gemini CLI session in the integrated terminal. + - `Gemini CLI: Accept Diff`: Accepts the changes in the active diff editor. + - `Gemini CLI: Close Diff Editor`: Rejects the changes and closes the active diff editor. + - `Gemini CLI: View Third-Party Notices`: Displays the third-party notices for the extension. 
+ +## Installation and Setup + +There are three ways to set up the IDE integration: + +### 1. Automatic Nudge (Recommended) + +When you run Gemini CLI inside a supported editor, it will automatically detect your environment and prompt you to connect. Answering "Yes" will automatically run the necessary setup, which includes installing the companion extension and enabling the connection. + +### 2. Manual Installation from CLI + +If you previously dismissed the prompt or want to install the extension manually, you can run the following command inside Gemini CLI: + +``` +/ide install +``` + +This will find the correct extension for your IDE and install it. + +### 3. Manual Installation from a Marketplace + +You can also install the extension directly from a marketplace. + +- **For Visual Studio Code:** Install from the [VS Code Marketplace](https://marketplace.visualstudio.com/items?itemName=google.gemini-cli-vscode-ide-companion). +- **For VS Code Forks:** To support forks of VS Code, the extension is also published on the [Open VSX Registry](https://open-vsx.org/extension/google/gemini-cli-vscode-ide-companion). Follow your editor's instructions for installing extensions from this registry. + +After any installation method, it's recommended to open a new terminal window to ensure the integration is activated correctly. Once installed, you can use `/ide enable` to connect. + +## Usage + +### Enabling and Disabling + +You can control the IDE integration from within the CLI: + +- To enable the connection to the IDE, run: + ``` + /ide enable + ``` +- To disable the connection, run: + ``` + /ide disable + ``` + +When enabled, Gemini CLI will automatically attempt to connect to the IDE companion extension. + +### Checking the Status + +To check the connection status and see the context the CLI has received from the IDE, run: + +``` +/ide status +``` + +If connected, this command will show the IDE it's connected to and a list of recently opened files it is aware of. 
+ +(Note: The file list is limited to 10 recently accessed files within your workspace and only includes local files on disk.) + +### Working with Diffs + +When you ask Gemini to modify a file, it can open a diff view directly in your editor. + +**To accept a diff**, you can perform any of the following actions: + +- Click the **checkmark icon** in the diff editor's title bar. +- Save the file (e.g., with `Cmd+S` or `Ctrl+S`). +- Open the Command Palette and run **Gemini CLI: Accept Diff**. +- Respond with `yes` in the CLI when prompted. + +**To reject a diff**, you can: + +- Click the **'x' icon** in the diff editor's title bar. +- Close the diff editor tab. +- Open the Command Palette and run **Gemini CLI: Close Diff Editor**. +- Respond with `no` in the CLI when prompted. + +You can also **modify the suggested changes** directly in the diff view before accepting them. + +If you select ‘Yes, allow always’ in the CLI, changes will no longer show up in the IDE as they will be auto-accepted. + +## Using with Sandboxing + +If you are using Gemini CLI within a sandbox, please be aware of the following: + +- **On macOS:** The IDE integration requires network access to communicate with the IDE companion extension. You must use a Seatbelt profile that allows network access. +- **In a Docker Container:** If you run Gemini CLI inside a Docker (or Podman) container, the IDE integration can still connect to the VS Code extension running on your host machine. The CLI is configured to automatically find the IDE server on `host.docker.internal`. No special configuration is usually required, but you may need to ensure your Docker networking setup allows connections from the container to the host. + +## Troubleshooting + +If you encounter issues with IDE integration, here are some common error messages and how to resolve them. + +### Connection Errors + +- **Message:** `🔴 Disconnected: Failed to connect to IDE companion extension for [IDE Name]. 
Please ensure the extension is running and try restarting your terminal. To install the extension, run /ide install.` + - **Cause:** Gemini CLI could not find the necessary environment variables (`GEMINI_CLI_IDE_WORKSPACE_PATH` or `GEMINI_CLI_IDE_SERVER_PORT`) to connect to the IDE. This usually means the IDE companion extension is not running or did not initialize correctly. + - **Solution:** + 1. Make sure you have installed the **Gemini CLI Companion** extension in your IDE and that it is enabled. + 2. Open a new terminal window in your IDE to ensure it picks up the correct environment. + +- **Message:** `🔴 Disconnected: IDE connection error. The connection was lost unexpectedly. Please try reconnecting by running /ide enable` + - **Cause:** The connection to the IDE companion was lost. + - **Solution:** Run `/ide enable` to try and reconnect. If the issue continues, open a new terminal window or restart your IDE. + +### Configuration Errors + +- **Message:** `🔴 Disconnected: Directory mismatch. Gemini CLI is running in a different location than the open workspace in [IDE Name]. Please run the CLI from the same directory as your project's root folder.` + - **Cause:** The CLI's current working directory is outside the folder or workspace you have open in your IDE. + - **Solution:** `cd` into the same directory that is open in your IDE and restart the CLI. + +- **Message:** `🔴 Disconnected: To use this feature, please open a single workspace folder in [IDE Name] and try again.` + - **Cause:** You have multiple workspace folders open in your IDE, or no folder is open at all. The IDE integration requires a single root workspace folder to operate correctly. + - **Solution:** Open a single project folder in your IDE and restart the CLI. + +### General Errors + +- **Message:** `IDE integration is not supported in your current environment. 
To use this feature, run Gemini CLI in one of these supported IDEs: [List of IDEs]` + - **Cause:** You are running Gemini CLI in a terminal or environment that is not a supported IDE. + - **Solution:** Run Gemini CLI from the integrated terminal of a supported IDE, like VS Code. + +- **Message:** `No installer is available for [IDE Name]. Please install the IDE companion manually from its marketplace.` + - **Cause:** You ran `/ide install`, but the CLI does not have an automated installer for your specific IDE. + - **Solution:** Open your IDE's extension marketplace, search for "Gemini CLI Companion", and install it manually. diff --git a/docs/index.md b/docs/index.md index ecdd5a52..fc0de1ea 100644 --- a/docs/index.md +++ b/docs/index.md @@ -18,6 +18,7 @@ This documentation is organized into the following sections: - **[Configuration](./cli/configuration.md):** Information on configuring the CLI. - **[Checkpointing](./checkpointing.md):** Documentation for the checkpointing feature. - **[Extensions](./extension.md):** How to extend the CLI with new functionality. + - **[IDE Integration](./ide-integration.md):** Connect the CLI to your editor. - **[Telemetry](./telemetry.md):** Overview of telemetry in the CLI. - **Core Details:** Documentation for `packages/core`. - **[Core Introduction](./core/index.md):** Overview of the core component. From 74a13fb535b255797d6c9aa3499acfea6aadc58d Mon Sep 17 00:00:00 2001 From: Tommaso Sciortino Date: Wed, 13 Aug 2025 10:30:15 -0700 Subject: [PATCH 25/45] Document certificate issue workaround (#6157) --- docs/troubleshooting.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/docs/troubleshooting.md b/docs/troubleshooting.md index dde2a8ef..9506859a 100644 --- a/docs/troubleshooting.md +++ b/docs/troubleshooting.md @@ -19,6 +19,11 @@ This guide provides solutions to common issues and debugging tips, including top [Google AI Studio](http://aistudio.google.com/app/apikey), which also includes a separate free tier. 
+- **Error: `UNABLE_TO_GET_ISSUER_CERT_LOCALLY` or `unable to get local issuer certificate`** + - **Cause:** You may be on a corporate network with a firewall that intercepts and inspects SSL/TLS traffic. This often requires a custom root CA certificate to be trusted by Node.js. + - **Solution:** Set the `NODE_EXTRA_CA_CERTS` environment variable to the absolute path of your corporate root CA certificate file. + - Example: `export NODE_EXTRA_CA_CERTS=/path/to/your/corporate-ca.crt` + ## Frequently asked questions (FAQs) - **Q: How do I update Gemini CLI to the latest version?** From 9c7fb870c1a7c80741fafdfc6837d4b92e373b2d Mon Sep 17 00:00:00 2001 From: Deepankar Sharma Date: Wed, 13 Aug 2025 13:32:54 -0400 Subject: [PATCH 26/45] Add terminal setup command for Shift+Enter and Ctrl+Enter support (#3289) Co-authored-by: jacob314 --- packages/cli/src/config/keyBindings.ts | 4 + packages/cli/src/gemini.tsx | 3 + .../cli/src/services/BuiltinCommandLoader.ts | 2 + packages/cli/src/ui/App.tsx | 8 +- .../ui/commands/terminalSetupCommand.test.ts | 85 +++++ .../src/ui/commands/terminalSetupCommand.ts | 45 +++ .../cli/src/ui/components/InputPrompt.tsx | 8 +- packages/cli/src/ui/hooks/useFocus.ts | 8 +- packages/cli/src/ui/hooks/useKeypress.test.ts | 11 +- packages/cli/src/ui/hooks/useKeypress.ts | 246 ++++++++++++- .../src/ui/hooks/useKittyKeyboardProtocol.ts | 31 ++ .../cli/src/ui/utils/kittyProtocolDetector.ts | 105 ++++++ .../cli/src/ui/utils/platformConstants.ts | 44 +++ packages/cli/src/ui/utils/terminalSetup.ts | 340 ++++++++++++++++++ .../clearcut-logger/clearcut-logger.ts | 20 ++ .../clearcut-logger/event-metadata-key.ts | 10 + packages/core/src/telemetry/index.ts | 2 + packages/core/src/telemetry/loggers.ts | 19 + packages/core/src/telemetry/types.ts | 16 +- 19 files changed, 989 insertions(+), 18 deletions(-) create mode 100644 packages/cli/src/ui/commands/terminalSetupCommand.test.ts create mode 100644 packages/cli/src/ui/commands/terminalSetupCommand.ts create 
mode 100644 packages/cli/src/ui/hooks/useKittyKeyboardProtocol.ts create mode 100644 packages/cli/src/ui/utils/kittyProtocolDetector.ts create mode 100644 packages/cli/src/ui/utils/platformConstants.ts create mode 100644 packages/cli/src/ui/utils/terminalSetup.ts diff --git a/packages/cli/src/config/keyBindings.ts b/packages/cli/src/config/keyBindings.ts index 6f4a21a2..640bf9de 100644 --- a/packages/cli/src/config/keyBindings.ts +++ b/packages/cli/src/config/keyBindings.ts @@ -129,20 +129,24 @@ export const defaultKeyBindings: KeyBindingConfig = { // Text input // Original: key.name === 'return' && !key.ctrl && !key.meta && !key.paste + // Must also exclude shift to allow shift+enter for newline [Command.SUBMIT]: [ { key: 'return', ctrl: false, command: false, paste: false, + shift: false, }, ], // Original: key.name === 'return' && (key.ctrl || key.meta || key.paste) // Split into multiple data-driven bindings + // Now also includes shift+enter for multi-line input [Command.NEWLINE]: [ { key: 'return', ctrl: true }, { key: 'return', command: true }, { key: 'return', paste: true }, + { key: 'return', shift: true }, ], // External tools diff --git a/packages/cli/src/gemini.tsx b/packages/cli/src/gemini.tsx index 68f948da..54e58f72 100644 --- a/packages/cli/src/gemini.tsx +++ b/packages/cli/src/gemini.tsx @@ -41,6 +41,7 @@ import { import { validateAuthMethod } from './config/auth.js'; import { setMaxSizedBoxDebugging } from './ui/components/shared/MaxSizedBox.js'; import { validateNonInteractiveAuth } from './validateNonInterActiveAuth.js'; +import { detectAndEnableKittyProtocol } from './ui/utils/kittyProtocolDetector.js'; import { checkForUpdates } from './ui/utils/updateCheck.js'; import { handleAutoUpdate } from './utils/handleAutoUpdate.js'; import { appEvents, AppEvent } from './utils/events.js'; @@ -263,6 +264,8 @@ export async function main() { // Render UI, passing necessary config values. Check that there is no command line question. 
if (config.isInteractive()) { const version = await getCliVersion(); + // Detect and enable Kitty keyboard protocol once at startup + await detectAndEnableKittyProtocol(); setWindowTitle(basename(workspaceRoot), settings); const instance = render( diff --git a/packages/cli/src/services/BuiltinCommandLoader.ts b/packages/cli/src/services/BuiltinCommandLoader.ts index 639bb4d8..7a09cb14 100644 --- a/packages/cli/src/services/BuiltinCommandLoader.ts +++ b/packages/cli/src/services/BuiltinCommandLoader.ts @@ -33,6 +33,7 @@ import { toolsCommand } from '../ui/commands/toolsCommand.js'; import { settingsCommand } from '../ui/commands/settingsCommand.js'; import { vimCommand } from '../ui/commands/vimCommand.js'; import { setupGithubCommand } from '../ui/commands/setupGithubCommand.js'; +import { terminalSetupCommand } from '../ui/commands/terminalSetupCommand.js'; /** * Loads the core, hard-coded slash commands that are an integral part @@ -76,6 +77,7 @@ export class BuiltinCommandLoader implements ICommandLoader { settingsCommand, vimCommand, setupGithubCommand, + terminalSetupCommand, ]; return allDefinitions.filter((cmd): cmd is SlashCommand => cmd !== null); diff --git a/packages/cli/src/ui/App.tsx b/packages/cli/src/ui/App.tsx index 1caabbe0..e8aca549 100644 --- a/packages/cli/src/ui/App.tsx +++ b/packages/cli/src/ui/App.tsx @@ -80,6 +80,7 @@ import { useTextBuffer } from './components/shared/text-buffer.js'; import { useVimMode, VimModeProvider } from './contexts/VimModeContext.js'; import { useVim } from './hooks/vim.js'; import { useKeypress, Key } from './hooks/useKeypress.js'; +import { useKittyKeyboardProtocol } from './hooks/useKittyKeyboardProtocol.js'; import { keyMatchers, Command } from './keyMatchers.js'; import * as fs from 'fs'; import { UpdateNotification } from './components/UpdateNotification.js'; @@ -605,6 +606,7 @@ const App = ({ config, settings, startupWarnings = [], version }: AppProps) => { const { elapsedTime, currentLoadingPhrase } = 
useLoadingIndicator(streamingState); const showAutoAcceptIndicator = useAutoAcceptIndicator({ config }); + const kittyProtocolStatus = useKittyKeyboardProtocol(); const handleExit = useCallback( ( @@ -697,7 +699,11 @@ const App = ({ config, settings, startupWarnings = [], version }: AppProps) => { ], ); - useKeypress(handleGlobalKeypress, { isActive: true }); + useKeypress(handleGlobalKeypress, { + isActive: true, + kittyProtocolEnabled: kittyProtocolStatus.enabled, + config, + }); useEffect(() => { if (config) { diff --git a/packages/cli/src/ui/commands/terminalSetupCommand.test.ts b/packages/cli/src/ui/commands/terminalSetupCommand.test.ts new file mode 100644 index 00000000..85f8735e --- /dev/null +++ b/packages/cli/src/ui/commands/terminalSetupCommand.test.ts @@ -0,0 +1,85 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { describe, it, expect, vi, beforeEach } from 'vitest'; +import { terminalSetupCommand } from './terminalSetupCommand.js'; +import * as terminalSetupModule from '../utils/terminalSetup.js'; +import { CommandContext } from './types.js'; + +vi.mock('../utils/terminalSetup.js'); + +describe('terminalSetupCommand', () => { + beforeEach(() => { + vi.clearAllMocks(); + }); + + it('should have correct metadata', () => { + expect(terminalSetupCommand.name).toBe('terminal-setup'); + expect(terminalSetupCommand.description).toContain('multiline input'); + expect(terminalSetupCommand.kind).toBe('built-in'); + }); + + it('should return success message when terminal setup succeeds', async () => { + vi.spyOn(terminalSetupModule, 'terminalSetup').mockResolvedValue({ + success: true, + message: 'Terminal configured successfully', + }); + + const result = await terminalSetupCommand.action({} as CommandContext, ''); + + expect(result).toEqual({ + type: 'message', + content: 'Terminal configured successfully', + messageType: 'info', + }); + }); + + it('should append restart message when terminal setup 
requires restart', async () => { + vi.spyOn(terminalSetupModule, 'terminalSetup').mockResolvedValue({ + success: true, + message: 'Terminal configured successfully', + requiresRestart: true, + }); + + const result = await terminalSetupCommand.action({} as CommandContext, ''); + + expect(result).toEqual({ + type: 'message', + content: + 'Terminal configured successfully\n\nPlease restart your terminal for the changes to take effect.', + messageType: 'info', + }); + }); + + it('should return error message when terminal setup fails', async () => { + vi.spyOn(terminalSetupModule, 'terminalSetup').mockResolvedValue({ + success: false, + message: 'Failed to detect terminal', + }); + + const result = await terminalSetupCommand.action({} as CommandContext, ''); + + expect(result).toEqual({ + type: 'message', + content: 'Failed to detect terminal', + messageType: 'error', + }); + }); + + it('should handle exceptions from terminal setup', async () => { + vi.spyOn(terminalSetupModule, 'terminalSetup').mockRejectedValue( + new Error('Unexpected error'), + ); + + const result = await terminalSetupCommand.action({} as CommandContext, ''); + + expect(result).toEqual({ + type: 'message', + content: 'Failed to configure terminal: Error: Unexpected error', + messageType: 'error', + }); + }); +}); diff --git a/packages/cli/src/ui/commands/terminalSetupCommand.ts b/packages/cli/src/ui/commands/terminalSetupCommand.ts new file mode 100644 index 00000000..11520c0e --- /dev/null +++ b/packages/cli/src/ui/commands/terminalSetupCommand.ts @@ -0,0 +1,45 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { MessageActionReturn, SlashCommand, CommandKind } from './types.js'; +import { terminalSetup } from '../utils/terminalSetup.js'; + +/** + * Command to configure terminal keybindings for multiline input support. 
+ * + * This command automatically detects and configures VS Code, Cursor, and Windsurf + * to support Shift+Enter and Ctrl+Enter for multiline input. + */ +export const terminalSetupCommand: SlashCommand = { + name: 'terminal-setup', + description: + 'Configure terminal keybindings for multiline input (VS Code, Cursor, Windsurf)', + kind: CommandKind.BUILT_IN, + + action: async (): Promise => { + try { + const result = await terminalSetup(); + + let content = result.message; + if (result.requiresRestart) { + content += + '\n\nPlease restart your terminal for the changes to take effect.'; + } + + return { + type: 'message', + content, + messageType: result.success ? 'info' : 'error', + }; + } catch (error) { + return { + type: 'message', + content: `Failed to configure terminal: ${error}`, + messageType: 'error', + }; + } + }, +}; diff --git a/packages/cli/src/ui/components/InputPrompt.tsx b/packages/cli/src/ui/components/InputPrompt.tsx index f53d255f..94cbcf1b 100644 --- a/packages/cli/src/ui/components/InputPrompt.tsx +++ b/packages/cli/src/ui/components/InputPrompt.tsx @@ -17,6 +17,7 @@ import { useShellHistory } from '../hooks/useShellHistory.js'; import { useReverseSearchCompletion } from '../hooks/useReverseSearchCompletion.js'; import { useCommandCompletion } from '../hooks/useCommandCompletion.js'; import { useKeypress, Key } from '../hooks/useKeypress.js'; +import { useKittyKeyboardProtocol } from '../hooks/useKittyKeyboardProtocol.js'; import { keyMatchers, Command } from '../keyMatchers.js'; import { CommandContext, SlashCommand } from '../commands/types.js'; import { Config } from '@google/gemini-cli-core'; @@ -66,6 +67,7 @@ export const InputPrompt: React.FC = ({ const [escPressCount, setEscPressCount] = useState(0); const [showEscapePrompt, setShowEscapePrompt] = useState(false); const escapeTimerRef = useRef(null); + const kittyProtocolStatus = useKittyKeyboardProtocol(); const [dirs, setDirs] = useState( 
config.getWorkspaceContext().getDirectories(), @@ -525,7 +527,11 @@ export const InputPrompt: React.FC = ({ ], ); - useKeypress(handleInput, { isActive: true }); + useKeypress(handleInput, { + isActive: true, + kittyProtocolEnabled: kittyProtocolStatus.enabled, + config, + }); const linesToRender = buffer.viewportVisualLines; const [cursorVisualRowAbsolute, cursorVisualColAbsolute] = diff --git a/packages/cli/src/ui/hooks/useFocus.ts b/packages/cli/src/ui/hooks/useFocus.ts index 6c9a6daa..8a7f9f6c 100644 --- a/packages/cli/src/ui/hooks/useFocus.ts +++ b/packages/cli/src/ui/hooks/useFocus.ts @@ -8,12 +8,12 @@ import { useStdin, useStdout } from 'ink'; import { useEffect, useState } from 'react'; // ANSI escape codes to enable/disable terminal focus reporting -const ENABLE_FOCUS_REPORTING = '\x1b[?1004h'; -const DISABLE_FOCUS_REPORTING = '\x1b[?1004l'; +export const ENABLE_FOCUS_REPORTING = '\x1b[?1004h'; +export const DISABLE_FOCUS_REPORTING = '\x1b[?1004l'; // ANSI escape codes for focus events -const FOCUS_IN = '\x1b[I'; -const FOCUS_OUT = '\x1b[O'; +export const FOCUS_IN = '\x1b[I'; +export const FOCUS_OUT = '\x1b[O'; export const useFocus = () => { const { stdin } = useStdin(); diff --git a/packages/cli/src/ui/hooks/useKeypress.test.ts b/packages/cli/src/ui/hooks/useKeypress.test.ts index a30eabf2..946ee054 100644 --- a/packages/cli/src/ui/hooks/useKeypress.test.ts +++ b/packages/cli/src/ui/hooks/useKeypress.test.ts @@ -134,9 +134,14 @@ describe('useKeypress', () => { expect(onKeypress).not.toHaveBeenCalled(); }); - it('should listen for keypress when active', () => { + it.each([ + { key: { name: 'a', sequence: 'a' } }, + { key: { name: 'left', sequence: '\x1b[D' } }, + { key: { name: 'right', sequence: '\x1b[C' } }, + { key: { name: 'up', sequence: '\x1b[A' } }, + { key: { name: 'down', sequence: '\x1b[B' } }, + ])('should listen for keypress when active for key $key.name', ({ key }) => { renderHook(() => useKeypress(onKeypress, { isActive: true })); - const 
key = { name: 'a', sequence: 'a' }; act(() => stdin.pressKey(key)); expect(onKeypress).toHaveBeenCalledWith(expect.objectContaining(key)); }); @@ -187,7 +192,7 @@ describe('useKeypress', () => { }, isLegacy: true, }, - ])('Paste Handling in $description', ({ setup, isLegacy }) => { + ])('in $description', ({ setup, isLegacy }) => { beforeEach(() => { setup(); stdin.setLegacy(isLegacy); diff --git a/packages/cli/src/ui/hooks/useKeypress.ts b/packages/cli/src/ui/hooks/useKeypress.ts index 6c2b7e8f..920270ee 100644 --- a/packages/cli/src/ui/hooks/useKeypress.ts +++ b/packages/cli/src/ui/hooks/useKeypress.ts @@ -8,6 +8,21 @@ import { useEffect, useRef } from 'react'; import { useStdin } from 'ink'; import readline from 'readline'; import { PassThrough } from 'stream'; +import { + KITTY_CTRL_C, + BACKSLASH_ENTER_DETECTION_WINDOW_MS, + MAX_KITTY_SEQUENCE_LENGTH, +} from '../utils/platformConstants.js'; +import { + KittySequenceOverflowEvent, + logKittySequenceOverflow, + Config, +} from '@google/gemini-cli-core'; +import { FOCUS_IN, FOCUS_OUT } from './useFocus.js'; + +const ESC = '\u001B'; +export const PASTE_MODE_PREFIX = `${ESC}[200~`; +export const PASTE_MODE_SUFFIX = `${ESC}[201~`; export interface Key { name: string; @@ -16,6 +31,7 @@ export interface Key { shift: boolean; paste: boolean; sequence: string; + kittyProtocol?: boolean; } /** @@ -30,10 +46,16 @@ export interface Key { * @param onKeypress - The callback function to execute on each keypress. * @param options - Options to control the hook's behavior. * @param options.isActive - Whether the hook should be actively listening for input. + * @param options.kittyProtocolEnabled - Whether Kitty keyboard protocol is enabled. + * @param options.config - Optional config for telemetry logging. 
*/ export function useKeypress( onKeypress: (key: Key) => void, - { isActive }: { isActive: boolean }, + { + isActive, + kittyProtocolEnabled = false, + config, + }: { isActive: boolean; kittyProtocolEnabled?: boolean; config?: Config }, ) { const { stdin, setRawMode } = useStdin(); const onKeypressRef = useRef(onKeypress); @@ -64,8 +86,210 @@ export function useKeypress( let isPaste = false; let pasteBuffer = Buffer.alloc(0); + let kittySequenceBuffer = ''; + let backslashTimeout: NodeJS.Timeout | null = null; + let waitingForEnterAfterBackslash = false; + + // Parse Kitty protocol sequences + const parseKittySequence = (sequence: string): Key | null => { + // Match CSI ; u or ~ + // Format: ESC [ ; u/~ + const kittyPattern = new RegExp(`^${ESC}\\[(\\d+)(;(\\d+))?([u~])$`); + const match = sequence.match(kittyPattern); + if (!match) return null; + + const keyCode = parseInt(match[1], 10); + const modifiers = match[3] ? parseInt(match[3], 10) : 1; + + // Decode modifiers (subtract 1 as per Kitty protocol spec) + const modifierBits = modifiers - 1; + const shift = (modifierBits & 1) === 1; + const alt = (modifierBits & 2) === 2; + const ctrl = (modifierBits & 4) === 4; + + // Handle Escape key (code 27) + if (keyCode === 27) { + return { + name: 'escape', + ctrl, + meta: alt, + shift, + paste: false, + sequence, + kittyProtocol: true, + }; + } + + // Handle Enter key (code 13) + if (keyCode === 13) { + return { + name: 'return', + ctrl, + meta: alt, + shift, + paste: false, + sequence, + kittyProtocol: true, + }; + } + + // Handle Ctrl+letter combinations (a-z) + // ASCII codes: a=97, b=98, c=99, ..., z=122 + if (keyCode >= 97 && keyCode <= 122 && ctrl) { + const letter = String.fromCharCode(keyCode); + return { + name: letter, + ctrl: true, + meta: alt, + shift, + paste: false, + sequence, + kittyProtocol: true, + }; + } + + // Handle other keys as needed + return null; + }; const handleKeypress = (_: unknown, key: Key) => { + // Handle VS Code's backslash+return 
pattern (Shift+Enter) + if (key.name === 'return' && waitingForEnterAfterBackslash) { + // Cancel the timeout since we got the Enter + if (backslashTimeout) { + clearTimeout(backslashTimeout); + backslashTimeout = null; + } + waitingForEnterAfterBackslash = false; + + // Convert to Shift+Enter + onKeypressRef.current({ + ...key, + shift: true, + sequence: '\\\r', // VS Code's Shift+Enter representation + }); + return; + } + + // Handle backslash - hold it to see if Enter follows + if (key.sequence === '\\' && !key.name) { + // Don't pass through the backslash yet - wait to see if Enter follows + waitingForEnterAfterBackslash = true; + + // Set up a timeout to pass through the backslash if no Enter follows + backslashTimeout = setTimeout(() => { + waitingForEnterAfterBackslash = false; + backslashTimeout = null; + // Pass through the backslash since no Enter followed + onKeypressRef.current(key); + }, BACKSLASH_ENTER_DETECTION_WINDOW_MS); + + return; + } + + // If we're waiting for Enter after backslash but got something else, + // pass through the backslash first, then the new key + if (waitingForEnterAfterBackslash && key.name !== 'return') { + if (backslashTimeout) { + clearTimeout(backslashTimeout); + backslashTimeout = null; + } + waitingForEnterAfterBackslash = false; + + // Pass through the backslash that was held + onKeypressRef.current({ + name: '', + sequence: '\\', + ctrl: false, + meta: false, + shift: false, + paste: false, + }); + + // Then continue processing the current key normally + } + + // If readline has already identified an arrow key, pass it through + // immediately, bypassing the Kitty protocol sequence buffering. 
+ if (['up', 'down', 'left', 'right'].includes(key.name)) { + onKeypressRef.current(key); + return; + } + + // Always pass through Ctrl+C immediately, regardless of protocol state + // Check both standard format and Kitty protocol sequence + if ( + (key.ctrl && key.name === 'c') || + key.sequence === `${ESC}${KITTY_CTRL_C}` + ) { + kittySequenceBuffer = ''; + // If it's the Kitty sequence, create a proper key object + if (key.sequence === `${ESC}${KITTY_CTRL_C}`) { + onKeypressRef.current({ + name: 'c', + ctrl: true, + meta: false, + shift: false, + paste: false, + sequence: key.sequence, + kittyProtocol: true, + }); + } else { + onKeypressRef.current(key); + } + return; + } + + // If Kitty protocol is enabled, handle CSI sequences + if (kittyProtocolEnabled) { + // If we have a buffer or this starts a CSI sequence + if ( + kittySequenceBuffer || + (key.sequence.startsWith(`${ESC}[`) && + !key.sequence.startsWith(PASTE_MODE_PREFIX) && + !key.sequence.startsWith(PASTE_MODE_SUFFIX) && + !key.sequence.startsWith(FOCUS_IN) && + !key.sequence.startsWith(FOCUS_OUT)) + ) { + kittySequenceBuffer += key.sequence; + + // Try to parse the buffer as a Kitty sequence + const kittyKey = parseKittySequence(kittySequenceBuffer); + if (kittyKey) { + kittySequenceBuffer = ''; + onKeypressRef.current(kittyKey); + return; + } + + if (config?.getDebugMode()) { + const codes = Array.from(kittySequenceBuffer).map((ch) => + ch.charCodeAt(0), + ); + // Unless the user is sshing over a slow connection, this likely + // indicates this is not a kitty sequence but we have incorrectly + // interpreted it as such. See the examples above for sequences + // such as FOCUS_IN that are not Kitty sequences. 
+ console.warn('Kitty sequence buffer has char codes:', codes); + } + + // If buffer doesn't match expected pattern and is getting long, flush it + if (kittySequenceBuffer.length > MAX_KITTY_SEQUENCE_LENGTH) { + // Log telemetry for buffer overflow + if (config) { + const event = new KittySequenceOverflowEvent( + kittySequenceBuffer.length, + kittySequenceBuffer, + ); + logKittySequenceOverflow(config, event); + } + // Not a Kitty sequence, treat as regular key + kittySequenceBuffer = ''; + } else { + // Wait for more characters + return; + } + } + } if (key.name === 'paste-start') { isPaste = true; } else if (key.name === 'paste-end') { @@ -84,7 +308,7 @@ export function useKeypress( pasteBuffer = Buffer.concat([pasteBuffer, Buffer.from(key.sequence)]); } else { // Handle special keys - if (key.name === 'return' && key.sequence === '\x1B\r') { + if (key.name === 'return' && key.sequence === `${ESC}\r`) { key.meta = true; } onKeypressRef.current({ ...key, paste: isPaste }); @@ -93,13 +317,13 @@ export function useKeypress( }; const handleRawKeypress = (data: Buffer) => { - const PASTE_MODE_PREFIX = Buffer.from('\x1B[200~'); - const PASTE_MODE_SUFFIX = Buffer.from('\x1B[201~'); + const pasteModePrefixBuffer = Buffer.from(PASTE_MODE_PREFIX); + const pasteModeSuffixBuffer = Buffer.from(PASTE_MODE_SUFFIX); let pos = 0; while (pos < data.length) { - const prefixPos = data.indexOf(PASTE_MODE_PREFIX, pos); - const suffixPos = data.indexOf(PASTE_MODE_SUFFIX, pos); + const prefixPos = data.indexOf(pasteModePrefixBuffer, pos); + const suffixPos = data.indexOf(pasteModeSuffixBuffer, pos); // Determine which marker comes first, if any. 
const isPrefixNext = @@ -115,7 +339,7 @@ export function useKeypress( } else if (isSuffixNext) { nextMarkerPos = suffixPos; } - markerLength = PASTE_MODE_SUFFIX.length; + markerLength = pasteModeSuffixBuffer.length; if (nextMarkerPos === -1) { keypressStream.write(data.slice(pos)); @@ -170,6 +394,12 @@ export function useKeypress( rl.close(); setRawMode(false); + // Clean up any pending backslash timeout + if (backslashTimeout) { + clearTimeout(backslashTimeout); + backslashTimeout = null; + } + // If we are in the middle of a paste, send what we have. if (isPaste) { onKeypressRef.current({ @@ -183,5 +413,5 @@ export function useKeypress( pasteBuffer = Buffer.alloc(0); } }; - }, [isActive, stdin, setRawMode]); + }, [isActive, stdin, setRawMode, kittyProtocolEnabled, config]); } diff --git a/packages/cli/src/ui/hooks/useKittyKeyboardProtocol.ts b/packages/cli/src/ui/hooks/useKittyKeyboardProtocol.ts new file mode 100644 index 00000000..53c7566c --- /dev/null +++ b/packages/cli/src/ui/hooks/useKittyKeyboardProtocol.ts @@ -0,0 +1,31 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { useState } from 'react'; +import { + isKittyProtocolEnabled, + isKittyProtocolSupported, +} from '../utils/kittyProtocolDetector.js'; + +export interface KittyProtocolStatus { + supported: boolean; + enabled: boolean; + checking: boolean; +} + +/** + * Hook that returns the cached Kitty keyboard protocol status. + * Detection is done once at app startup to avoid repeated queries. 
+ */ +export function useKittyKeyboardProtocol(): KittyProtocolStatus { + const [status] = useState({ + supported: isKittyProtocolSupported(), + enabled: isKittyProtocolEnabled(), + checking: false, + }); + + return status; +} diff --git a/packages/cli/src/ui/utils/kittyProtocolDetector.ts b/packages/cli/src/ui/utils/kittyProtocolDetector.ts new file mode 100644 index 00000000..5d77943a --- /dev/null +++ b/packages/cli/src/ui/utils/kittyProtocolDetector.ts @@ -0,0 +1,105 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +let detectionComplete = false; +let protocolSupported = false; +let protocolEnabled = false; + +/** + * Detects Kitty keyboard protocol support. + * Definitive document about this protocol lives at https://sw.kovidgoyal.net/kitty/keyboard-protocol/ + * This function should be called once at app startup. + */ +export async function detectAndEnableKittyProtocol(): Promise { + if (detectionComplete) { + return protocolSupported; + } + + return new Promise((resolve) => { + if (!process.stdin.isTTY || !process.stdout.isTTY) { + detectionComplete = true; + resolve(false); + return; + } + + const originalRawMode = process.stdin.isRaw; + if (!originalRawMode) { + process.stdin.setRawMode(true); + } + + let responseBuffer = ''; + let progressiveEnhancementReceived = false; + let checkFinished = false; + + const handleData = (data: Buffer) => { + responseBuffer += data.toString(); + + // Check for progressive enhancement response (CSI ? u) + if (responseBuffer.includes('\x1b[?') && responseBuffer.includes('u')) { + progressiveEnhancementReceived = true; + } + + // Check for device attributes response (CSI ? 
c) + if (responseBuffer.includes('\x1b[?') && responseBuffer.includes('c')) { + if (!checkFinished) { + checkFinished = true; + process.stdin.removeListener('data', handleData); + + if (!originalRawMode) { + process.stdin.setRawMode(false); + } + + if (progressiveEnhancementReceived) { + // Enable the protocol + process.stdout.write('\x1b[>1u'); + protocolSupported = true; + protocolEnabled = true; + + // Set up cleanup on exit + process.on('exit', disableProtocol); + process.on('SIGTERM', disableProtocol); + } + + detectionComplete = true; + resolve(protocolSupported); + } + } + }; + + process.stdin.on('data', handleData); + + // Send queries + process.stdout.write('\x1b[?u'); // Query progressive enhancement + process.stdout.write('\x1b[c'); // Query device attributes + + // Timeout after 50ms + setTimeout(() => { + if (!checkFinished) { + process.stdin.removeListener('data', handleData); + if (!originalRawMode) { + process.stdin.setRawMode(false); + } + detectionComplete = true; + resolve(false); + } + }, 50); + }); +} + +function disableProtocol() { + if (protocolEnabled) { + process.stdout.write('\x1b[ ; u/~ + * Example: \x1b[13;2u (Shift+Enter) = 8 chars + * Longest reasonable: \x1b[127;15~ = 11 chars (Del with all modifiers) + * We use 12 to provide a small buffer. + */ +export const MAX_KITTY_SEQUENCE_LENGTH = 12; diff --git a/packages/cli/src/ui/utils/terminalSetup.ts b/packages/cli/src/ui/utils/terminalSetup.ts new file mode 100644 index 00000000..7f944847 --- /dev/null +++ b/packages/cli/src/ui/utils/terminalSetup.ts @@ -0,0 +1,340 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +/** + * Terminal setup utility for configuring Shift+Enter and Ctrl+Enter support. + * + * This module provides automatic detection and configuration of various terminal + * emulators to support multiline input through modified Enter keys. 
+ * + * Supported terminals: + * - VS Code: Configures keybindings.json to send \\\r\n + * - Cursor: Configures keybindings.json to send \\\r\n (VS Code fork) + * - Windsurf: Configures keybindings.json to send \\\r\n (VS Code fork) + * + * For VS Code and its forks: + * - Shift+Enter: Sends \\\r\n (backslash followed by CRLF) + * - Ctrl+Enter: Sends \\\r\n (backslash followed by CRLF) + * + * The module will not modify existing shift+enter or ctrl+enter keybindings + * to avoid conflicts with user customizations. + */ + +import { promises as fs } from 'fs'; +import * as os from 'os'; +import * as path from 'path'; +import { exec } from 'child_process'; +import { promisify } from 'util'; +import { isKittyProtocolEnabled } from './kittyProtocolDetector.js'; +import { VSCODE_SHIFT_ENTER_SEQUENCE } from './platformConstants.js'; + +const execAsync = promisify(exec); + +/** + * Removes single-line JSON comments (// ...) from a string to allow parsing + * VS Code style JSON files that may contain comments. + */ +function stripJsonComments(content: string): string { + // Remove single-line comments (// ...) 
+ return content.replace(/^\s*\/\/.*$/gm, ''); +} + +export interface TerminalSetupResult { + success: boolean; + message: string; + requiresRestart?: boolean; +} + +type SupportedTerminal = 'vscode' | 'cursor' | 'windsurf'; + +// Terminal detection +async function detectTerminal(): Promise { + const termProgram = process.env.TERM_PROGRAM; + + // Check VS Code and its forks - check forks first to avoid false positives + // Check for Cursor-specific indicators + if ( + process.env.CURSOR_TRACE_ID || + process.env.VSCODE_GIT_ASKPASS_MAIN?.toLowerCase().includes('cursor') + ) { + return 'cursor'; + } + // Check for Windsurf-specific indicators + if (process.env.VSCODE_GIT_ASKPASS_MAIN?.toLowerCase().includes('windsurf')) { + return 'windsurf'; + } + // Check VS Code last since forks may also set VSCODE env vars + if (termProgram === 'vscode' || process.env.VSCODE_GIT_IPC_HANDLE) { + return 'vscode'; + } + + // Check parent process name + if (os.platform() !== 'win32') { + try { + const { stdout } = await execAsync('ps -o comm= -p $PPID'); + const parentName = stdout.trim(); + + // Check forks before VS Code to avoid false positives + if (parentName.includes('windsurf') || parentName.includes('Windsurf')) + return 'windsurf'; + if (parentName.includes('cursor') || parentName.includes('Cursor')) + return 'cursor'; + if (parentName.includes('code') || parentName.includes('Code')) + return 'vscode'; + } catch (error) { + // Continue detection even if process check fails + console.debug('Parent process detection failed:', error); + } + } + + return null; +} + +// Backup file helper +async function backupFile(filePath: string): Promise { + try { + const timestamp = new Date().toISOString().replace(/[:.]/g, '-'); + const backupPath = `${filePath}.backup.${timestamp}`; + await fs.copyFile(filePath, backupPath); + } catch (error) { + // Log backup errors but continue with operation + console.warn(`Failed to create backup of ${filePath}:`, error); + } +} + +// Helper function 
to get VS Code-style config directory +function getVSCodeStyleConfigDir(appName: string): string | null { + const platform = os.platform(); + + if (platform === 'darwin') { + return path.join( + os.homedir(), + 'Library', + 'Application Support', + appName, + 'User', + ); + } else if (platform === 'win32') { + if (!process.env.APPDATA) { + return null; + } + return path.join(process.env.APPDATA, appName, 'User'); + } else { + return path.join(os.homedir(), '.config', appName, 'User'); + } +} + +// Generic VS Code-style terminal configuration +async function configureVSCodeStyle( + terminalName: string, + appName: string, +): Promise { + const configDir = getVSCodeStyleConfigDir(appName); + + if (!configDir) { + return { + success: false, + message: `Could not determine ${terminalName} config path on Windows: APPDATA environment variable is not set.`, + }; + } + + const keybindingsFile = path.join(configDir, 'keybindings.json'); + + try { + await fs.mkdir(configDir, { recursive: true }); + + let keybindings: unknown[] = []; + try { + const content = await fs.readFile(keybindingsFile, 'utf8'); + await backupFile(keybindingsFile); + try { + const cleanContent = stripJsonComments(content); + const parsedContent = JSON.parse(cleanContent); + if (!Array.isArray(parsedContent)) { + return { + success: false, + message: + `${terminalName} keybindings.json exists but is not a valid JSON array. ` + + `Please fix the file manually or delete it to allow automatic configuration.\n` + + `File: ${keybindingsFile}`, + }; + } + keybindings = parsedContent; + } catch (parseError) { + return { + success: false, + message: + `Failed to parse ${terminalName} keybindings.json. 
The file contains invalid JSON.\n` + + `Please fix the file manually or delete it to allow automatic configuration.\n` + + `File: ${keybindingsFile}\n` + + `Error: ${parseError}`, + }; + } + } catch { + // File doesn't exist, will create new one + } + + const shiftEnterBinding = { + key: 'shift+enter', + command: 'workbench.action.terminal.sendSequence', + when: 'terminalFocus', + args: { text: VSCODE_SHIFT_ENTER_SEQUENCE }, + }; + + const ctrlEnterBinding = { + key: 'ctrl+enter', + command: 'workbench.action.terminal.sendSequence', + when: 'terminalFocus', + args: { text: VSCODE_SHIFT_ENTER_SEQUENCE }, + }; + + // Check if ANY shift+enter or ctrl+enter bindings already exist + const existingShiftEnter = keybindings.find((kb) => { + const binding = kb as { key?: string }; + return binding.key === 'shift+enter'; + }); + + const existingCtrlEnter = keybindings.find((kb) => { + const binding = kb as { key?: string }; + return binding.key === 'ctrl+enter'; + }); + + if (existingShiftEnter || existingCtrlEnter) { + const messages: string[] = []; + if (existingShiftEnter) { + messages.push(`- Shift+Enter binding already exists`); + } + if (existingCtrlEnter) { + messages.push(`- Ctrl+Enter binding already exists`); + } + return { + success: false, + message: + `Existing keybindings detected. 
Will not modify to avoid conflicts.\n` + + messages.join('\n') + + '\n' + + `Please check and modify manually if needed: ${keybindingsFile}`, + }; + } + + // Check if our specific bindings already exist + const hasOurShiftEnter = keybindings.some((kb) => { + const binding = kb as { + command?: string; + args?: { text?: string }; + key?: string; + }; + return ( + binding.key === 'shift+enter' && + binding.command === 'workbench.action.terminal.sendSequence' && + binding.args?.text === '\\\r\n' + ); + }); + + const hasOurCtrlEnter = keybindings.some((kb) => { + const binding = kb as { + command?: string; + args?: { text?: string }; + key?: string; + }; + return ( + binding.key === 'ctrl+enter' && + binding.command === 'workbench.action.terminal.sendSequence' && + binding.args?.text === '\\\r\n' + ); + }); + + if (!hasOurShiftEnter || !hasOurCtrlEnter) { + if (!hasOurShiftEnter) keybindings.unshift(shiftEnterBinding); + if (!hasOurCtrlEnter) keybindings.unshift(ctrlEnterBinding); + + await fs.writeFile(keybindingsFile, JSON.stringify(keybindings, null, 4)); + return { + success: true, + message: `Added Shift+Enter and Ctrl+Enter keybindings to ${terminalName}.\nModified: ${keybindingsFile}`, + requiresRestart: true, + }; + } else { + return { + success: true, + message: `${terminalName} keybindings already configured.`, + }; + } + } catch (error) { + return { + success: false, + message: `Failed to configure ${terminalName}.\nFile: ${keybindingsFile}\nError: ${error}`, + }; + } +} + +// Terminal-specific configuration functions + +async function configureVSCode(): Promise { + return configureVSCodeStyle('VS Code', 'Code'); +} + +async function configureCursor(): Promise { + return configureVSCodeStyle('Cursor', 'Cursor'); +} + +async function configureWindsurf(): Promise { + return configureVSCodeStyle('Windsurf', 'Windsurf'); +} + +/** + * Main terminal setup function that detects and configures the current terminal. + * + * This function: + * 1. 
Detects the current terminal emulator + * 2. Applies appropriate configuration for Shift+Enter and Ctrl+Enter support + * 3. Creates backups of configuration files before modifying them + * + * @returns Promise Result object with success status and message + * + * @example + * const result = await terminalSetup(); + * if (result.success) { + * console.log(result.message); + * if (result.requiresRestart) { + * console.log('Please restart your terminal'); + * } + * } + */ +export async function terminalSetup(): Promise { + // Check if terminal already has optimal keyboard support + if (isKittyProtocolEnabled()) { + return { + success: true, + message: + 'Your terminal is already configured for an optimal experience with multiline input (Shift+Enter and Ctrl+Enter).', + }; + } + + const terminal = await detectTerminal(); + + if (!terminal) { + return { + success: false, + message: + 'Could not detect terminal type. Supported terminals: VS Code, Cursor, and Windsurf.', + }; + } + + switch (terminal) { + case 'vscode': + return configureVSCode(); + case 'cursor': + return configureCursor(); + case 'windsurf': + return configureWindsurf(); + default: + return { + success: false, + message: `Terminal "${terminal}" is not supported yet.`, + }; + } +} diff --git a/packages/core/src/telemetry/clearcut-logger/clearcut-logger.ts b/packages/core/src/telemetry/clearcut-logger/clearcut-logger.ts index 0c13e864..9450f06d 100644 --- a/packages/core/src/telemetry/clearcut-logger/clearcut-logger.ts +++ b/packages/core/src/telemetry/clearcut-logger/clearcut-logger.ts @@ -19,6 +19,7 @@ import { SlashCommandEvent, MalformedJsonResponseEvent, IdeConnectionEvent, + KittySequenceOverflowEvent, } from '../types.js'; import { EventMetadataKey } from './event-metadata-key.js'; import { Config } from '../../config/config.js'; @@ -43,6 +44,7 @@ const next_speaker_check_event_name = 'next_speaker_check'; const slash_command_event_name = 'slash_command'; const malformed_json_response_event_name = 
'malformed_json_response'; const ide_connection_event_name = 'ide_connection'; +const kitty_sequence_overflow_event_name = 'kitty_sequence_overflow'; export interface LogResponse { nextRequestWaitMs?: number; @@ -675,6 +677,24 @@ export class ClearcutLogger { this.flushIfNeeded(); } + logKittySequenceOverflowEvent(event: KittySequenceOverflowEvent): void { + const data: EventValue[] = [ + { + gemini_cli_key: EventMetadataKey.GEMINI_CLI_KITTY_SEQUENCE_LENGTH, + value: event.sequence_length.toString(), + }, + { + gemini_cli_key: EventMetadataKey.GEMINI_CLI_KITTY_TRUNCATED_SEQUENCE, + value: event.truncated_sequence, + }, + ]; + + this.enqueueLogEvent( + this.createLogEvent(kitty_sequence_overflow_event_name, data), + ); + this.flushIfNeeded(); + } + logEndSessionEvent(event: EndSessionEvent): void { const data: EventValue[] = [ { diff --git a/packages/core/src/telemetry/clearcut-logger/event-metadata-key.ts b/packages/core/src/telemetry/clearcut-logger/event-metadata-key.ts index 9dae3e0d..cb4172ed 100644 --- a/packages/core/src/telemetry/clearcut-logger/event-metadata-key.ts +++ b/packages/core/src/telemetry/clearcut-logger/event-metadata-key.ts @@ -212,6 +212,16 @@ export enum EventMetadataKey { // Logs user removed lines in edit/write tool response. GEMINI_CLI_USER_REMOVED_LINES = 50, + + // ========================================================================== + // Kitty Sequence Overflow Event Keys + // =========================================================================== + + // Logs the length of the kitty sequence that overflowed. + GEMINI_CLI_KITTY_SEQUENCE_LENGTH = 53, + + // Logs the truncated kitty sequence. 
+ GEMINI_CLI_KITTY_TRUNCATED_SEQUENCE = 52, } export function getEventMetadataKey( diff --git a/packages/core/src/telemetry/index.ts b/packages/core/src/telemetry/index.ts index 1663abdf..0f343ab3 100644 --- a/packages/core/src/telemetry/index.ts +++ b/packages/core/src/telemetry/index.ts @@ -27,6 +27,7 @@ export { logApiResponse, logFlashFallback, logSlashCommand, + logKittySequenceOverflow, } from './loggers.js'; export { StartSessionEvent, @@ -39,6 +40,7 @@ export { TelemetryEvent, FlashFallbackEvent, SlashCommandEvent, + KittySequenceOverflowEvent, makeSlashCommandEvent, SlashCommandStatus, } from './types.js'; diff --git a/packages/core/src/telemetry/loggers.ts b/packages/core/src/telemetry/loggers.ts index e3726ccb..d7a81203 100644 --- a/packages/core/src/telemetry/loggers.ts +++ b/packages/core/src/telemetry/loggers.ts @@ -32,6 +32,7 @@ import { NextSpeakerCheckEvent, LoopDetectedEvent, SlashCommandEvent, + KittySequenceOverflowEvent, } from './types.js'; import { recordApiErrorMetrics, @@ -378,3 +379,21 @@ export function logIdeConnection( }; logger.emit(logRecord); } + +export function logKittySequenceOverflow( + config: Config, + event: KittySequenceOverflowEvent, +): void { + ClearcutLogger.getInstance(config)?.logKittySequenceOverflowEvent(event); + if (!isTelemetrySdkInitialized()) return; + const attributes: LogAttributes = { + ...getCommonAttributes(config), + ...event, + }; + const logger = logs.getLogger(SERVICE_NAME); + const logRecord: LogRecord = { + body: `Kitty sequence buffer overflow: ${event.sequence_length} bytes`, + attributes, + }; + logger.emit(logRecord); +} diff --git a/packages/core/src/telemetry/types.ts b/packages/core/src/telemetry/types.ts index d590699c..b07c4ca4 100644 --- a/packages/core/src/telemetry/types.ts +++ b/packages/core/src/telemetry/types.ts @@ -346,6 +346,20 @@ export class IdeConnectionEvent { } } +export class KittySequenceOverflowEvent { + 'event.name': 'kitty_sequence_overflow'; + 'event.timestamp': string; // 
ISO 8601 + sequence_length: number; + truncated_sequence: string; + constructor(sequence_length: number, truncated_sequence: string) { + this['event.name'] = 'kitty_sequence_overflow'; + this['event.timestamp'] = new Date().toISOString(); + this.sequence_length = sequence_length; + // Truncate to first 20 chars for logging (avoid logging sensitive data) + this.truncated_sequence = truncated_sequence.substring(0, 20); + } +} + export type TelemetryEvent = | StartSessionEvent | EndSessionEvent @@ -360,4 +374,4 @@ export type TelemetryEvent = | SlashCommandEvent | MalformedJsonResponseEvent | IdeConnectionEvent - | SlashCommandEvent; + | KittySequenceOverflowEvent; From b61a63aef4bcce9cb56fe46f10f0dc90b8fd6597 Mon Sep 17 00:00:00 2001 From: Adam Weidman <65992621+adamfweidman@users.noreply.github.com> Date: Wed, 13 Aug 2025 17:57:11 +0000 Subject: [PATCH 27/45] move errorParsing.ts to core (#6159) --- packages/cli/src/nonInteractiveCli.ts | 2 +- packages/cli/src/ui/hooks/useGeminiStream.test.tsx | 7 ++----- packages/cli/src/ui/hooks/useGeminiStream.ts | 2 +- packages/core/src/index.ts | 1 + .../src/ui => core/src}/utils/errorParsing.test.ts | 11 +++++------ .../{cli/src/ui => core/src}/utils/errorParsing.ts | 12 +++++++----- packages/core/src/utils/quotaErrorDetection.ts | 7 ++----- 7 files changed, 19 insertions(+), 23 deletions(-) rename packages/{cli/src/ui => core/src}/utils/errorParsing.test.ts (98%) rename packages/{cli/src/ui => core/src}/utils/errorParsing.ts (97%) diff --git a/packages/cli/src/nonInteractiveCli.ts b/packages/cli/src/nonInteractiveCli.ts index c237e56b..f2efe8fc 100644 --- a/packages/cli/src/nonInteractiveCli.ts +++ b/packages/cli/src/nonInteractiveCli.ts @@ -13,10 +13,10 @@ import { isTelemetrySdkInitialized, GeminiEventType, ToolErrorType, + parseAndFormatApiError, } from '@google/gemini-cli-core'; import { Content, Part, FunctionCall } from '@google/genai'; -import { parseAndFormatApiError } from './ui/utils/errorParsing.js'; import { 
ConsolePatcher } from './ui/utils/ConsolePatcher.js'; export async function runNonInteractive( diff --git a/packages/cli/src/ui/hooks/useGeminiStream.test.tsx b/packages/cli/src/ui/hooks/useGeminiStream.test.tsx index 37d63e9a..9eed0912 100644 --- a/packages/cli/src/ui/hooks/useGeminiStream.test.tsx +++ b/packages/cli/src/ui/hooks/useGeminiStream.test.tsx @@ -51,6 +51,7 @@ const MockedGeminiClientClass = vi.hoisted(() => const MockedUserPromptEvent = vi.hoisted(() => vi.fn().mockImplementation(() => {}), ); +const mockParseAndFormatApiError = vi.hoisted(() => vi.fn()); vi.mock('@google/gemini-cli-core', async (importOriginal) => { const actualCoreModule = (await importOriginal()) as any; @@ -59,6 +60,7 @@ vi.mock('@google/gemini-cli-core', async (importOriginal) => { GitService: vi.fn(), GeminiClient: MockedGeminiClientClass, UserPromptEvent: MockedUserPromptEvent, + parseAndFormatApiError: mockParseAndFormatApiError, }; }); @@ -127,11 +129,6 @@ vi.mock('./slashCommandProcessor.js', () => ({ handleSlashCommand: vi.fn().mockReturnValue(false), })); -const mockParseAndFormatApiError = vi.hoisted(() => vi.fn()); -vi.mock('../utils/errorParsing.js', () => ({ - parseAndFormatApiError: mockParseAndFormatApiError, -})); - // --- END MOCKS --- describe('mergePartListUnions', () => { diff --git a/packages/cli/src/ui/hooks/useGeminiStream.ts b/packages/cli/src/ui/hooks/useGeminiStream.ts index 6f3cb4fd..99b727b6 100644 --- a/packages/cli/src/ui/hooks/useGeminiStream.ts +++ b/packages/cli/src/ui/hooks/useGeminiStream.ts @@ -25,6 +25,7 @@ import { UnauthorizedError, UserPromptEvent, DEFAULT_GEMINI_FLASH_MODEL, + parseAndFormatApiError, } from '@google/gemini-cli-core'; import { type Part, type PartListUnion, FinishReason } from '@google/genai'; import { @@ -37,7 +38,6 @@ import { ToolCallStatus, } from '../types.js'; import { isAtCommand } from '../utils/commandUtils.js'; -import { parseAndFormatApiError } from '../utils/errorParsing.js'; import { useShellCommandProcessor } 
from './shellCommandProcessor.js'; import { handleAtCommand } from './atCommandProcessor.js'; import { findLastSafeSplitPoint } from '../utils/markdownUtilities.js'; diff --git a/packages/core/src/index.ts b/packages/core/src/index.ts index 791446e3..a24cddbe 100644 --- a/packages/core/src/index.ts +++ b/packages/core/src/index.ts @@ -41,6 +41,7 @@ export * from './utils/systemEncoding.js'; export * from './utils/textUtils.js'; export * from './utils/formatters.js'; export * from './utils/filesearch/fileSearch.js'; +export * from './utils/errorParsing.js'; // Export services export * from './services/fileDiscoveryService.js'; diff --git a/packages/cli/src/ui/utils/errorParsing.test.ts b/packages/core/src/utils/errorParsing.test.ts similarity index 98% rename from packages/cli/src/ui/utils/errorParsing.test.ts rename to packages/core/src/utils/errorParsing.test.ts index 770dffad..f2a4709a 100644 --- a/packages/cli/src/ui/utils/errorParsing.test.ts +++ b/packages/core/src/utils/errorParsing.test.ts @@ -6,12 +6,11 @@ import { describe, it, expect } from 'vitest'; import { parseAndFormatApiError } from './errorParsing.js'; -import { - AuthType, - UserTierId, - DEFAULT_GEMINI_FLASH_MODEL, - isProQuotaExceededError, -} from '@google/gemini-cli-core'; +import { isProQuotaExceededError } from './quotaErrorDetection.js'; +import { DEFAULT_GEMINI_FLASH_MODEL } from '../config/models.js'; +import { UserTierId } from '../code_assist/types.js'; +import { AuthType } from '../core/contentGenerator.js'; +import { StructuredError } from '../core/turn.js'; describe('parseAndFormatApiError', () => { const _enterpriseMessage = diff --git a/packages/cli/src/ui/utils/errorParsing.ts b/packages/core/src/utils/errorParsing.ts similarity index 97% rename from packages/cli/src/ui/utils/errorParsing.ts rename to packages/core/src/utils/errorParsing.ts index 5031bc0a..aa15a652 100644 --- a/packages/cli/src/ui/utils/errorParsing.ts +++ b/packages/core/src/utils/errorParsing.ts @@ -5,15 +5,17 
@@ */ import { - AuthType, - UserTierId, - DEFAULT_GEMINI_FLASH_MODEL, - DEFAULT_GEMINI_MODEL, isProQuotaExceededError, isGenericQuotaExceededError, isApiError, isStructuredError, -} from '@google/gemini-cli-core'; +} from './quotaErrorDetection.js'; +import { + DEFAULT_GEMINI_MODEL, + DEFAULT_GEMINI_FLASH_MODEL, +} from '../config/models.js'; +import { UserTierId } from '../code_assist/types.js'; +import { AuthType } from '../core/contentGenerator.js'; // Free Tier message functions const getRateLimitErrorMessageGoogleFree = ( diff --git a/packages/core/src/utils/quotaErrorDetection.ts b/packages/core/src/utils/quotaErrorDetection.ts index 6fe9b312..1377b4fa 100644 --- a/packages/core/src/utils/quotaErrorDetection.ts +++ b/packages/core/src/utils/quotaErrorDetection.ts @@ -4,6 +4,8 @@ * SPDX-License-Identifier: Apache-2.0 */ +import { StructuredError } from '../core/turn.js'; + export interface ApiError { error: { code: number; @@ -13,11 +15,6 @@ export interface ApiError { }; } -interface StructuredError { - message: string; - status?: number; -} - export function isApiError(error: unknown): error is ApiError { return ( typeof error === 'object' && From 38876b738f4c9ef8bd1b839d5e33580486e9a089 Mon Sep 17 00:00:00 2001 From: shrutip90 Date: Wed, 13 Aug 2025 11:06:31 -0700 Subject: [PATCH 28/45] Add support for trustedFolders.json config file (#6073) --- packages/cli/src/config/config.test.ts | 123 +++++++++++ packages/cli/src/config/config.ts | 6 +- .../cli/src/config/trustedFolders.test.ts | 203 ++++++++++++++++++ packages/cli/src/config/trustedFolders.ts | 158 ++++++++++++++ packages/cli/src/ui/App.tsx | 6 +- .../cli/src/ui/hooks/useFolderTrust.test.ts | 163 ++++++++++---- packages/cli/src/ui/hooks/useFolderTrust.ts | 41 ++-- packages/core/src/config/config.ts | 7 + 8 files changed, 644 insertions(+), 63 deletions(-) create mode 100644 packages/cli/src/config/trustedFolders.test.ts create mode 100644 packages/cli/src/config/trustedFolders.ts diff --git 
a/packages/cli/src/config/config.test.ts b/packages/cli/src/config/config.test.ts index fc4d24bd..69985867 100644 --- a/packages/cli/src/config/config.test.ts +++ b/packages/cli/src/config/config.test.ts @@ -13,6 +13,11 @@ import { loadCliConfig, parseArguments } from './config.js'; import { Settings } from './settings.js'; import { Extension } from './extension.js'; import * as ServerConfig from '@google/gemini-cli-core'; +import { isWorkspaceTrusted } from './trustedFolders.js'; + +vi.mock('./trustedFolders.js', () => ({ + isWorkspaceTrusted: vi.fn(), +})); vi.mock('os', async (importOriginal) => { const actualOs = await importOriginal(); @@ -1628,6 +1633,7 @@ describe('loadCliConfig approval mode', () => { vi.resetAllMocks(); vi.mocked(os.homedir).mockReturnValue('/mock/home/user'); process.env.GEMINI_API_KEY = 'test-api-key'; + process.argv = ['node', 'script.js']; // Reset argv for each test }); afterEach(() => { @@ -1696,3 +1702,120 @@ describe('loadCliConfig approval mode', () => { expect(config.getApprovalMode()).toBe(ServerConfig.ApprovalMode.YOLO); }); }); + +describe('loadCliConfig trustedFolder', () => { + const originalArgv = process.argv; + const originalEnv = { ...process.env }; + + beforeEach(() => { + vi.resetAllMocks(); + vi.mocked(os.homedir).mockReturnValue('/mock/home/user'); + process.env.GEMINI_API_KEY = 'test-api-key'; + process.argv = ['node', 'script.js']; // Reset argv for each test + }); + + afterEach(() => { + process.argv = originalArgv; + process.env = originalEnv; + vi.restoreAllMocks(); + }); + + const testCases = [ + // Cases where folderTrustFeature is false (feature disabled) + { + folderTrustFeature: false, + folderTrust: true, + isWorkspaceTrusted: true, + expectedFolderTrust: false, + expectedIsTrustedFolder: true, + description: + 'feature disabled, folderTrust true, workspace trusted -> behave as trusted', + }, + { + folderTrustFeature: false, + folderTrust: true, + isWorkspaceTrusted: false, + expectedFolderTrust: false, + 
expectedIsTrustedFolder: true, + description: + 'feature disabled, folderTrust true, workspace not trusted -> behave as trusted', + }, + { + folderTrustFeature: false, + folderTrust: false, + isWorkspaceTrusted: true, + expectedFolderTrust: false, + expectedIsTrustedFolder: true, + description: + 'feature disabled, folderTrust false, workspace trusted -> behave as trusted', + }, + + // Cases where folderTrustFeature is true but folderTrust setting is false + { + folderTrustFeature: true, + folderTrust: false, + isWorkspaceTrusted: true, + expectedFolderTrust: false, + expectedIsTrustedFolder: true, + description: + 'feature on, folderTrust false, workspace trusted -> behave as trusted', + }, + { + folderTrustFeature: true, + folderTrust: false, + isWorkspaceTrusted: false, + expectedFolderTrust: false, + expectedIsTrustedFolder: true, + description: + 'feature on, folderTrust false, workspace not trusted -> behave as trusted', + }, + + // Cases where feature is fully enabled (folderTrustFeature and folderTrust are true) + { + folderTrustFeature: true, + folderTrust: true, + isWorkspaceTrusted: true, + expectedFolderTrust: true, + expectedIsTrustedFolder: true, + description: + 'feature on, folderTrust on, workspace trusted -> is trusted', + }, + { + folderTrustFeature: true, + folderTrust: true, + isWorkspaceTrusted: false, + expectedFolderTrust: true, + expectedIsTrustedFolder: false, + description: + 'feature on, folderTrust on, workspace NOT trusted -> is NOT trusted', + }, + { + folderTrustFeature: true, + folderTrust: true, + isWorkspaceTrusted: undefined, + expectedFolderTrust: true, + expectedIsTrustedFolder: undefined, + description: + 'feature on, folderTrust on, workspace trust unknown -> is unknown', + }, + ]; + + for (const { + folderTrustFeature, + folderTrust, + isWorkspaceTrusted: mockTrustValue, + expectedFolderTrust, + expectedIsTrustedFolder, + description, + } of testCases) { + it(`should be correct for: ${description}`, async () => { + 
(isWorkspaceTrusted as vi.Mock).mockReturnValue(mockTrustValue); + const argv = await parseArguments(); + const settings: Settings = { folderTrustFeature, folderTrust }; + const config = await loadCliConfig(settings, [], 'test-session', argv); + + expect(config.getFolderTrust()).toBe(expectedFolderTrust); + expect(config.isTrustedFolder()).toBe(expectedIsTrustedFolder); + }); + } +}); diff --git a/packages/cli/src/config/config.ts b/packages/cli/src/config/config.ts index 636696fa..296d140d 100644 --- a/packages/cli/src/config/config.ts +++ b/packages/cli/src/config/config.ts @@ -35,6 +35,8 @@ import { getCliVersion } from '../utils/version.js'; import { loadSandboxConfig } from './sandboxConfig.js'; import { resolvePath } from '../utils/resolvePath.js'; +import { isWorkspaceTrusted } from './trustedFolders.js'; + // Simple console logger for now - replace with actual logger if available const logger = { // eslint-disable-next-line @typescript-eslint/no-explicit-any @@ -317,8 +319,9 @@ export async function loadCliConfig( const ideMode = settings.ideMode ?? false; const folderTrustFeature = settings.folderTrustFeature ?? false; - const folderTrustSetting = settings.folderTrust ?? false; + const folderTrustSetting = settings.folderTrust ?? true; const folderTrust = folderTrustFeature && folderTrustSetting; + const trustedFolder = folderTrust ? isWorkspaceTrusted() : true; const allExtensions = annotateActiveExtensions( extensions, @@ -523,6 +526,7 @@ export async function loadCliConfig( folderTrustFeature, folderTrust, interactive, + trustedFolder, }); } diff --git a/packages/cli/src/config/trustedFolders.test.ts b/packages/cli/src/config/trustedFolders.test.ts new file mode 100644 index 00000000..67bf9cfc --- /dev/null +++ b/packages/cli/src/config/trustedFolders.test.ts @@ -0,0 +1,203 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +// Mock 'os' first. 
+import * as osActual from 'os'; +vi.mock('os', async (importOriginal) => { + const actualOs = await importOriginal(); + return { + ...actualOs, + homedir: vi.fn(() => '/mock/home/user'), + platform: vi.fn(() => 'linux'), + }; +}); + +import { + describe, + it, + expect, + vi, + beforeEach, + afterEach, + type Mocked, + type Mock, +} from 'vitest'; +import * as fs from 'fs'; +import stripJsonComments from 'strip-json-comments'; +import * as path from 'path'; + +import { + loadTrustedFolders, + USER_TRUSTED_FOLDERS_PATH, + TrustLevel, + isWorkspaceTrusted, +} from './trustedFolders.js'; + +vi.mock('fs', async (importOriginal) => { + const actualFs = await importOriginal(); + return { + ...actualFs, + existsSync: vi.fn(), + readFileSync: vi.fn(), + writeFileSync: vi.fn(), + mkdirSync: vi.fn(), + }; +}); + +vi.mock('strip-json-comments', () => ({ + default: vi.fn((content) => content), +})); + +describe('Trusted Folders Loading', () => { + let mockFsExistsSync: Mocked; + let mockStripJsonComments: Mocked; + let mockFsWriteFileSync: Mocked; + + beforeEach(() => { + vi.resetAllMocks(); + mockFsExistsSync = vi.mocked(fs.existsSync); + mockStripJsonComments = vi.mocked(stripJsonComments); + mockFsWriteFileSync = vi.mocked(fs.writeFileSync); + vi.mocked(osActual.homedir).mockReturnValue('/mock/home/user'); + (mockStripJsonComments as unknown as Mock).mockImplementation( + (jsonString: string) => jsonString, + ); + (mockFsExistsSync as Mock).mockReturnValue(false); + (fs.readFileSync as Mock).mockReturnValue('{}'); + }); + + afterEach(() => { + vi.restoreAllMocks(); + }); + + it('should load empty rules if no files exist', () => { + const { rules, errors } = loadTrustedFolders(); + expect(rules).toEqual([]); + expect(errors).toEqual([]); + }); + + it('should load user rules if only user file exists', () => { + const userPath = USER_TRUSTED_FOLDERS_PATH; + (mockFsExistsSync as Mock).mockImplementation((p) => p === userPath); + const userContent = { + '/user/folder': 
TrustLevel.TRUST_FOLDER, + }; + (fs.readFileSync as Mock).mockImplementation((p) => { + if (p === userPath) return JSON.stringify(userContent); + return '{}'; + }); + + const { rules, errors } = loadTrustedFolders(); + expect(rules).toEqual([ + { path: '/user/folder', trustLevel: TrustLevel.TRUST_FOLDER }, + ]); + expect(errors).toEqual([]); + }); + + it('should handle JSON parsing errors gracefully', () => { + const userPath = USER_TRUSTED_FOLDERS_PATH; + (mockFsExistsSync as Mock).mockImplementation((p) => p === userPath); + (fs.readFileSync as Mock).mockImplementation((p) => { + if (p === userPath) return 'invalid json'; + return '{}'; + }); + + const { rules, errors } = loadTrustedFolders(); + expect(rules).toEqual([]); + expect(errors.length).toBe(1); + expect(errors[0].path).toBe(userPath); + expect(errors[0].message).toContain('Unexpected token'); + }); + + it('setValue should update the user config and save it', () => { + const loadedFolders = loadTrustedFolders(); + loadedFolders.setValue('/new/path', TrustLevel.TRUST_FOLDER); + + expect(loadedFolders.user.config['/new/path']).toBe( + TrustLevel.TRUST_FOLDER, + ); + expect(mockFsWriteFileSync).toHaveBeenCalledWith( + USER_TRUSTED_FOLDERS_PATH, + JSON.stringify({ '/new/path': TrustLevel.TRUST_FOLDER }, null, 2), + 'utf-8', + ); + }); +}); + +describe('isWorkspaceTrusted', () => { + let mockCwd: string; + const mockRules: Record = {}; + + beforeEach(() => { + vi.spyOn(process, 'cwd').mockImplementation(() => mockCwd); + vi.spyOn(fs, 'readFileSync').mockImplementation((p) => { + if (p === USER_TRUSTED_FOLDERS_PATH) { + return JSON.stringify(mockRules); + } + return '{}'; + }); + vi.spyOn(fs, 'existsSync').mockImplementation( + (p) => p === USER_TRUSTED_FOLDERS_PATH, + ); + }); + + afterEach(() => { + vi.restoreAllMocks(); + // Clear the object + Object.keys(mockRules).forEach((key) => delete mockRules[key]); + }); + + it('should return true for a directly trusted folder', () => { + mockCwd = 
'/home/user/projectA'; + mockRules['/home/user/projectA'] = TrustLevel.TRUST_FOLDER; + expect(isWorkspaceTrusted()).toBe(true); + }); + + it('should return true for a child of a trusted folder', () => { + mockCwd = '/home/user/projectA/src'; + mockRules['/home/user/projectA'] = TrustLevel.TRUST_FOLDER; + expect(isWorkspaceTrusted()).toBe(true); + }); + + it('should return true for a child of a trusted parent folder', () => { + mockCwd = '/home/user/projectB'; + mockRules['/home/user/projectB/somefile.txt'] = TrustLevel.TRUST_PARENT; + expect(isWorkspaceTrusted()).toBe(true); + }); + + it('should return false for a directly untrusted folder', () => { + mockCwd = '/home/user/untrusted'; + mockRules['/home/user/untrusted'] = TrustLevel.DO_NOT_TRUST; + expect(isWorkspaceTrusted()).toBe(false); + }); + + it('should return undefined for a child of an untrusted folder', () => { + mockCwd = '/home/user/untrusted/src'; + mockRules['/home/user/untrusted'] = TrustLevel.DO_NOT_TRUST; + expect(isWorkspaceTrusted()).toBeUndefined(); + }); + + it('should return undefined when no rules match', () => { + mockCwd = '/home/user/other'; + mockRules['/home/user/projectA'] = TrustLevel.TRUST_FOLDER; + mockRules['/home/user/untrusted'] = TrustLevel.DO_NOT_TRUST; + expect(isWorkspaceTrusted()).toBeUndefined(); + }); + + it('should prioritize trust over distrust', () => { + mockCwd = '/home/user/projectA/untrusted'; + mockRules['/home/user/projectA'] = TrustLevel.TRUST_FOLDER; + mockRules['/home/user/projectA/untrusted'] = TrustLevel.DO_NOT_TRUST; + expect(isWorkspaceTrusted()).toBe(true); + }); + + it('should handle path normalization', () => { + mockCwd = '/home/user/projectA'; + mockRules[`/home/user/../user/${path.basename('/home/user/projectA')}`] = + TrustLevel.TRUST_FOLDER; + expect(isWorkspaceTrusted()).toBe(true); + }); +}); diff --git a/packages/cli/src/config/trustedFolders.ts b/packages/cli/src/config/trustedFolders.ts new file mode 100644 index 00000000..9da27c80 --- /dev/null 
+++ b/packages/cli/src/config/trustedFolders.ts @@ -0,0 +1,158 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import * as fs from 'fs'; +import * as path from 'path'; +import { homedir } from 'os'; +import { getErrorMessage, isWithinRoot } from '@google/gemini-cli-core'; +import stripJsonComments from 'strip-json-comments'; + +export const TRUSTED_FOLDERS_FILENAME = 'trustedFolders.json'; +export const SETTINGS_DIRECTORY_NAME = '.gemini'; +export const USER_SETTINGS_DIR = path.join(homedir(), SETTINGS_DIRECTORY_NAME); +export const USER_TRUSTED_FOLDERS_PATH = path.join( + USER_SETTINGS_DIR, + TRUSTED_FOLDERS_FILENAME, +); + +export enum TrustLevel { + TRUST_FOLDER = 'TRUST_FOLDER', + TRUST_PARENT = 'TRUST_PARENT', + DO_NOT_TRUST = 'DO_NOT_TRUST', +} + +export interface TrustRule { + path: string; + trustLevel: TrustLevel; +} + +export interface TrustedFoldersError { + message: string; + path: string; +} + +export interface TrustedFoldersFile { + config: Record; + path: string; +} + +export class LoadedTrustedFolders { + constructor( + public user: TrustedFoldersFile, + public errors: TrustedFoldersError[], + ) {} + + get rules(): TrustRule[] { + return Object.entries(this.user.config).map(([path, trustLevel]) => ({ + path, + trustLevel, + })); + } + + setValue(path: string, trustLevel: TrustLevel): void { + this.user.config[path] = trustLevel; + saveTrustedFolders(this.user); + } +} + +export function loadTrustedFolders(): LoadedTrustedFolders { + const errors: TrustedFoldersError[] = []; + const userConfig: Record = {}; + + const userPath = USER_TRUSTED_FOLDERS_PATH; + + // Load user trusted folders + try { + if (fs.existsSync(userPath)) { + const content = fs.readFileSync(userPath, 'utf-8'); + const parsed = JSON.parse(stripJsonComments(content)) as Record< + string, + TrustLevel + >; + if (parsed) { + Object.assign(userConfig, parsed); + } + } + } catch (error: unknown) { + errors.push({ + message: 
getErrorMessage(error), + path: userPath, + }); + } + + return new LoadedTrustedFolders( + { path: userPath, config: userConfig }, + errors, + ); +} + +export function saveTrustedFolders( + trustedFoldersFile: TrustedFoldersFile, +): void { + try { + // Ensure the directory exists + const dirPath = path.dirname(trustedFoldersFile.path); + if (!fs.existsSync(dirPath)) { + fs.mkdirSync(dirPath, { recursive: true }); + } + + fs.writeFileSync( + trustedFoldersFile.path, + JSON.stringify(trustedFoldersFile.config, null, 2), + 'utf-8', + ); + } catch (error) { + console.error('Error saving trusted folders file:', error); + } +} + +export function isWorkspaceTrusted(): boolean | undefined { + const { rules, errors } = loadTrustedFolders(); + + if (errors.length > 0) { + for (const error of errors) { + console.error( + `Error loading trusted folders config from ${error.path}: ${error.message}`, + ); + } + } + + const trustedPaths: string[] = []; + const untrustedPaths: string[] = []; + + for (const rule of rules) { + switch (rule.trustLevel) { + case TrustLevel.TRUST_FOLDER: + trustedPaths.push(rule.path); + break; + case TrustLevel.TRUST_PARENT: + trustedPaths.push(path.dirname(rule.path)); + break; + case TrustLevel.DO_NOT_TRUST: + untrustedPaths.push(rule.path); + break; + default: + // Do nothing for unknown trust levels. 
+ break; + } + } + + const cwd = process.cwd(); + + for (const trustedPath of trustedPaths) { + if (isWithinRoot(cwd, trustedPath)) { + return true; + } + } + + for (const untrustedPath of untrustedPaths) { + if (path.normalize(cwd) === path.normalize(untrustedPath)) { + return false; + } + } + + return undefined; +} diff --git a/packages/cli/src/ui/App.tsx b/packages/cli/src/ui/App.tsx index e8aca549..5d4643e5 100644 --- a/packages/cli/src/ui/App.tsx +++ b/packages/cli/src/ui/App.tsx @@ -252,8 +252,10 @@ const App = ({ config, settings, startupWarnings = [], version }: AppProps) => { const { isSettingsDialogOpen, openSettingsDialog, closeSettingsDialog } = useSettingsCommand(); - const { isFolderTrustDialogOpen, handleFolderTrustSelect } = - useFolderTrust(settings); + const { isFolderTrustDialogOpen, handleFolderTrustSelect } = useFolderTrust( + settings, + config, + ); const { isAuthDialogOpen, diff --git a/packages/cli/src/ui/hooks/useFolderTrust.test.ts b/packages/cli/src/ui/hooks/useFolderTrust.test.ts index 61552af0..e565ab05 100644 --- a/packages/cli/src/ui/hooks/useFolderTrust.test.ts +++ b/packages/cli/src/ui/hooks/useFolderTrust.test.ts @@ -4,15 +4,33 @@ * SPDX-License-Identifier: Apache-2.0 */ -import { renderHook, act } from '@testing-library/react'; import { vi } from 'vitest'; +import { renderHook, act } from '@testing-library/react'; import { useFolderTrust } from './useFolderTrust.js'; -import { LoadedSettings, SettingScope } from '../../config/settings.js'; +import { type Config } from '@google/gemini-cli-core'; +import { LoadedSettings } from '../../config/settings.js'; import { FolderTrustChoice } from '../components/FolderTrustDialog.js'; +import { + LoadedTrustedFolders, + TrustLevel, +} from '../../config/trustedFolders.js'; +import * as process from 'process'; + +import * as trustedFolders from '../../config/trustedFolders.js'; + +vi.mock('process', () => ({ + cwd: vi.fn(), + platform: 'linux', +})); describe('useFolderTrust', () => { - 
it('should set isFolderTrustDialogOpen to true when folderTrustFeature is true and folderTrust is undefined', () => { - const settings = { + let mockSettings: LoadedSettings; + let mockConfig: Config; + let mockTrustedFolders: LoadedTrustedFolders; + let loadTrustedFoldersSpy: vi.SpyInstance; + + beforeEach(() => { + mockSettings = { merged: { folderTrustFeature: true, folderTrust: undefined, @@ -20,59 +38,110 @@ describe('useFolderTrust', () => { setValue: vi.fn(), } as unknown as LoadedSettings; - const { result } = renderHook(() => useFolderTrust(settings)); + mockConfig = { + isTrustedFolder: vi.fn().mockReturnValue(undefined), + } as unknown as Config; + mockTrustedFolders = { + setValue: vi.fn(), + } as unknown as LoadedTrustedFolders; + + loadTrustedFoldersSpy = vi + .spyOn(trustedFolders, 'loadTrustedFolders') + .mockReturnValue(mockTrustedFolders); + (process.cwd as vi.Mock).mockReturnValue('/test/path'); + }); + + afterEach(() => { + vi.clearAllMocks(); + }); + + it('should not open dialog when folder is already trusted', () => { + (mockConfig.isTrustedFolder as vi.Mock).mockReturnValue(true); + const { result } = renderHook(() => + useFolderTrust(mockSettings, mockConfig), + ); + expect(result.current.isFolderTrustDialogOpen).toBe(false); + }); + + it('should not open dialog when folder is already untrusted', () => { + (mockConfig.isTrustedFolder as vi.Mock).mockReturnValue(false); + const { result } = renderHook(() => + useFolderTrust(mockSettings, mockConfig), + ); + expect(result.current.isFolderTrustDialogOpen).toBe(false); + }); + + it('should open dialog when folder trust is undefined', () => { + (mockConfig.isTrustedFolder as vi.Mock).mockReturnValue(undefined); + const { result } = renderHook(() => + useFolderTrust(mockSettings, mockConfig), + ); expect(result.current.isFolderTrustDialogOpen).toBe(true); }); - it('should set isFolderTrustDialogOpen to false when folderTrustFeature is false', () => { - const settings = { - merged: { - 
folderTrustFeature: false, - folderTrust: undefined, - }, - setValue: vi.fn(), - } as unknown as LoadedSettings; - - const { result } = renderHook(() => useFolderTrust(settings)); - - expect(result.current.isFolderTrustDialogOpen).toBe(false); - }); - - it('should set isFolderTrustDialogOpen to false when folderTrust is defined', () => { - const settings = { - merged: { - folderTrustFeature: true, - folderTrust: true, - }, - setValue: vi.fn(), - } as unknown as LoadedSettings; - - const { result } = renderHook(() => useFolderTrust(settings)); - - expect(result.current.isFolderTrustDialogOpen).toBe(false); - }); - - it('should call setValue and set isFolderTrustDialogOpen to false on handleFolderTrustSelect', () => { - const settings = { - merged: { - folderTrustFeature: true, - folderTrust: undefined, - }, - setValue: vi.fn(), - } as unknown as LoadedSettings; - - const { result } = renderHook(() => useFolderTrust(settings)); + it('should handle TRUST_FOLDER choice', () => { + const { result } = renderHook(() => + useFolderTrust(mockSettings, mockConfig), + ); act(() => { result.current.handleFolderTrustSelect(FolderTrustChoice.TRUST_FOLDER); }); - expect(settings.setValue).toHaveBeenCalledWith( - SettingScope.User, - 'folderTrust', - true, + expect(loadTrustedFoldersSpy).toHaveBeenCalled(); + expect(mockTrustedFolders.setValue).toHaveBeenCalledWith( + '/test/path', + TrustLevel.TRUST_FOLDER, ); expect(result.current.isFolderTrustDialogOpen).toBe(false); }); + + it('should handle TRUST_PARENT choice', () => { + const { result } = renderHook(() => + useFolderTrust(mockSettings, mockConfig), + ); + + act(() => { + result.current.handleFolderTrustSelect(FolderTrustChoice.TRUST_PARENT); + }); + + expect(mockTrustedFolders.setValue).toHaveBeenCalledWith( + '/test/path', + TrustLevel.TRUST_PARENT, + ); + expect(result.current.isFolderTrustDialogOpen).toBe(false); + }); + + it('should handle DO_NOT_TRUST choice', () => { + const { result } = renderHook(() => + 
useFolderTrust(mockSettings, mockConfig), + ); + + act(() => { + result.current.handleFolderTrustSelect(FolderTrustChoice.DO_NOT_TRUST); + }); + + expect(mockTrustedFolders.setValue).toHaveBeenCalledWith( + '/test/path', + TrustLevel.DO_NOT_TRUST, + ); + expect(result.current.isFolderTrustDialogOpen).toBe(false); + }); + + it('should do nothing for default choice', () => { + const { result } = renderHook(() => + useFolderTrust(mockSettings, mockConfig), + ); + + act(() => { + result.current.handleFolderTrustSelect( + 'invalid_choice' as FolderTrustChoice, + ); + }); + + expect(mockTrustedFolders.setValue).not.toHaveBeenCalled(); + expect(mockSettings.setValue).not.toHaveBeenCalled(); + expect(result.current.isFolderTrustDialogOpen).toBe(true); + }); }); diff --git a/packages/cli/src/ui/hooks/useFolderTrust.ts b/packages/cli/src/ui/hooks/useFolderTrust.ts index 90a69132..6458d4aa 100644 --- a/packages/cli/src/ui/hooks/useFolderTrust.ts +++ b/packages/cli/src/ui/hooks/useFolderTrust.ts @@ -5,24 +5,39 @@ */ import { useState, useCallback } from 'react'; -import { LoadedSettings, SettingScope } from '../../config/settings.js'; +import { type Config } from '@google/gemini-cli-core'; +import { LoadedSettings } from '../../config/settings.js'; import { FolderTrustChoice } from '../components/FolderTrustDialog.js'; +import { loadTrustedFolders, TrustLevel } from '../../config/trustedFolders.js'; +import * as process from 'process'; -export const useFolderTrust = (settings: LoadedSettings) => { +export const useFolderTrust = (settings: LoadedSettings, config: Config) => { const [isFolderTrustDialogOpen, setIsFolderTrustDialogOpen] = useState( - !!settings.merged.folderTrustFeature && - // TODO: Update to avoid showing dialog for folders that are trusted. 
- settings.merged.folderTrust === undefined, + config.isTrustedFolder() === undefined, ); - const handleFolderTrustSelect = useCallback( - (_choice: FolderTrustChoice) => { - // TODO: Store folderPath in the trusted folders config file based on the choice. - settings.setValue(SettingScope.User, 'folderTrust', true); - setIsFolderTrustDialogOpen(false); - }, - [settings], - ); + const handleFolderTrustSelect = useCallback((choice: FolderTrustChoice) => { + const trustedFolders = loadTrustedFolders(); + const cwd = process.cwd(); + let trustLevel: TrustLevel; + + switch (choice) { + case FolderTrustChoice.TRUST_FOLDER: + trustLevel = TrustLevel.TRUST_FOLDER; + break; + case FolderTrustChoice.TRUST_PARENT: + trustLevel = TrustLevel.TRUST_PARENT; + break; + case FolderTrustChoice.DO_NOT_TRUST: + trustLevel = TrustLevel.DO_NOT_TRUST; + break; + default: + return; + } + + trustedFolders.setValue(cwd, trustLevel); + setIsFolderTrustDialogOpen(false); + }, []); return { isFolderTrustDialogOpen, diff --git a/packages/core/src/config/config.ts b/packages/core/src/config/config.ts index 069a486d..7c61f239 100644 --- a/packages/core/src/config/config.ts +++ b/packages/core/src/config/config.ts @@ -197,6 +197,7 @@ export interface ConfigParameters { loadMemoryFromIncludeDirectories?: boolean; chatCompression?: ChatCompressionSettings; interactive?: boolean; + trustedFolder?: boolean; } export class Config { @@ -260,6 +261,7 @@ export class Config { private readonly loadMemoryFromIncludeDirectories: boolean = false; private readonly chatCompression: ChatCompressionSettings | undefined; private readonly interactive: boolean; + private readonly trustedFolder: boolean | undefined; private initialized: boolean = false; constructor(params: ConfigParameters) { @@ -324,6 +326,7 @@ export class Config { params.loadMemoryFromIncludeDirectories ?? false; this.chatCompression = params.chatCompression; this.interactive = params.interactive ?? 
false; + this.trustedFolder = params.trustedFolder; if (params.contextFileName) { setGeminiMdFilename(params.contextFileName); @@ -664,6 +667,10 @@ export class Config { return this.folderTrust; } + isTrustedFolder(): boolean | undefined { + return this.trustedFolder; + } + setIdeMode(value: boolean): void { this.ideMode = value; } From e4473a9007e7555809ec9e4087be636e8aadf017 Mon Sep 17 00:00:00 2001 From: Jacob Richman Date: Wed, 13 Aug 2025 11:13:18 -0700 Subject: [PATCH 29/45] Revert "chore(cli/slashcommands): Add status enum to SlashCommandEvent telemetry" (#6161) --- .../ui/hooks/slashCommandProcessor.test.ts | 110 ++++---------- .../cli/src/ui/hooks/slashCommandProcessor.ts | 136 ++++++++---------- packages/core/index.ts | 1 - .../clearcut-logger/clearcut-logger.ts | 7 - .../clearcut-logger/event-metadata-key.ts | 3 - packages/core/src/telemetry/index.ts | 2 - packages/core/src/telemetry/types.ts | 83 ++++------- 7 files changed, 124 insertions(+), 218 deletions(-) diff --git a/packages/cli/src/ui/hooks/slashCommandProcessor.test.ts b/packages/cli/src/ui/hooks/slashCommandProcessor.test.ts index 24880fc1..66c1b883 100644 --- a/packages/cli/src/ui/hooks/slashCommandProcessor.test.ts +++ b/packages/cli/src/ui/hooks/slashCommandProcessor.test.ts @@ -4,17 +4,18 @@ * SPDX-License-Identifier: Apache-2.0 */ -const { logSlashCommand } = vi.hoisted(() => ({ +const { logSlashCommand, SlashCommandEvent } = vi.hoisted(() => ({ logSlashCommand: vi.fn(), + SlashCommandEvent: vi.fn((command, subCommand) => ({ command, subCommand })), })); vi.mock('@google/gemini-cli-core', async (importOriginal) => { const original = await importOriginal(); - return { ...original, logSlashCommand, + SlashCommandEvent, getIdeInstaller: vi.fn().mockReturnValue(null), }; }); @@ -24,10 +25,10 @@ const { mockProcessExit } = vi.hoisted(() => ({ })); vi.mock('node:process', () => { - const mockProcess: Partial = { + const mockProcess = { exit: mockProcessExit, - platform: 'sunos', - } as unknown 
as NodeJS.Process; + platform: 'test-platform', + }; return { ...mockProcess, default: mockProcess, @@ -76,28 +77,22 @@ import { ConfirmShellCommandsActionReturn, SlashCommand, } from '../commands/types.js'; -import { ToolConfirmationOutcome } from '@google/gemini-cli-core'; +import { Config, ToolConfirmationOutcome } from '@google/gemini-cli-core'; import { LoadedSettings } from '../../config/settings.js'; import { MessageType } from '../types.js'; import { BuiltinCommandLoader } from '../../services/BuiltinCommandLoader.js'; import { FileCommandLoader } from '../../services/FileCommandLoader.js'; import { McpPromptLoader } from '../../services/McpPromptLoader.js'; -import { - SlashCommandStatus, - makeFakeConfig, -} from '@google/gemini-cli-core/index.js'; -function createTestCommand( +const createTestCommand = ( overrides: Partial, kind: CommandKind = CommandKind.BUILT_IN, -): SlashCommand { - return { - name: 'test', - description: 'a test command', - kind, - ...overrides, - }; -} +): SlashCommand => ({ + name: 'test', + description: 'a test command', + kind, + ...overrides, +}); describe('useSlashCommandProcessor', () => { const mockAddItem = vi.fn(); @@ -107,7 +102,15 @@ describe('useSlashCommandProcessor', () => { const mockOpenAuthDialog = vi.fn(); const mockSetQuittingMessages = vi.fn(); - const mockConfig = makeFakeConfig({}); + const mockConfig = { + getProjectRoot: vi.fn(() => '/mock/cwd'), + getSessionId: vi.fn(() => 'test-session'), + getGeminiClient: vi.fn(() => ({ + setHistory: vi.fn().mockResolvedValue(undefined), + })), + getExtensions: vi.fn(() => []), + getIdeMode: vi.fn(() => false), + } as unknown as Config; const mockSettings = {} as LoadedSettings; @@ -881,9 +884,7 @@ describe('useSlashCommandProcessor', () => { const loggingTestCommands: SlashCommand[] = [ createTestCommand({ name: 'logtest', - action: vi - .fn() - .mockResolvedValue({ type: 'message', content: 'hello world' }), + action: mockCommandAction, }), createTestCommand({ name: 
'logwithsub', @@ -894,10 +895,6 @@ describe('useSlashCommandProcessor', () => { }), ], }), - createTestCommand({ - name: 'fail', - action: vi.fn().mockRejectedValue(new Error('oh no!')), - }), createTestCommand({ name: 'logalias', altNames: ['la'], @@ -908,6 +905,7 @@ describe('useSlashCommandProcessor', () => { beforeEach(() => { mockCommandAction.mockClear(); vi.mocked(logSlashCommand).mockClear(); + vi.mocked(SlashCommandEvent).mockClear(); }); it('should log a simple slash command', async () => { @@ -919,45 +917,8 @@ describe('useSlashCommandProcessor', () => { await result.current.handleSlashCommand('/logtest'); }); - expect(logSlashCommand).toHaveBeenCalledWith( - mockConfig, - expect.objectContaining({ - command: 'logtest', - subcommand: undefined, - status: SlashCommandStatus.SUCCESS, - }), - ); - }); - - it('logs nothing for a bogus command', async () => { - const result = setupProcessorHook(loggingTestCommands); - await waitFor(() => - expect(result.current.slashCommands.length).toBeGreaterThan(0), - ); - await act(async () => { - await result.current.handleSlashCommand('/bogusbogusbogus'); - }); - - expect(logSlashCommand).not.toHaveBeenCalled(); - }); - - it('logs a failure event for a failed command', async () => { - const result = setupProcessorHook(loggingTestCommands); - await waitFor(() => - expect(result.current.slashCommands.length).toBeGreaterThan(0), - ); - await act(async () => { - await result.current.handleSlashCommand('/fail'); - }); - - expect(logSlashCommand).toHaveBeenCalledWith( - mockConfig, - expect.objectContaining({ - command: 'fail', - status: 'error', - subcommand: undefined, - }), - ); + expect(logSlashCommand).toHaveBeenCalledTimes(1); + expect(SlashCommandEvent).toHaveBeenCalledWith('logtest', undefined); }); it('should log a slash command with a subcommand', async () => { @@ -969,13 +930,8 @@ describe('useSlashCommandProcessor', () => { await result.current.handleSlashCommand('/logwithsub sub'); }); - 
expect(logSlashCommand).toHaveBeenCalledWith( - mockConfig, - expect.objectContaining({ - command: 'logwithsub', - subcommand: 'sub', - }), - ); + expect(logSlashCommand).toHaveBeenCalledTimes(1); + expect(SlashCommandEvent).toHaveBeenCalledWith('logwithsub', 'sub'); }); it('should log the command path when an alias is used', async () => { @@ -986,12 +942,8 @@ describe('useSlashCommandProcessor', () => { await act(async () => { await result.current.handleSlashCommand('/la'); }); - expect(logSlashCommand).toHaveBeenCalledWith( - mockConfig, - expect.objectContaining({ - command: 'logalias', - }), - ); + expect(logSlashCommand).toHaveBeenCalledTimes(1); + expect(SlashCommandEvent).toHaveBeenCalledWith('logalias', undefined); }); it('should not log for unknown commands', async () => { diff --git a/packages/cli/src/ui/hooks/slashCommandProcessor.ts b/packages/cli/src/ui/hooks/slashCommandProcessor.ts index aaa2fbff..b4ce0d4d 100644 --- a/packages/cli/src/ui/hooks/slashCommandProcessor.ts +++ b/packages/cli/src/ui/hooks/slashCommandProcessor.ts @@ -14,8 +14,7 @@ import { GitService, Logger, logSlashCommand, - makeSlashCommandEvent, - SlashCommandStatus, + SlashCommandEvent, ToolConfirmationOutcome, } from '@google/gemini-cli-core'; import { useSessionStats } from '../contexts/SessionContext.js'; @@ -230,70 +229,76 @@ export const useSlashCommandProcessor = ( overwriteConfirmed?: boolean, ): Promise => { setIsProcessing(true); - - if (typeof rawQuery !== 'string') { - return false; - } - - const trimmed = rawQuery.trim(); - if (!trimmed.startsWith('/') && !trimmed.startsWith('?')) { - return false; - } - - const userMessageTimestamp = Date.now(); - addItem({ type: MessageType.USER, text: trimmed }, userMessageTimestamp); - - const parts = trimmed.substring(1).trim().split(/\s+/); - const commandPath = parts.filter((p) => p); // The parts of the command, e.g., ['memory', 'add'] - - let currentCommands = commands; - let commandToExecute: SlashCommand | undefined; - let 
pathIndex = 0; - let hasError = false; - const canonicalPath: string[] = []; - - for (const part of commandPath) { - // TODO: For better performance and architectural clarity, this two-pass - // search could be replaced. A more optimal approach would be to - // pre-compute a single lookup map in `CommandService.ts` that resolves - // all name and alias conflicts during the initial loading phase. The - // processor would then perform a single, fast lookup on that map. - - // First pass: check for an exact match on the primary command name. - let foundCommand = currentCommands.find((cmd) => cmd.name === part); - - // Second pass: if no primary name matches, check for an alias. - if (!foundCommand) { - foundCommand = currentCommands.find((cmd) => - cmd.altNames?.includes(part), - ); + try { + if (typeof rawQuery !== 'string') { + return false; } - if (foundCommand) { - commandToExecute = foundCommand; - canonicalPath.push(foundCommand.name); - pathIndex++; - if (foundCommand.subCommands) { - currentCommands = foundCommand.subCommands; + const trimmed = rawQuery.trim(); + if (!trimmed.startsWith('/') && !trimmed.startsWith('?')) { + return false; + } + + const userMessageTimestamp = Date.now(); + addItem( + { type: MessageType.USER, text: trimmed }, + userMessageTimestamp, + ); + + const parts = trimmed.substring(1).trim().split(/\s+/); + const commandPath = parts.filter((p) => p); // The parts of the command, e.g., ['memory', 'add'] + + let currentCommands = commands; + let commandToExecute: SlashCommand | undefined; + let pathIndex = 0; + const canonicalPath: string[] = []; + + for (const part of commandPath) { + // TODO: For better performance and architectural clarity, this two-pass + // search could be replaced. A more optimal approach would be to + // pre-compute a single lookup map in `CommandService.ts` that resolves + // all name and alias conflicts during the initial loading phase. The + // processor would then perform a single, fast lookup on that map. 
+ + // First pass: check for an exact match on the primary command name. + let foundCommand = currentCommands.find((cmd) => cmd.name === part); + + // Second pass: if no primary name matches, check for an alias. + if (!foundCommand) { + foundCommand = currentCommands.find((cmd) => + cmd.altNames?.includes(part), + ); + } + + if (foundCommand) { + commandToExecute = foundCommand; + canonicalPath.push(foundCommand.name); + pathIndex++; + if (foundCommand.subCommands) { + currentCommands = foundCommand.subCommands; + } else { + break; + } } else { break; } - } else { - break; } - } - const resolvedCommandPath = canonicalPath; - const subcommand = - resolvedCommandPath.length > 1 - ? resolvedCommandPath.slice(1).join(' ') - : undefined; - - try { if (commandToExecute) { const args = parts.slice(pathIndex).join(' '); if (commandToExecute.action) { + if (config) { + const resolvedCommandPath = canonicalPath; + const event = new SlashCommandEvent( + resolvedCommandPath[0], + resolvedCommandPath.length > 1 + ? 
resolvedCommandPath.slice(1).join(' ') + : undefined, + ); + logSlashCommand(config, event); + } + const fullCommandContext: CommandContext = { ...commandContext, invocation: { @@ -315,6 +320,7 @@ export const useSlashCommandProcessor = ( ]), }; } + const result = await commandToExecute.action( fullCommandContext, args, @@ -487,18 +493,8 @@ export const useSlashCommandProcessor = ( content: `Unknown command: ${trimmed}`, timestamp: new Date(), }); - return { type: 'handled' }; - } catch (e: unknown) { - hasError = true; - if (config) { - const event = makeSlashCommandEvent({ - command: resolvedCommandPath[0], - subcommand, - status: SlashCommandStatus.ERROR, - }); - logSlashCommand(config, event); - } + } catch (e) { addItem( { type: MessageType.ERROR, @@ -508,14 +504,6 @@ export const useSlashCommandProcessor = ( ); return { type: 'handled' }; } finally { - if (config && resolvedCommandPath[0] && !hasError) { - const event = makeSlashCommandEvent({ - command: resolvedCommandPath[0], - subcommand, - status: SlashCommandStatus.SUCCESS, - }); - logSlashCommand(config, event); - } setIsProcessing(false); } }, diff --git a/packages/core/index.ts b/packages/core/index.ts index 7b75b365..65a214ae 100644 --- a/packages/core/index.ts +++ b/packages/core/index.ts @@ -15,4 +15,3 @@ export { IdeConnectionEvent, IdeConnectionType, } from './src/telemetry/types.js'; -export { makeFakeConfig } from './src/test-utils/config.js'; diff --git a/packages/core/src/telemetry/clearcut-logger/clearcut-logger.ts b/packages/core/src/telemetry/clearcut-logger/clearcut-logger.ts index 9450f06d..b7be2af7 100644 --- a/packages/core/src/telemetry/clearcut-logger/clearcut-logger.ts +++ b/packages/core/src/telemetry/clearcut-logger/clearcut-logger.ts @@ -639,13 +639,6 @@ export class ClearcutLogger { }); } - if (event.status) { - data.push({ - gemini_cli_key: EventMetadataKey.GEMINI_CLI_SLASH_COMMAND_STATUS, - value: JSON.stringify(event.status), - }); - } - 
this.enqueueLogEvent(this.createLogEvent(slash_command_event_name, data)); this.flushIfNeeded(); } diff --git a/packages/core/src/telemetry/clearcut-logger/event-metadata-key.ts b/packages/core/src/telemetry/clearcut-logger/event-metadata-key.ts index cb4172ed..314e61a8 100644 --- a/packages/core/src/telemetry/clearcut-logger/event-metadata-key.ts +++ b/packages/core/src/telemetry/clearcut-logger/event-metadata-key.ts @@ -174,9 +174,6 @@ export enum EventMetadataKey { // Logs the subcommand of the slash command. GEMINI_CLI_SLASH_COMMAND_SUBCOMMAND = 42, - // Logs the status of the slash command (e.g. 'success', 'error') - GEMINI_CLI_SLASH_COMMAND_STATUS = 51, - // ========================================================================== // Next Speaker Check Event Keys // =========================================================================== diff --git a/packages/core/src/telemetry/index.ts b/packages/core/src/telemetry/index.ts index 0f343ab3..33781b87 100644 --- a/packages/core/src/telemetry/index.ts +++ b/packages/core/src/telemetry/index.ts @@ -41,8 +41,6 @@ export { FlashFallbackEvent, SlashCommandEvent, KittySequenceOverflowEvent, - makeSlashCommandEvent, - SlashCommandStatus, } from './types.js'; export { SpanStatusCode, ValueType } from '@opentelemetry/api'; export { SemanticAttributes } from '@opentelemetry/semantic-conventions'; diff --git a/packages/core/src/telemetry/types.ts b/packages/core/src/telemetry/types.ts index b07c4ca4..2b10280d 100644 --- a/packages/core/src/telemetry/types.ts +++ b/packages/core/src/telemetry/types.ts @@ -14,17 +14,9 @@ import { ToolCallDecision, } from './tool-call-decision.js'; -interface BaseTelemetryEvent { - 'event.name': string; - /** Current timestamp in ISO 8601 format */ - 'event.timestamp': string; -} - -type CommonFields = keyof BaseTelemetryEvent; - -export class StartSessionEvent implements BaseTelemetryEvent { +export class StartSessionEvent { 'event.name': 'cli_config'; - 'event.timestamp': string; + 
'event.timestamp': string; // ISO 8601 model: string; embedding_model: string; sandbox_enabled: boolean; @@ -68,9 +60,9 @@ export class StartSessionEvent implements BaseTelemetryEvent { } } -export class EndSessionEvent implements BaseTelemetryEvent { +export class EndSessionEvent { 'event.name': 'end_session'; - 'event.timestamp': string; + 'event.timestamp': string; // ISO 8601 session_id?: string; constructor(config?: Config) { @@ -80,9 +72,9 @@ export class EndSessionEvent implements BaseTelemetryEvent { } } -export class UserPromptEvent implements BaseTelemetryEvent { +export class UserPromptEvent { 'event.name': 'user_prompt'; - 'event.timestamp': string; + 'event.timestamp': string; // ISO 8601 prompt_length: number; prompt_id: string; auth_type?: string; @@ -103,9 +95,9 @@ export class UserPromptEvent implements BaseTelemetryEvent { } } -export class ToolCallEvent implements BaseTelemetryEvent { +export class ToolCallEvent { 'event.name': 'tool_call'; - 'event.timestamp': string; + 'event.timestamp': string; // ISO 8601 function_name: string; function_args: Record; duration_ms: number; @@ -150,9 +142,9 @@ export class ToolCallEvent implements BaseTelemetryEvent { } } -export class ApiRequestEvent implements BaseTelemetryEvent { +export class ApiRequestEvent { 'event.name': 'api_request'; - 'event.timestamp': string; + 'event.timestamp': string; // ISO 8601 model: string; prompt_id: string; request_text?: string; @@ -166,9 +158,9 @@ export class ApiRequestEvent implements BaseTelemetryEvent { } } -export class ApiErrorEvent implements BaseTelemetryEvent { +export class ApiErrorEvent { 'event.name': 'api_error'; - 'event.timestamp': string; + 'event.timestamp': string; // ISO 8601 model: string; error: string; error_type?: string; @@ -198,9 +190,9 @@ export class ApiErrorEvent implements BaseTelemetryEvent { } } -export class ApiResponseEvent implements BaseTelemetryEvent { +export class ApiResponseEvent { 'event.name': 'api_response'; - 'event.timestamp': 
string; + 'event.timestamp': string; // ISO 8601 model: string; status_code?: number | string; duration_ms: number; @@ -242,9 +234,9 @@ export class ApiResponseEvent implements BaseTelemetryEvent { } } -export class FlashFallbackEvent implements BaseTelemetryEvent { +export class FlashFallbackEvent { 'event.name': 'flash_fallback'; - 'event.timestamp': string; + 'event.timestamp': string; // ISO 8601 auth_type: string; constructor(auth_type: string) { @@ -260,9 +252,9 @@ export enum LoopType { LLM_DETECTED_LOOP = 'llm_detected_loop', } -export class LoopDetectedEvent implements BaseTelemetryEvent { +export class LoopDetectedEvent { 'event.name': 'loop_detected'; - 'event.timestamp': string; + 'event.timestamp': string; // ISO 8601 loop_type: LoopType; prompt_id: string; @@ -274,9 +266,9 @@ export class LoopDetectedEvent implements BaseTelemetryEvent { } } -export class NextSpeakerCheckEvent implements BaseTelemetryEvent { +export class NextSpeakerCheckEvent { 'event.name': 'next_speaker_check'; - 'event.timestamp': string; + 'event.timestamp': string; // ISO 8601 prompt_id: string; finish_reason: string; result: string; @@ -290,36 +282,23 @@ export class NextSpeakerCheckEvent implements BaseTelemetryEvent { } } -export interface SlashCommandEvent extends BaseTelemetryEvent { +export class SlashCommandEvent { 'event.name': 'slash_command'; 'event.timestamp': string; // ISO 8106 command: string; subcommand?: string; - status?: SlashCommandStatus; + + constructor(command: string, subcommand?: string) { + this['event.name'] = 'slash_command'; + this['event.timestamp'] = new Date().toISOString(); + this.command = command; + this.subcommand = subcommand; + } } -export function makeSlashCommandEvent({ - command, - subcommand, - status, -}: Omit): SlashCommandEvent { - return { - 'event.name': 'slash_command', - 'event.timestamp': new Date().toISOString(), - command, - subcommand, - status, - }; -} - -export enum SlashCommandStatus { - SUCCESS = 'success', - ERROR = 
'error', -} - -export class MalformedJsonResponseEvent implements BaseTelemetryEvent { +export class MalformedJsonResponseEvent { 'event.name': 'malformed_json_response'; - 'event.timestamp': string; + 'event.timestamp': string; // ISO 8601 model: string; constructor(model: string) { @@ -336,7 +315,7 @@ export enum IdeConnectionType { export class IdeConnectionEvent { 'event.name': 'ide_connection'; - 'event.timestamp': string; + 'event.timestamp': string; // ISO 8601 connection_type: IdeConnectionType; constructor(connection_type: IdeConnectionType) { From 22109db320e66dcdfa4aff87adaab626b6cf9b15 Mon Sep 17 00:00:00 2001 From: Seth Vargo Date: Wed, 13 Aug 2025 14:56:10 -0400 Subject: [PATCH 30/45] chore(ci): add global linter (#6111) Co-authored-by: matt korwel --- .github/workflows/ci.yml | 237 ++++++++++++++++++++++++++++++++++----- .yamllint.yml | 88 +++++++++++++++ 2 files changed, 300 insertions(+), 25 deletions(-) create mode 100644 .yamllint.yml diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 1945fa4f..2307b2f0 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -13,53 +13,240 @@ on: concurrency: group: '${{ github.workflow }}-${{ github.head_ref || github.ref }}' - cancel-in-progress: ${{ github.ref != 'refs/heads/main' && !startsWith(github.ref, 'refs/heads/release/') }} + cancel-in-progress: |- + ${{ github.ref != 'refs/heads/main' && !startsWith(github.ref, 'refs/heads/release/') }} + +permissions: + checks: 'write' + contents: 'read' + statuses: 'write' + +defaults: + run: + shell: 'bash' + +env: + ACTIONLINT_VERSION: '1.7.7' + SHELLCHECK_VERSION: '0.11.0' + YAMLLINT_VERSION: '1.35.1' jobs: - lint: - name: Lint - runs-on: ubuntu-latest - permissions: - contents: read # For checkout + # + # Lint: GitHub Actions + # + lint_github_actions: + name: 'Lint (GitHub Actions)' + runs-on: 'ubuntu-latest' steps: - - name: Checkout repository - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 + - name: 
'Checkout' + uses: 'actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8' # ratchet:actions/checkout@v5 + with: + fetch-depth: 1 - - name: Set up Node.js - uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # v4 + - name: 'Install shellcheck' # Actionlint uses shellcheck + run: |- + mkdir -p "${RUNNER_TEMP}/shellcheck" + curl -sSLo "${RUNNER_TEMP}/.shellcheck.txz" "https://github.com/koalaman/shellcheck/releases/download/v${SHELLCHECK_VERSION}/shellcheck-v${SHELLCHECK_VERSION}.linux.x86_64.tar.xz" + tar -xf "${RUNNER_TEMP}/.shellcheck.txz" -C "${RUNNER_TEMP}/shellcheck" --strip-components=1 + echo "${RUNNER_TEMP}/shellcheck" >> "${GITHUB_PATH}" + + - name: 'Install actionlint' + run: |- + mkdir -p "${RUNNER_TEMP}/actionlint" + curl -sSLo "${RUNNER_TEMP}/.actionlint.tgz" "https://github.com/rhysd/actionlint/releases/download/v${ACTIONLINT_VERSION}/actionlint_${ACTIONLINT_VERSION}_linux_amd64.tar.gz" + tar -xzf "${RUNNER_TEMP}/.actionlint.tgz" -C "${RUNNER_TEMP}/actionlint" + echo "${RUNNER_TEMP}/actionlint" >> "${GITHUB_PATH}" + + # For actionlint, we specifically ignore shellcheck rules that are + # annoying or unhelpful. See the shellcheck action for a description. 
+ - name: 'Run actionlint' + run: |- + actionlint \ + -color \ + -format '{{range $err := .}}::error file={{$err.Filepath}},line={{$err.Line}},col={{$err.Column}}::{{$err.Filepath}}@{{$err.Line}} {{$err.Message}}%0A```%0A{{replace $err.Snippet "\\n" "%0A"}}%0A```\n{{end}}' \ + -ignore 'SC2002:' \ + -ignore 'SC2016:' \ + -ignore 'SC2129:' \ + -ignore 'label ".+" is unknown' + + - name: 'Run ratchet' + uses: 'sethvargo/ratchet@8b4ca256dbed184350608a3023620f267f0a5253' # ratchet:sethvargo/ratchet@v0.11.4 + with: + files: |- + .github/workflows/*.yml + .github/actions/**/*.yml + + # + # Lint: Javascript + # + lint_javascript: + name: 'Lint (Javascript)' + runs-on: 'ubuntu-latest' + steps: + - name: 'Checkout' + uses: 'actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8' # ratchet:actions/checkout@v5 + with: + fetch-depth: 1 + + - name: 'Set up Node.js' + uses: 'actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020' # ratchet:actions/setup-node@v4.4.0 with: node-version-file: '.nvmrc' cache: 'npm' - - name: Install dependencies - run: npm ci + - name: 'Install dependencies' + run: |- + npm ci - - name: Run formatter check - run: | + - name: 'Run formatter check' + run: |- npm run format git diff --exit-code - - name: Run linter - run: npm run lint:ci + - name: 'Run linter' + run: |- + npm run lint:ci - - name: Run linter on integration tests - run: npx eslint integration-tests --max-warnings 0 + - name: 'Run linter on integration tests' + run: |- + npx eslint integration-tests --max-warnings 0 - - name: Run formatter on integration tests - run: | + - name: 'Run formatter on integration tests' + run: |- npx prettier --check integration-tests git diff --exit-code - - name: Build project - run: npm run build + - name: 'Build project' + run: |- + npm run build - - name: Run type check - run: npm run typecheck + - name: 'Run type check' + run: |- + npm run typecheck + # + # Lint: Shell + # + lint_shell: + name: 'Lint (Shell)' + runs-on: 'ubuntu-latest' + steps: + 
- name: 'Checkout' + uses: 'actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8' # ratchet:actions/checkout@v5 + with: + fetch-depth: 1 + + - name: 'Install shellcheck' + run: |- + mkdir -p "${RUNNER_TEMP}/shellcheck" + curl -sSLo "${RUNNER_TEMP}/.shellcheck.txz" "https://github.com/koalaman/shellcheck/releases/download/v${SHELLCHECK_VERSION}/shellcheck-v${SHELLCHECK_VERSION}.linux.x86_64.tar.xz" + tar -xf "${RUNNER_TEMP}/.shellcheck.txz" -C "${RUNNER_TEMP}/shellcheck" --strip-components=1 + echo "${RUNNER_TEMP}/shellcheck" >> "${GITHUB_PATH}" + + - name: 'Install shellcheck problem matcher' + run: |- + cat > "${RUNNER_TEMP}/shellcheck/problem-matcher-lint-shell.json" <<"EOF" + { + "problemMatcher": [ + { + "owner": "lint_shell", + "pattern": [ + { + "regexp": "^(.*):(\\\\d+):(\\\\d+):\\\\s+(?:fatal\\\\s+)?(warning|error):\\\\s+(.*)$", + "file": 1, + "line": 2, + "column": 3, + "severity": 4, + "message": 5 + } + ] + } + ] + } + EOF + echo "::add-matcher::${RUNNER_TEMP}/shellcheck/problem-matcher-lint-shell.json" + + # Note that only warning and error severity show up in the github files + # page. So we replace 'style' and 'note' with 'warning' to make it show + # up. + # + # We also try and find all bash scripts even if they don't have an + # explicit extension. + # + # We explicitly ignore the following rules: + # + # - SC2002: This rule suggests using "cmd < file" instead of "cat | cmd". + # While < is more efficient, pipes are much more readable and expected. + # + # - SC2129: This rule suggests grouping multiple writes to a file in + # braces like "{ cmd1; cmd2; } >> file". This is unexpected and less + # readable. + # + # - SC2310: This is an optional warning that only appears with "set -e" + # and when a command is used as a conditional. 
+ - name: 'Run shellcheck' + run: |- + git ls-files | grep -E '^([^.]+|.*\.(sh|zsh|bash))$' | xargs file --mime-type \ + | grep "text/x-shellscript" | awk '{ print substr($1, 1, length($1)-1) }' \ + | xargs shellcheck \ + --check-sourced \ + --enable=all \ + --exclude=SC2002,SC2129,SC2310 \ + --severity=style \ + --format=gcc \ + --color=never | sed -e 's/note:/warning:/g' -e 's/style:/warning:/g' + + # + # Lint: YAML + # + lint_yaml: + name: 'Lint (YAML)' + runs-on: 'ubuntu-latest' + steps: + - name: 'Checkout' + uses: 'actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8' # ratchet:actions/checkout@v5 + with: + fetch-depth: 1 + + - name: 'Setup Python' + uses: 'actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065' # ratchet:actions/setup-python@v5 + with: + python-version: '3' + + - name: 'Install yamllint' + run: |- + pip install --user "yamllint==${YAMLLINT_VERSION}" + + - name: 'Run yamllint' + run: |- + git ls-files | grep -E '\.(yaml|yml)' | xargs yamllint --format github + + # + # Lint: All + # + # This is a virtual job that other jobs depend on to wait for all linters to + # finish. It's also used to ensure linting happens on CI via required + # workflows. + lint: + name: 'Lint' + needs: + - 'lint_github_actions' + - 'lint_javascript' + - 'lint_shell' + - 'lint_yaml' + runs-on: 'ubuntu-latest' + steps: + - run: |- + echo 'All linters finished!' 
+ + # + # Test: Node + # test: name: 'Test' runs-on: '${{ matrix.os }}' - needs: 'lint' + needs: + - 'lint' permissions: contents: 'read' checks: 'write' diff --git a/.yamllint.yml b/.yamllint.yml new file mode 100644 index 00000000..b4612e07 --- /dev/null +++ b/.yamllint.yml @@ -0,0 +1,88 @@ +rules: + anchors: + forbid-duplicated-anchors: true + forbid-undeclared-aliases: true + forbid-unused-anchors: true + + braces: + forbid: 'non-empty' + min-spaces-inside-empty: 0 + max-spaces-inside-empty: 0 + + brackets: + min-spaces-inside: 0 + max-spaces-inside: 0 + min-spaces-inside-empty: 0 + max-spaces-inside-empty: 0 + + colons: + max-spaces-before: 0 + max-spaces-after: 1 + + commas: + max-spaces-before: 0 + min-spaces-after: 1 + max-spaces-after: 1 + + comments: + require-starting-space: true + ignore-shebangs: true + min-spaces-from-content: 1 + + comments-indentation: 'disable' + + document-end: + present: false + + document-start: + present: false + + empty-lines: + max: 2 + max-start: 0 + max-end: 1 + + empty-values: + forbid-in-block-mappings: false + forbid-in-flow-mappings: true + + float-values: + forbid-inf: false + forbid-nan: false + forbid-scientific-notation: false + require-numeral-before-decimal: false + + hyphens: + max-spaces-after: 1 + + indentation: + spaces: 2 + indent-sequences: true + check-multi-line-strings: false + + key-duplicates: {} + + new-line-at-end-of-file: {} + + new-lines: + type: 'unix' + + octal-values: + forbid-implicit-octal: true + forbid-explicit-octal: false + + quoted-strings: + quote-type: 'single' + required: true + allow-quoted-quotes: true + + trailing-spaces: {} + + truthy: + allowed-values: ['true', 'false', 'on'] # GitHub Actions uses "on" + check-keys: true + +ignore: + - 'thirdparty/' + - 'third_party/' + - 'vendor/' From 904f4623b6945345d5845649e98f554671b1edfb Mon Sep 17 00:00:00 2001 From: joshualitt Date: Wed, 13 Aug 2025 11:57:37 -0700 Subject: [PATCH 31/45] feat(core): Continue declarative tool migration. 
(#6114) --- packages/core/src/tools/ls.test.ts | 142 ++++---- packages/core/src/tools/ls.ts | 393 +++++++++++---------- packages/core/src/tools/mcp-tool.test.ts | 242 ++++--------- packages/core/src/tools/mcp-tool.ts | 149 +++++--- packages/core/src/tools/memoryTool.test.ts | 76 ++-- packages/core/src/tools/memoryTool.ts | 318 +++++++++-------- 6 files changed, 623 insertions(+), 697 deletions(-) diff --git a/packages/core/src/tools/ls.test.ts b/packages/core/src/tools/ls.test.ts index fb99d829..2fbeb37a 100644 --- a/packages/core/src/tools/ls.test.ts +++ b/packages/core/src/tools/ls.test.ts @@ -74,9 +74,11 @@ describe('LSTool', () => { const params = { path: '/home/user/project/src', }; - - const error = lsTool.validateToolParams(params); - expect(error).toBeNull(); + vi.mocked(fs.statSync).mockReturnValue({ + isDirectory: () => true, + } as fs.Stats); + const invocation = lsTool.build(params); + expect(invocation).toBeDefined(); }); it('should reject relative paths', () => { @@ -84,8 +86,9 @@ describe('LSTool', () => { path: './src', }; - const error = lsTool.validateToolParams(params); - expect(error).toBe('Path must be absolute: ./src'); + expect(() => lsTool.build(params)).toThrow( + 'Path must be absolute: ./src', + ); }); it('should reject paths outside workspace with clear error message', () => { @@ -93,8 +96,7 @@ describe('LSTool', () => { path: '/etc/passwd', }; - const error = lsTool.validateToolParams(params); - expect(error).toBe( + expect(() => lsTool.build(params)).toThrow( 'Path must be within one of the workspace directories: /home/user/project, /home/user/other-project', ); }); @@ -103,9 +105,11 @@ describe('LSTool', () => { const params = { path: '/home/user/other-project/lib', }; - - const error = lsTool.validateToolParams(params); - expect(error).toBeNull(); + vi.mocked(fs.statSync).mockReturnValue({ + isDirectory: () => true, + } as fs.Stats); + const invocation = lsTool.build(params); + expect(invocation).toBeDefined(); }); }); @@ -133,10 
+137,8 @@ describe('LSTool', () => { vi.mocked(fs.readdirSync).mockReturnValue(mockFiles as any); - const result = await lsTool.execute( - { path: testPath }, - new AbortController().signal, - ); + const invocation = lsTool.build({ path: testPath }); + const result = await invocation.execute(new AbortController().signal); expect(result.llmContent).toContain('[DIR] subdir'); expect(result.llmContent).toContain('file1.ts'); @@ -161,10 +163,8 @@ describe('LSTool', () => { vi.mocked(fs.readdirSync).mockReturnValue(mockFiles as any); - const result = await lsTool.execute( - { path: testPath }, - new AbortController().signal, - ); + const invocation = lsTool.build({ path: testPath }); + const result = await invocation.execute(new AbortController().signal); expect(result.llmContent).toContain('module1.js'); expect(result.llmContent).toContain('module2.js'); @@ -179,10 +179,8 @@ describe('LSTool', () => { } as fs.Stats); vi.mocked(fs.readdirSync).mockReturnValue([]); - const result = await lsTool.execute( - { path: testPath }, - new AbortController().signal, - ); + const invocation = lsTool.build({ path: testPath }); + const result = await invocation.execute(new AbortController().signal); expect(result.llmContent).toBe( 'Directory /home/user/project/empty is empty.', @@ -207,10 +205,11 @@ describe('LSTool', () => { }); vi.mocked(fs.readdirSync).mockReturnValue(mockFiles as any); - const result = await lsTool.execute( - { path: testPath, ignore: ['*.spec.js'] }, - new AbortController().signal, - ); + const invocation = lsTool.build({ + path: testPath, + ignore: ['*.spec.js'], + }); + const result = await invocation.execute(new AbortController().signal); expect(result.llmContent).toContain('test.js'); expect(result.llmContent).toContain('index.js'); @@ -238,10 +237,8 @@ describe('LSTool', () => { (path: string) => path.includes('ignored.js'), ); - const result = await lsTool.execute( - { path: testPath }, - new AbortController().signal, - ); + const invocation = 
lsTool.build({ path: testPath }); + const result = await invocation.execute(new AbortController().signal); expect(result.llmContent).toContain('file1.js'); expect(result.llmContent).toContain('file2.js'); @@ -269,10 +266,8 @@ describe('LSTool', () => { (path: string) => path.includes('private.js'), ); - const result = await lsTool.execute( - { path: testPath }, - new AbortController().signal, - ); + const invocation = lsTool.build({ path: testPath }); + const result = await invocation.execute(new AbortController().signal); expect(result.llmContent).toContain('file1.js'); expect(result.llmContent).toContain('file2.js'); @@ -287,10 +282,8 @@ describe('LSTool', () => { isDirectory: () => false, } as fs.Stats); - const result = await lsTool.execute( - { path: testPath }, - new AbortController().signal, - ); + const invocation = lsTool.build({ path: testPath }); + const result = await invocation.execute(new AbortController().signal); expect(result.llmContent).toContain('Path is not a directory'); expect(result.returnDisplay).toBe('Error: Path is not a directory.'); @@ -303,10 +296,8 @@ describe('LSTool', () => { throw new Error('ENOENT: no such file or directory'); }); - const result = await lsTool.execute( - { path: testPath }, - new AbortController().signal, - ); + const invocation = lsTool.build({ path: testPath }); + const result = await invocation.execute(new AbortController().signal); expect(result.llmContent).toContain('Error listing directory'); expect(result.returnDisplay).toBe('Error: Failed to list directory.'); @@ -336,10 +327,8 @@ describe('LSTool', () => { vi.mocked(fs.readdirSync).mockReturnValue(mockFiles as any); - const result = await lsTool.execute( - { path: testPath }, - new AbortController().signal, - ); + const invocation = lsTool.build({ path: testPath }); + const result = await invocation.execute(new AbortController().signal); const lines = ( typeof result.llmContent === 'string' ? 
result.llmContent : '' @@ -361,24 +350,18 @@ describe('LSTool', () => { throw new Error('EACCES: permission denied'); }); - const result = await lsTool.execute( - { path: testPath }, - new AbortController().signal, - ); + const invocation = lsTool.build({ path: testPath }); + const result = await invocation.execute(new AbortController().signal); expect(result.llmContent).toContain('Error listing directory'); expect(result.llmContent).toContain('permission denied'); expect(result.returnDisplay).toBe('Error: Failed to list directory.'); }); - it('should validate parameters and return error for invalid params', async () => { - const result = await lsTool.execute( - { path: '../outside' }, - new AbortController().signal, + it('should throw for invalid params at build time', async () => { + expect(() => lsTool.build({ path: '../outside' })).toThrow( + 'Path must be absolute: ../outside', ); - - expect(result.llmContent).toContain('Invalid parameters provided'); - expect(result.returnDisplay).toBe('Error: Failed to execute tool.'); }); it('should handle errors accessing individual files during listing', async () => { @@ -406,10 +389,8 @@ describe('LSTool', () => { .spyOn(console, 'error') .mockImplementation(() => {}); - const result = await lsTool.execute( - { path: testPath }, - new AbortController().signal, - ); + const invocation = lsTool.build({ path: testPath }); + const result = await invocation.execute(new AbortController().signal); // Should still list the accessible file expect(result.llmContent).toContain('accessible.ts'); @@ -428,19 +409,25 @@ describe('LSTool', () => { describe('getDescription', () => { it('should return shortened relative path', () => { const params = { - path: path.join(mockPrimaryDir, 'deeply', 'nested', 'directory'), + path: `${mockPrimaryDir}/deeply/nested/directory`, }; - - const description = lsTool.getDescription(params); + vi.mocked(fs.statSync).mockReturnValue({ + isDirectory: () => true, + } as fs.Stats); + const invocation = 
lsTool.build(params); + const description = invocation.getDescription(); expect(description).toBe(path.join('deeply', 'nested', 'directory')); }); it('should handle paths in secondary workspace', () => { const params = { - path: path.join(mockSecondaryDir, 'lib'), + path: `${mockSecondaryDir}/lib`, }; - - const description = lsTool.getDescription(params); + vi.mocked(fs.statSync).mockReturnValue({ + isDirectory: () => true, + } as fs.Stats); + const invocation = lsTool.build(params); + const description = invocation.getDescription(); expect(description).toBe(path.join('..', 'other-project', 'lib')); }); }); @@ -448,22 +435,25 @@ describe('LSTool', () => { describe('workspace boundary validation', () => { it('should accept paths in primary workspace directory', () => { const params = { path: `${mockPrimaryDir}/src` }; - expect(lsTool.validateToolParams(params)).toBeNull(); + vi.mocked(fs.statSync).mockReturnValue({ + isDirectory: () => true, + } as fs.Stats); + expect(lsTool.build(params)).toBeDefined(); }); it('should accept paths in secondary workspace directory', () => { const params = { path: `${mockSecondaryDir}/lib` }; - expect(lsTool.validateToolParams(params)).toBeNull(); + vi.mocked(fs.statSync).mockReturnValue({ + isDirectory: () => true, + } as fs.Stats); + expect(lsTool.build(params)).toBeDefined(); }); it('should reject paths outside all workspace directories', () => { const params = { path: '/etc/passwd' }; - const error = lsTool.validateToolParams(params); - expect(error).toContain( + expect(() => lsTool.build(params)).toThrow( 'Path must be within one of the workspace directories', ); - expect(error).toContain(mockPrimaryDir); - expect(error).toContain(mockSecondaryDir); }); it('should list files from secondary workspace directory', async () => { @@ -483,10 +473,8 @@ describe('LSTool', () => { vi.mocked(fs.readdirSync).mockReturnValue(mockFiles as any); - const result = await lsTool.execute( - { path: testPath }, - new AbortController().signal, - ); 
+ const invocation = lsTool.build({ path: testPath }); + const result = await invocation.execute(new AbortController().signal); expect(result.llmContent).toContain('test1.spec.ts'); expect(result.llmContent).toContain('test2.spec.ts'); diff --git a/packages/core/src/tools/ls.ts b/packages/core/src/tools/ls.ts index 7a4445a5..2618136a 100644 --- a/packages/core/src/tools/ls.ts +++ b/packages/core/src/tools/ls.ts @@ -6,7 +6,13 @@ import fs from 'fs'; import path from 'path'; -import { BaseTool, Kind, ToolResult } from './tools.js'; +import { + BaseDeclarativeTool, + BaseToolInvocation, + Kind, + ToolInvocation, + ToolResult, +} from './tools.js'; import { SchemaValidator } from '../utils/schemaValidator.js'; import { makeRelative, shortenPath } from '../utils/paths.js'; import { Config, DEFAULT_FILE_FILTERING_OPTIONS } from '../config/config.js'; @@ -64,10 +70,199 @@ export interface FileEntry { modifiedTime: Date; } +class LSToolInvocation extends BaseToolInvocation { + constructor( + private readonly config: Config, + params: LSToolParams, + ) { + super(params); + } + + /** + * Checks if a filename matches any of the ignore patterns + * @param filename Filename to check + * @param patterns Array of glob patterns to check against + * @returns True if the filename should be ignored + */ + private shouldIgnore(filename: string, patterns?: string[]): boolean { + if (!patterns || patterns.length === 0) { + return false; + } + for (const pattern of patterns) { + // Convert glob pattern to RegExp + const regexPattern = pattern + .replace(/[.+^${}()|[\]\\]/g, '\\$&') + .replace(/\*/g, '.*') + .replace(/\?/g, '.'); + const regex = new RegExp(`^${regexPattern}$`); + if (regex.test(filename)) { + return true; + } + } + return false; + } + + /** + * Gets a description of the file reading operation + * @returns A string describing the file being read + */ + getDescription(): string { + const relativePath = makeRelative( + this.params.path, + this.config.getTargetDir(), + ); + 
return shortenPath(relativePath); + } + + // Helper for consistent error formatting + private errorResult(llmContent: string, returnDisplay: string): ToolResult { + return { + llmContent, + // Keep returnDisplay simpler in core logic + returnDisplay: `Error: ${returnDisplay}`, + }; + } + + /** + * Executes the LS operation with the given parameters + * @returns Result of the LS operation + */ + async execute(_signal: AbortSignal): Promise { + try { + const stats = fs.statSync(this.params.path); + if (!stats) { + // fs.statSync throws on non-existence, so this check might be redundant + // but keeping for clarity. Error message adjusted. + return this.errorResult( + `Error: Directory not found or inaccessible: ${this.params.path}`, + `Directory not found or inaccessible.`, + ); + } + if (!stats.isDirectory()) { + return this.errorResult( + `Error: Path is not a directory: ${this.params.path}`, + `Path is not a directory.`, + ); + } + + const files = fs.readdirSync(this.params.path); + + const defaultFileIgnores = + this.config.getFileFilteringOptions() ?? DEFAULT_FILE_FILTERING_OPTIONS; + + const fileFilteringOptions = { + respectGitIgnore: + this.params.file_filtering_options?.respect_git_ignore ?? + defaultFileIgnores.respectGitIgnore, + respectGeminiIgnore: + this.params.file_filtering_options?.respect_gemini_ignore ?? 
+ defaultFileIgnores.respectGeminiIgnore, + }; + + // Get centralized file discovery service + + const fileDiscovery = this.config.getFileService(); + + const entries: FileEntry[] = []; + let gitIgnoredCount = 0; + let geminiIgnoredCount = 0; + + if (files.length === 0) { + // Changed error message to be more neutral for LLM + return { + llmContent: `Directory ${this.params.path} is empty.`, + returnDisplay: `Directory is empty.`, + }; + } + + for (const file of files) { + if (this.shouldIgnore(file, this.params.ignore)) { + continue; + } + + const fullPath = path.join(this.params.path, file); + const relativePath = path.relative( + this.config.getTargetDir(), + fullPath, + ); + + // Check if this file should be ignored based on git or gemini ignore rules + if ( + fileFilteringOptions.respectGitIgnore && + fileDiscovery.shouldGitIgnoreFile(relativePath) + ) { + gitIgnoredCount++; + continue; + } + if ( + fileFilteringOptions.respectGeminiIgnore && + fileDiscovery.shouldGeminiIgnoreFile(relativePath) + ) { + geminiIgnoredCount++; + continue; + } + + try { + const stats = fs.statSync(fullPath); + const isDir = stats.isDirectory(); + entries.push({ + name: file, + path: fullPath, + isDirectory: isDir, + size: isDir ? 0 : stats.size, + modifiedTime: stats.mtime, + }); + } catch (error) { + // Log error internally but don't fail the whole listing + console.error(`Error accessing ${fullPath}: ${error}`); + } + } + + // Sort entries (directories first, then alphabetically) + entries.sort((a, b) => { + if (a.isDirectory && !b.isDirectory) return -1; + if (!a.isDirectory && b.isDirectory) return 1; + return a.name.localeCompare(b.name); + }); + + // Create formatted content for LLM + const directoryContent = entries + .map((entry) => `${entry.isDirectory ? 
'[DIR] ' : ''}${entry.name}`) + .join('\n'); + + let resultMessage = `Directory listing for ${this.params.path}:\n${directoryContent}`; + const ignoredMessages = []; + if (gitIgnoredCount > 0) { + ignoredMessages.push(`${gitIgnoredCount} git-ignored`); + } + if (geminiIgnoredCount > 0) { + ignoredMessages.push(`${geminiIgnoredCount} gemini-ignored`); + } + + if (ignoredMessages.length > 0) { + resultMessage += `\n\n(${ignoredMessages.join(', ')})`; + } + + let displayMessage = `Listed ${entries.length} item(s).`; + if (ignoredMessages.length > 0) { + displayMessage += ` (${ignoredMessages.join(', ')})`; + } + + return { + llmContent: resultMessage, + returnDisplay: displayMessage, + }; + } catch (error) { + const errorMsg = `Error listing directory: ${error instanceof Error ? error.message : String(error)}`; + return this.errorResult(errorMsg, 'Failed to list directory.'); + } + } +} + /** * Implementation of the LS tool logic */ -export class LSTool extends BaseTool { +export class LSTool extends BaseDeclarativeTool { static readonly Name = 'list_directory'; constructor(private config: Config) { @@ -134,198 +329,16 @@ export class LSTool extends BaseTool { const workspaceContext = this.config.getWorkspaceContext(); if (!workspaceContext.isPathWithinWorkspace(params.path)) { const directories = workspaceContext.getDirectories(); - return `Path must be within one of the workspace directories: ${directories.join(', ')}`; + return `Path must be within one of the workspace directories: ${directories.join( + ', ', + )}`; } return null; } - /** - * Checks if a filename matches any of the ignore patterns - * @param filename Filename to check - * @param patterns Array of glob patterns to check against - * @returns True if the filename should be ignored - */ - private shouldIgnore(filename: string, patterns?: string[]): boolean { - if (!patterns || patterns.length === 0) { - return false; - } - for (const pattern of patterns) { - // Convert glob pattern to RegExp - const 
regexPattern = pattern - .replace(/[.+^${}()|[\]\\]/g, '\\$&') - .replace(/\*/g, '.*') - .replace(/\?/g, '.'); - const regex = new RegExp(`^${regexPattern}$`); - if (regex.test(filename)) { - return true; - } - } - return false; - } - - /** - * Gets a description of the file reading operation - * @param params Parameters for the file reading - * @returns A string describing the file being read - */ - getDescription(params: LSToolParams): string { - const relativePath = makeRelative(params.path, this.config.getTargetDir()); - return shortenPath(relativePath); - } - - // Helper for consistent error formatting - private errorResult(llmContent: string, returnDisplay: string): ToolResult { - return { - llmContent, - // Keep returnDisplay simpler in core logic - returnDisplay: `Error: ${returnDisplay}`, - }; - } - - /** - * Executes the LS operation with the given parameters - * @param params Parameters for the LS operation - * @returns Result of the LS operation - */ - async execute( + protected createInvocation( params: LSToolParams, - _signal: AbortSignal, - ): Promise { - const validationError = this.validateToolParams(params); - if (validationError) { - return this.errorResult( - `Error: Invalid parameters provided. Reason: ${validationError}`, - `Failed to execute tool.`, - ); - } - - try { - const stats = fs.statSync(params.path); - if (!stats) { - // fs.statSync throws on non-existence, so this check might be redundant - // but keeping for clarity. Error message adjusted. - return this.errorResult( - `Error: Directory not found or inaccessible: ${params.path}`, - `Directory not found or inaccessible.`, - ); - } - if (!stats.isDirectory()) { - return this.errorResult( - `Error: Path is not a directory: ${params.path}`, - `Path is not a directory.`, - ); - } - - const files = fs.readdirSync(params.path); - - const defaultFileIgnores = - this.config.getFileFilteringOptions() ?? 
DEFAULT_FILE_FILTERING_OPTIONS; - - const fileFilteringOptions = { - respectGitIgnore: - params.file_filtering_options?.respect_git_ignore ?? - defaultFileIgnores.respectGitIgnore, - respectGeminiIgnore: - params.file_filtering_options?.respect_gemini_ignore ?? - defaultFileIgnores.respectGeminiIgnore, - }; - - // Get centralized file discovery service - - const fileDiscovery = this.config.getFileService(); - - const entries: FileEntry[] = []; - let gitIgnoredCount = 0; - let geminiIgnoredCount = 0; - - if (files.length === 0) { - // Changed error message to be more neutral for LLM - return { - llmContent: `Directory ${params.path} is empty.`, - returnDisplay: `Directory is empty.`, - }; - } - - for (const file of files) { - if (this.shouldIgnore(file, params.ignore)) { - continue; - } - - const fullPath = path.join(params.path, file); - const relativePath = path.relative( - this.config.getTargetDir(), - fullPath, - ); - - // Check if this file should be ignored based on git or gemini ignore rules - if ( - fileFilteringOptions.respectGitIgnore && - fileDiscovery.shouldGitIgnoreFile(relativePath) - ) { - gitIgnoredCount++; - continue; - } - if ( - fileFilteringOptions.respectGeminiIgnore && - fileDiscovery.shouldGeminiIgnoreFile(relativePath) - ) { - geminiIgnoredCount++; - continue; - } - - try { - const stats = fs.statSync(fullPath); - const isDir = stats.isDirectory(); - entries.push({ - name: file, - path: fullPath, - isDirectory: isDir, - size: isDir ? 
0 : stats.size, - modifiedTime: stats.mtime, - }); - } catch (error) { - // Log error internally but don't fail the whole listing - console.error(`Error accessing ${fullPath}: ${error}`); - } - } - - // Sort entries (directories first, then alphabetically) - entries.sort((a, b) => { - if (a.isDirectory && !b.isDirectory) return -1; - if (!a.isDirectory && b.isDirectory) return 1; - return a.name.localeCompare(b.name); - }); - - // Create formatted content for LLM - const directoryContent = entries - .map((entry) => `${entry.isDirectory ? '[DIR] ' : ''}${entry.name}`) - .join('\n'); - - let resultMessage = `Directory listing for ${params.path}:\n${directoryContent}`; - const ignoredMessages = []; - if (gitIgnoredCount > 0) { - ignoredMessages.push(`${gitIgnoredCount} git-ignored`); - } - if (geminiIgnoredCount > 0) { - ignoredMessages.push(`${geminiIgnoredCount} gemini-ignored`); - } - - if (ignoredMessages.length > 0) { - resultMessage += `\n\n(${ignoredMessages.join(', ')})`; - } - - let displayMessage = `Listed ${entries.length} item(s).`; - if (ignoredMessages.length > 0) { - displayMessage += ` (${ignoredMessages.join(', ')})`; - } - - return { - llmContent: resultMessage, - returnDisplay: displayMessage, - }; - } catch (error) { - const errorMsg = `Error listing directory: ${error instanceof Error ? 
error.message : String(error)}`; - return this.errorResult(errorMsg, 'Failed to list directory.'); - } + ): ToolInvocation { + return new LSToolInvocation(this.config, params); } } diff --git a/packages/core/src/tools/mcp-tool.test.ts b/packages/core/src/tools/mcp-tool.test.ts index f8a9a8ba..36602d49 100644 --- a/packages/core/src/tools/mcp-tool.test.ts +++ b/packages/core/src/tools/mcp-tool.test.ts @@ -73,11 +73,21 @@ describe('DiscoveredMCPTool', () => { required: ['param'], }; + let tool: DiscoveredMCPTool; + beforeEach(() => { mockCallTool.mockClear(); mockToolMethod.mockClear(); + tool = new DiscoveredMCPTool( + mockCallableToolInstance, + serverName, + serverToolName, + baseDescription, + inputSchema, + ); // Clear allowlist before each relevant test, especially for shouldConfirmExecute - (DiscoveredMCPTool as any).allowlist.clear(); + const invocation = tool.build({}) as any; + invocation.constructor.allowlist.clear(); }); afterEach(() => { @@ -86,14 +96,6 @@ describe('DiscoveredMCPTool', () => { describe('constructor', () => { it('should set properties correctly', () => { - const tool = new DiscoveredMCPTool( - mockCallableToolInstance, - serverName, - serverToolName, - baseDescription, - inputSchema, - ); - expect(tool.name).toBe(serverToolName); expect(tool.schema.name).toBe(serverToolName); expect(tool.schema.description).toBe(baseDescription); @@ -105,7 +107,7 @@ describe('DiscoveredMCPTool', () => { it('should accept and store a custom timeout', () => { const customTimeout = 5000; - const tool = new DiscoveredMCPTool( + const toolWithTimeout = new DiscoveredMCPTool( mockCallableToolInstance, serverName, serverToolName, @@ -113,19 +115,12 @@ describe('DiscoveredMCPTool', () => { inputSchema, customTimeout, ); - expect(tool.timeout).toBe(customTimeout); + expect(toolWithTimeout.timeout).toBe(customTimeout); }); }); describe('execute', () => { it('should call mcpTool.callTool with correct parameters and format display output', async () => { - const tool 
= new DiscoveredMCPTool( - mockCallableToolInstance, - serverName, - serverToolName, - baseDescription, - inputSchema, - ); const params = { param: 'testValue' }; const mockToolSuccessResultObject = { success: true, @@ -147,7 +142,10 @@ describe('DiscoveredMCPTool', () => { ]; mockCallTool.mockResolvedValue(mockMcpToolResponseParts); - const toolResult: ToolResult = await tool.execute(params); + const invocation = tool.build(params); + const toolResult: ToolResult = await invocation.execute( + new AbortController().signal, + ); expect(mockCallTool).toHaveBeenCalledWith([ { name: serverToolName, args: params }, @@ -163,17 +161,13 @@ describe('DiscoveredMCPTool', () => { }); it('should handle empty result from getStringifiedResultForDisplay', async () => { - const tool = new DiscoveredMCPTool( - mockCallableToolInstance, - serverName, - serverToolName, - baseDescription, - inputSchema, - ); const params = { param: 'testValue' }; const mockMcpToolResponsePartsEmpty: Part[] = []; mockCallTool.mockResolvedValue(mockMcpToolResponsePartsEmpty); - const toolResult: ToolResult = await tool.execute(params); + const invocation = tool.build(params); + const toolResult: ToolResult = await invocation.execute( + new AbortController().signal, + ); expect(toolResult.returnDisplay).toBe('```json\n[]\n```'); expect(toolResult.llmContent).toEqual([ { text: '[Error: Could not parse tool response]' }, @@ -181,28 +175,17 @@ describe('DiscoveredMCPTool', () => { }); it('should propagate rejection if mcpTool.callTool rejects', async () => { - const tool = new DiscoveredMCPTool( - mockCallableToolInstance, - serverName, - serverToolName, - baseDescription, - inputSchema, - ); const params = { param: 'failCase' }; const expectedError = new Error('MCP call failed'); mockCallTool.mockRejectedValue(expectedError); - await expect(tool.execute(params)).rejects.toThrow(expectedError); + const invocation = tool.build(params); + await expect( + invocation.execute(new AbortController().signal), + 
).rejects.toThrow(expectedError); }); it('should handle a simple text response correctly', async () => { - const tool = new DiscoveredMCPTool( - mockCallableToolInstance, - serverName, - serverToolName, - baseDescription, - inputSchema, - ); const params = { query: 'test' }; const successMessage = 'This is a success message.'; @@ -221,7 +204,8 @@ describe('DiscoveredMCPTool', () => { ]; mockCallTool.mockResolvedValue(sdkResponse); - const toolResult = await tool.execute(params); + const invocation = tool.build(params); + const toolResult = await invocation.execute(new AbortController().signal); // 1. Assert that the llmContent sent to the scheduler is a clean Part array. expect(toolResult.llmContent).toEqual([{ text: successMessage }]); @@ -236,13 +220,6 @@ describe('DiscoveredMCPTool', () => { }); it('should handle an AudioBlock response', async () => { - const tool = new DiscoveredMCPTool( - mockCallableToolInstance, - serverName, - serverToolName, - baseDescription, - inputSchema, - ); const params = { action: 'play' }; const sdkResponse: Part[] = [ { @@ -262,7 +239,8 @@ describe('DiscoveredMCPTool', () => { ]; mockCallTool.mockResolvedValue(sdkResponse); - const toolResult = await tool.execute(params); + const invocation = tool.build(params); + const toolResult = await invocation.execute(new AbortController().signal); expect(toolResult.llmContent).toEqual([ { @@ -279,13 +257,6 @@ describe('DiscoveredMCPTool', () => { }); it('should handle a ResourceLinkBlock response', async () => { - const tool = new DiscoveredMCPTool( - mockCallableToolInstance, - serverName, - serverToolName, - baseDescription, - inputSchema, - ); const params = { resource: 'get' }; const sdkResponse: Part[] = [ { @@ -306,7 +277,8 @@ describe('DiscoveredMCPTool', () => { ]; mockCallTool.mockResolvedValue(sdkResponse); - const toolResult = await tool.execute(params); + const invocation = tool.build(params); + const toolResult = await invocation.execute(new AbortController().signal); 
expect(toolResult.llmContent).toEqual([ { @@ -319,13 +291,6 @@ describe('DiscoveredMCPTool', () => { }); it('should handle an embedded text ResourceBlock response', async () => { - const tool = new DiscoveredMCPTool( - mockCallableToolInstance, - serverName, - serverToolName, - baseDescription, - inputSchema, - ); const params = { resource: 'get' }; const sdkResponse: Part[] = [ { @@ -348,7 +313,8 @@ describe('DiscoveredMCPTool', () => { ]; mockCallTool.mockResolvedValue(sdkResponse); - const toolResult = await tool.execute(params); + const invocation = tool.build(params); + const toolResult = await invocation.execute(new AbortController().signal); expect(toolResult.llmContent).toEqual([ { text: 'This is the text content.' }, @@ -357,13 +323,6 @@ describe('DiscoveredMCPTool', () => { }); it('should handle an embedded binary ResourceBlock response', async () => { - const tool = new DiscoveredMCPTool( - mockCallableToolInstance, - serverName, - serverToolName, - baseDescription, - inputSchema, - ); const params = { resource: 'get' }; const sdkResponse: Part[] = [ { @@ -386,7 +345,8 @@ describe('DiscoveredMCPTool', () => { ]; mockCallTool.mockResolvedValue(sdkResponse); - const toolResult = await tool.execute(params); + const invocation = tool.build(params); + const toolResult = await invocation.execute(new AbortController().signal); expect(toolResult.llmContent).toEqual([ { @@ -405,13 +365,6 @@ describe('DiscoveredMCPTool', () => { }); it('should handle a mix of content block types', async () => { - const tool = new DiscoveredMCPTool( - mockCallableToolInstance, - serverName, - serverToolName, - baseDescription, - inputSchema, - ); const params = { action: 'complex' }; const sdkResponse: Part[] = [ { @@ -433,7 +386,8 @@ describe('DiscoveredMCPTool', () => { ]; mockCallTool.mockResolvedValue(sdkResponse); - const toolResult = await tool.execute(params); + const invocation = tool.build(params); + const toolResult = await invocation.execute(new 
AbortController().signal); expect(toolResult.llmContent).toEqual([ { text: 'First part.' }, @@ -454,13 +408,6 @@ describe('DiscoveredMCPTool', () => { }); it('should ignore unknown content block types', async () => { - const tool = new DiscoveredMCPTool( - mockCallableToolInstance, - serverName, - serverToolName, - baseDescription, - inputSchema, - ); const params = { action: 'test' }; const sdkResponse: Part[] = [ { @@ -477,7 +424,8 @@ describe('DiscoveredMCPTool', () => { ]; mockCallTool.mockResolvedValue(sdkResponse); - const toolResult = await tool.execute(params); + const invocation = tool.build(params); + const toolResult = await invocation.execute(new AbortController().signal); expect(toolResult.llmContent).toEqual([{ text: 'Valid part.' }]); expect(toolResult.returnDisplay).toBe( @@ -486,13 +434,6 @@ describe('DiscoveredMCPTool', () => { }); it('should handle a complex mix of content block types', async () => { - const tool = new DiscoveredMCPTool( - mockCallableToolInstance, - serverName, - serverToolName, - baseDescription, - inputSchema, - ); const params = { action: 'super-complex' }; const sdkResponse: Part[] = [ { @@ -527,7 +468,8 @@ describe('DiscoveredMCPTool', () => { ]; mockCallTool.mockResolvedValue(sdkResponse); - const toolResult = await tool.execute(params); + const invocation = tool.build(params); + const toolResult = await invocation.execute(new AbortController().signal); expect(toolResult.llmContent).toEqual([ { text: 'Here is a resource.' 
}, @@ -552,10 +494,8 @@ describe('DiscoveredMCPTool', () => { }); describe('shouldConfirmExecute', () => { - // beforeEach is already clearing allowlist - it('should return false if trust is true', async () => { - const tool = new DiscoveredMCPTool( + const trustedTool = new DiscoveredMCPTool( mockCallableToolInstance, serverName, serverToolName, @@ -564,50 +504,32 @@ describe('DiscoveredMCPTool', () => { undefined, true, ); + const invocation = trustedTool.build({}); expect( - await tool.shouldConfirmExecute({}, new AbortController().signal), + await invocation.shouldConfirmExecute(new AbortController().signal), ).toBe(false); }); it('should return false if server is allowlisted', async () => { - (DiscoveredMCPTool as any).allowlist.add(serverName); - const tool = new DiscoveredMCPTool( - mockCallableToolInstance, - serverName, - serverToolName, - baseDescription, - inputSchema, - ); + const invocation = tool.build({}) as any; + invocation.constructor.allowlist.add(serverName); expect( - await tool.shouldConfirmExecute({}, new AbortController().signal), + await invocation.shouldConfirmExecute(new AbortController().signal), ).toBe(false); }); it('should return false if tool is allowlisted', async () => { const toolAllowlistKey = `${serverName}.${serverToolName}`; - (DiscoveredMCPTool as any).allowlist.add(toolAllowlistKey); - const tool = new DiscoveredMCPTool( - mockCallableToolInstance, - serverName, - serverToolName, - baseDescription, - inputSchema, - ); + const invocation = tool.build({}) as any; + invocation.constructor.allowlist.add(toolAllowlistKey); expect( - await tool.shouldConfirmExecute({}, new AbortController().signal), + await invocation.shouldConfirmExecute(new AbortController().signal), ).toBe(false); }); it('should return confirmation details if not trusted and not allowlisted', async () => { - const tool = new DiscoveredMCPTool( - mockCallableToolInstance, - serverName, - serverToolName, - baseDescription, - inputSchema, - ); - const confirmation 
= await tool.shouldConfirmExecute( - {}, + const invocation = tool.build({}); + const confirmation = await invocation.shouldConfirmExecute( new AbortController().signal, ); expect(confirmation).not.toBe(false); @@ -629,15 +551,8 @@ describe('DiscoveredMCPTool', () => { }); it('should add server to allowlist on ProceedAlwaysServer', async () => { - const tool = new DiscoveredMCPTool( - mockCallableToolInstance, - serverName, - serverToolName, - baseDescription, - inputSchema, - ); - const confirmation = await tool.shouldConfirmExecute( - {}, + const invocation = tool.build({}) as any; + const confirmation = await invocation.shouldConfirmExecute( new AbortController().signal, ); expect(confirmation).not.toBe(false); @@ -650,7 +565,7 @@ describe('DiscoveredMCPTool', () => { await confirmation.onConfirm( ToolConfirmationOutcome.ProceedAlwaysServer, ); - expect((DiscoveredMCPTool as any).allowlist.has(serverName)).toBe(true); + expect(invocation.constructor.allowlist.has(serverName)).toBe(true); } else { throw new Error( 'Confirmation details or onConfirm not in expected format', @@ -659,16 +574,9 @@ describe('DiscoveredMCPTool', () => { }); it('should add tool to allowlist on ProceedAlwaysTool', async () => { - const tool = new DiscoveredMCPTool( - mockCallableToolInstance, - serverName, - serverToolName, - baseDescription, - inputSchema, - ); const toolAllowlistKey = `${serverName}.${serverToolName}`; - const confirmation = await tool.shouldConfirmExecute( - {}, + const invocation = tool.build({}) as any; + const confirmation = await invocation.shouldConfirmExecute( new AbortController().signal, ); expect(confirmation).not.toBe(false); @@ -679,7 +587,7 @@ describe('DiscoveredMCPTool', () => { typeof confirmation.onConfirm === 'function' ) { await confirmation.onConfirm(ToolConfirmationOutcome.ProceedAlwaysTool); - expect((DiscoveredMCPTool as any).allowlist.has(toolAllowlistKey)).toBe( + expect(invocation.constructor.allowlist.has(toolAllowlistKey)).toBe( true, ); } 
else { @@ -690,15 +598,8 @@ describe('DiscoveredMCPTool', () => { }); it('should handle Cancel confirmation outcome', async () => { - const tool = new DiscoveredMCPTool( - mockCallableToolInstance, - serverName, - serverToolName, - baseDescription, - inputSchema, - ); - const confirmation = await tool.shouldConfirmExecute( - {}, + const invocation = tool.build({}) as any; + const confirmation = await invocation.shouldConfirmExecute( new AbortController().signal, ); expect(confirmation).not.toBe(false); @@ -710,11 +611,9 @@ describe('DiscoveredMCPTool', () => { ) { // Cancel should not add anything to allowlist await confirmation.onConfirm(ToolConfirmationOutcome.Cancel); - expect((DiscoveredMCPTool as any).allowlist.has(serverName)).toBe( - false, - ); + expect(invocation.constructor.allowlist.has(serverName)).toBe(false); expect( - (DiscoveredMCPTool as any).allowlist.has( + invocation.constructor.allowlist.has( `${serverName}.${serverToolName}`, ), ).toBe(false); @@ -726,15 +625,8 @@ describe('DiscoveredMCPTool', () => { }); it('should handle ProceedOnce confirmation outcome', async () => { - const tool = new DiscoveredMCPTool( - mockCallableToolInstance, - serverName, - serverToolName, - baseDescription, - inputSchema, - ); - const confirmation = await tool.shouldConfirmExecute( - {}, + const invocation = tool.build({}) as any; + const confirmation = await invocation.shouldConfirmExecute( new AbortController().signal, ); expect(confirmation).not.toBe(false); @@ -746,11 +638,9 @@ describe('DiscoveredMCPTool', () => { ) { // ProceedOnce should not add anything to allowlist await confirmation.onConfirm(ToolConfirmationOutcome.ProceedOnce); - expect((DiscoveredMCPTool as any).allowlist.has(serverName)).toBe( - false, - ); + expect(invocation.constructor.allowlist.has(serverName)).toBe(false); expect( - (DiscoveredMCPTool as any).allowlist.has( + invocation.constructor.allowlist.has( `${serverName}.${serverToolName}`, ), ).toBe(false); diff --git 
a/packages/core/src/tools/mcp-tool.ts b/packages/core/src/tools/mcp-tool.ts index 59f83db3..01a8d75c 100644 --- a/packages/core/src/tools/mcp-tool.ts +++ b/packages/core/src/tools/mcp-tool.ts @@ -5,14 +5,16 @@ */ import { - BaseTool, - ToolResult, + BaseDeclarativeTool, + BaseToolInvocation, + Kind, ToolCallConfirmationDetails, ToolConfirmationOutcome, + ToolInvocation, ToolMcpConfirmationDetails, - Kind, + ToolResult, } from './tools.js'; -import { CallableTool, Part, FunctionCall } from '@google/genai'; +import { CallableTool, FunctionCall, Part } from '@google/genai'; type ToolParams = Record; @@ -50,9 +52,84 @@ type McpContentBlock = | McpResourceBlock | McpResourceLinkBlock; -export class DiscoveredMCPTool extends BaseTool { +class DiscoveredMCPToolInvocation extends BaseToolInvocation< + ToolParams, + ToolResult +> { private static readonly allowlist: Set = new Set(); + constructor( + private readonly mcpTool: CallableTool, + readonly serverName: string, + readonly serverToolName: string, + readonly displayName: string, + readonly timeout?: number, + readonly trust?: boolean, + params: ToolParams = {}, + ) { + super(params); + } + + async shouldConfirmExecute( + _abortSignal: AbortSignal, + ): Promise { + const serverAllowListKey = this.serverName; + const toolAllowListKey = `${this.serverName}.${this.serverToolName}`; + + if (this.trust) { + return false; // server is trusted, no confirmation needed + } + + if ( + DiscoveredMCPToolInvocation.allowlist.has(serverAllowListKey) || + DiscoveredMCPToolInvocation.allowlist.has(toolAllowListKey) + ) { + return false; // server and/or tool already allowlisted + } + + const confirmationDetails: ToolMcpConfirmationDetails = { + type: 'mcp', + title: 'Confirm MCP Tool Execution', + serverName: this.serverName, + toolName: this.serverToolName, // Display original tool name in confirmation + toolDisplayName: this.displayName, // Display global registry name exposed to model and user + onConfirm: async (outcome: 
ToolConfirmationOutcome) => { + if (outcome === ToolConfirmationOutcome.ProceedAlwaysServer) { + DiscoveredMCPToolInvocation.allowlist.add(serverAllowListKey); + } else if (outcome === ToolConfirmationOutcome.ProceedAlwaysTool) { + DiscoveredMCPToolInvocation.allowlist.add(toolAllowListKey); + } + }, + }; + return confirmationDetails; + } + + async execute(): Promise { + const functionCalls: FunctionCall[] = [ + { + name: this.serverToolName, + args: this.params, + }, + ]; + + const rawResponseParts = await this.mcpTool.callTool(functionCalls); + const transformedParts = transformMcpContentToParts(rawResponseParts); + + return { + llmContent: transformedParts, + returnDisplay: getStringifiedResultForDisplay(rawResponseParts), + }; + } + + getDescription(): string { + return this.displayName; + } +} + +export class DiscoveredMCPTool extends BaseDeclarativeTool< + ToolParams, + ToolResult +> { constructor( private readonly mcpTool: CallableTool, readonly serverName: string, @@ -87,56 +164,18 @@ export class DiscoveredMCPTool extends BaseTool { ); } - async shouldConfirmExecute( - _params: ToolParams, - _abortSignal: AbortSignal, - ): Promise { - const serverAllowListKey = this.serverName; - const toolAllowListKey = `${this.serverName}.${this.serverToolName}`; - - if (this.trust) { - return false; // server is trusted, no confirmation needed - } - - if ( - DiscoveredMCPTool.allowlist.has(serverAllowListKey) || - DiscoveredMCPTool.allowlist.has(toolAllowListKey) - ) { - return false; // server and/or tool already allowlisted - } - - const confirmationDetails: ToolMcpConfirmationDetails = { - type: 'mcp', - title: 'Confirm MCP Tool Execution', - serverName: this.serverName, - toolName: this.serverToolName, // Display original tool name in confirmation - toolDisplayName: this.name, // Display global registry name exposed to model and user - onConfirm: async (outcome: ToolConfirmationOutcome) => { - if (outcome === ToolConfirmationOutcome.ProceedAlwaysServer) { - 
DiscoveredMCPTool.allowlist.add(serverAllowListKey); - } else if (outcome === ToolConfirmationOutcome.ProceedAlwaysTool) { - DiscoveredMCPTool.allowlist.add(toolAllowListKey); - } - }, - }; - return confirmationDetails; - } - - async execute(params: ToolParams): Promise { - const functionCalls: FunctionCall[] = [ - { - name: this.serverToolName, - args: params, - }, - ]; - - const rawResponseParts = await this.mcpTool.callTool(functionCalls); - const transformedParts = transformMcpContentToParts(rawResponseParts); - - return { - llmContent: transformedParts, - returnDisplay: getStringifiedResultForDisplay(rawResponseParts), - }; + protected createInvocation( + params: ToolParams, + ): ToolInvocation { + return new DiscoveredMCPToolInvocation( + this.mcpTool, + this.serverName, + this.serverToolName, + this.displayName, + this.timeout, + this.trust, + params, + ); } } diff --git a/packages/core/src/tools/memoryTool.test.ts b/packages/core/src/tools/memoryTool.test.ts index 2a5c4c39..0e382325 100644 --- a/packages/core/src/tools/memoryTool.test.ts +++ b/packages/core/src/tools/memoryTool.test.ts @@ -218,7 +218,8 @@ describe('MemoryTool', () => { it('should call performAddMemoryEntry with correct parameters and return success', async () => { const params = { fact: 'The sky is blue' }; - const result = await memoryTool.execute(params, mockAbortSignal); + const invocation = memoryTool.build(params); + const result = await invocation.execute(mockAbortSignal); // Use getCurrentGeminiMdFilename for the default expectation before any setGeminiMdFilename calls in a test const expectedFilePath = path.join( os.homedir(), @@ -247,14 +248,12 @@ describe('MemoryTool', () => { it('should return an error if fact is empty', async () => { const params = { fact: ' ' }; // Empty fact - const result = await memoryTool.execute(params, mockAbortSignal); - const errorMessage = 'Parameter "fact" must be a non-empty string.'; - - expect(performAddMemoryEntrySpy).not.toHaveBeenCalled(); - 
expect(result.llmContent).toBe( - JSON.stringify({ success: false, error: errorMessage }), + expect(memoryTool.validateToolParams(params)).toBe( + 'Parameter "fact" must be a non-empty string.', + ); + expect(() => memoryTool.build(params)).toThrow( + 'Parameter "fact" must be a non-empty string.', ); - expect(result.returnDisplay).toBe(`Error: ${errorMessage}`); }); it('should handle errors from performAddMemoryEntry', async () => { @@ -264,7 +263,8 @@ describe('MemoryTool', () => { ); performAddMemoryEntrySpy.mockRejectedValue(underlyingError); - const result = await memoryTool.execute(params, mockAbortSignal); + const invocation = memoryTool.build(params); + const result = await invocation.execute(mockAbortSignal); expect(result.llmContent).toBe( JSON.stringify({ @@ -284,17 +284,17 @@ describe('MemoryTool', () => { beforeEach(() => { memoryTool = new MemoryTool(); // Clear the allowlist before each test - (MemoryTool as unknown as { allowlist: Set }).allowlist.clear(); + const invocation = memoryTool.build({ fact: 'mock-fact' }); + // eslint-disable-next-line @typescript-eslint/no-explicit-any + (invocation.constructor as any).allowlist.clear(); // Mock fs.readFile to return empty string (file doesn't exist) vi.mocked(fs.readFile).mockResolvedValue(''); }); it('should return confirmation details when memory file is not allowlisted', async () => { const params = { fact: 'Test fact' }; - const result = await memoryTool.shouldConfirmExecute( - params, - mockAbortSignal, - ); + const invocation = memoryTool.build(params); + const result = await invocation.shouldConfirmExecute(mockAbortSignal); expect(result).toBeDefined(); expect(result).not.toBe(false); @@ -321,15 +321,12 @@ describe('MemoryTool', () => { getCurrentGeminiMdFilename(), ); + const invocation = memoryTool.build(params); // Add the memory file to the allowlist - (MemoryTool as unknown as { allowlist: Set }).allowlist.add( - memoryFilePath, - ); + // eslint-disable-next-line 
@typescript-eslint/no-explicit-any + (invocation.constructor as any).allowlist.add(memoryFilePath); - const result = await memoryTool.shouldConfirmExecute( - params, - mockAbortSignal, - ); + const result = await invocation.shouldConfirmExecute(mockAbortSignal); expect(result).toBe(false); }); @@ -342,10 +339,8 @@ describe('MemoryTool', () => { getCurrentGeminiMdFilename(), ); - const result = await memoryTool.shouldConfirmExecute( - params, - mockAbortSignal, - ); + const invocation = memoryTool.build(params); + const result = await invocation.shouldConfirmExecute(mockAbortSignal); expect(result).toBeDefined(); expect(result).not.toBe(false); @@ -356,9 +351,8 @@ describe('MemoryTool', () => { // Check that the memory file was added to the allowlist expect( - (MemoryTool as unknown as { allowlist: Set }).allowlist.has( - memoryFilePath, - ), + // eslint-disable-next-line @typescript-eslint/no-explicit-any + (invocation.constructor as any).allowlist.has(memoryFilePath), ).toBe(true); } }); @@ -371,10 +365,8 @@ describe('MemoryTool', () => { getCurrentGeminiMdFilename(), ); - const result = await memoryTool.shouldConfirmExecute( - params, - mockAbortSignal, - ); + const invocation = memoryTool.build(params); + const result = await invocation.shouldConfirmExecute(mockAbortSignal); expect(result).toBeDefined(); expect(result).not.toBe(false); @@ -382,18 +374,12 @@ describe('MemoryTool', () => { if (result && result.type === 'edit') { // Simulate the onConfirm callback with different outcomes await result.onConfirm(ToolConfirmationOutcome.ProceedOnce); - expect( - (MemoryTool as unknown as { allowlist: Set }).allowlist.has( - memoryFilePath, - ), - ).toBe(false); + // eslint-disable-next-line @typescript-eslint/no-explicit-any + const allowlist = (invocation.constructor as any).allowlist; + expect(allowlist.has(memoryFilePath)).toBe(false); await result.onConfirm(ToolConfirmationOutcome.Cancel); - expect( - (MemoryTool as unknown as { allowlist: Set }).allowlist.has( - 
memoryFilePath, - ), - ).toBe(false); + expect(allowlist.has(memoryFilePath)).toBe(false); } }); @@ -405,10 +391,8 @@ describe('MemoryTool', () => { // Mock fs.readFile to return existing content vi.mocked(fs.readFile).mockResolvedValue(existingContent); - const result = await memoryTool.shouldConfirmExecute( - params, - mockAbortSignal, - ); + const invocation = memoryTool.build(params); + const result = await invocation.shouldConfirmExecute(mockAbortSignal); expect(result).toBeDefined(); expect(result).not.toBe(false); diff --git a/packages/core/src/tools/memoryTool.ts b/packages/core/src/tools/memoryTool.ts index c8e88c97..a9d765c4 100644 --- a/packages/core/src/tools/memoryTool.ts +++ b/packages/core/src/tools/memoryTool.ts @@ -5,11 +5,12 @@ */ import { - BaseTool, + BaseDeclarativeTool, + BaseToolInvocation, Kind, - ToolResult, ToolEditConfirmationDetails, ToolConfirmationOutcome, + ToolResult, } from './tools.js'; import { FunctionDeclaration } from '@google/genai'; import * as fs from 'fs/promises'; @@ -19,6 +20,7 @@ import * as Diff from 'diff'; import { DEFAULT_DIFF_OPTIONS } from './diffOptions.js'; import { tildeifyPath } from '../utils/paths.js'; import { ModifiableDeclarativeTool, ModifyContext } from './modifiable-tool.js'; +import { SchemaValidator } from '../utils/schemaValidator.js'; const memoryToolSchemaData: FunctionDeclaration = { name: 'save_memory', @@ -110,101 +112,86 @@ function ensureNewlineSeparation(currentContent: string): string { return '\n\n'; } -export class MemoryTool - extends BaseTool - implements ModifiableDeclarativeTool -{ - private static readonly allowlist: Set = new Set(); +/** + * Reads the current content of the memory file + */ +async function readMemoryFileContent(): Promise { + try { + return await fs.readFile(getGlobalMemoryFilePath(), 'utf-8'); + } catch (err) { + const error = err as Error & { code?: string }; + if (!(error instanceof Error) || error.code !== 'ENOENT') throw err; + return ''; + } +} - static 
readonly Name: string = memoryToolSchemaData.name!; - constructor() { - super( - MemoryTool.Name, - 'Save Memory', - memoryToolDescription, - Kind.Think, - memoryToolSchemaData.parametersJsonSchema as Record, +/** + * Computes the new content that would result from adding a memory entry + */ +function computeNewContent(currentContent: string, fact: string): string { + let processedText = fact.trim(); + processedText = processedText.replace(/^(-+\s*)+/, '').trim(); + const newMemoryItem = `- ${processedText}`; + + const headerIndex = currentContent.indexOf(MEMORY_SECTION_HEADER); + + if (headerIndex === -1) { + // Header not found, append header and then the entry + const separator = ensureNewlineSeparation(currentContent); + return ( + currentContent + + `${separator}${MEMORY_SECTION_HEADER}\n${newMemoryItem}\n` + ); + } else { + // Header found, find where to insert the new memory entry + const startOfSectionContent = headerIndex + MEMORY_SECTION_HEADER.length; + let endOfSectionIndex = currentContent.indexOf( + '\n## ', + startOfSectionContent, + ); + if (endOfSectionIndex === -1) { + endOfSectionIndex = currentContent.length; // End of file + } + + const beforeSectionMarker = currentContent + .substring(0, startOfSectionContent) + .trimEnd(); + let sectionContent = currentContent + .substring(startOfSectionContent, endOfSectionIndex) + .trimEnd(); + const afterSectionMarker = currentContent.substring(endOfSectionIndex); + + sectionContent += `\n${newMemoryItem}`; + return ( + `${beforeSectionMarker}\n${sectionContent.trimStart()}\n${afterSectionMarker}`.trimEnd() + + '\n' ); } +} - getDescription(_params: SaveMemoryParams): string { +class MemoryToolInvocation extends BaseToolInvocation< + SaveMemoryParams, + ToolResult +> { + private static readonly allowlist: Set = new Set(); + + getDescription(): string { const memoryFilePath = getGlobalMemoryFilePath(); return `in ${tildeifyPath(memoryFilePath)}`; } - /** - * Reads the current content of the memory file - */ 
- private async readMemoryFileContent(): Promise { - try { - return await fs.readFile(getGlobalMemoryFilePath(), 'utf-8'); - } catch (err) { - const error = err as Error & { code?: string }; - if (!(error instanceof Error) || error.code !== 'ENOENT') throw err; - return ''; - } - } - - /** - * Computes the new content that would result from adding a memory entry - */ - private computeNewContent(currentContent: string, fact: string): string { - let processedText = fact.trim(); - processedText = processedText.replace(/^(-+\s*)+/, '').trim(); - const newMemoryItem = `- ${processedText}`; - - const headerIndex = currentContent.indexOf(MEMORY_SECTION_HEADER); - - if (headerIndex === -1) { - // Header not found, append header and then the entry - const separator = ensureNewlineSeparation(currentContent); - return ( - currentContent + - `${separator}${MEMORY_SECTION_HEADER}\n${newMemoryItem}\n` - ); - } else { - // Header found, find where to insert the new memory entry - const startOfSectionContent = headerIndex + MEMORY_SECTION_HEADER.length; - let endOfSectionIndex = currentContent.indexOf( - '\n## ', - startOfSectionContent, - ); - if (endOfSectionIndex === -1) { - endOfSectionIndex = currentContent.length; // End of file - } - - const beforeSectionMarker = currentContent - .substring(0, startOfSectionContent) - .trimEnd(); - let sectionContent = currentContent - .substring(startOfSectionContent, endOfSectionIndex) - .trimEnd(); - const afterSectionMarker = currentContent.substring(endOfSectionIndex); - - sectionContent += `\n${newMemoryItem}`; - return ( - `${beforeSectionMarker}\n${sectionContent.trimStart()}\n${afterSectionMarker}`.trimEnd() + - '\n' - ); - } - } - async shouldConfirmExecute( - params: SaveMemoryParams, _abortSignal: AbortSignal, ): Promise { const memoryFilePath = getGlobalMemoryFilePath(); const allowlistKey = memoryFilePath; - if (MemoryTool.allowlist.has(allowlistKey)) { + if (MemoryToolInvocation.allowlist.has(allowlistKey)) { return false; } 
- // Read current content of the memory file - const currentContent = await this.readMemoryFileContent(); - - // Calculate the new content that will be written to the memory file - const newContent = this.computeNewContent(currentContent, params.fact); + const currentContent = await readMemoryFileContent(); + const newContent = computeNewContent(currentContent, this.params.fact); const fileName = path.basename(memoryFilePath); const fileDiff = Diff.createPatch( @@ -226,13 +213,107 @@ export class MemoryTool newContent, onConfirm: async (outcome: ToolConfirmationOutcome) => { if (outcome === ToolConfirmationOutcome.ProceedAlways) { - MemoryTool.allowlist.add(allowlistKey); + MemoryToolInvocation.allowlist.add(allowlistKey); } }, }; return confirmationDetails; } + async execute(_signal: AbortSignal): Promise { + const { fact, modified_by_user, modified_content } = this.params; + + try { + if (modified_by_user && modified_content !== undefined) { + // User modified the content in external editor, write it directly + await fs.mkdir(path.dirname(getGlobalMemoryFilePath()), { + recursive: true, + }); + await fs.writeFile( + getGlobalMemoryFilePath(), + modified_content, + 'utf-8', + ); + const successMessage = `Okay, I've updated the memory file with your modifications.`; + return { + llmContent: JSON.stringify({ + success: true, + message: successMessage, + }), + returnDisplay: successMessage, + }; + } else { + // Use the normal memory entry logic + await MemoryTool.performAddMemoryEntry( + fact, + getGlobalMemoryFilePath(), + { + readFile: fs.readFile, + writeFile: fs.writeFile, + mkdir: fs.mkdir, + }, + ); + const successMessage = `Okay, I've remembered that: "${fact}"`; + return { + llmContent: JSON.stringify({ + success: true, + message: successMessage, + }), + returnDisplay: successMessage, + }; + } + } catch (error) { + const errorMessage = + error instanceof Error ? 
error.message : String(error); + console.error( + `[MemoryTool] Error executing save_memory for fact "${fact}": ${errorMessage}`, + ); + return { + llmContent: JSON.stringify({ + success: false, + error: `Failed to save memory. Detail: ${errorMessage}`, + }), + returnDisplay: `Error saving memory: ${errorMessage}`, + }; + } + } +} + +export class MemoryTool + extends BaseDeclarativeTool + implements ModifiableDeclarativeTool +{ + static readonly Name: string = memoryToolSchemaData.name!; + constructor() { + super( + MemoryTool.Name, + 'Save Memory', + memoryToolDescription, + Kind.Think, + memoryToolSchemaData.parametersJsonSchema as Record, + ); + } + + validateToolParams(params: SaveMemoryParams): string | null { + const errors = SchemaValidator.validate( + this.schema.parametersJsonSchema, + params, + ); + if (errors) { + return errors; + } + + if (params.fact.trim() === '') { + return 'Parameter "fact" must be a non-empty string.'; + } + + return null; + } + + protected createInvocation(params: SaveMemoryParams) { + return new MemoryToolInvocation(params); + } + static async performAddMemoryEntry( text: string, memoryFilePath: string, @@ -303,83 +384,14 @@ export class MemoryTool } } - async execute( - params: SaveMemoryParams, - _signal: AbortSignal, - ): Promise { - const { fact, modified_by_user, modified_content } = params; - - if (!fact || typeof fact !== 'string' || fact.trim() === '') { - const errorMessage = 'Parameter "fact" must be a non-empty string.'; - return { - llmContent: JSON.stringify({ success: false, error: errorMessage }), - returnDisplay: `Error: ${errorMessage}`, - }; - } - - try { - if (modified_by_user && modified_content !== undefined) { - // User modified the content in external editor, write it directly - await fs.mkdir(path.dirname(getGlobalMemoryFilePath()), { - recursive: true, - }); - await fs.writeFile( - getGlobalMemoryFilePath(), - modified_content, - 'utf-8', - ); - const successMessage = `Okay, I've updated the memory file 
with your modifications.`; - return { - llmContent: JSON.stringify({ - success: true, - message: successMessage, - }), - returnDisplay: successMessage, - }; - } else { - // Use the normal memory entry logic - await MemoryTool.performAddMemoryEntry( - fact, - getGlobalMemoryFilePath(), - { - readFile: fs.readFile, - writeFile: fs.writeFile, - mkdir: fs.mkdir, - }, - ); - const successMessage = `Okay, I've remembered that: "${fact}"`; - return { - llmContent: JSON.stringify({ - success: true, - message: successMessage, - }), - returnDisplay: successMessage, - }; - } - } catch (error) { - const errorMessage = - error instanceof Error ? error.message : String(error); - console.error( - `[MemoryTool] Error executing save_memory for fact "${fact}": ${errorMessage}`, - ); - return { - llmContent: JSON.stringify({ - success: false, - error: `Failed to save memory. Detail: ${errorMessage}`, - }), - returnDisplay: `Error saving memory: ${errorMessage}`, - }; - } - } - getModifyContext(_abortSignal: AbortSignal): ModifyContext { return { getFilePath: (_params: SaveMemoryParams) => getGlobalMemoryFilePath(), getCurrentContent: async (_params: SaveMemoryParams): Promise => - this.readMemoryFileContent(), + readMemoryFileContent(), getProposedContent: async (params: SaveMemoryParams): Promise => { - const currentContent = await this.readMemoryFileContent(); - return this.computeNewContent(currentContent, params.fact); + const currentContent = await readMemoryFileContent(); + return computeNewContent(currentContent, params.fact); }, createUpdatedParams: ( _oldContent: string, From c0c0e9b7a0a768515181e6ae7defe9f06199ea18 Mon Sep 17 00:00:00 2001 From: joshualitt Date: Wed, 13 Aug 2025 12:27:09 -0700 Subject: [PATCH 32/45] feat(core): Migrate read_many_files, shell, and web_fetch. 
(#6167) --- .../core/src/tools/read-many-files.test.ts | 97 +++--- packages/core/src/tools/read-many-files.ts | 279 ++++++++++-------- packages/core/src/tools/shell.test.ts | 135 ++++----- packages/core/src/tools/shell.ts | 255 ++++++++-------- packages/core/src/tools/web-fetch.test.ts | 20 +- packages/core/src/tools/web-fetch.ts | 181 ++++++------ 6 files changed, 503 insertions(+), 464 deletions(-) diff --git a/packages/core/src/tools/read-many-files.test.ts b/packages/core/src/tools/read-many-files.test.ts index c6b34665..af5012cd 100644 --- a/packages/core/src/tools/read-many-files.test.ts +++ b/packages/core/src/tools/read-many-files.test.ts @@ -121,66 +121,71 @@ describe('ReadManyFilesTool', () => { } }); - describe('validateParams', () => { - it('should return null for valid relative paths within root', () => { + describe('build', () => { + it('should return an invocation for valid relative paths within root', () => { const params = { paths: ['file1.txt', 'subdir/file2.txt'] }; - expect(tool.validateParams(params)).toBeNull(); + const invocation = tool.build(params); + expect(invocation).toBeDefined(); }); - it('should return null for valid glob patterns within root', () => { + it('should return an invocation for valid glob patterns within root', () => { const params = { paths: ['*.txt', 'subdir/**/*.js'] }; - expect(tool.validateParams(params)).toBeNull(); + const invocation = tool.build(params); + expect(invocation).toBeDefined(); }); - it('should return null for paths trying to escape the root (e.g., ../) as execute handles this', () => { + it('should return an invocation for paths trying to escape the root (e.g., ../) as execute handles this', () => { const params = { paths: ['../outside.txt'] }; - expect(tool.validateParams(params)).toBeNull(); + const invocation = tool.build(params); + expect(invocation).toBeDefined(); }); - it('should return null for absolute paths as execute handles this', () => { + it('should return an invocation for absolute paths 
as execute handles this', () => { const params = { paths: [path.join(tempDirOutsideRoot, 'absolute.txt')] }; - expect(tool.validateParams(params)).toBeNull(); + const invocation = tool.build(params); + expect(invocation).toBeDefined(); }); - it('should return error if paths array is empty', () => { + it('should throw error if paths array is empty', () => { const params = { paths: [] }; - expect(tool.validateParams(params)).toBe( + expect(() => tool.build(params)).toThrow( 'params/paths must NOT have fewer than 1 items', ); }); - it('should return null for valid exclude and include patterns', () => { + it('should return an invocation for valid exclude and include patterns', () => { const params = { paths: ['src/**/*.ts'], exclude: ['**/*.test.ts'], include: ['src/utils/*.ts'], }; - expect(tool.validateParams(params)).toBeNull(); + const invocation = tool.build(params); + expect(invocation).toBeDefined(); }); - it('should return error if paths array contains an empty string', () => { + it('should throw error if paths array contains an empty string', () => { const params = { paths: ['file1.txt', ''] }; - expect(tool.validateParams(params)).toBe( + expect(() => tool.build(params)).toThrow( 'params/paths/1 must NOT have fewer than 1 characters', ); }); - it('should return error if include array contains non-string elements', () => { + it('should throw error if include array contains non-string elements', () => { const params = { paths: ['file1.txt'], include: ['*.ts', 123] as string[], }; - expect(tool.validateParams(params)).toBe( + expect(() => tool.build(params)).toThrow( 'params/include/1 must be string', ); }); - it('should return error if exclude array contains non-string elements', () => { + it('should throw error if exclude array contains non-string elements', () => { const params = { paths: ['file1.txt'], exclude: ['*.log', {}] as string[], }; - expect(tool.validateParams(params)).toBe( + expect(() => tool.build(params)).toThrow( 'params/exclude/1 must be 
string', ); }); @@ -201,7 +206,8 @@ describe('ReadManyFilesTool', () => { it('should read a single specified file', async () => { createFile('file1.txt', 'Content of file1'); const params = { paths: ['file1.txt'] }; - const result = await tool.execute(params, new AbortController().signal); + const invocation = tool.build(params); + const result = await invocation.execute(new AbortController().signal); const expectedPath = path.join(tempRootDir, 'file1.txt'); expect(result.llmContent).toEqual([ `--- ${expectedPath} ---\n\nContent of file1\n\n`, @@ -215,7 +221,8 @@ describe('ReadManyFilesTool', () => { createFile('file1.txt', 'Content1'); createFile('subdir/file2.js', 'Content2'); const params = { paths: ['file1.txt', 'subdir/file2.js'] }; - const result = await tool.execute(params, new AbortController().signal); + const invocation = tool.build(params); + const result = await invocation.execute(new AbortController().signal); const content = result.llmContent as string[]; const expectedPath1 = path.join(tempRootDir, 'file1.txt'); const expectedPath2 = path.join(tempRootDir, 'subdir/file2.js'); @@ -239,7 +246,8 @@ describe('ReadManyFilesTool', () => { createFile('another.txt', 'Another text'); createFile('sub/data.json', '{}'); const params = { paths: ['*.txt'] }; - const result = await tool.execute(params, new AbortController().signal); + const invocation = tool.build(params); + const result = await invocation.execute(new AbortController().signal); const content = result.llmContent as string[]; const expectedPath1 = path.join(tempRootDir, 'file.txt'); const expectedPath2 = path.join(tempRootDir, 'another.txt'); @@ -263,7 +271,8 @@ describe('ReadManyFilesTool', () => { createFile('src/main.ts', 'Main content'); createFile('src/main.test.ts', 'Test content'); const params = { paths: ['src/**/*.ts'], exclude: ['**/*.test.ts'] }; - const result = await tool.execute(params, new AbortController().signal); + const invocation = tool.build(params); + const result = await 
invocation.execute(new AbortController().signal); const content = result.llmContent as string[]; const expectedPath = path.join(tempRootDir, 'src/main.ts'); expect(content).toEqual([`--- ${expectedPath} ---\n\nMain content\n\n`]); @@ -277,7 +286,8 @@ describe('ReadManyFilesTool', () => { it('should handle nonexistent specific files gracefully', async () => { const params = { paths: ['nonexistent-file.txt'] }; - const result = await tool.execute(params, new AbortController().signal); + const invocation = tool.build(params); + const result = await invocation.execute(new AbortController().signal); expect(result.llmContent).toEqual([ 'No files matching the criteria were found or all were skipped.', ]); @@ -290,7 +300,8 @@ describe('ReadManyFilesTool', () => { createFile('node_modules/some-lib/index.js', 'lib code'); createFile('src/app.js', 'app code'); const params = { paths: ['**/*.js'] }; - const result = await tool.execute(params, new AbortController().signal); + const invocation = tool.build(params); + const result = await invocation.execute(new AbortController().signal); const content = result.llmContent as string[]; const expectedPath = path.join(tempRootDir, 'src/app.js'); expect(content).toEqual([`--- ${expectedPath} ---\n\napp code\n\n`]); @@ -306,7 +317,8 @@ describe('ReadManyFilesTool', () => { createFile('node_modules/some-lib/index.js', 'lib code'); createFile('src/app.js', 'app code'); const params = { paths: ['**/*.js'], useDefaultExcludes: false }; - const result = await tool.execute(params, new AbortController().signal); + const invocation = tool.build(params); + const result = await invocation.execute(new AbortController().signal); const content = result.llmContent as string[]; const expectedPath1 = path.join( tempRootDir, @@ -334,7 +346,8 @@ describe('ReadManyFilesTool', () => { Buffer.from([0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a, 0x0a]), ); const params = { paths: ['*.png'] }; // Explicitly requesting .png - const result = await 
tool.execute(params, new AbortController().signal); + const invocation = tool.build(params); + const result = await invocation.execute(new AbortController().signal); expect(result.llmContent).toEqual([ { inlineData: { @@ -356,7 +369,8 @@ describe('ReadManyFilesTool', () => { Buffer.from([0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a, 0x0a]), ); const params = { paths: ['myExactImage.png'] }; // Explicitly requesting by full name - const result = await tool.execute(params, new AbortController().signal); + const invocation = tool.build(params); + const result = await invocation.execute(new AbortController().signal); expect(result.llmContent).toEqual([ { inlineData: { @@ -373,7 +387,8 @@ describe('ReadManyFilesTool', () => { createBinaryFile('document.pdf', Buffer.from('%PDF-1.4...')); createFile('notes.txt', 'text notes'); const params = { paths: ['*'] }; // Generic glob, not specific to .pdf - const result = await tool.execute(params, new AbortController().signal); + const invocation = tool.build(params); + const result = await invocation.execute(new AbortController().signal); const content = result.llmContent as string[]; const expectedPath = path.join(tempRootDir, 'notes.txt'); expect( @@ -392,7 +407,8 @@ describe('ReadManyFilesTool', () => { it('should include PDF files as inlineData parts if explicitly requested by extension', async () => { createBinaryFile('important.pdf', Buffer.from('%PDF-1.4...')); const params = { paths: ['*.pdf'] }; // Explicitly requesting .pdf files - const result = await tool.execute(params, new AbortController().signal); + const invocation = tool.build(params); + const result = await invocation.execute(new AbortController().signal); expect(result.llmContent).toEqual([ { inlineData: { @@ -406,7 +422,8 @@ describe('ReadManyFilesTool', () => { it('should include PDF files as inlineData parts if explicitly requested by name', async () => { createBinaryFile('report-final.pdf', Buffer.from('%PDF-1.4...')); const params = { paths: 
['report-final.pdf'] }; - const result = await tool.execute(params, new AbortController().signal); + const invocation = tool.build(params); + const result = await invocation.execute(new AbortController().signal); expect(result.llmContent).toEqual([ { inlineData: { @@ -422,7 +439,8 @@ describe('ReadManyFilesTool', () => { createFile('bar.ts', ''); createFile('foo.quux', ''); const params = { paths: ['foo.bar', 'bar.ts', 'foo.quux'] }; - const result = await tool.execute(params, new AbortController().signal); + const invocation = tool.build(params); + const result = await invocation.execute(new AbortController().signal); expect(result.returnDisplay).not.toContain('foo.bar'); expect(result.returnDisplay).not.toContain('foo.quux'); expect(result.returnDisplay).toContain('bar.ts'); @@ -451,7 +469,8 @@ describe('ReadManyFilesTool', () => { fs.writeFileSync(path.join(tempDir2, 'file2.txt'), 'Content2'); const params = { paths: ['*.txt'] }; - const result = await tool.execute(params, new AbortController().signal); + const invocation = tool.build(params); + const result = await invocation.execute(new AbortController().signal); const content = result.llmContent as string[]; if (!Array.isArray(content)) { throw new Error(`llmContent is not an array: ${content}`); @@ -486,7 +505,8 @@ describe('ReadManyFilesTool', () => { createFile('large-file.txt', longContent); const params = { paths: ['*.txt'] }; - const result = await tool.execute(params, new AbortController().signal); + const invocation = tool.build(params); + const result = await invocation.execute(new AbortController().signal); const content = result.llmContent as string[]; const normalFileContent = content.find((c) => c.includes('file1.txt')); @@ -541,7 +561,8 @@ describe('ReadManyFilesTool', () => { }); const params = { paths: files }; - const result = await tool.execute(params, new AbortController().signal); + const invocation = tool.build(params); + const result = await invocation.execute(new 
AbortController().signal); // Verify all files were processed const content = result.llmContent as string[]; @@ -569,7 +590,8 @@ describe('ReadManyFilesTool', () => { ], }; - const result = await tool.execute(params, new AbortController().signal); + const invocation = tool.build(params); + const result = await invocation.execute(new AbortController().signal); const content = result.llmContent as string[]; // Should successfully process valid files despite one failure @@ -606,7 +628,8 @@ describe('ReadManyFilesTool', () => { return 'text'; }); - await tool.execute({ paths: files }, new AbortController().signal); + const invocation = tool.build({ paths: files }); + await invocation.execute(new AbortController().signal); console.log('Execution order:', executionOrder); diff --git a/packages/core/src/tools/read-many-files.ts b/packages/core/src/tools/read-many-files.ts index 5a0799bb..e4e94799 100644 --- a/packages/core/src/tools/read-many-files.ts +++ b/packages/core/src/tools/read-many-files.ts @@ -4,7 +4,13 @@ * SPDX-License-Identifier: Apache-2.0 */ -import { BaseTool, Kind, ToolResult } from './tools.js'; +import { + BaseDeclarativeTool, + BaseToolInvocation, + Kind, + ToolInvocation, + ToolResult, +} from './tools.js'; import { SchemaValidator } from '../utils/schemaValidator.js'; import { getErrorMessage } from '../utils/errors.js'; import * as path from 'path'; @@ -138,120 +144,28 @@ const DEFAULT_EXCLUDES: string[] = [ const DEFAULT_OUTPUT_SEPARATOR_FORMAT = '--- {filePath} ---'; -/** - * Tool implementation for finding and reading multiple text files from the local filesystem - * within a specified target directory. The content is concatenated. - * It is intended to run in an environment with access to the local file system (e.g., a Node.js backend). 
- */ -export class ReadManyFilesTool extends BaseTool< +class ReadManyFilesToolInvocation extends BaseToolInvocation< ReadManyFilesParams, ToolResult > { - static readonly Name: string = 'read_many_files'; - - constructor(private config: Config) { - const parameterSchema = { - type: 'object', - properties: { - paths: { - type: 'array', - items: { - type: 'string', - minLength: 1, - }, - minItems: 1, - description: - "Required. An array of glob patterns or paths relative to the tool's target directory. Examples: ['src/**/*.ts'], ['README.md', 'docs/']", - }, - include: { - type: 'array', - items: { - type: 'string', - minLength: 1, - }, - description: - 'Optional. Additional glob patterns to include. These are merged with `paths`. Example: ["*.test.ts"] to specifically add test files if they were broadly excluded.', - default: [], - }, - exclude: { - type: 'array', - items: { - type: 'string', - minLength: 1, - }, - description: - 'Optional. Glob patterns for files/directories to exclude. Added to default excludes if useDefaultExcludes is true. Example: ["**/*.log", "temp/"]', - default: [], - }, - recursive: { - type: 'boolean', - description: - 'Optional. Whether to search recursively (primarily controlled by `**` in glob patterns). Defaults to true.', - default: true, - }, - useDefaultExcludes: { - type: 'boolean', - description: - 'Optional. Whether to apply a list of default exclusion patterns (e.g., node_modules, .git, binary files). Defaults to true.', - default: true, - }, - file_filtering_options: { - description: - 'Whether to respect ignore patterns from .gitignore or .geminiignore', - type: 'object', - properties: { - respect_git_ignore: { - description: - 'Optional: Whether to respect .gitignore patterns when listing files. Only available in git repositories. Defaults to true.', - type: 'boolean', - }, - respect_gemini_ignore: { - description: - 'Optional: Whether to respect .geminiignore patterns when listing files. 
Defaults to true.', - type: 'boolean', - }, - }, - }, - }, - required: ['paths'], - }; - - super( - ReadManyFilesTool.Name, - 'ReadManyFiles', - `Reads content from multiple files specified by paths or glob patterns within a configured target directory. For text files, it concatenates their content into a single string. It is primarily designed for text-based files. However, it can also process image (e.g., .png, .jpg) and PDF (.pdf) files if their file names or extensions are explicitly included in the 'paths' argument. For these explicitly requested non-text files, their data is read and included in a format suitable for model consumption (e.g., base64 encoded). - -This tool is useful when you need to understand or analyze a collection of files, such as: -- Getting an overview of a codebase or parts of it (e.g., all TypeScript files in the 'src' directory). -- Finding where specific functionality is implemented if the user asks broad questions about code. -- Reviewing documentation files (e.g., all Markdown files in the 'docs' directory). -- Gathering context from multiple configuration files. -- When the user asks to "read all files in X directory" or "show me the content of all Y files". - -Use this tool when the user's query implies needing the content of several files simultaneously for context, analysis, or summarization. For text files, it uses default UTF-8 encoding and a '--- {filePath} ---' separator between file contents. Ensure paths are relative to the target directory. Glob patterns like 'src/**/*.js' are supported. Avoid using for single files if a more specific single-file reading tool is available, unless the user specifically requests to process a list containing just one file via this tool. Other binary files (not explicitly requested as image/PDF) are generally skipped. 
Default excludes apply to common non-text files (except for explicitly requested images/PDFs) and large dependency directories unless 'useDefaultExcludes' is false.`, - Kind.Read, - parameterSchema, - ); + constructor( + private readonly config: Config, + params: ReadManyFilesParams, + ) { + super(params); } - validateParams(params: ReadManyFilesParams): string | null { - const errors = SchemaValidator.validate( - this.schema.parametersJsonSchema, - params, - ); - if (errors) { - return errors; - } - return null; - } - - getDescription(params: ReadManyFilesParams): string { - const allPatterns = [...params.paths, ...(params.include || [])]; - const pathDesc = `using patterns: \`${allPatterns.join('`, `')}\` (within target directory: \`${this.config.getTargetDir()}\`)`; + getDescription(): string { + const allPatterns = [...this.params.paths, ...(this.params.include || [])]; + const pathDesc = `using patterns: +${allPatterns.join('`, `')} + (within target directory: +${this.config.getTargetDir()} +) `; // Determine the final list of exclusion patterns exactly as in execute method - const paramExcludes = params.exclude || []; - const paramUseDefaultExcludes = params.useDefaultExcludes !== false; + const paramExcludes = this.params.exclude || []; + const paramUseDefaultExcludes = this.params.useDefaultExcludes !== false; const geminiIgnorePatterns = this.config .getFileService() .getGeminiIgnorePatterns(); @@ -260,7 +174,16 @@ Use this tool when the user's query implies needing the content of several files ? [...DEFAULT_EXCLUDES, ...paramExcludes, ...geminiIgnorePatterns] : [...paramExcludes, ...geminiIgnorePatterns]; - let excludeDesc = `Excluding: ${finalExclusionPatternsForDescription.length > 0 ? `patterns like \`${finalExclusionPatternsForDescription.slice(0, 2).join('`, `')}${finalExclusionPatternsForDescription.length > 2 ? '...`' : '`'}` : 'none specified'}`; + let excludeDesc = `Excluding: ${ + finalExclusionPatternsForDescription.length > 0 + ? 
`patterns like +${finalExclusionPatternsForDescription + .slice(0, 2) + .join( + '`, `', + )}${finalExclusionPatternsForDescription.length > 2 ? '...`' : '`'}` + : 'none specified' + }`; // Add a note if .geminiignore patterns contributed to the final list of exclusions if (geminiIgnorePatterns.length > 0) { @@ -272,37 +195,29 @@ Use this tool when the user's query implies needing the content of several files } } - return `Will attempt to read and concatenate files ${pathDesc}. ${excludeDesc}. File encoding: ${DEFAULT_ENCODING}. Separator: "${DEFAULT_OUTPUT_SEPARATOR_FORMAT.replace('{filePath}', 'path/to/file.ext')}".`; + return `Will attempt to read and concatenate files ${pathDesc}. ${excludeDesc}. File encoding: ${DEFAULT_ENCODING}. Separator: "${DEFAULT_OUTPUT_SEPARATOR_FORMAT.replace( + '{filePath}', + 'path/to/file.ext', + )}".`; } - async execute( - params: ReadManyFilesParams, - signal: AbortSignal, - ): Promise { - const validationError = this.validateParams(params); - if (validationError) { - return { - llmContent: `Error: Invalid parameters for ${this.displayName}. Reason: ${validationError}`, - returnDisplay: `## Parameter Error\n\n${validationError}`, - }; - } - + async execute(signal: AbortSignal): Promise { const { paths: inputPatterns, include = [], exclude = [], useDefaultExcludes = true, - } = params; + } = this.params; const defaultFileIgnores = this.config.getFileFilteringOptions() ?? DEFAULT_FILE_FILTERING_OPTIONS; const fileFilteringOptions = { respectGitIgnore: - params.file_filtering_options?.respect_git_ignore ?? + this.params.file_filtering_options?.respect_git_ignore ?? defaultFileIgnores.respectGitIgnore, // Use the property from the returned object respectGeminiIgnore: - params.file_filtering_options?.respect_gemini_ignore ?? + this.params.file_filtering_options?.respect_gemini_ignore ?? 
defaultFileIgnores.respectGeminiIgnore, // Use the property from the returned object }; // Get centralized file discovery service @@ -614,3 +529,117 @@ Use this tool when the user's query implies needing the content of several files }; } } + +/** + * Tool implementation for finding and reading multiple text files from the local filesystem + * within a specified target directory. The content is concatenated. + * It is intended to run in an environment with access to the local file system (e.g., a Node.js backend). + */ +export class ReadManyFilesTool extends BaseDeclarativeTool< + ReadManyFilesParams, + ToolResult +> { + static readonly Name: string = 'read_many_files'; + + constructor(private config: Config) { + const parameterSchema = { + type: 'object', + properties: { + paths: { + type: 'array', + items: { + type: 'string', + minLength: 1, + }, + minItems: 1, + description: + "Required. An array of glob patterns or paths relative to the tool's target directory. Examples: ['src/**/*.ts'], ['README.md', 'docs/']", + }, + include: { + type: 'array', + items: { + type: 'string', + minLength: 1, + }, + description: + 'Optional. Additional glob patterns to include. These are merged with `paths`. Example: "*.test.ts" to specifically add test files if they were broadly excluded.', + default: [], + }, + exclude: { + type: 'array', + items: { + type: 'string', + minLength: 1, + }, + description: + 'Optional. Glob patterns for files/directories to exclude. Added to default excludes if useDefaultExcludes is true. Example: "**/*.log", "temp/"', + default: [], + }, + recursive: { + type: 'boolean', + description: + 'Optional. Whether to search recursively (primarily controlled by `**` in glob patterns). Defaults to true.', + default: true, + }, + useDefaultExcludes: { + type: 'boolean', + description: + 'Optional. Whether to apply a list of default exclusion patterns (e.g., node_modules, .git, binary files). 
Defaults to true.', + default: true, + }, + file_filtering_options: { + description: + 'Whether to respect ignore patterns from .gitignore or .geminiignore', + type: 'object', + properties: { + respect_git_ignore: { + description: + 'Optional: Whether to respect .gitignore patterns when listing files. Only available in git repositories. Defaults to true.', + type: 'boolean', + }, + respect_gemini_ignore: { + description: + 'Optional: Whether to respect .geminiignore patterns when listing files. Defaults to true.', + type: 'boolean', + }, + }, + }, + }, + required: ['paths'], + }; + + super( + ReadManyFilesTool.Name, + 'ReadManyFiles', + `Reads content from multiple files specified by paths or glob patterns within a configured target directory. For text files, it concatenates their content into a single string. It is primarily designed for text-based files. However, it can also process image (e.g., .png, .jpg) and PDF (.pdf) files if their file names or extensions are explicitly included in the 'paths' argument. For these explicitly requested non-text files, their data is read and included in a format suitable for model consumption (e.g., base64 encoded). + +This tool is useful when you need to understand or analyze a collection of files, such as: +- Getting an overview of a codebase or parts of it (e.g., all TypeScript files in the 'src' directory). +- Finding where specific functionality is implemented if the user asks broad questions about code. +- Reviewing documentation files (e.g., all Markdown files in the 'docs' directory). +- Gathering context from multiple configuration files. +- When the user asks to "read all files in X directory" or "show me the content of all Y files". + +Use this tool when the user's query implies needing the content of several files simultaneously for context, analysis, or summarization. For text files, it uses default UTF-8 encoding and a '--- {filePath} ---' separator between file contents. 
Ensure paths are relative to the target directory. Glob patterns like 'src/**/*.js' are supported. Avoid using for single files if a more specific single-file reading tool is available, unless the user specifically requests to process a list containing just one file via this tool. Other binary files (not explicitly requested as image/PDF) are generally skipped. Default excludes apply to common non-text files (except for explicitly requested images/PDFs) and large dependency directories unless 'useDefaultExcludes' is false.`, + Kind.Read, + parameterSchema, + ); + } + + protected validateToolParams(params: ReadManyFilesParams): string | null { + const errors = SchemaValidator.validate( + this.schema.parametersJsonSchema, + params, + ); + if (errors) { + return errors; + } + return null; + } + + protected createInvocation( + params: ReadManyFilesParams, + ): ToolInvocation { + return new ReadManyFilesToolInvocation(this.config, params); + } +} diff --git a/packages/core/src/tools/shell.test.ts b/packages/core/src/tools/shell.test.ts index 55720af5..96ff49a1 100644 --- a/packages/core/src/tools/shell.test.ts +++ b/packages/core/src/tools/shell.test.ts @@ -25,7 +25,6 @@ vi.mock('../utils/summarizer.js'); import { isCommandAllowed } from '../utils/shell-utils.js'; import { ShellTool } from './shell.js'; -import { ToolErrorType } from './tool-error.js'; import { type Config } from '../config/config.js'; import { type ShellExecutionResult, @@ -93,22 +92,25 @@ describe('ShellTool', () => { }); }); - describe('validateToolParams', () => { - it('should return null for a valid command', () => { - expect(shellTool.validateToolParams({ command: 'ls -l' })).toBeNull(); + describe('build', () => { + it('should return an invocation for a valid command', () => { + const invocation = shellTool.build({ command: 'ls -l' }); + expect(invocation).toBeDefined(); }); - it('should return an error for an empty command', () => { - expect(shellTool.validateToolParams({ command: ' ' })).toBe( 
+ it('should throw an error for an empty command', () => { + expect(() => shellTool.build({ command: ' ' })).toThrow( 'Command cannot be empty.', ); }); - it('should return an error for a non-existent directory', () => { + it('should throw an error for a non-existent directory', () => { vi.mocked(fs.existsSync).mockReturnValue(false); - expect( - shellTool.validateToolParams({ command: 'ls', directory: 'rel/path' }), - ).toBe("Directory 'rel/path' is not a registered workspace directory."); + expect(() => + shellTool.build({ command: 'ls', directory: 'rel/path' }), + ).toThrow( + "Directory 'rel/path' is not a registered workspace directory.", + ); }); }); @@ -134,10 +136,8 @@ describe('ShellTool', () => { }; it('should wrap command on linux and parse pgrep output', async () => { - const promise = shellTool.execute( - { command: 'my-command &' }, - mockAbortSignal, - ); + const invocation = shellTool.build({ command: 'my-command &' }); + const promise = invocation.execute(mockAbortSignal); resolveShellExecution({ pid: 54321 }); vi.mocked(fs.existsSync).mockReturnValue(true); @@ -159,8 +159,9 @@ describe('ShellTool', () => { it('should not wrap command on windows', async () => { vi.mocked(os.platform).mockReturnValue('win32'); - const promise = shellTool.execute({ command: 'dir' }, mockAbortSignal); - resolveExecutionPromise({ + const invocation = shellTool.build({ command: 'dir' }); + const promise = invocation.execute(mockAbortSignal); + resolveShellExecution({ rawOutput: Buffer.from(''), output: '', stdout: '', @@ -182,10 +183,8 @@ describe('ShellTool', () => { it('should format error messages correctly', async () => { const error = new Error('wrapped command failed'); - const promise = shellTool.execute( - { command: 'user-command' }, - mockAbortSignal, - ); + const invocation = shellTool.build({ command: 'user-command' }); + const promise = invocation.execute(mockAbortSignal); resolveShellExecution({ error, exitCode: 1, @@ -204,40 +203,19 @@ 
describe('ShellTool', () => { expect(result.llmContent).not.toContain('pgrep'); }); - it('should return error with error property for invalid parameters', async () => { - const result = await shellTool.execute( - { command: '' }, // Empty command is invalid - mockAbortSignal, + it('should throw an error for invalid parameters', () => { + expect(() => shellTool.build({ command: '' })).toThrow( + 'Command cannot be empty.', ); - - expect(result.llmContent).toContain( - 'Could not execute command due to invalid parameters:', - ); - expect(result.returnDisplay).toBe('Command cannot be empty.'); - expect(result.error).toEqual({ - message: 'Command cannot be empty.', - type: ToolErrorType.INVALID_TOOL_PARAMS, - }); }); - it('should return error with error property for invalid directory', async () => { + it('should throw an error for invalid directory', () => { vi.mocked(fs.existsSync).mockReturnValue(false); - const result = await shellTool.execute( - { command: 'ls', directory: 'nonexistent' }, - mockAbortSignal, + expect(() => + shellTool.build({ command: 'ls', directory: 'nonexistent' }), + ).toThrow( + `Directory 'nonexistent' is not a registered workspace directory.`, ); - - expect(result.llmContent).toContain( - 'Could not execute command due to invalid parameters:', - ); - expect(result.returnDisplay).toBe( - "Directory 'nonexistent' is not a registered workspace directory.", - ); - expect(result.error).toEqual({ - message: - "Directory 'nonexistent' is not a registered workspace directory.", - type: ToolErrorType.INVALID_TOOL_PARAMS, - }); }); it('should summarize output when configured', async () => { @@ -248,7 +226,8 @@ describe('ShellTool', () => { 'summarized output', ); - const promise = shellTool.execute({ command: 'ls' }, mockAbortSignal); + const invocation = shellTool.build({ command: 'ls' }); + const promise = invocation.execute(mockAbortSignal); resolveExecutionPromise({ output: 'long output', rawOutput: Buffer.from('long output'), @@ -280,9 +259,8 @@ 
describe('ShellTool', () => { }); vi.mocked(fs.existsSync).mockReturnValue(true); // Pretend the file exists - await expect( - shellTool.execute({ command: 'a-command' }, mockAbortSignal), - ).rejects.toThrow(error); + const invocation = shellTool.build({ command: 'a-command' }); + await expect(invocation.execute(mockAbortSignal)).rejects.toThrow(error); const tmpFile = path.join(os.tmpdir(), 'shell_pgrep_abcdef.tmp'); expect(vi.mocked(fs.unlinkSync)).toHaveBeenCalledWith(tmpFile); @@ -299,11 +277,8 @@ describe('ShellTool', () => { }); it('should throttle text output updates', async () => { - const promise = shellTool.execute( - { command: 'stream' }, - mockAbortSignal, - updateOutputMock, - ); + const invocation = shellTool.build({ command: 'stream' }); + const promise = invocation.execute(mockAbortSignal, updateOutputMock); // First chunk, should be throttled. mockShellOutputCallback({ @@ -342,11 +317,8 @@ describe('ShellTool', () => { }); it('should immediately show binary detection message and throttle progress', async () => { - const promise = shellTool.execute( - { command: 'cat img' }, - mockAbortSignal, - updateOutputMock, - ); + const invocation = shellTool.build({ command: 'cat img' }); + const promise = invocation.execute(mockAbortSignal, updateOutputMock); mockShellOutputCallback({ type: 'binary_detected' }); expect(updateOutputMock).toHaveBeenCalledOnce(); @@ -394,8 +366,8 @@ describe('ShellTool', () => { describe('shouldConfirmExecute', () => { it('should request confirmation for a new command and whitelist it on "Always"', async () => { const params = { command: 'npm install' }; - const confirmation = await shellTool.shouldConfirmExecute( - params, + const invocation = shellTool.build(params); + const confirmation = await invocation.shouldConfirmExecute( new AbortController().signal, ); @@ -408,25 +380,21 @@ describe('ShellTool', () => { ); // Should now be whitelisted - const secondConfirmation = await shellTool.shouldConfirmExecute( - { command: 
'npm test' }, + const secondInvocation = shellTool.build({ command: 'npm test' }); + const secondConfirmation = await secondInvocation.shouldConfirmExecute( new AbortController().signal, ); expect(secondConfirmation).toBe(false); }); - it('should skip confirmation if validation fails', async () => { - const confirmation = await shellTool.shouldConfirmExecute( - { command: '' }, - new AbortController().signal, - ); - expect(confirmation).toBe(false); + it('should throw an error if validation fails', () => { + expect(() => shellTool.build({ command: '' })).toThrow(); }); }); }); -describe('validateToolParams', () => { - it('should return null for valid directory', () => { +describe('build', () => { + it('should return an invocation for valid directory', () => { const config = { getCoreTools: () => undefined, getExcludeTools: () => undefined, @@ -435,14 +403,14 @@ describe('validateToolParams', () => { createMockWorkspaceContext('/root', ['/users/test']), } as unknown as Config; const shellTool = new ShellTool(config); - const result = shellTool.validateToolParams({ + const invocation = shellTool.build({ command: 'ls', directory: 'test', }); - expect(result).toBeNull(); + expect(invocation).toBeDefined(); }); - it('should return error for directory outside workspace', () => { + it('should throw an error for directory outside workspace', () => { const config = { getCoreTools: () => undefined, getExcludeTools: () => undefined, @@ -451,10 +419,11 @@ describe('validateToolParams', () => { createMockWorkspaceContext('/root', ['/users/test']), } as unknown as Config; const shellTool = new ShellTool(config); - const result = shellTool.validateToolParams({ - command: 'ls', - directory: 'test2', - }); - expect(result).toContain('is not a registered workspace directory'); + expect(() => + shellTool.build({ + command: 'ls', + directory: 'test2', + }), + ).toThrow('is not a registered workspace directory'); }); }); diff --git a/packages/core/src/tools/shell.ts 
b/packages/core/src/tools/shell.ts index 4fa08297..0cc727fb 100644 --- a/packages/core/src/tools/shell.ts +++ b/packages/core/src/tools/shell.ts @@ -10,14 +10,15 @@ import os from 'os'; import crypto from 'crypto'; import { Config } from '../config/config.js'; import { - BaseTool, + BaseDeclarativeTool, + BaseToolInvocation, + ToolInvocation, ToolResult, ToolCallConfirmationDetails, ToolExecuteConfirmationDetails, ToolConfirmationOutcome, Kind, } from './tools.js'; -import { ToolErrorType } from './tool-error.js'; import { SchemaValidator } from '../utils/schemaValidator.js'; import { getErrorMessage } from '../utils/errors.js'; import { summarizeToolOutput } from '../utils/summarizer.js'; @@ -40,120 +41,36 @@ export interface ShellToolParams { directory?: string; } -export class ShellTool extends BaseTool { - static Name: string = 'run_shell_command'; - private allowlist: Set = new Set(); - - constructor(private readonly config: Config) { - super( - ShellTool.Name, - 'Shell', - `This tool executes a given shell command as \`bash -c \`. Command can start background processes using \`&\`. Command is executed as a subprocess that leads its own process group. Command process group can be terminated as \`kill -- -PGID\` or signaled as \`kill -s SIGNAL -- -PGID\`. - - The following information is returned: - - Command: Executed command. - Directory: Directory (relative to project root) where command was executed, or \`(root)\`. - Stdout: Output on stdout stream. Can be \`(empty)\` or partial on error and for any unwaited background processes. - Stderr: Output on stderr stream. Can be \`(empty)\` or partial on error and for any unwaited background processes. - Error: Error or \`(none)\` if no error was reported for the subprocess. - Exit Code: Exit code or \`(none)\` if terminated by signal. - Signal: Signal number or \`(none)\` if no signal was received. - Background PIDs: List of background processes started or \`(none)\`. 
- Process Group PGID: Process group started or \`(none)\``, - Kind.Execute, - { - type: 'object', - properties: { - command: { - type: 'string', - description: 'Exact bash command to execute as `bash -c `', - }, - description: { - type: 'string', - description: - 'Brief description of the command for the user. Be specific and concise. Ideally a single sentence. Can be up to 3 sentences for clarity. No line breaks.', - }, - directory: { - type: 'string', - description: - '(OPTIONAL) Directory to run the command in, if not the project root directory. Must be relative to the project root directory and must already exist.', - }, - }, - required: ['command'], - }, - false, // output is not markdown - true, // output can be updated - ); +class ShellToolInvocation extends BaseToolInvocation< + ShellToolParams, + ToolResult +> { + constructor( + private readonly config: Config, + params: ShellToolParams, + private readonly allowlist: Set, + ) { + super(params); } - getDescription(params: ShellToolParams): string { - let description = `${params.command}`; + getDescription(): string { + let description = `${this.params.command}`; // append optional [in directory] // note description is needed even if validation fails due to absolute path - if (params.directory) { - description += ` [in ${params.directory}]`; + if (this.params.directory) { + description += ` [in ${this.params.directory}]`; } // append optional (description), replacing any line breaks with spaces - if (params.description) { - description += ` (${params.description.replace(/\n/g, ' ')})`; + if (this.params.description) { + description += ` (${this.params.description.replace(/\n/g, ' ')})`; } return description; } - validateToolParams(params: ShellToolParams): string | null { - const commandCheck = isCommandAllowed(params.command, this.config); - if (!commandCheck.allowed) { - if (!commandCheck.reason) { - console.error( - 'Unexpected: isCommandAllowed returned false without a reason', - ); - return `Command is 
not allowed: ${params.command}`; - } - return commandCheck.reason; - } - const errors = SchemaValidator.validate( - this.schema.parametersJsonSchema, - params, - ); - if (errors) { - return errors; - } - if (!params.command.trim()) { - return 'Command cannot be empty.'; - } - if (getCommandRoots(params.command).length === 0) { - return 'Could not identify command root to obtain permission from user.'; - } - if (params.directory) { - if (path.isAbsolute(params.directory)) { - return 'Directory cannot be absolute. Please refer to workspace directories by their name.'; - } - const workspaceDirs = this.config.getWorkspaceContext().getDirectories(); - const matchingDirs = workspaceDirs.filter( - (dir) => path.basename(dir) === params.directory, - ); - - if (matchingDirs.length === 0) { - return `Directory '${params.directory}' is not a registered workspace directory.`; - } - - if (matchingDirs.length > 1) { - return `Directory name '${params.directory}' is ambiguous as it matches multiple workspace directories.`; - } - } - return null; - } - async shouldConfirmExecute( - params: ShellToolParams, _abortSignal: AbortSignal, ): Promise { - if (this.validateToolParams(params)) { - return false; // skip confirmation, execute call will fail immediately - } - - const command = stripShellWrapper(params.command); + const command = stripShellWrapper(this.params.command); const rootCommands = [...new Set(getCommandRoots(command))]; const commandsToConfirm = rootCommands.filter( (command) => !this.allowlist.has(command), @@ -166,7 +83,7 @@ export class ShellTool extends BaseTool { const confirmationDetails: ToolExecuteConfirmationDetails = { type: 'exec', title: 'Confirm Shell Command', - command: params.command, + command: this.params.command, rootCommand: commandsToConfirm.join(', '), onConfirm: async (outcome: ToolConfirmationOutcome) => { if (outcome === ToolConfirmationOutcome.ProceedAlways) { @@ -178,25 +95,10 @@ export class ShellTool extends BaseTool { } async execute( - 
params: ShellToolParams, signal: AbortSignal, updateOutput?: (output: string) => void, ): Promise { - const strippedCommand = stripShellWrapper(params.command); - const validationError = this.validateToolParams({ - ...params, - command: strippedCommand, - }); - if (validationError) { - return { - llmContent: `Could not execute command due to invalid parameters: ${validationError}`, - returnDisplay: validationError, - error: { - message: validationError, - type: ToolErrorType.INVALID_TOOL_PARAMS, - }, - }; - } + const strippedCommand = stripShellWrapper(this.params.command); if (signal.aborted) { return { @@ -224,7 +126,7 @@ export class ShellTool extends BaseTool { const cwd = path.resolve( this.config.getTargetDir(), - params.directory || '', + this.params.directory || '', ); let cumulativeStdout = ''; @@ -324,12 +226,12 @@ export class ShellTool extends BaseTool { // Create a formatted error string for display, replacing the wrapper command // with the user-facing command. const finalError = result.error - ? result.error.message.replace(commandToExecute, params.command) + ? result.error.message.replace(commandToExecute, this.params.command) : '(none)'; llmContent = [ - `Command: ${params.command}`, - `Directory: ${params.directory || '(root)'}`, + `Command: ${this.params.command}`, + `Directory: ${this.params.directory || '(root)'}`, `Stdout: ${result.stdout || '(empty)'}`, `Stderr: ${result.stderr || '(empty)'}`, `Error: ${finalError}`, // Use the cleaned error string. 
@@ -366,12 +268,12 @@ export class ShellTool extends BaseTool { } const summarizeConfig = this.config.getSummarizeToolOutputConfig(); - if (summarizeConfig && summarizeConfig[this.name]) { + if (summarizeConfig && summarizeConfig[ShellTool.Name]) { const summary = await summarizeToolOutput( llmContent, this.config.getGeminiClient(), signal, - summarizeConfig[this.name].tokenBudget, + summarizeConfig[ShellTool.Name].tokenBudget, ); return { llmContent: summary, @@ -390,3 +292,104 @@ export class ShellTool extends BaseTool { } } } + +export class ShellTool extends BaseDeclarativeTool< + ShellToolParams, + ToolResult +> { + static Name: string = 'run_shell_command'; + private allowlist: Set = new Set(); + + constructor(private readonly config: Config) { + super( + ShellTool.Name, + 'Shell', + `This tool executes a given shell command as \`bash -c \`. Command can start background processes using \`&\`. Command is executed as a subprocess that leads its own process group. Command process group can be terminated as \`kill -- -PGID\` or signaled as \`kill -s SIGNAL -- -PGID\`. + + The following information is returned: + + Command: Executed command. + Directory: Directory (relative to project root) where command was executed, or \`(root)\`. + Stdout: Output on stdout stream. Can be \`(empty)\` or partial on error and for any unwaited background processes. + Stderr: Output on stderr stream. Can be \`(empty)\` or partial on error and for any unwaited background processes. + Error: Error or \`(none)\` if no error was reported for the subprocess. + Exit Code: Exit code or \`(none)\` if terminated by signal. + Signal: Signal number or \`(none)\` if no signal was received. + Background PIDs: List of background processes started or \`(none)\`. 
+ Process Group PGID: Process group started or \`(none)\``, + Kind.Execute, + { + type: 'object', + properties: { + command: { + type: 'string', + description: 'Exact bash command to execute as `bash -c `', + }, + description: { + type: 'string', + description: + 'Brief description of the command for the user. Be specific and concise. Ideally a single sentence. Can be up to 3 sentences for clarity. No line breaks.', + }, + directory: { + type: 'string', + description: + '(OPTIONAL) Directory to run the command in, if not the project root directory. Must be relative to the project root directory and must already exist.', + }, + }, + required: ['command'], + }, + false, // output is not markdown + true, // output can be updated + ); + } + + protected validateToolParams(params: ShellToolParams): string | null { + const commandCheck = isCommandAllowed(params.command, this.config); + if (!commandCheck.allowed) { + if (!commandCheck.reason) { + console.error( + 'Unexpected: isCommandAllowed returned false without a reason', + ); + return `Command is not allowed: ${params.command}`; + } + return commandCheck.reason; + } + const errors = SchemaValidator.validate( + this.schema.parametersJsonSchema, + params, + ); + if (errors) { + return errors; + } + if (!params.command.trim()) { + return 'Command cannot be empty.'; + } + if (getCommandRoots(params.command).length === 0) { + return 'Could not identify command root to obtain permission from user.'; + } + if (params.directory) { + if (path.isAbsolute(params.directory)) { + return 'Directory cannot be absolute. 
Please refer to workspace directories by their name.'; + } + const workspaceDirs = this.config.getWorkspaceContext().getDirectories(); + const matchingDirs = workspaceDirs.filter( + (dir) => path.basename(dir) === params.directory, + ); + + if (matchingDirs.length === 0) { + return `Directory '${params.directory}' is not a registered workspace directory.`; + } + + if (matchingDirs.length > 1) { + return `Directory name '${params.directory}' is ambiguous as it matches multiple workspace directories.`; + } + } + return null; + } + + protected createInvocation( + params: ShellToolParams, + ): ToolInvocation { + return new ShellToolInvocation(this.config, params, this.allowlist); + } +} diff --git a/packages/core/src/tools/web-fetch.test.ts b/packages/core/src/tools/web-fetch.test.ts index 6be9d504..b589c139 100644 --- a/packages/core/src/tools/web-fetch.test.ts +++ b/packages/core/src/tools/web-fetch.test.ts @@ -20,7 +20,10 @@ describe('WebFetchTool', () => { it('should return confirmation details with the correct prompt and urls', async () => { const tool = new WebFetchTool(mockConfig); const params = { prompt: 'fetch https://example.com' }; - const confirmationDetails = await tool.shouldConfirmExecute(params); + const invocation = tool.build(params); + const confirmationDetails = await invocation.shouldConfirmExecute( + new AbortController().signal, + ); expect(confirmationDetails).toEqual({ type: 'info', @@ -37,7 +40,10 @@ describe('WebFetchTool', () => { prompt: 'fetch https://github.com/google/gemini-react/blob/main/README.md', }; - const confirmationDetails = await tool.shouldConfirmExecute(params); + const invocation = tool.build(params); + const confirmationDetails = await invocation.shouldConfirmExecute( + new AbortController().signal, + ); expect(confirmationDetails).toEqual({ type: 'info', @@ -57,7 +63,10 @@ describe('WebFetchTool', () => { getApprovalMode: () => ApprovalMode.AUTO_EDIT, } as unknown as Config); const params = { prompt: 'fetch 
https://example.com' }; - const confirmationDetails = await tool.shouldConfirmExecute(params); + const invocation = tool.build(params); + const confirmationDetails = await invocation.shouldConfirmExecute( + new AbortController().signal, + ); expect(confirmationDetails).toBe(false); }); @@ -69,7 +78,10 @@ describe('WebFetchTool', () => { setApprovalMode, } as unknown as Config); const params = { prompt: 'fetch https://example.com' }; - const confirmationDetails = await tool.shouldConfirmExecute(params); + const invocation = tool.build(params); + const confirmationDetails = await invocation.shouldConfirmExecute( + new AbortController().signal, + ); if ( confirmationDetails && diff --git a/packages/core/src/tools/web-fetch.ts b/packages/core/src/tools/web-fetch.ts index bf8d1968..909fc548 100644 --- a/packages/core/src/tools/web-fetch.ts +++ b/packages/core/src/tools/web-fetch.ts @@ -6,14 +6,16 @@ import { SchemaValidator } from '../utils/schemaValidator.js'; import { - BaseTool, - ToolResult, + BaseDeclarativeTool, + BaseToolInvocation, + Kind, ToolCallConfirmationDetails, ToolConfirmationOutcome, - Kind, + ToolInvocation, + ToolResult, } from './tools.js'; import { getErrorMessage } from '../utils/errors.js'; -import { Config, ApprovalMode } from '../config/config.js'; +import { ApprovalMode, Config } from '../config/config.js'; import { getResponseText } from '../utils/generateContentResponseUtilities.js'; import { fetchWithTimeout, isPrivateIp } from '../utils/fetch.js'; import { convert } from 'html-to-text'; @@ -59,41 +61,19 @@ export interface WebFetchToolParams { prompt: string; } -/** - * Implementation of the WebFetch tool logic - */ -export class WebFetchTool extends BaseTool { - static readonly Name: string = 'web_fetch'; - - constructor(private readonly config: Config) { - super( - WebFetchTool.Name, - 'WebFetch', - "Processes content from URL(s), including local and private network addresses (e.g., localhost), embedded in a prompt. 
Include up to 20 URLs and instructions (e.g., summarize, extract specific data) directly in the 'prompt' parameter.", - Kind.Fetch, - { - properties: { - prompt: { - description: - 'A comprehensive prompt that includes the URL(s) (up to 20) to fetch and specific instructions on how to process their content (e.g., "Summarize https://example.com/article and extract key points from https://another.com/data"). Must contain as least one URL starting with http:// or https://.', - type: 'string', - }, - }, - required: ['prompt'], - type: 'object', - }, - ); - const proxy = config.getProxy(); - if (proxy) { - setGlobalDispatcher(new ProxyAgent(proxy as string)); - } +class WebFetchToolInvocation extends BaseToolInvocation< + WebFetchToolParams, + ToolResult +> { + constructor( + private readonly config: Config, + params: WebFetchToolParams, + ) { + super(params); } - private async executeFallback( - params: WebFetchToolParams, - signal: AbortSignal, - ): Promise { - const urls = extractUrls(params.prompt); + private async executeFallback(signal: AbortSignal): Promise { + const urls = extractUrls(this.params.prompt); if (urls.length === 0) { return { llmContent: 'Error: No URL found in the prompt for fallback.', @@ -127,13 +107,14 @@ export class WebFetchTool extends BaseTool { }).substring(0, MAX_CONTENT_LENGTH); const geminiClient = this.config.getGeminiClient(); - const fallbackPrompt = `The user requested the following: "${params.prompt}". + const fallbackPrompt = `The user requested the following: "${this.params.prompt}". -I was unable to access the URL directly. Instead, I have fetched the raw content of the page. Please use the following content to answer the user's request. Do not attempt to access the URL again. +I was unable to access the URL directly. Instead, I have fetched the raw content of the page. Please use the following content to answer the request. Do not attempt to access the URL again. 
--- ${textContent} ----`; +--- +`; const result = await geminiClient.generateContent( [{ role: 'user', parts: [{ text: fallbackPrompt }] }], {}, @@ -154,49 +135,22 @@ ${textContent} } } - validateParams(params: WebFetchToolParams): string | null { - const errors = SchemaValidator.validate( - this.schema.parametersJsonSchema, - params, - ); - if (errors) { - return errors; - } - if (!params.prompt || params.prompt.trim() === '') { - return "The 'prompt' parameter cannot be empty and must contain URL(s) and instructions."; - } - if ( - !params.prompt.includes('http://') && - !params.prompt.includes('https://') - ) { - return "The 'prompt' must contain at least one valid URL (starting with http:// or https://)."; - } - return null; - } - - getDescription(params: WebFetchToolParams): string { + getDescription(): string { const displayPrompt = - params.prompt.length > 100 - ? params.prompt.substring(0, 97) + '...' - : params.prompt; + this.params.prompt.length > 100 + ? this.params.prompt.substring(0, 97) + '...' + : this.params.prompt; return `Processing URLs and instructions from prompt: "${displayPrompt}"`; } - async shouldConfirmExecute( - params: WebFetchToolParams, - ): Promise { + async shouldConfirmExecute(): Promise { if (this.config.getApprovalMode() === ApprovalMode.AUTO_EDIT) { return false; } - const validationError = this.validateParams(params); - if (validationError) { - return false; - } - // Perform GitHub URL conversion here to differentiate between user-provided // URL and the actual URL to be fetched. 
- const urls = extractUrls(params.prompt).map((url) => { + const urls = extractUrls(this.params.prompt).map((url) => { if (url.includes('github.com') && url.includes('/blob/')) { return url .replace('github.com', 'raw.githubusercontent.com') @@ -208,7 +162,7 @@ ${textContent} const confirmationDetails: ToolCallConfirmationDetails = { type: 'info', title: `Confirm Web Fetch`, - prompt: params.prompt, + prompt: this.params.prompt, urls, onConfirm: async (outcome: ToolConfirmationOutcome) => { if (outcome === ToolConfirmationOutcome.ProceedAlways) { @@ -219,25 +173,14 @@ ${textContent} return confirmationDetails; } - async execute( - params: WebFetchToolParams, - signal: AbortSignal, - ): Promise { - const validationError = this.validateParams(params); - if (validationError) { - return { - llmContent: `Error: Invalid parameters provided. Reason: ${validationError}`, - returnDisplay: validationError, - }; - } - - const userPrompt = params.prompt; + async execute(signal: AbortSignal): Promise { + const userPrompt = this.params.prompt; const urls = extractUrls(userPrompt); const url = urls[0]; const isPrivate = isPrivateIp(url); if (isPrivate) { - return this.executeFallback(params, signal); + return this.executeFallback(signal); } const geminiClient = this.config.getGeminiClient(); @@ -295,7 +238,7 @@ ${textContent} } if (processingError) { - return this.executeFallback(params, signal); + return this.executeFallback(signal); } const sourceListFormatted: string[] = []; @@ -360,3 +303,63 @@ ${sourceListFormatted.join('\n')}`; } } } + +/** + * Implementation of the WebFetch tool logic + */ +export class WebFetchTool extends BaseDeclarativeTool< + WebFetchToolParams, + ToolResult +> { + static readonly Name: string = 'web_fetch'; + + constructor(private readonly config: Config) { + super( + WebFetchTool.Name, + 'WebFetch', + "Processes content from URL(s), including local and private network addresses (e.g., localhost), embedded in a prompt. 
Include up to 20 URLs and instructions (e.g., summarize, extract specific data) directly in the 'prompt' parameter.", + Kind.Fetch, + { + properties: { + prompt: { + description: + 'A comprehensive prompt that includes the URL(s) (up to 20) to fetch and specific instructions on how to process their content (e.g., "Summarize https://example.com/article and extract key points from https://another.com/data"). Must contain as least one URL starting with http:// or https://.', + type: 'string', + }, + }, + required: ['prompt'], + type: 'object', + }, + ); + const proxy = config.getProxy(); + if (proxy) { + setGlobalDispatcher(new ProxyAgent(proxy as string)); + } + } + + protected validateToolParams(params: WebFetchToolParams): string | null { + const errors = SchemaValidator.validate( + this.schema.parametersJsonSchema, + params, + ); + if (errors) { + return errors; + } + if (!params.prompt || params.prompt.trim() === '') { + return "The 'prompt' parameter cannot be empty and must contain URL(s) and instructions."; + } + if ( + !params.prompt.includes('http://') && + !params.prompt.includes('https://') + ) { + return "The 'prompt' must contain at least one valid URL (starting with http:// or https://)."; + } + return null; + } + + protected createInvocation( + params: WebFetchToolParams, + ): ToolInvocation { + return new WebFetchToolInvocation(this.config, params); + } +} From 8fae227e8d53b962f8b7db3abff51906fad1d181 Mon Sep 17 00:00:00 2001 From: Harold Mciver Date: Wed, 13 Aug 2025 19:31:24 +0000 Subject: [PATCH 33/45] fix: Prevent duplicate or inactive tools/prompts after server refresh (#5850) --- packages/cli/src/ui/commands/mcpCommand.test.ts | 9 +++++++++ packages/cli/src/ui/commands/mcpCommand.ts | 6 ++++++ packages/cli/src/ui/commands/types.ts | 1 + packages/cli/src/ui/hooks/slashCommandProcessor.ts | 9 ++++++++- packages/core/src/tools/tool-registry.ts | 12 ++++++++++++ 5 files changed, 36 insertions(+), 1 deletion(-) diff --git 
a/packages/cli/src/ui/commands/mcpCommand.test.ts b/packages/cli/src/ui/commands/mcpCommand.test.ts index ad04cb69..0f339665 100644 --- a/packages/cli/src/ui/commands/mcpCommand.test.ts +++ b/packages/cli/src/ui/commands/mcpCommand.test.ts @@ -881,9 +881,14 @@ describe('mcpCommand', () => { }), getToolRegistry: vi.fn().mockResolvedValue(mockToolRegistry), getGeminiClient: vi.fn().mockReturnValue(mockGeminiClient), + getPromptRegistry: vi.fn().mockResolvedValue({ + removePromptsByServer: vi.fn(), + }), }, }, }); + // Mock the reloadCommands function + context.ui.reloadCommands = vi.fn(); const { MCPOAuthProvider } = await import('@google/gemini-cli-core'); @@ -901,6 +906,7 @@ describe('mcpCommand', () => { 'test-server', ); expect(mockGeminiClient.setTools).toHaveBeenCalled(); + expect(context.ui.reloadCommands).toHaveBeenCalledTimes(1); expect(isMessageAction(result)).toBe(true); if (isMessageAction(result)) { @@ -985,6 +991,8 @@ describe('mcpCommand', () => { }, }, }); + // Mock the reloadCommands function, which is new logic. + context.ui.reloadCommands = vi.fn(); const refreshCommand = mcpCommand.subCommands?.find( (cmd) => cmd.name === 'refresh', @@ -1002,6 +1010,7 @@ describe('mcpCommand', () => { ); expect(mockToolRegistry.discoverMcpTools).toHaveBeenCalled(); expect(mockGeminiClient.setTools).toHaveBeenCalled(); + expect(context.ui.reloadCommands).toHaveBeenCalledTimes(1); expect(isMessageAction(result)).toBe(true); if (isMessageAction(result)) { diff --git a/packages/cli/src/ui/commands/mcpCommand.ts b/packages/cli/src/ui/commands/mcpCommand.ts index 11c71f1a..686102be 100644 --- a/packages/cli/src/ui/commands/mcpCommand.ts +++ b/packages/cli/src/ui/commands/mcpCommand.ts @@ -417,6 +417,9 @@ const authCommand: SlashCommand = { await geminiClient.setTools(); } + // Reload the slash commands to reflect the changes. 
+ context.ui.reloadCommands(); + return { type: 'message', messageType: 'info', @@ -507,6 +510,9 @@ const refreshCommand: SlashCommand = { await geminiClient.setTools(); } + // Reload the slash commands to reflect the changes. + context.ui.reloadCommands(); + return getMcpStatus(context, false, false, false); }, }; diff --git a/packages/cli/src/ui/commands/types.ts b/packages/cli/src/ui/commands/types.ts index d4f0b454..876409d0 100644 --- a/packages/cli/src/ui/commands/types.ts +++ b/packages/cli/src/ui/commands/types.ts @@ -61,6 +61,7 @@ export interface CommandContext { toggleCorgiMode: () => void; toggleVimEnabled: () => Promise; setGeminiMdFileCount: (count: number) => void; + reloadCommands: () => void; }; // Session-specific data session: { diff --git a/packages/cli/src/ui/hooks/slashCommandProcessor.ts b/packages/cli/src/ui/hooks/slashCommandProcessor.ts index b4ce0d4d..32f55de2 100644 --- a/packages/cli/src/ui/hooks/slashCommandProcessor.ts +++ b/packages/cli/src/ui/hooks/slashCommandProcessor.ts @@ -57,6 +57,11 @@ export const useSlashCommandProcessor = ( ) => { const session = useSessionStats(); const [commands, setCommands] = useState([]); + const [reloadTrigger, setReloadTrigger] = useState(0); + + const reloadCommands = useCallback(() => { + setReloadTrigger((v) => v + 1); + }, []); const [shellConfirmationRequest, setShellConfirmationRequest] = useState { controller.abort(); }; - }, [config, ideMode]); + }, [config, ideMode, reloadTrigger]); const handleSlashCommand = useCallback( async ( diff --git a/packages/core/src/tools/tool-registry.ts b/packages/core/src/tools/tool-registry.ts index 02f77727..b3625285 100644 --- a/packages/core/src/tools/tool-registry.ts +++ b/packages/core/src/tools/tool-registry.ts @@ -158,6 +158,18 @@ export class ToolRegistry { } } + /** + * Removes all tools from a specific MCP server. + * @param serverName The name of the server to remove tools from. 
+ */ + removeMcpToolsByServer(serverName: string): void { + for (const [name, tool] of this.tools.entries()) { + if (tool instanceof DiscoveredMCPTool && tool.serverName === serverName) { + this.tools.delete(name); + } + } + } + /** * Discovers tools from project (if available and configured). * Can be called multiple times to update discovered tools. From a90aeb3d8fd05fc6303ce9ef4e957c2e19cbe9c4 Mon Sep 17 00:00:00 2001 From: Richie Foreman Date: Wed, 13 Aug 2025 16:17:38 -0400 Subject: [PATCH 34/45] chore(build/compiler): Enable a bunch of strict TS compiler options. (#6138) --- GEMINI.md | 41 ++++++++++++++----- .../cli/src/ui/commands/directoryCommand.tsx | 6 +-- .../components/shared/vim-buffer-actions.ts | 3 +- packages/cli/src/utils/checks.ts | 28 +++++++++++++ .../core/src/core/coreToolScheduler.test.ts | 9 ++-- packages/core/src/mcp/oauth-provider.ts | 1 - packages/core/src/test-utils/tools.ts | 2 +- packages/core/src/tools/edit.ts | 2 +- packages/core/src/tools/glob.ts | 2 +- packages/core/src/tools/grep.ts | 2 +- packages/core/src/tools/ls.ts | 2 +- packages/core/src/tools/mcp-tool.ts | 4 +- packages/core/src/tools/memoryTool.ts | 4 +- packages/core/src/tools/read-file.ts | 4 +- packages/core/src/tools/read-many-files.ts | 4 +- packages/core/src/tools/shell.ts | 6 ++- packages/core/src/tools/tool-registry.ts | 4 +- packages/core/src/tools/tools.ts | 16 ++++---- packages/core/src/tools/web-fetch.ts | 8 +++- packages/core/src/tools/web-search.ts | 2 +- packages/core/src/tools/write-file.ts | 8 ++-- packages/core/src/utils/errorParsing.test.ts | 2 - .../core/src/utils/filesearch/fileSearch.ts | 2 +- .../src/utils/filesearch/result-cache.test.ts | 7 ++-- .../core/src/utils/filesearch/result-cache.ts | 5 +-- .../src/utils/memoryImportProcessor.test.ts | 40 +++++++++--------- .../core/src/utils/memoryImportProcessor.ts | 2 +- tsconfig.json | 9 ++++ 28 files changed, 141 insertions(+), 84 deletions(-) create mode 100644 packages/cli/src/utils/checks.ts diff 
--git a/GEMINI.md b/GEMINI.md index 74185b4b..6eab6a47 100644 --- a/GEMINI.md +++ b/GEMINI.md @@ -97,17 +97,17 @@ TypeScript's power lies in its ability to provide static type checking, catching - **Preferring `unknown` over `any`**: When you absolutely cannot determine the type of a value at compile time, and you're tempted to reach for any, consider using unknown instead. unknown is a type-safe counterpart to any. While a variable of type unknown can hold any value, you must perform type narrowing (e.g., using typeof or instanceof checks, or a type assertion) before you can perform any operations on it. This forces you to handle the unknown type explicitly, preventing accidental runtime errors. - ``` + ```ts function processValue(value: unknown) { - if (typeof value === 'string') { - // value is now safely a string - console.log(value.toUpperCase()); - } else if (typeof value === 'number') { - // value is now safely a number - console.log(value * 2); - } - // Without narrowing, you cannot access properties or methods on 'value' - // console.log(value.someProperty); // Error: Object is of type 'unknown'. + if (typeof value === 'string') { + // value is now safely a string + console.log(value.toUpperCase()); + } else if (typeof value === 'number') { + // value is now safely a number + console.log(value * 2); + } + // Without narrowing, you cannot access properties or methods on 'value' + // console.log(value.someProperty); // Error: Object is of type 'unknown'. } ``` @@ -115,6 +115,27 @@ TypeScript's power lies in its ability to provide static type checking, catching - **Bypassing Type Checking**: Like `any`, type assertions bypass TypeScript's safety checks. If your assertion is incorrect, you introduce a runtime error that TypeScript would not have warned you about. 
- **Code Smell in Testing**: A common scenario where `any` or type assertions might be tempting is when trying to test "private" implementation details (e.g., spying on or stubbing an unexported function within a module). This is a strong indication of a "code smell" in your testing strategy and potentially your code structure. Instead of trying to force access to private internals, consider whether those internal details should be refactored into a separate module with a well-defined public API. This makes them inherently testable without compromising encapsulation. +### Type narrowing `switch` clauses + +When authoring a switch clause over an enumeration or fixed list of items, +always prefer to use the `checkExhaustive` helper method within the default +clause of the switch. This will ensure that all of the possible options within +the value or enumeration are used. + +This helper method can be found in `packages/cli/src/utils/checks.ts` + +Here's an example of using the helper method properly: + +``` +switch (someValue) { + case 1: + case 2: + // ... + default: + return checkExhaustive(someValue); +} +``` + ### Embracing JavaScript's Array Operators To further enhance code cleanliness and promote safe functional programming practices, leverage JavaScript's rich set of array operators as much as possible. Methods like `.map()`, `.filter()`, `.reduce()`, `.slice()`, `.sort()`, and others are incredibly powerful for transforming and manipulating data collections in an immutable and declarative way. 
diff --git a/packages/cli/src/ui/commands/directoryCommand.tsx b/packages/cli/src/ui/commands/directoryCommand.tsx index 6c667f44..d3f50e4c 100644 --- a/packages/cli/src/ui/commands/directoryCommand.tsx +++ b/packages/cli/src/ui/commands/directoryCommand.tsx @@ -138,13 +138,11 @@ export const directoryCommand: SlashCommand = { if (errors.length > 0) { addItem( - { - type: MessageType.ERROR, - text: errors.join('\n'), - }, + { type: MessageType.ERROR, text: errors.join('\n') }, Date.now(), ); } + return; }, }, { diff --git a/packages/cli/src/ui/components/shared/vim-buffer-actions.ts b/packages/cli/src/ui/components/shared/vim-buffer-actions.ts index 0e2e7989..bf04716f 100644 --- a/packages/cli/src/ui/components/shared/vim-buffer-actions.ts +++ b/packages/cli/src/ui/components/shared/vim-buffer-actions.ts @@ -19,6 +19,7 @@ import { findWordEndInLine, } from './text-buffer.js'; import { cpLen, toCodePoints } from '../../utils/textUtils.js'; +import { assumeExhaustive } from '../../../utils/checks.js'; // Check if we're at the end of a base word (on the last base character) // Returns true if current position has a base character followed only by combining marks until non-word @@ -806,7 +807,7 @@ export function handleVimAction( default: { // This should never happen if TypeScript is working correctly - const _exhaustiveCheck: never = action; + assumeExhaustive(action); return state; } } diff --git a/packages/cli/src/utils/checks.ts b/packages/cli/src/utils/checks.ts new file mode 100644 index 00000000..0598835f --- /dev/null +++ b/packages/cli/src/utils/checks.ts @@ -0,0 +1,28 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +/* Fail to compile on unexpected values. */ +export function assumeExhaustive(_value: never): void {} + +/** + * Throws an exception on unexpected values. 
+ * + * A common use case is switch statements: + * switch(enumValue) { + * case Enum.A: + * case Enum.B: + * break; + * default: + * checkExhaustive(enumValue); + * } + */ +export function checkExhaustive( + value: never, + msg = `unexpected value ${value}!`, +): never { + assumeExhaustive(value); + throw new Error(msg); +} diff --git a/packages/core/src/core/coreToolScheduler.test.ts b/packages/core/src/core/coreToolScheduler.test.ts index df39c1dc..71b2d64c 100644 --- a/packages/core/src/core/coreToolScheduler.test.ts +++ b/packages/core/src/core/coreToolScheduler.test.ts @@ -9,7 +9,6 @@ import { describe, it, expect, vi } from 'vitest'; import { CoreToolScheduler, ToolCall, - ValidatingToolCall, convertToFunctionResponse, } from './coreToolScheduler.js'; import { @@ -54,7 +53,9 @@ class MockModifiableTool }; } - async shouldConfirmExecute(): Promise { + override async shouldConfirmExecute(): Promise< + ToolCallConfirmationDetails | false + > { if (this.shouldConfirm) { return { type: 'edit', @@ -121,8 +122,6 @@ describe('CoreToolScheduler', () => { abortController.abort(); await scheduler.schedule([request], abortController.signal); - const _waitingCall = onToolCallsUpdate.mock - .calls[1][0][0] as ValidatingToolCall; const confirmationDetails = await mockTool.shouldConfirmExecute( {}, abortController.signal, @@ -394,7 +393,7 @@ describe('CoreToolScheduler edit cancellation', () => { ); } - async shouldConfirmExecute( + override async shouldConfirmExecute( _params: Record, _abortSignal: AbortSignal, ): Promise { diff --git a/packages/core/src/mcp/oauth-provider.ts b/packages/core/src/mcp/oauth-provider.ts index c86478c6..eaec5c2e 100644 --- a/packages/core/src/mcp/oauth-provider.ts +++ b/packages/core/src/mcp/oauth-provider.ts @@ -91,7 +91,6 @@ export class MCPOAuthProvider { private static readonly REDIRECT_PORT = 7777; private static readonly REDIRECT_PATH = '/oauth/callback'; private static readonly HTTP_OK = 200; - private static readonly HTTP_REDIRECT = 
302; /** * Register a client dynamically with the OAuth server. diff --git a/packages/core/src/test-utils/tools.ts b/packages/core/src/test-utils/tools.ts index 7d917b6c..da642212 100644 --- a/packages/core/src/test-utils/tools.ts +++ b/packages/core/src/test-utils/tools.ts @@ -45,7 +45,7 @@ export class MockTool extends BaseTool<{ [key: string]: unknown }, ToolResult> { ); } - async shouldConfirmExecute( + override async shouldConfirmExecute( _params: { [key: string]: unknown }, _abortSignal: AbortSignal, ): Promise { diff --git a/packages/core/src/tools/edit.ts b/packages/core/src/tools/edit.ts index 733c1bf8..8d90dfe4 100644 --- a/packages/core/src/tools/edit.ts +++ b/packages/core/src/tools/edit.ts @@ -471,7 +471,7 @@ Expectation for required parameters: * @param params Parameters to validate * @returns Error message string or null if valid */ - validateToolParams(params: EditToolParams): string | null { + override validateToolParams(params: EditToolParams): string | null { const errors = SchemaValidator.validate( this.schema.parametersJsonSchema, params, diff --git a/packages/core/src/tools/glob.ts b/packages/core/src/tools/glob.ts index 77a7241f..65454232 100644 --- a/packages/core/src/tools/glob.ts +++ b/packages/core/src/tools/glob.ts @@ -281,7 +281,7 @@ export class GlobTool extends BaseDeclarativeTool { /** * Validates the parameters for the tool. 
*/ - validateToolParams(params: GlobToolParams): string | null { + override validateToolParams(params: GlobToolParams): string | null { const errors = SchemaValidator.validate( this.schema.parametersJsonSchema, params, diff --git a/packages/core/src/tools/grep.ts b/packages/core/src/tools/grep.ts index 9d3d638a..4cac389f 100644 --- a/packages/core/src/tools/grep.ts +++ b/packages/core/src/tools/grep.ts @@ -614,7 +614,7 @@ export class GrepTool extends BaseDeclarativeTool { * @param params Parameters to validate * @returns An error message string if invalid, null otherwise */ - validateToolParams(params: GrepToolParams): string | null { + override validateToolParams(params: GrepToolParams): string | null { const errors = SchemaValidator.validate( this.schema.parametersJsonSchema, params, diff --git a/packages/core/src/tools/ls.ts b/packages/core/src/tools/ls.ts index 2618136a..918c0b2b 100644 --- a/packages/core/src/tools/ls.ts +++ b/packages/core/src/tools/ls.ts @@ -314,7 +314,7 @@ export class LSTool extends BaseDeclarativeTool { * @param params Parameters to validate * @returns An error message string if invalid, null otherwise */ - validateToolParams(params: LSToolParams): string | null { + override validateToolParams(params: LSToolParams): string | null { const errors = SchemaValidator.validate( this.schema.parametersJsonSchema, params, diff --git a/packages/core/src/tools/mcp-tool.ts b/packages/core/src/tools/mcp-tool.ts index 01a8d75c..fbb104fd 100644 --- a/packages/core/src/tools/mcp-tool.ts +++ b/packages/core/src/tools/mcp-tool.ts @@ -70,7 +70,7 @@ class DiscoveredMCPToolInvocation extends BaseToolInvocation< super(params); } - async shouldConfirmExecute( + override async shouldConfirmExecute( _abortSignal: AbortSignal, ): Promise { const serverAllowListKey = this.serverName; @@ -135,7 +135,7 @@ export class DiscoveredMCPTool extends BaseDeclarativeTool< readonly serverName: string, readonly serverToolName: string, description: string, - readonly 
parameterSchema: unknown, + override readonly parameterSchema: unknown, readonly timeout?: number, readonly trust?: boolean, nameOverride?: string, diff --git a/packages/core/src/tools/memoryTool.ts b/packages/core/src/tools/memoryTool.ts index a9d765c4..73282d60 100644 --- a/packages/core/src/tools/memoryTool.ts +++ b/packages/core/src/tools/memoryTool.ts @@ -180,7 +180,7 @@ class MemoryToolInvocation extends BaseToolInvocation< return `in ${tildeifyPath(memoryFilePath)}`; } - async shouldConfirmExecute( + override async shouldConfirmExecute( _abortSignal: AbortSignal, ): Promise { const memoryFilePath = getGlobalMemoryFilePath(); @@ -294,7 +294,7 @@ export class MemoryTool ); } - validateToolParams(params: SaveMemoryParams): string | null { + override validateToolParams(params: SaveMemoryParams): string | null { const errors = SchemaValidator.validate( this.schema.parametersJsonSchema, params, diff --git a/packages/core/src/tools/read-file.ts b/packages/core/src/tools/read-file.ts index d10c73d1..f02db506 100644 --- a/packages/core/src/tools/read-file.ts +++ b/packages/core/src/tools/read-file.ts @@ -198,7 +198,9 @@ export class ReadFileTool extends BaseDeclarativeTool< ); } - protected validateToolParams(params: ReadFileToolParams): string | null { + protected override validateToolParams( + params: ReadFileToolParams, + ): string | null { const errors = SchemaValidator.validate( this.schema.parametersJsonSchema, params, diff --git a/packages/core/src/tools/read-many-files.ts b/packages/core/src/tools/read-many-files.ts index e4e94799..aaf524c4 100644 --- a/packages/core/src/tools/read-many-files.ts +++ b/packages/core/src/tools/read-many-files.ts @@ -626,7 +626,9 @@ Use this tool when the user's query implies needing the content of several files ); } - protected validateToolParams(params: ReadManyFilesParams): string | null { + protected override validateToolParams( + params: ReadManyFilesParams, + ): string | null { const errors = SchemaValidator.validate( 
this.schema.parametersJsonSchema, params, diff --git a/packages/core/src/tools/shell.ts b/packages/core/src/tools/shell.ts index 0cc727fb..5b01a82f 100644 --- a/packages/core/src/tools/shell.ts +++ b/packages/core/src/tools/shell.ts @@ -67,7 +67,7 @@ class ShellToolInvocation extends BaseToolInvocation< return description; } - async shouldConfirmExecute( + override async shouldConfirmExecute( _abortSignal: AbortSignal, ): Promise { const command = stripShellWrapper(this.params.command); @@ -343,7 +343,9 @@ export class ShellTool extends BaseDeclarativeTool< ); } - protected validateToolParams(params: ShellToolParams): string | null { + protected override validateToolParams( + params: ShellToolParams, + ): string | null { const commandCheck = isCommandAllowed(params.command, this.config); if (!commandCheck.allowed) { if (!commandCheck.reason) { diff --git a/packages/core/src/tools/tool-registry.ts b/packages/core/src/tools/tool-registry.ts index b3625285..416ee99e 100644 --- a/packages/core/src/tools/tool-registry.ts +++ b/packages/core/src/tools/tool-registry.ts @@ -19,8 +19,8 @@ export class DiscoveredTool extends BaseTool { constructor( private readonly config: Config, name: string, - readonly description: string, - readonly parameterSchema: Record, + override readonly description: string, + override readonly parameterSchema: Record, ) { const discoveryCmd = config.getToolDiscoveryCommand()!; const callCommand = config.getToolCallCommand()!; diff --git a/packages/core/src/tools/tools.ts b/packages/core/src/tools/tools.ts index 3e7d0647..00f2a842 100644 --- a/packages/core/src/tools/tools.ts +++ b/packages/core/src/tools/tools.ts @@ -284,13 +284,13 @@ export abstract class BaseTool< * @param parameterSchema JSON Schema defining the parameters */ constructor( - readonly name: string, - readonly displayName: string, - readonly description: string, - readonly kind: Kind, - readonly parameterSchema: unknown, - readonly isOutputMarkdown: boolean = true, - readonly 
canUpdateOutput: boolean = false, + override readonly name: string, + override readonly displayName: string, + override readonly description: string, + override readonly kind: Kind, + override readonly parameterSchema: unknown, + override readonly isOutputMarkdown: boolean = true, + override readonly canUpdateOutput: boolean = false, ) { super( name, @@ -320,7 +320,7 @@ export abstract class BaseTool< * @returns An error message string if invalid, null otherwise */ // eslint-disable-next-line @typescript-eslint/no-unused-vars - validateToolParams(params: TParams): string | null { + override validateToolParams(params: TParams): string | null { // Implementation would typically use a JSON Schema validator // This is a placeholder that should be implemented by derived classes return null; diff --git a/packages/core/src/tools/web-fetch.ts b/packages/core/src/tools/web-fetch.ts index 909fc548..7c80650b 100644 --- a/packages/core/src/tools/web-fetch.ts +++ b/packages/core/src/tools/web-fetch.ts @@ -143,7 +143,9 @@ ${textContent} return `Processing URLs and instructions from prompt: "${displayPrompt}"`; } - async shouldConfirmExecute(): Promise { + override async shouldConfirmExecute(): Promise< + ToolCallConfirmationDetails | false + > { if (this.config.getApprovalMode() === ApprovalMode.AUTO_EDIT) { return false; } @@ -337,7 +339,9 @@ export class WebFetchTool extends BaseDeclarativeTool< } } - protected validateToolParams(params: WebFetchToolParams): string | null { + protected override validateToolParams( + params: WebFetchToolParams, + ): string | null { const errors = SchemaValidator.validate( this.schema.parametersJsonSchema, params, diff --git a/packages/core/src/tools/web-search.ts b/packages/core/src/tools/web-search.ts index 54679452..a2306894 100644 --- a/packages/core/src/tools/web-search.ts +++ b/packages/core/src/tools/web-search.ts @@ -103,7 +103,7 @@ export class WebSearchTool extends BaseTool< return null; } - getDescription(params: WebSearchToolParams): 
string { + override getDescription(params: WebSearchToolParams): string { return `Searching the web for: "${params.query}"`; } diff --git a/packages/core/src/tools/write-file.ts b/packages/core/src/tools/write-file.ts index fa1e1301..01c92865 100644 --- a/packages/core/src/tools/write-file.ts +++ b/packages/core/src/tools/write-file.ts @@ -102,11 +102,11 @@ export class WriteFileTool ); } - toolLocations(params: WriteFileToolParams): ToolLocation[] { + override toolLocations(params: WriteFileToolParams): ToolLocation[] { return [{ path: params.file_path }]; } - validateToolParams(params: WriteFileToolParams): string | null { + override validateToolParams(params: WriteFileToolParams): string | null { const errors = SchemaValidator.validate( this.schema.parametersJsonSchema, params, @@ -144,7 +144,7 @@ export class WriteFileTool return null; } - getDescription(params: WriteFileToolParams): string { + override getDescription(params: WriteFileToolParams): string { if (!params.file_path) { return `Model did not provide valid parameters for write file tool, missing or empty "file_path"`; } @@ -158,7 +158,7 @@ export class WriteFileTool /** * Handles the confirmation prompt for the WriteFile tool. 
*/ - async shouldConfirmExecute( + override async shouldConfirmExecute( params: WriteFileToolParams, abortSignal: AbortSignal, ): Promise { diff --git a/packages/core/src/utils/errorParsing.test.ts b/packages/core/src/utils/errorParsing.test.ts index f2a4709a..72a84d82 100644 --- a/packages/core/src/utils/errorParsing.test.ts +++ b/packages/core/src/utils/errorParsing.test.ts @@ -13,8 +13,6 @@ import { AuthType } from '../core/contentGenerator.js'; import { StructuredError } from '../core/turn.js'; describe('parseAndFormatApiError', () => { - const _enterpriseMessage = - 'upgrade to a Gemini Code Assist Standard or Enterprise plan with higher limits'; const vertexMessage = 'request a quota increase through Vertex'; const geminiMessage = 'request a quota increase through AI Studio'; diff --git a/packages/core/src/utils/filesearch/fileSearch.ts b/packages/core/src/utils/filesearch/fileSearch.ts index 480d5815..dff8d0ec 100644 --- a/packages/core/src/utils/filesearch/fileSearch.ts +++ b/packages/core/src/utils/filesearch/fileSearch.ts @@ -289,7 +289,7 @@ export class FileSearch { * Builds the in-memory cache for fast pattern matching. */ private buildResultCache(): void { - this.resultCache = new ResultCache(this.allFiles, this.absoluteDir); + this.resultCache = new ResultCache(this.allFiles); // The v1 algorithm is much faster since it only looks at the first // occurence of the pattern. We use it for search spaces that have >20k // files, because the v2 algorithm is just too slow in those cases. 
diff --git a/packages/core/src/utils/filesearch/result-cache.test.ts b/packages/core/src/utils/filesearch/result-cache.test.ts index 0b1b4e17..fcfa3f00 100644 --- a/packages/core/src/utils/filesearch/result-cache.test.ts +++ b/packages/core/src/utils/filesearch/result-cache.test.ts @@ -4,7 +4,6 @@ * SPDX-License-Identifier: Apache-2.0 */ -import path from 'node:path'; import { test, expect } from 'vitest'; import { ResultCache } from './result-cache.js'; @@ -17,7 +16,7 @@ test('ResultCache basic usage', async () => { 'subdir/other.js', 'subdir/nested/file.md', ]; - const cache = new ResultCache(files, path.resolve('.')); + const cache = new ResultCache(files); const { files: resultFiles, isExactMatch } = await cache.get('*.js'); expect(resultFiles).toEqual(files); expect(isExactMatch).toBe(false); @@ -25,7 +24,7 @@ test('ResultCache basic usage', async () => { test('ResultCache cache hit/miss', async () => { const files = ['foo.txt', 'bar.js', 'baz.md']; - const cache = new ResultCache(files, path.resolve('.')); + const cache = new ResultCache(files); // First call: miss const { files: result1Files, isExactMatch: isExactMatch1 } = await cache.get('*.js'); @@ -44,7 +43,7 @@ test('ResultCache cache hit/miss', async () => { test('ResultCache best base query', async () => { const files = ['foo.txt', 'foobar.js', 'baz.md']; - const cache = new ResultCache(files, path.resolve('.')); + const cache = new ResultCache(files); // Cache a broader query cache.set('foo', ['foo.txt', 'foobar.js']); diff --git a/packages/core/src/utils/filesearch/result-cache.ts b/packages/core/src/utils/filesearch/result-cache.ts index 77b99aec..cf0c2b4b 100644 --- a/packages/core/src/utils/filesearch/result-cache.ts +++ b/packages/core/src/utils/filesearch/result-cache.ts @@ -13,10 +13,7 @@ export class ResultCache { private hits = 0; private misses = 0; - constructor( - private readonly allFiles: string[], - private readonly absoluteDir: string, - ) { + constructor(private readonly allFiles: 
string[]) { this.cache = new Map(); } diff --git a/packages/core/src/utils/memoryImportProcessor.test.ts b/packages/core/src/utils/memoryImportProcessor.test.ts index 94fc1193..300d44fb 100644 --- a/packages/core/src/utils/memoryImportProcessor.test.ts +++ b/packages/core/src/utils/memoryImportProcessor.test.ts @@ -11,7 +11,7 @@ import { marked } from 'marked'; import { processImports, validateImportPath } from './memoryImportProcessor.js'; // Helper function to create platform-agnostic test paths -const testPath = (...segments: string[]) => { +function testPath(...segments: string[]): string { // Start with the first segment as is (might be an absolute path on Windows) let result = segments[0]; @@ -27,9 +27,8 @@ const testPath = (...segments: string[]) => { } return path.normalize(result); -}; +} -// Mock fs/promises vi.mock('fs/promises'); const mockedFs = vi.mocked(fs); @@ -509,21 +508,21 @@ describe('memoryImportProcessor', () => { expect(result.importTree.imports).toHaveLength(2); // First import: nested.md - // Prefix with underscore to indicate they're intentionally unused - const _expectedNestedPath = testPath(projectRoot, 'src', 'nested.md'); - const _expectedInnerPath = testPath(projectRoot, 'src', 'inner.md'); - const _expectedSimplePath = testPath(projectRoot, 'src', 'simple.md'); - // Check that the paths match using includes to handle potential absolute/relative differences - expect(result.importTree.imports![0].path).toContain('nested.md'); + const expectedNestedPath = testPath(projectRoot, 'src', 'nested.md'); + + expect(result.importTree.imports![0].path).toContain(expectedNestedPath); expect(result.importTree.imports![0].imports).toHaveLength(1); + + const expectedInnerPath = testPath(projectRoot, 'src', 'inner.md'); expect(result.importTree.imports![0].imports![0].path).toContain( - 'inner.md', + expectedInnerPath, ); expect(result.importTree.imports![0].imports![0].imports).toBeUndefined(); // Second import: simple.md - 
expect(result.importTree.imports![1].path).toContain('simple.md'); + const expectedSimplePath = testPath(projectRoot, 'src', 'simple.md'); + expect(result.importTree.imports![1].path).toContain(expectedSimplePath); expect(result.importTree.imports![1].imports).toBeUndefined(); }); @@ -724,21 +723,20 @@ describe('memoryImportProcessor', () => { expect(result.importTree.imports).toHaveLength(2); // First import: nested.md - // Prefix with underscore to indicate they're intentionally unused - const _expectedNestedPath = testPath(projectRoot, 'src', 'nested.md'); - const _expectedInnerPath = testPath(projectRoot, 'src', 'inner.md'); - const _expectedSimplePath = testPath(projectRoot, 'src', 'simple.md'); + const expectedNestedPath = testPath(projectRoot, 'src', 'nested.md'); + const expectedInnerPath = testPath(projectRoot, 'src', 'inner.md'); + const expectedSimplePath = testPath(projectRoot, 'src', 'simple.md'); // Check that the paths match using includes to handle potential absolute/relative differences - expect(result.importTree.imports![0].path).toContain('nested.md'); + expect(result.importTree.imports![0].path).toContain(expectedNestedPath); expect(result.importTree.imports![0].imports).toHaveLength(1); expect(result.importTree.imports![0].imports![0].path).toContain( - 'inner.md', + expectedInnerPath, ); expect(result.importTree.imports![0].imports![0].imports).toBeUndefined(); // Second import: simple.md - expect(result.importTree.imports![1].path).toContain('simple.md'); + expect(result.importTree.imports![1].path).toContain(expectedSimplePath); expect(result.importTree.imports![1].imports).toBeUndefined(); }); @@ -899,7 +897,7 @@ describe('memoryImportProcessor', () => { // Test relative paths - resolve them against basePath const relativePath = './file.md'; - const _resolvedRelativePath = path.resolve(basePath, relativePath); + path.resolve(basePath, relativePath); expect(validateImportPath(relativePath, basePath, [basePath])).toBe(true); // Test parent 
directory access (should be allowed if parent is in allowed paths) @@ -907,12 +905,12 @@ describe('memoryImportProcessor', () => { if (parentPath !== basePath) { // Only test if parent is different const parentRelativePath = '../file.md'; - const _resolvedParentPath = path.resolve(basePath, parentRelativePath); + path.resolve(basePath, parentRelativePath); expect( validateImportPath(parentRelativePath, basePath, [parentPath]), ).toBe(true); - const _resolvedSubPath = path.resolve(basePath, 'sub'); + path.resolve(basePath, 'sub'); const resultSub = validateImportPath('sub', basePath, [basePath]); expect(resultSub).toBe(true); } diff --git a/packages/core/src/utils/memoryImportProcessor.ts b/packages/core/src/utils/memoryImportProcessor.ts index 68de7963..751e0ace 100644 --- a/packages/core/src/utils/memoryImportProcessor.ts +++ b/packages/core/src/utils/memoryImportProcessor.ts @@ -261,7 +261,7 @@ export async function processImports( // Process imports in reverse order to handle indices correctly for (let i = imports.length - 1; i >= 0; i--) { - const { start, _end, path: importPath } = imports[i]; + const { start, path: importPath } = imports[i]; // Skip if inside a code region if ( diff --git a/tsconfig.json b/tsconfig.json index e761d3e1..5f303ddc 100644 --- a/tsconfig.json +++ b/tsconfig.json @@ -3,7 +3,16 @@ "strict": true, "esModuleInterop": true, "skipLibCheck": true, + "noImplicitAny": true, + "noImplicitOverride": true, + "noImplicitReturns": true, + "noImplicitThis": true, "forceConsistentCasingInFileNames": true, + "noUnusedLocals": true, + "strictBindCallApply": true, + "strictFunctionTypes": true, + "strictNullChecks": true, + "strictPropertyInitialization": true, "resolveJsonModule": true, "sourceMap": true, "composite": true, From 61047173a8f5bc279f480c5ab150d74337c0265a Mon Sep 17 00:00:00 2001 From: Sandy Tao Date: Wed, 13 Aug 2025 13:33:04 -0700 Subject: [PATCH 35/45] fix(core): Discard thought signature when switching from Gemini API Key to OAuth 
(#6090) Co-authored-by: Jacob Richman --- packages/core/src/config/config.test.ts | 88 +++++++++++++++++++++++++ packages/core/src/config/config.ts | 10 ++- packages/core/src/core/client.test.ts | 69 +++++++++++++++++++ packages/core/src/core/client.ts | 28 +++++++- 4 files changed, 192 insertions(+), 3 deletions(-) diff --git a/packages/core/src/config/config.test.ts b/packages/core/src/config/config.test.ts index 6c57d058..f1d8b965 100644 --- a/packages/core/src/config/config.test.ts +++ b/packages/core/src/config/config.test.ts @@ -15,6 +15,7 @@ import { } from '../telemetry/index.js'; import { AuthType, + ContentGeneratorConfig, createContentGeneratorConfig, } from '../core/contentGenerator.js'; import { GeminiClient } from '../core/client.js'; @@ -249,6 +250,7 @@ describe('Server Config (config.ts)', () => { // Verify that history was restored to the new client expect(mockNewClient.setHistory).toHaveBeenCalledWith( mockExistingHistory, + { stripThoughts: false }, ); }); @@ -282,6 +284,92 @@ describe('Server Config (config.ts)', () => { // Verify that setHistory was not called since there was no existing history expect(mockNewClient.setHistory).not.toHaveBeenCalled(); }); + + it('should strip thoughts when switching from GenAI to Vertex', async () => { + const config = new Config(baseParams); + const mockContentConfig = { + model: 'gemini-pro', + apiKey: 'test-key', + authType: AuthType.USE_GEMINI, + }; + ( + config as unknown as { contentGeneratorConfig: ContentGeneratorConfig } + ).contentGeneratorConfig = mockContentConfig; + + (createContentGeneratorConfig as Mock).mockReturnValue({ + ...mockContentConfig, + authType: AuthType.LOGIN_WITH_GOOGLE, + }); + + const mockExistingHistory = [ + { role: 'user', parts: [{ text: 'Hello' }] }, + ]; + const mockExistingClient = { + isInitialized: vi.fn().mockReturnValue(true), + getHistory: vi.fn().mockReturnValue(mockExistingHistory), + }; + const mockNewClient = { + isInitialized: vi.fn().mockReturnValue(true), + 
getHistory: vi.fn().mockReturnValue([]), + setHistory: vi.fn(), + initialize: vi.fn().mockResolvedValue(undefined), + }; + + ( + config as unknown as { geminiClient: typeof mockExistingClient } + ).geminiClient = mockExistingClient; + (GeminiClient as Mock).mockImplementation(() => mockNewClient); + + await config.refreshAuth(AuthType.LOGIN_WITH_GOOGLE); + + expect(mockNewClient.setHistory).toHaveBeenCalledWith( + mockExistingHistory, + { stripThoughts: true }, + ); + }); + + it('should not strip thoughts when switching from Vertex to GenAI', async () => { + const config = new Config(baseParams); + const mockContentConfig = { + model: 'gemini-pro', + apiKey: 'test-key', + authType: AuthType.LOGIN_WITH_GOOGLE, + }; + ( + config as unknown as { contentGeneratorConfig: ContentGeneratorConfig } + ).contentGeneratorConfig = mockContentConfig; + + (createContentGeneratorConfig as Mock).mockReturnValue({ + ...mockContentConfig, + authType: AuthType.USE_GEMINI, + }); + + const mockExistingHistory = [ + { role: 'user', parts: [{ text: 'Hello' }] }, + ]; + const mockExistingClient = { + isInitialized: vi.fn().mockReturnValue(true), + getHistory: vi.fn().mockReturnValue(mockExistingHistory), + }; + const mockNewClient = { + isInitialized: vi.fn().mockReturnValue(true), + getHistory: vi.fn().mockReturnValue([]), + setHistory: vi.fn(), + initialize: vi.fn().mockResolvedValue(undefined), + }; + + ( + config as unknown as { geminiClient: typeof mockExistingClient } + ).geminiClient = mockExistingClient; + (GeminiClient as Mock).mockImplementation(() => mockNewClient); + + await config.refreshAuth(AuthType.USE_GEMINI); + + expect(mockNewClient.setHistory).toHaveBeenCalledWith( + mockExistingHistory, + { stripThoughts: false }, + ); + }); }); it('Config constructor should store userMemory correctly', () => { diff --git a/packages/core/src/config/config.ts b/packages/core/src/config/config.ts index 7c61f239..5c11667b 100644 --- a/packages/core/src/config/config.ts +++ 
b/packages/core/src/config/config.ts @@ -379,13 +379,21 @@ export class Config { const newGeminiClient = new GeminiClient(this); await newGeminiClient.initialize(newContentGeneratorConfig); + // Vertex and Genai have incompatible encryption and sending history with + // throughtSignature from Genai to Vertex will fail, we need to strip them + const fromGenaiToVertex = + this.contentGeneratorConfig?.authType === AuthType.USE_GEMINI && + authMethod === AuthType.LOGIN_WITH_GOOGLE; + // Only assign to instance properties after successful initialization this.contentGeneratorConfig = newContentGeneratorConfig; this.geminiClient = newGeminiClient; // Restore the conversation history to the new client if (existingHistory.length > 0) { - this.geminiClient.setHistory(existingHistory); + this.geminiClient.setHistory(existingHistory, { + stripThoughts: fromGenaiToVertex, + }); } // Reset the session flag since we're explicitly changing auth and using default model diff --git a/packages/core/src/core/client.test.ts b/packages/core/src/core/client.test.ts index 5e68cfb6..9f6dcbe9 100644 --- a/packages/core/src/core/client.test.ts +++ b/packages/core/src/core/client.test.ts @@ -1596,4 +1596,73 @@ ${JSON.stringify( ); }); }); + + describe('setHistory', () => { + it('should strip thought signatures when stripThoughts is true', () => { + const mockChat = { + setHistory: vi.fn(), + }; + client['chat'] = mockChat as unknown as GeminiChat; + + const historyWithThoughts: Content[] = [ + { + role: 'user', + parts: [{ text: 'hello' }], + }, + { + role: 'model', + parts: [ + { text: 'thinking...', thoughtSignature: 'thought-123' }, + { + functionCall: { name: 'test', args: {} }, + thoughtSignature: 'thought-456', + }, + ], + }, + ]; + + client.setHistory(historyWithThoughts, { stripThoughts: true }); + + const expectedHistory: Content[] = [ + { + role: 'user', + parts: [{ text: 'hello' }], + }, + { + role: 'model', + parts: [ + { text: 'thinking...' 
}, + { functionCall: { name: 'test', args: {} } }, + ], + }, + ]; + + expect(mockChat.setHistory).toHaveBeenCalledWith(expectedHistory); + }); + + it('should not strip thought signatures when stripThoughts is false', () => { + const mockChat = { + setHistory: vi.fn(), + }; + client['chat'] = mockChat as unknown as GeminiChat; + + const historyWithThoughts: Content[] = [ + { + role: 'user', + parts: [{ text: 'hello' }], + }, + { + role: 'model', + parts: [ + { text: 'thinking...', thoughtSignature: 'thought-123' }, + { text: 'ok', thoughtSignature: 'thought-456' }, + ], + }, + ]; + + client.setHistory(historyWithThoughts, { stripThoughts: false }); + + expect(mockChat.setHistory).toHaveBeenCalledWith(historyWithThoughts); + }); + }); }); diff --git a/packages/core/src/core/client.ts b/packages/core/src/core/client.ts index 96be4111..93de190d 100644 --- a/packages/core/src/core/client.ts +++ b/packages/core/src/core/client.ts @@ -164,8 +164,32 @@ export class GeminiClient { return this.getChat().getHistory(); } - setHistory(history: Content[]) { - this.getChat().setHistory(history); + setHistory( + history: Content[], + { stripThoughts = false }: { stripThoughts?: boolean } = {}, + ) { + const historyToSet = stripThoughts + ? 
history.map((content) => { + const newContent = { ...content }; + if (newContent.parts) { + newContent.parts = newContent.parts.map((part) => { + if ( + part && + typeof part === 'object' && + 'thoughtSignature' in part + ) { + const newPart = { ...part }; + delete (newPart as { thoughtSignature?: string }) + .thoughtSignature; + return newPart; + } + return part; + }); + } + return newContent; + }) + : history; + this.getChat().setHistory(historyToSet); this.forceFullIdeContext = true; } From 2dbd5ecdc80b55cc13c81a0f836ad65ef874e8f8 Mon Sep 17 00:00:00 2001 From: Richie Foreman Date: Wed, 13 Aug 2025 16:37:08 -0400 Subject: [PATCH 36/45] chore(cli/slashcommands): Add status enum to SlashCommandEvent telemetry (#6166) --- .../ui/hooks/slashCommandProcessor.test.ts | 147 ++++++++++++++---- .../cli/src/ui/hooks/slashCommandProcessor.ts | 136 ++++++++-------- packages/core/index.ts | 1 + .../clearcut-logger/clearcut-logger.ts | 7 + .../clearcut-logger/event-metadata-key.ts | 3 + packages/core/src/telemetry/index.ts | 4 +- packages/core/src/telemetry/types.ts | 87 +++++++---- 7 files changed, 256 insertions(+), 129 deletions(-) diff --git a/packages/cli/src/ui/hooks/slashCommandProcessor.test.ts b/packages/cli/src/ui/hooks/slashCommandProcessor.test.ts index 66c1b883..c35a4aef 100644 --- a/packages/cli/src/ui/hooks/slashCommandProcessor.test.ts +++ b/packages/cli/src/ui/hooks/slashCommandProcessor.test.ts @@ -4,18 +4,17 @@ * SPDX-License-Identifier: Apache-2.0 */ -const { logSlashCommand, SlashCommandEvent } = vi.hoisted(() => ({ +const { logSlashCommand } = vi.hoisted(() => ({ logSlashCommand: vi.fn(), - SlashCommandEvent: vi.fn((command, subCommand) => ({ command, subCommand })), })); vi.mock('@google/gemini-cli-core', async (importOriginal) => { const original = await importOriginal(); + return { ...original, logSlashCommand, - SlashCommandEvent, getIdeInstaller: vi.fn().mockReturnValue(null), }; }); @@ -25,10 +24,10 @@ const { mockProcessExit } = vi.hoisted(() => 
({ })); vi.mock('node:process', () => { - const mockProcess = { + const mockProcess: Partial = { exit: mockProcessExit, - platform: 'test-platform', - }; + platform: 'sunos', + } as unknown as NodeJS.Process; return { ...mockProcess, default: mockProcess, @@ -77,22 +76,28 @@ import { ConfirmShellCommandsActionReturn, SlashCommand, } from '../commands/types.js'; -import { Config, ToolConfirmationOutcome } from '@google/gemini-cli-core'; +import { ToolConfirmationOutcome } from '@google/gemini-cli-core'; import { LoadedSettings } from '../../config/settings.js'; import { MessageType } from '../types.js'; import { BuiltinCommandLoader } from '../../services/BuiltinCommandLoader.js'; import { FileCommandLoader } from '../../services/FileCommandLoader.js'; import { McpPromptLoader } from '../../services/McpPromptLoader.js'; +import { + SlashCommandStatus, + makeFakeConfig, +} from '@google/gemini-cli-core/index.js'; -const createTestCommand = ( +function createTestCommand( overrides: Partial, kind: CommandKind = CommandKind.BUILT_IN, -): SlashCommand => ({ - name: 'test', - description: 'a test command', - kind, - ...overrides, -}); +): SlashCommand { + return { + name: 'test', + description: 'a test command', + kind, + ...overrides, + }; +} describe('useSlashCommandProcessor', () => { const mockAddItem = vi.fn(); @@ -102,15 +107,7 @@ describe('useSlashCommandProcessor', () => { const mockOpenAuthDialog = vi.fn(); const mockSetQuittingMessages = vi.fn(); - const mockConfig = { - getProjectRoot: vi.fn(() => '/mock/cwd'), - getSessionId: vi.fn(() => 'test-session'), - getGeminiClient: vi.fn(() => ({ - setHistory: vi.fn().mockResolvedValue(undefined), - })), - getExtensions: vi.fn(() => []), - getIdeMode: vi.fn(() => false), - } as unknown as Config; + const mockConfig = makeFakeConfig({}); const mockSettings = {} as LoadedSettings; @@ -314,6 +311,39 @@ describe('useSlashCommandProcessor', () => { ); }); + it('sets isProcessing to false if the the input is not a command', 
async () => { + const setMockIsProcessing = vi.fn(); + const result = setupProcessorHook([], [], [], setMockIsProcessing); + + await act(async () => { + await result.current.handleSlashCommand('imnotacommand'); + }); + + expect(setMockIsProcessing).not.toHaveBeenCalled(); + }); + + it('sets isProcessing to false if the command has an error', async () => { + const setMockIsProcessing = vi.fn(); + const failCommand = createTestCommand({ + name: 'fail', + action: vi.fn().mockRejectedValue(new Error('oh no!')), + }); + + const result = setupProcessorHook( + [failCommand], + [], + [], + setMockIsProcessing, + ); + + await act(async () => { + await result.current.handleSlashCommand('/fail'); + }); + + expect(setMockIsProcessing).toHaveBeenNthCalledWith(1, true); + expect(setMockIsProcessing).toHaveBeenNthCalledWith(2, false); + }); + it('should set isProcessing to true during execution and false afterwards', async () => { const mockSetIsProcessing = vi.fn(); const command = createTestCommand({ @@ -329,14 +359,14 @@ describe('useSlashCommandProcessor', () => { }); // It should be true immediately after starting - expect(mockSetIsProcessing).toHaveBeenCalledWith(true); + expect(mockSetIsProcessing).toHaveBeenNthCalledWith(1, true); // It should not have been called with false yet expect(mockSetIsProcessing).not.toHaveBeenCalledWith(false); await executionPromise; // After the promise resolves, it should be called with false - expect(mockSetIsProcessing).toHaveBeenCalledWith(false); + expect(mockSetIsProcessing).toHaveBeenNthCalledWith(2, false); expect(mockSetIsProcessing).toHaveBeenCalledTimes(2); }); }); @@ -884,7 +914,9 @@ describe('useSlashCommandProcessor', () => { const loggingTestCommands: SlashCommand[] = [ createTestCommand({ name: 'logtest', - action: mockCommandAction, + action: vi + .fn() + .mockResolvedValue({ type: 'message', content: 'hello world' }), }), createTestCommand({ name: 'logwithsub', @@ -895,6 +927,10 @@ describe('useSlashCommandProcessor', () => 
{ }), ], }), + createTestCommand({ + name: 'fail', + action: vi.fn().mockRejectedValue(new Error('oh no!')), + }), createTestCommand({ name: 'logalias', altNames: ['la'], @@ -905,7 +941,6 @@ describe('useSlashCommandProcessor', () => { beforeEach(() => { mockCommandAction.mockClear(); vi.mocked(logSlashCommand).mockClear(); - vi.mocked(SlashCommandEvent).mockClear(); }); it('should log a simple slash command', async () => { @@ -917,8 +952,45 @@ describe('useSlashCommandProcessor', () => { await result.current.handleSlashCommand('/logtest'); }); - expect(logSlashCommand).toHaveBeenCalledTimes(1); - expect(SlashCommandEvent).toHaveBeenCalledWith('logtest', undefined); + expect(logSlashCommand).toHaveBeenCalledWith( + mockConfig, + expect.objectContaining({ + command: 'logtest', + subcommand: undefined, + status: SlashCommandStatus.SUCCESS, + }), + ); + }); + + it('logs nothing for a bogus command', async () => { + const result = setupProcessorHook(loggingTestCommands); + await waitFor(() => + expect(result.current.slashCommands.length).toBeGreaterThan(0), + ); + await act(async () => { + await result.current.handleSlashCommand('/bogusbogusbogus'); + }); + + expect(logSlashCommand).not.toHaveBeenCalled(); + }); + + it('logs a failure event for a failed command', async () => { + const result = setupProcessorHook(loggingTestCommands); + await waitFor(() => + expect(result.current.slashCommands.length).toBeGreaterThan(0), + ); + await act(async () => { + await result.current.handleSlashCommand('/fail'); + }); + + expect(logSlashCommand).toHaveBeenCalledWith( + mockConfig, + expect.objectContaining({ + command: 'fail', + status: 'error', + subcommand: undefined, + }), + ); }); it('should log a slash command with a subcommand', async () => { @@ -930,8 +1002,13 @@ describe('useSlashCommandProcessor', () => { await result.current.handleSlashCommand('/logwithsub sub'); }); - expect(logSlashCommand).toHaveBeenCalledTimes(1); - 
expect(SlashCommandEvent).toHaveBeenCalledWith('logwithsub', 'sub'); + expect(logSlashCommand).toHaveBeenCalledWith( + mockConfig, + expect.objectContaining({ + command: 'logwithsub', + subcommand: 'sub', + }), + ); }); it('should log the command path when an alias is used', async () => { @@ -942,8 +1019,12 @@ describe('useSlashCommandProcessor', () => { await act(async () => { await result.current.handleSlashCommand('/la'); }); - expect(logSlashCommand).toHaveBeenCalledTimes(1); - expect(SlashCommandEvent).toHaveBeenCalledWith('logalias', undefined); + expect(logSlashCommand).toHaveBeenCalledWith( + mockConfig, + expect.objectContaining({ + command: 'logalias', + }), + ); }); it('should not log for unknown commands', async () => { diff --git a/packages/cli/src/ui/hooks/slashCommandProcessor.ts b/packages/cli/src/ui/hooks/slashCommandProcessor.ts index 32f55de2..b8799ec3 100644 --- a/packages/cli/src/ui/hooks/slashCommandProcessor.ts +++ b/packages/cli/src/ui/hooks/slashCommandProcessor.ts @@ -14,7 +14,8 @@ import { GitService, Logger, logSlashCommand, - SlashCommandEvent, + makeSlashCommandEvent, + SlashCommandStatus, ToolConfirmationOutcome, } from '@google/gemini-cli-core'; import { useSessionStats } from '../contexts/SessionContext.js'; @@ -235,77 +236,71 @@ export const useSlashCommandProcessor = ( oneTimeShellAllowlist?: Set, overwriteConfirmed?: boolean, ): Promise => { + if (typeof rawQuery !== 'string') { + return false; + } + + const trimmed = rawQuery.trim(); + if (!trimmed.startsWith('/') && !trimmed.startsWith('?')) { + return false; + } + setIsProcessing(true); - try { - if (typeof rawQuery !== 'string') { - return false; + + const userMessageTimestamp = Date.now(); + addItem({ type: MessageType.USER, text: trimmed }, userMessageTimestamp); + + const parts = trimmed.substring(1).trim().split(/\s+/); + const commandPath = parts.filter((p) => p); // The parts of the command, e.g., ['memory', 'add'] + + let currentCommands = commands; + let 
commandToExecute: SlashCommand | undefined; + let pathIndex = 0; + let hasError = false; + const canonicalPath: string[] = []; + + for (const part of commandPath) { + // TODO: For better performance and architectural clarity, this two-pass + // search could be replaced. A more optimal approach would be to + // pre-compute a single lookup map in `CommandService.ts` that resolves + // all name and alias conflicts during the initial loading phase. The + // processor would then perform a single, fast lookup on that map. + + // First pass: check for an exact match on the primary command name. + let foundCommand = currentCommands.find((cmd) => cmd.name === part); + + // Second pass: if no primary name matches, check for an alias. + if (!foundCommand) { + foundCommand = currentCommands.find((cmd) => + cmd.altNames?.includes(part), + ); } - const trimmed = rawQuery.trim(); - if (!trimmed.startsWith('/') && !trimmed.startsWith('?')) { - return false; - } - - const userMessageTimestamp = Date.now(); - addItem( - { type: MessageType.USER, text: trimmed }, - userMessageTimestamp, - ); - - const parts = trimmed.substring(1).trim().split(/\s+/); - const commandPath = parts.filter((p) => p); // The parts of the command, e.g., ['memory', 'add'] - - let currentCommands = commands; - let commandToExecute: SlashCommand | undefined; - let pathIndex = 0; - const canonicalPath: string[] = []; - - for (const part of commandPath) { - // TODO: For better performance and architectural clarity, this two-pass - // search could be replaced. A more optimal approach would be to - // pre-compute a single lookup map in `CommandService.ts` that resolves - // all name and alias conflicts during the initial loading phase. The - // processor would then perform a single, fast lookup on that map. - - // First pass: check for an exact match on the primary command name. 
- let foundCommand = currentCommands.find((cmd) => cmd.name === part); - - // Second pass: if no primary name matches, check for an alias. - if (!foundCommand) { - foundCommand = currentCommands.find((cmd) => - cmd.altNames?.includes(part), - ); - } - - if (foundCommand) { - commandToExecute = foundCommand; - canonicalPath.push(foundCommand.name); - pathIndex++; - if (foundCommand.subCommands) { - currentCommands = foundCommand.subCommands; - } else { - break; - } + if (foundCommand) { + commandToExecute = foundCommand; + canonicalPath.push(foundCommand.name); + pathIndex++; + if (foundCommand.subCommands) { + currentCommands = foundCommand.subCommands; } else { break; } + } else { + break; } + } + const resolvedCommandPath = canonicalPath; + const subcommand = + resolvedCommandPath.length > 1 + ? resolvedCommandPath.slice(1).join(' ') + : undefined; + + try { if (commandToExecute) { const args = parts.slice(pathIndex).join(' '); if (commandToExecute.action) { - if (config) { - const resolvedCommandPath = canonicalPath; - const event = new SlashCommandEvent( - resolvedCommandPath[0], - resolvedCommandPath.length > 1 - ? 
resolvedCommandPath.slice(1).join(' ') - : undefined, - ); - logSlashCommand(config, event); - } - const fullCommandContext: CommandContext = { ...commandContext, invocation: { @@ -327,7 +322,6 @@ export const useSlashCommandProcessor = ( ]), }; } - const result = await commandToExecute.action( fullCommandContext, args, @@ -500,8 +494,18 @@ export const useSlashCommandProcessor = ( content: `Unknown command: ${trimmed}`, timestamp: new Date(), }); + return { type: 'handled' }; - } catch (e) { + } catch (e: unknown) { + hasError = true; + if (config) { + const event = makeSlashCommandEvent({ + command: resolvedCommandPath[0], + subcommand, + status: SlashCommandStatus.ERROR, + }); + logSlashCommand(config, event); + } addItem( { type: MessageType.ERROR, @@ -511,6 +515,14 @@ export const useSlashCommandProcessor = ( ); return { type: 'handled' }; } finally { + if (config && resolvedCommandPath[0] && !hasError) { + const event = makeSlashCommandEvent({ + command: resolvedCommandPath[0], + subcommand, + status: SlashCommandStatus.SUCCESS, + }); + logSlashCommand(config, event); + } setIsProcessing(false); } }, diff --git a/packages/core/index.ts b/packages/core/index.ts index 65a214ae..7b75b365 100644 --- a/packages/core/index.ts +++ b/packages/core/index.ts @@ -15,3 +15,4 @@ export { IdeConnectionEvent, IdeConnectionType, } from './src/telemetry/types.js'; +export { makeFakeConfig } from './src/test-utils/config.js'; diff --git a/packages/core/src/telemetry/clearcut-logger/clearcut-logger.ts b/packages/core/src/telemetry/clearcut-logger/clearcut-logger.ts index b7be2af7..9450f06d 100644 --- a/packages/core/src/telemetry/clearcut-logger/clearcut-logger.ts +++ b/packages/core/src/telemetry/clearcut-logger/clearcut-logger.ts @@ -639,6 +639,13 @@ export class ClearcutLogger { }); } + if (event.status) { + data.push({ + gemini_cli_key: EventMetadataKey.GEMINI_CLI_SLASH_COMMAND_STATUS, + value: JSON.stringify(event.status), + }); + } + 
this.enqueueLogEvent(this.createLogEvent(slash_command_event_name, data)); this.flushIfNeeded(); } diff --git a/packages/core/src/telemetry/clearcut-logger/event-metadata-key.ts b/packages/core/src/telemetry/clearcut-logger/event-metadata-key.ts index 314e61a8..cb4172ed 100644 --- a/packages/core/src/telemetry/clearcut-logger/event-metadata-key.ts +++ b/packages/core/src/telemetry/clearcut-logger/event-metadata-key.ts @@ -174,6 +174,9 @@ export enum EventMetadataKey { // Logs the subcommand of the slash command. GEMINI_CLI_SLASH_COMMAND_SUBCOMMAND = 42, + // Logs the status of the slash command (e.g. 'success', 'error') + GEMINI_CLI_SLASH_COMMAND_STATUS = 51, + // ========================================================================== // Next Speaker Check Event Keys // =========================================================================== diff --git a/packages/core/src/telemetry/index.ts b/packages/core/src/telemetry/index.ts index 33781b87..7775802d 100644 --- a/packages/core/src/telemetry/index.ts +++ b/packages/core/src/telemetry/index.ts @@ -39,8 +39,10 @@ export { ApiResponseEvent, TelemetryEvent, FlashFallbackEvent, - SlashCommandEvent, KittySequenceOverflowEvent, + SlashCommandEvent, + makeSlashCommandEvent, + SlashCommandStatus, } from './types.js'; export { SpanStatusCode, ValueType } from '@opentelemetry/api'; export { SemanticAttributes } from '@opentelemetry/semantic-conventions'; diff --git a/packages/core/src/telemetry/types.ts b/packages/core/src/telemetry/types.ts index 2b10280d..3fd16caf 100644 --- a/packages/core/src/telemetry/types.ts +++ b/packages/core/src/telemetry/types.ts @@ -14,9 +14,17 @@ import { ToolCallDecision, } from './tool-call-decision.js'; -export class StartSessionEvent { +interface BaseTelemetryEvent { + 'event.name': string; + /** Current timestamp in ISO 8601 format */ + 'event.timestamp': string; +} + +type CommonFields = keyof BaseTelemetryEvent; + +export class StartSessionEvent implements BaseTelemetryEvent { 
'event.name': 'cli_config'; - 'event.timestamp': string; // ISO 8601 + 'event.timestamp': string; model: string; embedding_model: string; sandbox_enabled: boolean; @@ -60,9 +68,9 @@ export class StartSessionEvent { } } -export class EndSessionEvent { +export class EndSessionEvent implements BaseTelemetryEvent { 'event.name': 'end_session'; - 'event.timestamp': string; // ISO 8601 + 'event.timestamp': string; session_id?: string; constructor(config?: Config) { @@ -72,9 +80,9 @@ export class EndSessionEvent { } } -export class UserPromptEvent { +export class UserPromptEvent implements BaseTelemetryEvent { 'event.name': 'user_prompt'; - 'event.timestamp': string; // ISO 8601 + 'event.timestamp': string; prompt_length: number; prompt_id: string; auth_type?: string; @@ -95,9 +103,9 @@ export class UserPromptEvent { } } -export class ToolCallEvent { +export class ToolCallEvent implements BaseTelemetryEvent { 'event.name': 'tool_call'; - 'event.timestamp': string; // ISO 8601 + 'event.timestamp': string; function_name: string; function_args: Record; duration_ms: number; @@ -142,9 +150,9 @@ export class ToolCallEvent { } } -export class ApiRequestEvent { +export class ApiRequestEvent implements BaseTelemetryEvent { 'event.name': 'api_request'; - 'event.timestamp': string; // ISO 8601 + 'event.timestamp': string; model: string; prompt_id: string; request_text?: string; @@ -158,9 +166,9 @@ export class ApiRequestEvent { } } -export class ApiErrorEvent { +export class ApiErrorEvent implements BaseTelemetryEvent { 'event.name': 'api_error'; - 'event.timestamp': string; // ISO 8601 + 'event.timestamp': string; model: string; error: string; error_type?: string; @@ -190,9 +198,9 @@ export class ApiErrorEvent { } } -export class ApiResponseEvent { +export class ApiResponseEvent implements BaseTelemetryEvent { 'event.name': 'api_response'; - 'event.timestamp': string; // ISO 8601 + 'event.timestamp': string; model: string; status_code?: number | string; duration_ms: number; @@ 
-234,9 +242,9 @@ export class ApiResponseEvent { } } -export class FlashFallbackEvent { +export class FlashFallbackEvent implements BaseTelemetryEvent { 'event.name': 'flash_fallback'; - 'event.timestamp': string; // ISO 8601 + 'event.timestamp': string; auth_type: string; constructor(auth_type: string) { @@ -252,9 +260,9 @@ export enum LoopType { LLM_DETECTED_LOOP = 'llm_detected_loop', } -export class LoopDetectedEvent { +export class LoopDetectedEvent implements BaseTelemetryEvent { 'event.name': 'loop_detected'; - 'event.timestamp': string; // ISO 8601 + 'event.timestamp': string; loop_type: LoopType; prompt_id: string; @@ -266,9 +274,9 @@ export class LoopDetectedEvent { } } -export class NextSpeakerCheckEvent { +export class NextSpeakerCheckEvent implements BaseTelemetryEvent { 'event.name': 'next_speaker_check'; - 'event.timestamp': string; // ISO 8601 + 'event.timestamp': string; prompt_id: string; finish_reason: string; result: string; @@ -282,23 +290,36 @@ export class NextSpeakerCheckEvent { } } -export class SlashCommandEvent { +export interface SlashCommandEvent extends BaseTelemetryEvent { 'event.name': 'slash_command'; 'event.timestamp': string; // ISO 8106 command: string; subcommand?: string; - - constructor(command: string, subcommand?: string) { - this['event.name'] = 'slash_command'; - this['event.timestamp'] = new Date().toISOString(); - this.command = command; - this.subcommand = subcommand; - } + status?: SlashCommandStatus; } -export class MalformedJsonResponseEvent { +export function makeSlashCommandEvent({ + command, + subcommand, + status, +}: Omit): SlashCommandEvent { + return { + 'event.name': 'slash_command', + 'event.timestamp': new Date().toISOString(), + command, + subcommand, + status, + }; +} + +export enum SlashCommandStatus { + SUCCESS = 'success', + ERROR = 'error', +} + +export class MalformedJsonResponseEvent implements BaseTelemetryEvent { 'event.name': 'malformed_json_response'; - 'event.timestamp': string; // ISO 8601 + 
'event.timestamp': string; model: string; constructor(model: string) { @@ -315,7 +336,7 @@ export enum IdeConnectionType { export class IdeConnectionEvent { 'event.name': 'ide_connection'; - 'event.timestamp': string; // ISO 8601 + 'event.timestamp': string; connection_type: IdeConnectionType; constructor(connection_type: IdeConnectionType) { @@ -350,7 +371,7 @@ export type TelemetryEvent = | FlashFallbackEvent | LoopDetectedEvent | NextSpeakerCheckEvent - | SlashCommandEvent + | KittySequenceOverflowEvent | MalformedJsonResponseEvent | IdeConnectionEvent - | KittySequenceOverflowEvent; + | SlashCommandEvent; From f9a1e8eb6f35182b65b76aa16a3550c5d142dae8 Mon Sep 17 00:00:00 2001 From: Gaurav <39389231+gsquared94@users.noreply.github.com> Date: Wed, 13 Aug 2025 14:04:58 -0700 Subject: [PATCH 37/45] fix: use server-returned project for gca free tier auth (#6113) --- packages/core/src/code_assist/setup.test.ts | 134 +++++++++++++++++++- packages/core/src/code_assist/setup.ts | 63 ++++++--- 2 files changed, 175 insertions(+), 22 deletions(-) diff --git a/packages/core/src/code_assist/setup.test.ts b/packages/core/src/code_assist/setup.test.ts index c1260e3f..cba051dd 100644 --- a/packages/core/src/code_assist/setup.test.ts +++ b/packages/core/src/code_assist/setup.test.ts @@ -16,9 +16,17 @@ const mockPaidTier: GeminiUserTier = { id: UserTierId.STANDARD, name: 'paid', description: 'Paid tier', + isDefault: true, }; -describe('setupUser', () => { +const mockFreeTier: GeminiUserTier = { + id: UserTierId.FREE, + name: 'free', + description: 'Free tier', + isDefault: true, +}; + +describe('setupUser for existing user', () => { let mockLoad: ReturnType; let mockOnboardUser: ReturnType; @@ -42,7 +50,7 @@ describe('setupUser', () => { ); }); - it('should use GOOGLE_CLOUD_PROJECT when set', async () => { + it('should use GOOGLE_CLOUD_PROJECT when set and project from server is undefined', async () => { process.env.GOOGLE_CLOUD_PROJECT = 'test-project'; 
mockLoad.mockResolvedValue({ currentTier: mockPaidTier, @@ -57,8 +65,8 @@ describe('setupUser', () => { ); }); - it('should treat empty GOOGLE_CLOUD_PROJECT as undefined and use project from server', async () => { - process.env.GOOGLE_CLOUD_PROJECT = ''; + it('should ignore GOOGLE_CLOUD_PROJECT when project from server is set', async () => { + process.env.GOOGLE_CLOUD_PROJECT = 'test-project'; mockLoad.mockResolvedValue({ cloudaicompanionProject: 'server-project', currentTier: mockPaidTier, @@ -66,7 +74,7 @@ describe('setupUser', () => { const projectId = await setupUser({} as OAuth2Client); expect(CodeAssistServer).toHaveBeenCalledWith( {}, - undefined, + 'test-project', {}, '', undefined, @@ -89,3 +97,119 @@ describe('setupUser', () => { ); }); }); + +describe('setupUser for new user', () => { + let mockLoad: ReturnType; + let mockOnboardUser: ReturnType; + + beforeEach(() => { + vi.resetAllMocks(); + mockLoad = vi.fn(); + mockOnboardUser = vi.fn().mockResolvedValue({ + done: true, + response: { + cloudaicompanionProject: { + id: 'server-project', + }, + }, + }); + vi.mocked(CodeAssistServer).mockImplementation( + () => + ({ + loadCodeAssist: mockLoad, + onboardUser: mockOnboardUser, + }) as unknown as CodeAssistServer, + ); + }); + + it('should use GOOGLE_CLOUD_PROJECT when set and onboard a new paid user', async () => { + process.env.GOOGLE_CLOUD_PROJECT = 'test-project'; + mockLoad.mockResolvedValue({ + allowedTiers: [mockPaidTier], + }); + const userData = await setupUser({} as OAuth2Client); + expect(CodeAssistServer).toHaveBeenCalledWith( + {}, + 'test-project', + {}, + '', + undefined, + ); + expect(mockLoad).toHaveBeenCalled(); + expect(mockOnboardUser).toHaveBeenCalledWith({ + tierId: 'standard-tier', + cloudaicompanionProject: 'test-project', + metadata: { + ideType: 'IDE_UNSPECIFIED', + platform: 'PLATFORM_UNSPECIFIED', + pluginType: 'GEMINI', + duetProject: 'test-project', + }, + }); + expect(userData).toEqual({ + projectId: 'server-project', + 
userTier: 'standard-tier', + }); + }); + + it('should onboard a new free user when GOOGLE_CLOUD_PROJECT is not set', async () => { + delete process.env.GOOGLE_CLOUD_PROJECT; + mockLoad.mockResolvedValue({ + allowedTiers: [mockFreeTier], + }); + const userData = await setupUser({} as OAuth2Client); + expect(CodeAssistServer).toHaveBeenCalledWith( + {}, + undefined, + {}, + '', + undefined, + ); + expect(mockLoad).toHaveBeenCalled(); + expect(mockOnboardUser).toHaveBeenCalledWith({ + tierId: 'free-tier', + cloudaicompanionProject: undefined, + metadata: { + ideType: 'IDE_UNSPECIFIED', + platform: 'PLATFORM_UNSPECIFIED', + pluginType: 'GEMINI', + }, + }); + expect(userData).toEqual({ + projectId: 'server-project', + userTier: 'free-tier', + }); + }); + + it('should use GOOGLE_CLOUD_PROJECT when onboard response has no project ID', async () => { + process.env.GOOGLE_CLOUD_PROJECT = 'test-project'; + mockLoad.mockResolvedValue({ + allowedTiers: [mockPaidTier], + }); + mockOnboardUser.mockResolvedValue({ + done: true, + response: { + cloudaicompanionProject: undefined, + }, + }); + const userData = await setupUser({} as OAuth2Client); + expect(userData).toEqual({ + projectId: 'test-project', + userTier: 'standard-tier', + }); + }); + + it('should throw ProjectIdRequiredError when no project ID is available', async () => { + delete process.env.GOOGLE_CLOUD_PROJECT; + mockLoad.mockResolvedValue({ + allowedTiers: [mockPaidTier], + }); + mockOnboardUser.mockResolvedValue({ + done: true, + response: {}, + }); + await expect(setupUser({} as OAuth2Client)).rejects.toThrow( + ProjectIdRequiredError, + ); + }); +}); diff --git a/packages/core/src/code_assist/setup.ts b/packages/core/src/code_assist/setup.ts index 02c9406c..2e460c98 100644 --- a/packages/core/src/code_assist/setup.ts +++ b/packages/core/src/code_assist/setup.ts @@ -33,32 +33,58 @@ export interface UserData { * @returns the user's actual project id */ export async function setupUser(client: OAuth2Client): Promise { 
- let projectId = process.env.GOOGLE_CLOUD_PROJECT || undefined; + const projectId = process.env.GOOGLE_CLOUD_PROJECT || undefined; const caServer = new CodeAssistServer(client, projectId, {}, '', undefined); - - const clientMetadata: ClientMetadata = { + const coreClientMetadata: ClientMetadata = { ideType: 'IDE_UNSPECIFIED', platform: 'PLATFORM_UNSPECIFIED', pluginType: 'GEMINI', - duetProject: projectId, }; const loadRes = await caServer.loadCodeAssist({ cloudaicompanionProject: projectId, - metadata: clientMetadata, + metadata: { + ...coreClientMetadata, + duetProject: projectId, + }, }); - if (!projectId && loadRes.cloudaicompanionProject) { - projectId = loadRes.cloudaicompanionProject; + if (loadRes.currentTier) { + if (!loadRes.cloudaicompanionProject) { + if (projectId) { + return { + projectId, + userTier: loadRes.currentTier.id, + }; + } + throw new ProjectIdRequiredError(); + } + return { + projectId: loadRes.cloudaicompanionProject, + userTier: loadRes.currentTier.id, + }; } const tier = getOnboardTier(loadRes); - const onboardReq: OnboardUserRequest = { - tierId: tier.id, - cloudaicompanionProject: projectId, - metadata: clientMetadata, - }; + let onboardReq: OnboardUserRequest; + if (tier.id === UserTierId.FREE) { + // The free tier uses a managed google cloud project. Setting a project in the `onboardUser` request causes a `Precondition Failed` error. + onboardReq = { + tierId: tier.id, + cloudaicompanionProject: undefined, + metadata: coreClientMetadata, + }; + } else { + onboardReq = { + tierId: tier.id, + cloudaicompanionProject: projectId, + metadata: { + ...coreClientMetadata, + duetProject: projectId, + }, + }; + } // Poll onboardUser until long running operation is complete. 
let lroRes = await caServer.onboardUser(onboardReq); @@ -67,20 +93,23 @@ export async function setupUser(client: OAuth2Client): Promise { lroRes = await caServer.onboardUser(onboardReq); } - if (!lroRes.response?.cloudaicompanionProject?.id && !projectId) { + if (!lroRes.response?.cloudaicompanionProject?.id) { + if (projectId) { + return { + projectId, + userTier: tier.id, + }; + } throw new ProjectIdRequiredError(); } return { - projectId: lroRes.response?.cloudaicompanionProject?.id || projectId!, + projectId: lroRes.response.cloudaicompanionProject.id, userTier: tier.id, }; } function getOnboardTier(res: LoadCodeAssistResponse): GeminiUserTier { - if (res.currentTier) { - return res.currentTier; - } for (const tier of res.allowedTiers || []) { if (tier.isDefault) { return tier; From 501b78f3032d8e1d506b5e58d762a30a3593a500 Mon Sep 17 00:00:00 2001 From: Jacob Richman Date: Wed, 13 Aug 2025 14:40:04 -0700 Subject: [PATCH 38/45] Update Ink version (#6175) --- package-lock.json | 8 ++++---- packages/cli/package.json | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/package-lock.json b/package-lock.json index 92b08b02..a091fdfb 100644 --- a/package-lock.json +++ b/package-lock.json @@ -6684,9 +6684,9 @@ } }, "node_modules/ink": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/ink/-/ink-6.0.1.tgz", - "integrity": "sha512-vhhFrCodTHZAPPSdMYzLEbeI0Ug37R9j6yA0kLKok9kSK53lQtj/RJhEQJUjq6OwT4N33nxqSRd/7yXhEhVPIw==", + "version": "6.1.1", + "resolved": "https://registry.npmjs.org/ink/-/ink-6.1.1.tgz", + "integrity": "sha512-Bqw78FX+1TSIGxs6bdvohgoy6mTfqjFJVNyYzXn8HIyZyVmwLX8XdnhUtUwyaelLCqLz8uuFseCbomRZWjyo5g==", "license": "MIT", "dependencies": { "@alcalzone/ansi-tokenize": "^0.1.3", @@ -12298,7 +12298,7 @@ "dotenv": "^17.1.0", "glob": "^10.4.1", "highlight.js": "^11.11.1", - "ink": "^6.0.1", + "ink": "^6.1.1", "ink-big-text": "^2.0.0", "ink-gradient": "^3.0.0", "ink-link": "^4.1.0", diff --git a/packages/cli/package.json 
b/packages/cli/package.json index 22a3853e..c30a1603 100644 --- a/packages/cli/package.json +++ b/packages/cli/package.json @@ -38,7 +38,7 @@ "dotenv": "^17.1.0", "glob": "^10.4.1", "highlight.js": "^11.11.1", - "ink": "^6.0.1", + "ink": "^6.1.1", "ink-big-text": "^2.0.0", "ink-gradient": "^3.0.0", "ink-link": "^4.1.0", From d6f74ea2f0a422c86daea2a06284e497db082a16 Mon Sep 17 00:00:00 2001 From: Richie Foreman Date: Wed, 13 Aug 2025 17:45:53 -0400 Subject: [PATCH 39/45] chore(telemetry): Add various surface detection to `determineSurface` for logging. (#6074) Co-authored-by: christine betts Co-authored-by: Jacob Richman Co-authored-by: matt korwel --- packages/core/src/ide/detect-ide.test.ts | 68 ++++++++++++++++ packages/core/src/ide/detect-ide.ts | 20 ++++- .../clearcut-logger/clearcut-logger.test.ts | 77 ++++++++++++++++++- .../clearcut-logger/clearcut-logger.ts | 13 ++-- 4 files changed, 168 insertions(+), 10 deletions(-) create mode 100644 packages/core/src/ide/detect-ide.test.ts diff --git a/packages/core/src/ide/detect-ide.test.ts b/packages/core/src/ide/detect-ide.test.ts new file mode 100644 index 00000000..85249ad6 --- /dev/null +++ b/packages/core/src/ide/detect-ide.test.ts @@ -0,0 +1,68 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { describe, it, expect, afterEach, vi } from 'vitest'; +import { detectIde, DetectedIde } from './detect-ide.js'; + +describe('detectIde', () => { + afterEach(() => { + vi.unstubAllEnvs(); + }); + + it.each([ + { + env: {}, + expected: DetectedIde.VSCode, + }, + { + env: { __COG_BASHRC_SOURCED: '1' }, + expected: DetectedIde.Devin, + }, + { + env: { REPLIT_USER: 'test' }, + expected: DetectedIde.Replit, + }, + { + env: { CURSOR_TRACE_ID: 'test' }, + expected: DetectedIde.Cursor, + }, + { + env: { CODESPACES: 'true' }, + expected: DetectedIde.Codespaces, + }, + { + env: { EDITOR_IN_CLOUD_SHELL: 'true' }, + expected: DetectedIde.CloudShell, + }, + { + env: { 
CLOUD_SHELL: 'true' }, + expected: DetectedIde.CloudShell, + }, + { + env: { TERM_PRODUCT: 'Trae' }, + expected: DetectedIde.Trae, + }, + { + env: { FIREBASE_DEPLOY_AGENT: 'true' }, + expected: DetectedIde.FirebaseStudio, + }, + { + env: { MONOSPACE_ENV: 'true' }, + expected: DetectedIde.FirebaseStudio, + }, + ])('detects the IDE for $expected', ({ env, expected }) => { + vi.stubEnv('TERM_PROGRAM', 'vscode'); + for (const [key, value] of Object.entries(env)) { + vi.stubEnv(key, value); + } + expect(detectIde()).toBe(expected); + }); + + it('returns undefined for non-vscode', () => { + vi.stubEnv('TERM_PROGRAM', 'definitely-not-vscode'); + expect(detectIde()).toBeUndefined(); + }); +}); diff --git a/packages/core/src/ide/detect-ide.ts b/packages/core/src/ide/detect-ide.ts index ef07994c..5cc3cb56 100644 --- a/packages/core/src/ide/detect-ide.ts +++ b/packages/core/src/ide/detect-ide.ts @@ -5,6 +5,8 @@ */ export enum DetectedIde { + Devin = 'devin', + Replit = 'replit', VSCode = 'vscode', Cursor = 'cursor', CloudShell = 'cloudshell', @@ -19,6 +21,14 @@ export interface IdeInfo { export function getIdeInfo(ide: DetectedIde): IdeInfo { switch (ide) { + case DetectedIde.Devin: + return { + displayName: 'Devin', + }; + case DetectedIde.Replit: + return { + displayName: 'Replit', + }; case DetectedIde.VSCode: return { displayName: 'VS Code', @@ -56,19 +66,25 @@ export function detectIde(): DetectedIde | undefined { if (process.env.TERM_PROGRAM !== 'vscode') { return undefined; } + if (process.env.__COG_BASHRC_SOURCED) { + return DetectedIde.Devin; + } + if (process.env.REPLIT_USER) { + return DetectedIde.Replit; + } if (process.env.CURSOR_TRACE_ID) { return DetectedIde.Cursor; } if (process.env.CODESPACES) { return DetectedIde.Codespaces; } - if (process.env.EDITOR_IN_CLOUD_SHELL) { + if (process.env.EDITOR_IN_CLOUD_SHELL || process.env.CLOUD_SHELL) { return DetectedIde.CloudShell; } if (process.env.TERM_PRODUCT === 'Trae') { return DetectedIde.Trae; } - if 
(process.env.FIREBASE_DEPLOY_AGENT) { + if (process.env.FIREBASE_DEPLOY_AGENT || process.env.MONOSPACE_ENV) { return DetectedIde.FirebaseStudio; } return DetectedIde.VSCode; diff --git a/packages/core/src/telemetry/clearcut-logger/clearcut-logger.test.ts b/packages/core/src/telemetry/clearcut-logger/clearcut-logger.test.ts index 96129ad3..f2ce4d19 100644 --- a/packages/core/src/telemetry/clearcut-logger/clearcut-logger.test.ts +++ b/packages/core/src/telemetry/clearcut-logger/clearcut-logger.test.ts @@ -47,7 +47,6 @@ describe('ClearcutLogger', () => { const CLEARCUT_URL = 'https://play.googleapis.com/log'; const MOCK_DATE = new Date('2025-01-02T00:00:00.000Z'); const EXAMPLE_RESPONSE = `["${NEXT_WAIT_MS}",null,[[["ANDROID_BACKUP",0],["BATTERY_STATS",0],["SMART_SETUP",0],["TRON",0]],-3334737594024971225],[]]`; - // A helper to get the internal events array for testing const getEvents = (l: ClearcutLogger): LogEventEntry[][] => l['events'].toArray() as LogEventEntry[][]; @@ -57,6 +56,10 @@ describe('ClearcutLogger', () => { const requeueFailedEvents = (l: ClearcutLogger, events: LogEventEntry[][]) => l['requeueFailedEvents'](events); + afterEach(() => { + vi.unstubAllEnvs(); + }); + function setup({ config = {} as Partial, lifetimeGoogleAccounts = 1, @@ -135,16 +138,84 @@ describe('ClearcutLogger', () => { }); }); - it('logs the current surface', () => { + it('logs the current surface from a github action', () => { const { logger } = setup({}); + vi.stubEnv('GITHUB_SHA', '8675309'); + const event = logger?.createLogEvent('abc', []); expect(event?.event_metadata[0][1]).toEqual({ gemini_cli_key: EventMetadataKey.GEMINI_CLI_SURFACE, - value: 'SURFACE_NOT_SET', + value: 'GitHub', }); }); + + it('honors the value from env.SURFACE over all others', () => { + const { logger } = setup({}); + + vi.stubEnv('TERM_PROGRAM', 'vscode'); + vi.stubEnv('SURFACE', 'ide-1234'); + + const event = logger?.createLogEvent('abc', []); + + expect(event?.event_metadata[0][1]).toEqual({ + 
gemini_cli_key: EventMetadataKey.GEMINI_CLI_SURFACE, + value: 'ide-1234', + }); + }); + + it.each([ + { + env: { + CURSOR_TRACE_ID: 'abc123', + GITHUB_SHA: undefined, + }, + expectedValue: 'cursor', + }, + { + env: { + TERM_PROGRAM: 'vscode', + GITHUB_SHA: undefined, + }, + expectedValue: 'vscode', + }, + { + env: { + MONOSPACE_ENV: 'true', + GITHUB_SHA: undefined, + }, + expectedValue: 'firebasestudio', + }, + { + env: { + __COG_BASHRC_SOURCED: 'true', + GITHUB_SHA: undefined, + }, + expectedValue: 'devin', + }, + { + env: { + CLOUD_SHELL: 'true', + GITHUB_SHA: undefined, + }, + expectedValue: 'cloudshell', + }, + ])( + 'logs the current surface for as $expectedValue, preempting vscode detection', + ({ env, expectedValue }) => { + const { logger } = setup({}); + for (const [key, value] of Object.entries(env)) { + vi.stubEnv(key, value); + } + vi.stubEnv('TERM_PROGRAM', 'vscode'); + const event = logger?.createLogEvent('abc', []); + expect(event?.event_metadata[0][1]).toEqual({ + gemini_cli_key: EventMetadataKey.GEMINI_CLI_SURFACE, + value: expectedValue, + }); + }, + ); }); describe('enqueueLogEvent', () => { diff --git a/packages/core/src/telemetry/clearcut-logger/clearcut-logger.ts b/packages/core/src/telemetry/clearcut-logger/clearcut-logger.ts index 9450f06d..7ccfd440 100644 --- a/packages/core/src/telemetry/clearcut-logger/clearcut-logger.ts +++ b/packages/core/src/telemetry/clearcut-logger/clearcut-logger.ts @@ -30,6 +30,7 @@ import { } from '../../utils/user_account.js'; import { getInstallationId } from '../../utils/user_id.js'; import { FixedDeque } from 'mnemonist'; +import { DetectedIde, detectIde } from '../../ide/detect-ide.js'; const start_session_event_name = 'start_session'; const new_prompt_event_name = 'new_prompt'; @@ -85,12 +86,14 @@ export interface LogRequest { * methods might have in their runtimes. 
*/ function determineSurface(): string { - if (process.env.CLOUD_SHELL === 'true') { - return 'CLOUD_SHELL'; - } else if (process.env.MONOSPACE_ENV === 'true') { - return 'FIREBASE_STUDIO'; + if (process.env.SURFACE) { + return process.env.SURFACE; + } else if (process.env.GITHUB_SHA) { + return 'GitHub'; + } else if (process.env.TERM_PROGRAM === 'vscode') { + return detectIde() || DetectedIde.VSCode; } else { - return process.env.SURFACE || 'SURFACE_NOT_SET'; + return 'SURFACE_NOT_SET'; } } From 514e883af17cbfe5f1de7c2b92c08a5f5a3a8fad Mon Sep 17 00:00:00 2001 From: Richie Foreman Date: Wed, 13 Aug 2025 18:59:46 -0400 Subject: [PATCH 40/45] chore(gemini.md): Make the checkExhaustive helper section more brief/direct (#6181) --- GEMINI.md | 19 +++---------------- 1 file changed, 3 insertions(+), 16 deletions(-) diff --git a/GEMINI.md b/GEMINI.md index 6eab6a47..82f69c8a 100644 --- a/GEMINI.md +++ b/GEMINI.md @@ -117,25 +117,12 @@ TypeScript's power lies in its ability to provide static type checking, catching ### Type narrowing `switch` clauses -When authoring a switch clause over an enumeration or fixed list of items, -always prefer to use the `checkExhaustive` helper method within the default -clause of the switch. This will ensure that all of the possible options within -the value or enumeration are used. +Use the `checkExhaustive` helper in the default clause of a switch statement. +This will ensure that all of the possible options within the value or +enumeration are used. This helper method can be found in `packages/cli/src/utils/checks.ts` -Here's an example of using the helper method properly: - -``` -switch (someValue) { - case 1: - case 2: - // ... - default: - return checkExhaustive(someValue); -} -``` - ### Embracing JavaScript's Array Operators To further enhance code cleanliness and promote safe functional programming practices, leverage JavaScript's rich set of array operators as much as possible. 
Methods like `.map()`, `.filter()`, `.reduce()`, `.slice()`, `.sort()`, and others are incredibly powerful for transforming and manipulating data collections in an immutable and declarative way. From 6d01ba65a2f720b6765ae4328e2b8cf46f725589 Mon Sep 17 00:00:00 2001 From: Allen Hutchison Date: Wed, 13 Aug 2025 17:25:45 -0700 Subject: [PATCH 41/45] refactor: remove modelCheck feature (#6185) --- packages/core/src/core/contentGenerator.ts | 7 +- packages/core/src/core/modelCheck.ts | 76 ---------------------- 2 files changed, 1 insertion(+), 82 deletions(-) delete mode 100644 packages/core/src/core/modelCheck.ts diff --git a/packages/core/src/core/contentGenerator.ts b/packages/core/src/core/contentGenerator.ts index ac716ac3..599a569b 100644 --- a/packages/core/src/core/contentGenerator.ts +++ b/packages/core/src/core/contentGenerator.ts @@ -16,7 +16,7 @@ import { import { createCodeAssistContentGenerator } from '../code_assist/codeAssist.js'; import { DEFAULT_GEMINI_MODEL } from '../config/models.js'; import { Config } from '../config/config.js'; -import { getEffectiveModel } from './modelCheck.js'; + import { UserTierId } from '../code_assist/types.js'; import { LoggingContentGenerator } from './loggingContentGenerator.js'; @@ -85,11 +85,6 @@ export function createContentGeneratorConfig( if (authType === AuthType.USE_GEMINI && geminiApiKey) { contentGeneratorConfig.apiKey = geminiApiKey; contentGeneratorConfig.vertexai = false; - getEffectiveModel( - contentGeneratorConfig.apiKey, - contentGeneratorConfig.model, - contentGeneratorConfig.proxy, - ); return contentGeneratorConfig; } diff --git a/packages/core/src/core/modelCheck.ts b/packages/core/src/core/modelCheck.ts deleted file mode 100644 index 25d86993..00000000 --- a/packages/core/src/core/modelCheck.ts +++ /dev/null @@ -1,76 +0,0 @@ -/** - * @license - * Copyright 2025 Google LLC - * SPDX-License-Identifier: Apache-2.0 - */ - -import { setGlobalDispatcher, ProxyAgent } from 'undici'; -import { - 
DEFAULT_GEMINI_MODEL, - DEFAULT_GEMINI_FLASH_MODEL, -} from '../config/models.js'; - -/** - * Checks if the default "pro" model is rate-limited and returns a fallback "flash" - * model if necessary. This function is designed to be silent. - * @param apiKey The API key to use for the check. - * @param currentConfiguredModel The model currently configured in settings. - * @returns An object indicating the model to use, whether a switch occurred, - * and the original model if a switch happened. - */ -export async function getEffectiveModel( - apiKey: string, - currentConfiguredModel: string, - proxy?: string, -): Promise { - if (currentConfiguredModel !== DEFAULT_GEMINI_MODEL) { - // Only check if the user is trying to use the specific pro model we want to fallback from. - return currentConfiguredModel; - } - - const modelToTest = DEFAULT_GEMINI_MODEL; - const fallbackModel = DEFAULT_GEMINI_FLASH_MODEL; - const endpoint = `https://generativelanguage.googleapis.com/v1beta/models/${modelToTest}:generateContent`; - const body = JSON.stringify({ - contents: [{ parts: [{ text: 'test' }] }], - generationConfig: { - maxOutputTokens: 1, - temperature: 0, - topK: 1, - thinkingConfig: { thinkingBudget: 128, includeThoughts: false }, - }, - }); - - const controller = new AbortController(); - const timeoutId = setTimeout(() => controller.abort(), 2000); // 500ms timeout for the request - - try { - if (proxy) { - setGlobalDispatcher(new ProxyAgent(proxy)); - } - const response = await fetch(endpoint, { - method: 'POST', - headers: { - 'Content-Type': 'application/json', - 'x-goog-api-key': apiKey, - }, - body, - signal: controller.signal, - }); - - clearTimeout(timeoutId); - - if (response.status === 429) { - console.log( - `[INFO] Your configured model (${modelToTest}) was temporarily unavailable. Switched to ${fallbackModel} for this session.`, - ); - return fallbackModel; - } - // For any other case (success, other error codes), we stick to the original model. 
- return currentConfiguredModel; - } catch (_error) { - clearTimeout(timeoutId); - // On timeout or any other fetch error, stick to the original model. - return currentConfiguredModel; - } -} From 342820cf5e0c6ee34d675dd5311d2f7147bc0494 Mon Sep 17 00:00:00 2001 From: Jacob Richman Date: Wed, 13 Aug 2025 17:33:01 -0700 Subject: [PATCH 42/45] Fix/emoji support (#6187) Co-authored-by: elasticdotventures --- .../ui/components/shared/text-buffer.test.ts | 40 ++++++++++++++++ .../src/ui/components/shared/text-buffer.ts | 46 ++++++++++++++----- 2 files changed, 75 insertions(+), 11 deletions(-) diff --git a/packages/cli/src/ui/components/shared/text-buffer.test.ts b/packages/cli/src/ui/components/shared/text-buffer.test.ts index fb75179e..b5f2d8c0 100644 --- a/packages/cli/src/ui/components/shared/text-buffer.test.ts +++ b/packages/cli/src/ui/components/shared/text-buffer.test.ts @@ -5,6 +5,7 @@ */ import { describe, it, expect, beforeEach } from 'vitest'; +import stripAnsi from 'strip-ansi'; import { renderHook, act } from '@testing-library/react'; import { useTextBuffer, @@ -1278,6 +1279,45 @@ Contrary to popular belief, Lorem Ipsum is not simply random text. 
It has roots ); expect(getBufferState(result).text).toBe('Pasted Text'); }); + + it('should not strip popular emojis', () => { + const { result } = renderHook(() => + useTextBuffer({ viewport, isValidPath: () => false }), + ); + const emojis = '🐍🐳🦀🦄'; + act(() => + result.current.handleInput({ + name: '', + ctrl: false, + meta: false, + shift: false, + paste: false, + sequence: emojis, + }), + ); + expect(getBufferState(result).text).toBe(emojis); + }); + }); + + describe('stripAnsi', () => { + it('should correctly strip ANSI escape codes', () => { + const textWithAnsi = '\x1B[31mHello\x1B[0m World'; + expect(stripAnsi(textWithAnsi)).toBe('Hello World'); + }); + + it('should handle multiple ANSI codes', () => { + const textWithMultipleAnsi = '\x1B[1m\x1B[34mBold Blue\x1B[0m Text'; + expect(stripAnsi(textWithMultipleAnsi)).toBe('Bold Blue Text'); + }); + + it('should not modify text without ANSI codes', () => { + const plainText = 'Plain text'; + expect(stripAnsi(plainText)).toBe('Plain text'); + }); + + it('should handle empty string', () => { + expect(stripAnsi('')).toBe(''); + }); }); }); diff --git a/packages/cli/src/ui/components/shared/text-buffer.ts b/packages/cli/src/ui/components/shared/text-buffer.ts index d46e52cc..84bbdc9b 100644 --- a/packages/cli/src/ui/components/shared/text-buffer.ts +++ b/packages/cli/src/ui/components/shared/text-buffer.ts @@ -5,6 +5,7 @@ */ import stripAnsi from 'strip-ansi'; +import { stripVTControlCharacters } from 'util'; import { spawnSync } from 'child_process'; import fs from 'fs'; import os from 'os'; @@ -496,21 +497,44 @@ export const replaceRangeInternal = ( /** * Strip characters that can break terminal rendering. * - * Strip ANSI escape codes and control characters except for line breaks. - * Control characters such as delete break terminal UI rendering. + * Uses Node.js built-in stripVTControlCharacters to handle VT sequences, + * then filters remaining control characters that can disrupt display. 
+ * + * Characters stripped: + * - ANSI escape sequences (via strip-ansi) + * - VT control sequences (via Node.js util.stripVTControlCharacters) + * - C0 control chars (0x00-0x1F) except CR/LF which are handled elsewhere + * - C1 control chars (0x80-0x9F) that can cause display issues + * + * Characters preserved: + * - All printable Unicode including emojis + * - DEL (0x7F) - handled functionally by applyOperations, not a display issue + * - CR/LF (0x0D/0x0A) - needed for line breaks */ function stripUnsafeCharacters(str: string): string { - const stripped = stripAnsi(str); - return toCodePoints(stripped) + const strippedAnsi = stripAnsi(str); + const strippedVT = stripVTControlCharacters(strippedAnsi); + + return toCodePoints(strippedVT) .filter((char) => { - if (char.length > 1) return false; const code = char.codePointAt(0); - if (code === undefined) { - return false; - } - const isUnsafe = - code === 127 || (code <= 31 && code !== 13 && code !== 10); - return !isUnsafe; + if (code === undefined) return false; + + // Preserve CR/LF for line handling + if (code === 0x0a || code === 0x0d) return true; + + // Remove C0 control chars (except CR/LF) that can break display + // Examples: BELL(0x07) makes noise, BS(0x08) moves cursor, VT(0x0B), FF(0x0C) + if (code >= 0x00 && code <= 0x1f) return false; + + // Remove C1 control chars (0x80-0x9F) - legacy 8-bit control codes + if (code >= 0x80 && code <= 0x9f) return false; + + // Preserve DEL (0x7F) - it's handled functionally by applyOperations as backspace + // and doesn't cause rendering issues when displayed + + // Preserve all other characters including Unicode/emojis + return true; }) .join(''); } From c63185dae709ec43e1c0597479e66042799ff0df Mon Sep 17 00:00:00 2001 From: Sandy Tao Date: Wed, 13 Aug 2025 19:12:11 -0700 Subject: [PATCH 43/45] feat(deps): Update @google/genai to 1.13.0 (#6184) --- package-lock.json | 67 +++++++++++++------------------------- packages/cli/package.json | 2 +- 
packages/core/package.json | 2 +- 3 files changed, 25 insertions(+), 46 deletions(-) diff --git a/package-lock.json b/package-lock.json index a091fdfb..9867d148 100644 --- a/package-lock.json +++ b/package-lock.json @@ -989,6 +989,27 @@ "resolved": "packages/test-utils", "link": true }, + "node_modules/@google/genai": { + "version": "1.13.0", + "resolved": "https://registry.npmjs.org/@google/genai/-/genai-1.13.0.tgz", + "integrity": "sha512-BxilXzE8cJ0zt5/lXk6KwuBcIT9P2Lbi2WXhwWMbxf1RNeC68/8DmYQqMrzQP333CieRMdbDXs0eNCphLoScWg==", + "license": "Apache-2.0", + "dependencies": { + "google-auth-library": "^9.14.2", + "ws": "^8.18.0" + }, + "engines": { + "node": ">=20.0.0" + }, + "peerDependencies": { + "@modelcontextprotocol/sdk": "^1.11.0" + }, + "peerDependenciesMeta": { + "@modelcontextprotocol/sdk": { + "optional": true + } + } + }, "node_modules/@grpc/grpc-js": { "version": "1.13.4", "resolved": "https://registry.npmjs.org/@grpc/grpc-js/-/grpc-js-1.13.4.tgz", @@ -12289,7 +12310,7 @@ "version": "0.1.19", "dependencies": { "@google/gemini-cli-core": "file:../core", - "@google/genai": "1.9.0", + "@google/genai": "1.13.0", "@iarna/toml": "^2.2.5", "@modelcontextprotocol/sdk": "^1.15.1", "@types/update-notifier": "^6.0.8", @@ -12345,27 +12366,6 @@ "node": ">=20" } }, - "packages/cli/node_modules/@google/genai": { - "version": "1.9.0", - "resolved": "https://registry.npmjs.org/@google/genai/-/genai-1.9.0.tgz", - "integrity": "sha512-w9P93OXKPMs9H1mfAx9+p3zJqQGrWBGdvK/SVc7cLZEXNHr/3+vW2eif7ZShA6wU24rNLn9z9MK2vQFUvNRI2Q==", - "license": "Apache-2.0", - "dependencies": { - "google-auth-library": "^9.14.2", - "ws": "^8.18.0" - }, - "engines": { - "node": ">=20.0.0" - }, - "peerDependencies": { - "@modelcontextprotocol/sdk": "^1.11.0" - }, - "peerDependenciesMeta": { - "@modelcontextprotocol/sdk": { - "optional": true - } - } - }, "packages/cli/node_modules/@testing-library/dom": { "version": "10.4.0", "dev": true, @@ -12492,7 +12492,7 @@ "name": "@google/gemini-cli-core", 
"version": "0.1.19", "dependencies": { - "@google/genai": "1.9.0", + "@google/genai": "1.13.0", "@modelcontextprotocol/sdk": "^1.11.0", "@opentelemetry/api": "^1.9.0", "@opentelemetry/exporter-logs-otlp-grpc": "^0.52.0", @@ -12539,27 +12539,6 @@ "node": ">=20" } }, - "packages/core/node_modules/@google/genai": { - "version": "1.9.0", - "resolved": "https://registry.npmjs.org/@google/genai/-/genai-1.9.0.tgz", - "integrity": "sha512-w9P93OXKPMs9H1mfAx9+p3zJqQGrWBGdvK/SVc7cLZEXNHr/3+vW2eif7ZShA6wU24rNLn9z9MK2vQFUvNRI2Q==", - "license": "Apache-2.0", - "dependencies": { - "google-auth-library": "^9.14.2", - "ws": "^8.18.0" - }, - "engines": { - "node": ">=20.0.0" - }, - "peerDependencies": { - "@modelcontextprotocol/sdk": "^1.11.0" - }, - "peerDependenciesMeta": { - "@modelcontextprotocol/sdk": { - "optional": true - } - } - }, "packages/core/node_modules/ajv": { "version": "8.17.1", "resolved": "https://registry.npmjs.org/ajv/-/ajv-8.17.1.tgz", diff --git a/packages/cli/package.json b/packages/cli/package.json index c30a1603..0e087804 100644 --- a/packages/cli/package.json +++ b/packages/cli/package.json @@ -29,7 +29,7 @@ }, "dependencies": { "@google/gemini-cli-core": "file:../core", - "@google/genai": "1.9.0", + "@google/genai": "1.13.0", "@iarna/toml": "^2.2.5", "@modelcontextprotocol/sdk": "^1.15.1", "@types/update-notifier": "^6.0.8", diff --git a/packages/core/package.json b/packages/core/package.json index e3fb4078..fac517fd 100644 --- a/packages/core/package.json +++ b/packages/core/package.json @@ -20,7 +20,7 @@ "dist" ], "dependencies": { - "@google/genai": "1.9.0", + "@google/genai": "1.13.0", "@modelcontextprotocol/sdk": "^1.11.0", "@opentelemetry/api": "^1.9.0", "@opentelemetry/exporter-logs-otlp-grpc": "^0.52.0", From 055fe46d21b2fe0604769e8b8e48fb781f10837a Mon Sep 17 00:00:00 2001 From: Gaurav <39389231+gsquared94@users.noreply.github.com> Date: Wed, 13 Aug 2025 21:01:11 -0700 Subject: [PATCH 44/45] docs: update how to use vertex AI auth with ADC 
(#6193) --- docs/cli/authentication.md | 76 ++++++++++++++++++++++++++------------ 1 file changed, 52 insertions(+), 24 deletions(-) diff --git a/docs/cli/authentication.md b/docs/cli/authentication.md index 564f0da3..9b3fa3ad 100644 --- a/docs/cli/authentication.md +++ b/docs/cli/authentication.md @@ -45,41 +45,69 @@ The Gemini CLI requires you to authenticate with Google's AI services. On initia :warning: Be advised that when you export your API key inside your shell configuration file, any other process executed from the shell can read it. 3. **Vertex AI:** - - Obtain your Google Cloud API key: [Get an API Key](https://cloud.google.com/vertex-ai/generative-ai/docs/start/api-keys?usertype=newuser) + - **API Key:** + - Obtain your Google Cloud API key: [Get an API Key](https://cloud.google.com/vertex-ai/generative-ai/docs/start/api-keys?usertype=newuser) - Set the `GOOGLE_API_KEY` environment variable. In the following methods, replace `YOUR_GOOGLE_API_KEY` with your Vertex AI API key: - - You can temporarily set these environment variables in your current shell session using the following commands: + - You can temporarily set the environment variable in your current shell session using the following command: ```bash export GOOGLE_API_KEY="YOUR_GOOGLE_API_KEY" ``` - - For repeated use, you can add the environment variables to your [.env file](#persisting-environment-variables-with-env-files) or your shell's configuration file (like `~/.bashrc`, `~/.zshrc`, or `~/.profile`). For example, the following commands add the environment variables to a `~/.bashrc` file: + - For repeated use, you can add the environment variable to your [.env file](#persisting-environment-variables-with-env-files) or your shell's configuration file (like `~/.bashrc`, `~/.zshrc`, or `~/.profile`). 
For example, the following command adds the environment variable to a `~/.bashrc` file: + ```bash echo 'export GOOGLE_API_KEY="YOUR_GOOGLE_API_KEY"' >> ~/.bashrc source ~/.bashrc ``` - - To use Application Default Credentials (ADC), use the following command: - - Ensure you have a Google Cloud project and have enabled the Vertex AI API. - ```bash - gcloud auth application-default login - ``` - For more information, see [Set up Application Default Credentials for Google Cloud](https://cloud.google.com/docs/authentication/provide-credentials-adc). - - Set the `GOOGLE_CLOUD_PROJECT` and `GOOGLE_CLOUD_LOCATION` environment variables. In the following methods, replace `YOUR_PROJECT_ID` and `YOUR_PROJECT_LOCATION` with the relevant values for your project: - - You can temporarily set these environment variables in your current shell session using the following commands: - ```bash - export GOOGLE_CLOUD_PROJECT="YOUR_PROJECT_ID" - export GOOGLE_CLOUD_LOCATION="YOUR_PROJECT_LOCATION" # e.g., us-central1 - ``` - - For repeated use, you can add the environment variables to your [.env file](#persisting-environment-variables-with-env-files) - - - Alternatively you can export the environment variables from your shell's configuration file (like `~/.bashrc`, `~/.zshrc`, or `~/.profile`). For example, the following commands add the environment variables to a `~/.bashrc` file: - - ```bash - echo 'export GOOGLE_CLOUD_PROJECT="YOUR_PROJECT_ID"' >> ~/.bashrc - echo 'export GOOGLE_CLOUD_LOCATION="YOUR_PROJECT_LOCATION"' >> ~/.bashrc - source ~/.bashrc - ``` :warning: Be advised that when you export your API key inside your shell configuration file, any other process executed from the shell can read it. + > **Note:** + > If you encounter an error like `"API keys are not supported by this API - Expected OAuth2 access token or other authentication credentials that assert a principal"`, it is likely that your organization has restricted the creation of service account API keys. 
In this case, please try the [service account JSON key](#service-account-json-key) method described below. + + - **Application Default Credentials (ADC):** + + > **Note:** + > If you have previously set the `GOOGLE_API_KEY` or `GEMINI_API_KEY` environment variables, you must unset them to use Application Default Credentials. + > + > ```bash + > unset GOOGLE_API_KEY GEMINI_API_KEY + > ``` + - **Using `gcloud` (for local development):** + - Ensure you have a Google Cloud project and have enabled the Vertex AI API. + - Log in with your user credentials: + ```bash + gcloud auth application-default login + ``` + For more information, see [Set up Application Default Credentials for Google Cloud](https://cloud.google.com/docs/authentication/provide-credentials-adc). + - **Using a Service Account (for applications or when service account API keys are restricted):** + - If you are unable to create an API key due to [organization policies](https://cloud.google.com/vertex-ai/generative-ai/docs/start/api-keys?usertype=existinguser#expandable-2), or if you are running in a non-interactive environment, you can authenticate using a service account key. + - [Create a service account and key](https://cloud.google.com/iam/docs/keys-create-delete), and download the JSON key file. The service account will need to be assigned the "Vertex AI User" role. + - Set the `GOOGLE_APPLICATION_CREDENTIALS` environment variable to the absolute path of the JSON file. + - You can temporarily set the environment variable in your current shell session: + ```bash + export GOOGLE_APPLICATION_CREDENTIALS="/path/to/your/keyfile.json" + ``` + - For repeated use, you can add the command to your shell's configuration file (e.g., `~/.bashrc`). 
+ ```bash + echo 'export GOOGLE_APPLICATION_CREDENTIALS="/path/to/your/keyfile.json"' >> ~/.bashrc + source ~/.bashrc + ``` + :warning: Be advised that when you export service account credentials inside your shell configuration file, any other process executed from the shell can read them. + + - **Required Environment Variables for ADC:** + - When using ADC (either with `gcloud` or a service account), you must also set the `GOOGLE_CLOUD_PROJECT` and `GOOGLE_CLOUD_LOCATION` environment variables. In the following methods, replace `YOUR_PROJECT_ID` and `YOUR_PROJECT_LOCATION` with the relevant values for your project: + - You can temporarily set these environment variables in your current shell session using the following commands: + ```bash + export GOOGLE_CLOUD_PROJECT="YOUR_PROJECT_ID" + export GOOGLE_CLOUD_LOCATION="YOUR_PROJECT_LOCATION" # e.g., us-central1 + ``` + - For repeated use, you can add the environment variables to your [.env file](#persisting-environment-variables-with-env-files) or your shell's configuration file (like `~/.bashrc`, `~/.zshrc`, or `~/.profile`). For example, the following commands add the environment variables to a `~/.bashrc` file: + ```bash + echo 'export GOOGLE_CLOUD_PROJECT="YOUR_PROJECT_ID"' >> ~/.bashrc + echo 'export GOOGLE_CLOUD_LOCATION="YOUR_PROJECT_LOCATION"' >> ~/.bashrc + source ~/.bashrc + ``` + 4. **Cloud Shell:** - This option is only available when running in a Google Cloud Shell environment. - It automatically uses the credentials of the logged-in user in the Cloud Shell environment. 
From 258217398285f2f799f31eceb4e4129396cdb27e Mon Sep 17 00:00:00 2001 From: gemini-cli-robot Date: Thu, 14 Aug 2025 05:37:43 +0000 Subject: [PATCH 45/45] chore(release): v0.1.21 --- package-lock.json | 10 +++++----- package.json | 4 ++-- packages/cli/package.json | 4 ++-- packages/core/package.json | 2 +- packages/test-utils/package.json | 2 +- 5 files changed, 11 insertions(+), 11 deletions(-) diff --git a/package-lock.json b/package-lock.json index 9867d148..4677fa3a 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "@google/gemini-cli", - "version": "0.1.19", + "version": "0.1.21", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "@google/gemini-cli", - "version": "0.1.19", + "version": "0.1.21", "workspaces": [ "packages/*" ], @@ -12307,7 +12307,7 @@ }, "packages/cli": { "name": "@google/gemini-cli", - "version": "0.1.19", + "version": "0.1.21", "dependencies": { "@google/gemini-cli-core": "file:../core", "@google/genai": "1.13.0", @@ -12490,7 +12490,7 @@ }, "packages/core": { "name": "@google/gemini-cli-core", - "version": "0.1.19", + "version": "0.1.21", "dependencies": { "@google/genai": "1.13.0", "@modelcontextprotocol/sdk": "^1.11.0", @@ -12596,7 +12596,7 @@ }, "packages/test-utils": { "name": "@google/gemini-cli-test-utils", - "version": "0.1.19", + "version": "0.1.21", "license": "Apache-2.0", "devDependencies": { "typescript": "^5.3.3" diff --git a/package.json b/package.json index 8b6d7295..8a40681e 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "@google/gemini-cli", - "version": "0.1.19", + "version": "0.1.21", "engines": { "node": ">=20.0.0" }, @@ -14,7 +14,7 @@ "url": "git+https://github.com/google-gemini/gemini-cli.git" }, "config": { - "sandboxImageUri": "us-docker.pkg.dev/gemini-code-dev/gemini-cli/sandbox:0.1.19" + "sandboxImageUri": "us-docker.pkg.dev/gemini-code-dev/gemini-cli/sandbox:0.1.21" }, "scripts": { "start": "node scripts/start.js", diff --git 
a/packages/cli/package.json b/packages/cli/package.json index 0e087804..a460920e 100644 --- a/packages/cli/package.json +++ b/packages/cli/package.json @@ -1,6 +1,6 @@ { "name": "@google/gemini-cli", - "version": "0.1.19", + "version": "0.1.21", "description": "Gemini CLI", "repository": { "type": "git", @@ -25,7 +25,7 @@ "dist" ], "config": { - "sandboxImageUri": "us-docker.pkg.dev/gemini-code-dev/gemini-cli/sandbox:0.1.19" + "sandboxImageUri": "us-docker.pkg.dev/gemini-code-dev/gemini-cli/sandbox:0.1.21" }, "dependencies": { "@google/gemini-cli-core": "file:../core", diff --git a/packages/core/package.json b/packages/core/package.json index fac517fd..6f670f2c 100644 --- a/packages/core/package.json +++ b/packages/core/package.json @@ -1,6 +1,6 @@ { "name": "@google/gemini-cli-core", - "version": "0.1.19", + "version": "0.1.21", "description": "Gemini CLI Core", "repository": { "type": "git", diff --git a/packages/test-utils/package.json b/packages/test-utils/package.json index cb93c941..51359c5c 100644 --- a/packages/test-utils/package.json +++ b/packages/test-utils/package.json @@ -1,6 +1,6 @@ { "name": "@google/gemini-cli-test-utils", - "version": "0.1.19", + "version": "0.1.21", "private": true, "main": "src/index.ts", "license": "Apache-2.0",