Mirror of https://github.com/QwenLM/qwen-code.git (synced 2025-12-24 10:39:17 +00:00)

Compare commits: 62 commits, `fix/integr`...`feat/gemin`
Commits (SHA1):

1dfd880e17, 10a0c843c1, 955547d523, 3bc862df89, 87d8d82be7, fefc138485, 4e7929850c, 9cc5c3ed8f, a92be72e88, 52cd1da4a7, c5c556a326, a8a863581b, e4468cfcbc, 3bf30ead67, a786f61e49, b8a16d362a, 17129024f4, fa7d857945, 90489933fd, 3354b56a05, d40447cee4, ba87cf63f6, 00a8c6a924, 156134d3d4, b720209888, dfe667c364, 1386fba278, d942250905, ec32a24508, c2b59038ae, 27bf42b4f5, d07ae35c90, cb59b5a9dc, 01e62a2120, d464f61b72, f866f7f071, 7eabf543b4, 8106a6b0f4, c0839dceac, 12f84fb730, f9a1ee2442, f824004f99, e274b4469a, a4e3d764d3, 0a39c91264, 49b3e0dc92, 25d9c4f1a7, d1a6b3207e, 1c62499977, 4b8b4e2fe8, f9da1b819e, 36fb6b8291, f47c762620, 573c33f68a, 32c085cf7d, 725843f9b3, 54fd63f04b, 59c3d3d0f9, 4f2b2d0a3e, 44794121a8, 84cccfe99a, b6a3ab11e0
.github/workflows/release-sdk.yml (vendored, 12 lines changed)

@@ -121,6 +121,11 @@ jobs:
IS_PREVIEW: '${{ steps.vars.outputs.is_preview }}'
MANUAL_VERSION: '${{ inputs.version }}'

- name: 'Build CLI Bundle'
run: |
npm run build
npm run bundle

- name: 'Run Tests'
if: |-
${{ github.event.inputs.force_skip_tests != 'true' }}
@@ -132,13 +137,6 @@ jobs:
OPENAI_BASE_URL: '${{ secrets.OPENAI_BASE_URL }}'
OPENAI_MODEL: '${{ secrets.OPENAI_MODEL }}'

- name: 'Build CLI for Integration Tests'
if: |-
${{ github.event.inputs.force_skip_tests != 'true' }}
run: |
npm run build
npm run bundle

- name: 'Run SDK Integration Tests'
if: |-
${{ github.event.inputs.force_skip_tests != 'true' }}

.github/workflows/release.yml (vendored, 4 lines changed)

@@ -133,8 +133,8 @@ jobs:
${{ github.event.inputs.force_skip_tests != 'true' }}
run: |
npm run preflight
npm run test:integration:sandbox:none
npm run test:integration:sandbox:docker
npm run test:integration:cli:sandbox:none
npm run test:integration:cli:sandbox:docker
env:
OPENAI_API_KEY: '${{ secrets.OPENAI_API_KEY }}'
OPENAI_BASE_URL: '${{ secrets.OPENAI_BASE_URL }}'

@@ -627,7 +627,12 @@ The MCP integration tracks several states:

### Schema Compatibility

- **Property stripping:** The system automatically removes certain schema properties (`$schema`, `additionalProperties`) for Qwen API compatibility
- **Schema compliance mode:** By default (`schemaCompliance: "auto"`), tool schemas are passed through as-is. Set `"model": { "generationConfig": { "schemaCompliance": "openapi_30" } }` in your `settings.json` to convert models to Strict OpenAPI 3.0 format.
- **OpenAPI 3.0 transformations:** When `openapi_30` mode is enabled, the system handles:
  - Nullable types: `["string", "null"]` -> `type: "string", nullable: true`
  - Const values: `const: "foo"` -> `enum: ["foo"]`
  - Exclusive limits: numeric `exclusiveMinimum` -> boolean form with `minimum`
  - Keyword removal: `$schema`, `$id`, `dependencies`, `patternProperties`
- **Name sanitization:** Tool names are automatically sanitized to meet API requirements
- **Conflict resolution:** Tool name conflicts between servers are resolved through automatic prefixing

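The transformations listed above are easy to picture in code. The following is a minimal, illustrative TypeScript sketch of an `openapi_30`-style conversion; the helper name and structure are hypothetical and not the repository's actual implementation:

```ts
// Illustrative only: applies the OpenAPI 3.0 conversions described above to a JSON Schema.
type JsonSchema = Record<string, unknown>;

function toOpenApi30(schema: JsonSchema): JsonSchema {
  const out: JsonSchema = { ...schema };

  // Keyword removal: drop JSON Schema keywords that OpenAPI 3.0 does not accept.
  for (const key of ['$schema', '$id', 'dependencies', 'patternProperties']) {
    delete out[key];
  }

  // Nullable types: ["string", "null"] -> type: "string", nullable: true
  if (Array.isArray(out['type']) && (out['type'] as string[]).includes('null')) {
    const nonNull = (out['type'] as string[]).filter((t) => t !== 'null');
    out['type'] = nonNull[0];
    out['nullable'] = true;
  }

  // Const values: const: "foo" -> enum: ["foo"]
  if ('const' in out) {
    out['enum'] = [out['const']];
    delete out['const'];
  }

  // Exclusive limits: numeric exclusiveMinimum -> boolean form with minimum
  if (typeof out['exclusiveMinimum'] === 'number') {
    out['minimum'] = out['exclusiveMinimum'];
    out['exclusiveMinimum'] = true;
  }

  // Recurse into object properties and array items.
  if (out['properties']) {
    out['properties'] = Object.fromEntries(
      Object.entries(out['properties'] as Record<string, JsonSchema>).map(([k, v]) => [
        k,
        toOpenApi30(v),
      ]),
    );
  }
  if (out['items']) {
    out['items'] = toOpenApi30(out['items'] as JsonSchema);
  }
  return out;
}
```
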
@@ -14,7 +14,7 @@ Learn how to use Qwen Code as an end user. This section covers:
- Configuration options
- Troubleshooting

### [Developer Guide](./developers/contributing)
### [Developer Guide](./developers/architecture)

Learn how to contribute to and develop Qwen Code. This section covers:

@@ -189,8 +189,8 @@ Then select "create" and follow the prompts to define:
> - Create project-specific subagents in `.qwen/agents/` for team sharing
> - Use descriptive `description` fields to enable automatic delegation
> - Limit tool access to what each subagent actually needs
> - Know more about [Sub Agents](/users/features/sub-agents)
> - Know more about [Approval Mode](/users/features/approval-mode)
> - Know more about [Sub Agents](./features/sub-agents)
> - Know more about [Approval Mode](./features/approval-mode)

## Work with tests

@@ -318,7 +318,7 @@ This provides a directory listing with file information.
Show me the data from @github: repos/owner/repo/issues
```

This fetches data from connected MCP servers using the format @server: resource. See [MCP](/users/features/mcp) for details.
This fetches data from connected MCP servers using the format @server: resource. See [MCP](./features/mcp) for details.

> [!tip]
>

@@ -6,7 +6,7 @@ Qwen Code includes the ability to automatically ignore files, similar to `.gitig

## How it works

When you add a path to your `.qwenignore` file, tools that respect this file will exclude matching files and directories from their operations. For example, when you use the [`read_many_files`](/developers/tools/multi-file) command, any paths in your `.qwenignore` file will be automatically excluded.
When you add a path to your `.qwenignore` file, tools that respect this file will exclude matching files and directories from their operations. For example, when you use the [`read_many_files`](../../developers/tools/multi-file) command, any paths in your `.qwenignore` file will be automatically excluded.

For the most part, `.qwenignore` follows the conventions of `.gitignore` files:

@@ -2,7 +2,7 @@

> [!tip]
>
> **Authentication / API keys:** Authentication (Qwen OAuth vs OpenAI-compatible API) and auth-related environment variables (like `OPENAI_API_KEY`) are documented in **[Authentication](/users/configuration/auth)**.
> **Authentication / API keys:** Authentication (Qwen OAuth vs OpenAI-compatible API) and auth-related environment variables (like `OPENAI_API_KEY`) are documented in **[Authentication](../configuration/auth)**.

> [!note]
>
@@ -42,7 +42,7 @@ Qwen Code uses JSON settings files for persistent configuration. There are four

In addition to a project settings file, a project's `.qwen` directory can contain other project-specific files related to Qwen Code's operation, such as:

- [Custom sandbox profiles](/users/features/sandbox) (e.g. `.qwen/sandbox-macos-custom.sb`, `.qwen/sandbox.Dockerfile`).
- [Custom sandbox profiles](../features/sandbox) (e.g. `.qwen/sandbox-macos-custom.sb`, `.qwen/sandbox.Dockerfile`).

### Available settings in `settings.json`

@@ -69,7 +69,7 @@ Settings are organized into categories. All settings should be placed within the

| Setting | Type | Description | Default |
| --- | --- | --- | --- |
| `ui.theme` | string | The color theme for the UI. See [Themes](/users/configuration/themes) for available options. | `undefined` |
| `ui.theme` | string | The color theme for the UI. See [Themes](../configuration/themes) for available options. | `undefined` |
| `ui.customThemes` | object | Custom theme definitions. | `{}` |
| `ui.hideWindowTitle` | boolean | Hide the window title bar. | `false` |
| `ui.hideTips` | boolean | Hide helpful tips in the UI. | `false` |
@@ -326,7 +326,7 @@ The CLI keeps a history of shell commands you run. To avoid conflicts between di
Environment variables are a common way to configure applications, especially for sensitive information (like tokens) or for settings that might change between environments.

Qwen Code can automatically load environment variables from `.env` files.
For authentication-related variables (like `OPENAI_*`) and the recommended `.qwen/.env` approach, see **[Authentication](/users/configuration/auth)**.
For authentication-related variables (like `OPENAI_*`) and the recommended `.qwen/.env` approach, see **[Authentication](../configuration/auth)**.

> [!tip]
>
@@ -357,38 +357,38 @@ Arguments passed directly when running the CLI can override other configurations

### Command-Line Arguments Table

| Argument | Alias | Description | Possible Values | Notes |
|
||||
| ---------------------------- | ----- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| `--model` | `-m` | Specifies the Qwen model to use for this session. | Model name | Example: `npm start -- --model qwen3-coder-plus` |
|
||||
| `--prompt` | `-p` | Used to pass a prompt directly to the command. This invokes Qwen Code in a non-interactive mode. | Your prompt text | For scripting examples, use the `--output-format json` flag to get structured output. |
|
||||
| `--prompt-interactive` | `-i` | Starts an interactive session with the provided prompt as the initial input. | Your prompt text | The prompt is processed within the interactive session, not before it. Cannot be used when piping input from stdin. Example: `qwen -i "explain this code"` |
|
||||
| `--output-format` | `-o` | Specifies the format of the CLI output for non-interactive mode. | `text`, `json`, `stream-json` | `text`: (Default) The standard human-readable output. `json`: A machine-readable JSON output emitted at the end of execution. `stream-json`: Streaming JSON messages emitted as they occur during execution. For structured output and scripting, use the `--output-format json` or `--output-format stream-json` flag. See [Headless Mode](/users/features/headless) for detailed information. |
|
||||
| `--input-format` | | Specifies the format consumed from standard input. | `text`, `stream-json` | `text`: (Default) Standard text input from stdin or command-line arguments. `stream-json`: JSON message protocol via stdin for bidirectional communication. Requirement: `--input-format stream-json` requires `--output-format stream-json` to be set. When using `stream-json`, stdin is reserved for protocol messages. See [Headless Mode](/users/features/headless) for detailed information. |
|
||||
| `--include-partial-messages` | | Include partial assistant messages when using `stream-json` output format. When enabled, emits stream events (message_start, content_block_delta, etc.) as they occur during streaming. | | Default: `false`. Requirement: Requires `--output-format stream-json` to be set. See [Headless Mode](/users/features/headless) for detailed information about stream events. |
|
||||
| `--sandbox` | `-s` | Enables sandbox mode for this session. | | |
|
||||
| `--sandbox-image` | | Sets the sandbox image URI. | | |
|
||||
| `--debug` | `-d` | Enables debug mode for this session, providing more verbose output. | | |
|
||||
| `--all-files` | `-a` | If set, recursively includes all files within the current directory as context for the prompt. | | |
|
||||
| `--help` | `-h` | Displays help information about command-line arguments. | | |
|
||||
| `--show-memory-usage` | | Displays the current memory usage. | | |
|
||||
| `--yolo` | | Enables YOLO mode, which automatically approves all tool calls. | | |
|
||||
| `--approval-mode` | | Sets the approval mode for tool calls. | `plan`, `default`, `auto-edit`, `yolo` | Supported modes: `plan`: Analyze only—do not modify files or execute commands. `default`: Require approval for file edits or shell commands (default behavior). `auto-edit`: Automatically approve edit tools (edit, write_file) while prompting for others. `yolo`: Automatically approve all tool calls (equivalent to `--yolo`). Cannot be used together with `--yolo`. Use `--approval-mode=yolo` instead of `--yolo` for the new unified approach. Example: `qwen --approval-mode auto-edit`<br>See more about [Approval Mode](/users/features/approval-mode). |
|
||||
| `--allowed-tools` | | A comma-separated list of tool names that will bypass the confirmation dialog. | Tool names | Example: `qwen --allowed-tools "Shell(git status)"` |
|
||||
| `--telemetry` | | Enables [telemetry](/developers/development/telemetry). | | |
|
||||
| `--telemetry-target` | | Sets the telemetry target. | | See [telemetry](/developers/development/telemetry) for more information. |
|
||||
| `--telemetry-otlp-endpoint` | | Sets the OTLP endpoint for telemetry. | | See [telemetry](/developers/development/telemetry) for more information. |
|
||||
| `--telemetry-otlp-protocol` | | Sets the OTLP protocol for telemetry (`grpc` or `http`). | | Defaults to `grpc`. See [telemetry](/developers/development/telemetry) for more information. |
|
||||
| `--telemetry-log-prompts` | | Enables logging of prompts for telemetry. | | See [telemetry](/developers/development/telemetry) for more information. |
|
||||
| `--checkpointing` | | Enables [checkpointing](/users/features/checkpointing). | | |
|
||||
| `--extensions` | `-e` | Specifies a list of extensions to use for the session. | Extension names | If not provided, all available extensions are used. Use the special term `qwen -e none` to disable all extensions. Example: `qwen -e my-extension -e my-other-extension` |
|
||||
| `--list-extensions` | `-l` | Lists all available extensions and exits. | | |
|
||||
| `--proxy` | | Sets the proxy for the CLI. | Proxy URL | Example: `--proxy http://localhost:7890`. |
|
||||
| `--include-directories` | | Includes additional directories in the workspace for multi-directory support. | Directory paths | Can be specified multiple times or as comma-separated values. 5 directories can be added at maximum. Example: `--include-directories /path/to/project1,/path/to/project2` or `--include-directories /path/to/project1 --include-directories /path/to/project2` |
|
||||
| `--screen-reader` | | Enables screen reader mode, which adjusts the TUI for better compatibility with screen readers. | | |
|
||||
| `--version` | | Displays the version of the CLI. | | |
|
||||
| `--openai-logging` | | Enables logging of OpenAI API calls for debugging and analysis. | | This flag overrides the `enableOpenAILogging` setting in `settings.json`. |
|
||||
| `--openai-logging-dir` | | Sets a custom directory path for OpenAI API logs. | Directory path | This flag overrides the `openAILoggingDir` setting in `settings.json`. Supports absolute paths, relative paths, and `~` expansion. Example: `qwen --openai-logging-dir "~/qwen-logs" --openai-logging` |
|
||||
| `--tavily-api-key` | | Sets the Tavily API key for web search functionality for this session. | API key | Example: `qwen --tavily-api-key tvly-your-api-key-here` |
|
||||
| Argument | Alias | Description | Possible Values | Notes |
|
||||
| ---------------------------- | ----- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| `--model` | `-m` | Specifies the Qwen model to use for this session. | Model name | Example: `npm start -- --model qwen3-coder-plus` |
|
||||
| `--prompt` | `-p` | Used to pass a prompt directly to the command. This invokes Qwen Code in a non-interactive mode. | Your prompt text | For scripting examples, use the `--output-format json` flag to get structured output. |
|
||||
| `--prompt-interactive` | `-i` | Starts an interactive session with the provided prompt as the initial input. | Your prompt text | The prompt is processed within the interactive session, not before it. Cannot be used when piping input from stdin. Example: `qwen -i "explain this code"` |
|
||||
| `--output-format` | `-o` | Specifies the format of the CLI output for non-interactive mode. | `text`, `json`, `stream-json` | `text`: (Default) The standard human-readable output. `json`: A machine-readable JSON output emitted at the end of execution. `stream-json`: Streaming JSON messages emitted as they occur during execution. For structured output and scripting, use the `--output-format json` or `--output-format stream-json` flag. See [Headless Mode](../features/headless) for detailed information. |
|
||||
| `--input-format` | | Specifies the format consumed from standard input. | `text`, `stream-json` | `text`: (Default) Standard text input from stdin or command-line arguments. `stream-json`: JSON message protocol via stdin for bidirectional communication. Requirement: `--input-format stream-json` requires `--output-format stream-json` to be set. When using `stream-json`, stdin is reserved for protocol messages. See [Headless Mode](../features/headless) for detailed information. |
|
||||
| `--include-partial-messages` | | Include partial assistant messages when using `stream-json` output format. When enabled, emits stream events (message_start, content_block_delta, etc.) as they occur during streaming. | | Default: `false`. Requirement: Requires `--output-format stream-json` to be set. See [Headless Mode](../features/headless) for detailed information about stream events. |
|
||||
| `--sandbox` | `-s` | Enables sandbox mode for this session. | | |
|
||||
| `--sandbox-image` | | Sets the sandbox image URI. | | |
|
||||
| `--debug` | `-d` | Enables debug mode for this session, providing more verbose output. | | |
|
||||
| `--all-files` | `-a` | If set, recursively includes all files within the current directory as context for the prompt. | | |
|
||||
| `--help` | `-h` | Displays help information about command-line arguments. | | |
|
||||
| `--show-memory-usage` | | Displays the current memory usage. | | |
|
||||
| `--yolo` | | Enables YOLO mode, which automatically approves all tool calls. | | |
|
||||
| `--approval-mode` | | Sets the approval mode for tool calls. | `plan`, `default`, `auto-edit`, `yolo` | Supported modes: `plan`: Analyze only—do not modify files or execute commands. `default`: Require approval for file edits or shell commands (default behavior). `auto-edit`: Automatically approve edit tools (edit, write_file) while prompting for others. `yolo`: Automatically approve all tool calls (equivalent to `--yolo`). Cannot be used together with `--yolo`. Use `--approval-mode=yolo` instead of `--yolo` for the new unified approach. Example: `qwen --approval-mode auto-edit`<br>See more about [Approval Mode](../features/approval-mode). |
|
||||
| `--allowed-tools` | | A comma-separated list of tool names that will bypass the confirmation dialog. | Tool names | Example: `qwen --allowed-tools "Shell(git status)"` |
|
||||
| `--telemetry` | | Enables [telemetry](/developers/development/telemetry). | | |
|
||||
| `--telemetry-target` | | Sets the telemetry target. | | See [telemetry](/developers/development/telemetry) for more information. |
|
||||
| `--telemetry-otlp-endpoint` | | Sets the OTLP endpoint for telemetry. | | See [telemetry](../../developers/development/telemetry) for more information. |
|
||||
| `--telemetry-otlp-protocol` | | Sets the OTLP protocol for telemetry (`grpc` or `http`). | | Defaults to `grpc`. See [telemetry](../../developers/development/telemetry) for more information. |
|
||||
| `--telemetry-log-prompts` | | Enables logging of prompts for telemetry. | | See [telemetry](../../developers/development/telemetry) for more information. |
|
||||
| `--checkpointing` | | Enables [checkpointing](../features/checkpointing). | | |
|
||||
| `--extensions` | `-e` | Specifies a list of extensions to use for the session. | Extension names | If not provided, all available extensions are used. Use the special term `qwen -e none` to disable all extensions. Example: `qwen -e my-extension -e my-other-extension` |
|
||||
| `--list-extensions` | `-l` | Lists all available extensions and exits. | | |
|
||||
| `--proxy` | | Sets the proxy for the CLI. | Proxy URL | Example: `--proxy http://localhost:7890`. |
|
||||
| `--include-directories` | | Includes additional directories in the workspace for multi-directory support. | Directory paths | Can be specified multiple times or as comma-separated values. 5 directories can be added at maximum. Example: `--include-directories /path/to/project1,/path/to/project2` or `--include-directories /path/to/project1 --include-directories /path/to/project2` |
|
||||
| `--screen-reader` | | Enables screen reader mode, which adjusts the TUI for better compatibility with screen readers. | | |
|
||||
| `--version` | | Displays the version of the CLI. | | |
|
||||
| `--openai-logging` | | Enables logging of OpenAI API calls for debugging and analysis. | | This flag overrides the `enableOpenAILogging` setting in `settings.json`. |
|
||||
| `--openai-logging-dir` | | Sets a custom directory path for OpenAI API logs. | Directory path | This flag overrides the `openAILoggingDir` setting in `settings.json`. Supports absolute paths, relative paths, and `~` expansion. Example: `qwen --openai-logging-dir "~/qwen-logs" --openai-logging` |
|
||||
| `--tavily-api-key` | | Sets the Tavily API key for web search functionality for this session. | API key | Example: `qwen --tavily-api-key tvly-your-api-key-here` |
|
||||

## Context Files (Hierarchical Instructional Context)

@@ -438,11 +438,11 @@ This example demonstrates how you can provide general project context, specific
- Location: The CLI also scans for the configured context file in subdirectories _below_ the current working directory (respecting common ignore patterns like `node_modules`, `.git`, etc.). The breadth of this search is limited to 200 directories by default, but can be configured with the `context.discoveryMaxDirs` setting in your `settings.json` file.
- Scope: Allows for highly specific instructions relevant to a particular component, module, or subsection of your project.
- **Concatenation & UI Indication:** The contents of all found context files are concatenated (with separators indicating their origin and path) and provided as part of the system prompt. The CLI footer displays the count of loaded context files, giving you a quick visual cue about the active instructional context.
- **Importing Content:** You can modularize your context files by importing other Markdown files using the `@path/to/file.md` syntax. For more details, see the [Memory Import Processor documentation](/users/configuration/memory).
- **Importing Content:** You can modularize your context files by importing other Markdown files using the `@path/to/file.md` syntax. For more details, see the [Memory Import Processor documentation](../configuration/memory).
- **Commands for Memory Management:**
  - Use `/memory refresh` to force a re-scan and reload of all context files from all configured locations. This updates the AI's instructional context.
  - Use `/memory show` to display the combined instructional context currently loaded, allowing you to verify the hierarchy and content being used by the AI.
  - See the [Commands documentation](/users/reference/cli-reference) for full details on the `/memory` command and its sub-commands (`show` and `refresh`).
  - See the [Commands documentation](../features/commands) for full details on the `/memory` command and its sub-commands (`show` and `refresh`).

By understanding and utilizing these configuration layers and the hierarchical nature of context files, you can effectively manage the AI's memory and tailor Qwen Code's responses to your specific needs and projects.

@@ -450,7 +450,7 @@ By understanding and utilizing these configuration layers and the hierarchical n

Qwen Code can execute potentially unsafe operations (like shell commands and file modifications) within a sandboxed environment to protect your system.

[Sandbox](/users/features/sandbox) is disabled by default, but you can enable it in a few ways:
[Sandbox](../features/sandbox) is disabled by default, but you can enable it in a few ways:

- Using `--sandbox` or `-s` flag.
- Setting `GEMINI_SANDBOX` environment variable.

@@ -32,7 +32,7 @@ Qwen Code comes with a selection of pre-defined themes, which you can list using

### Theme Persistence

Selected themes are saved in Qwen Code's [configuration](./configuration.md) so your preference is remembered across sessions.
Selected themes are saved in Qwen Code's [configuration](../configuration/settings) so your preference is remembered across sessions.

---

@@ -146,7 +146,7 @@ The theme file must be a valid JSON file that follows the same structure as a cu

- Select your custom theme using the `/theme` command in Qwen Code. Your custom theme will appear in the theme selection dialog.
- Or, set it as the default by adding `"theme": "MyCustomTheme"` to the `ui` object in your `settings.json`.
- Custom themes can be set at the user, project, or system level, and follow the same [configuration precedence](./configuration.md) as other settings.
- Custom themes can be set at the user, project, or system level, and follow the same [configuration precedence](../configuration/settings) as other settings.

## Themes Preview

@@ -56,6 +56,6 @@ If you need to change a decision or see all your settings, you have a couple of

For advanced users, it's helpful to know the exact order of operations for how trust is determined:

1. **IDE Trust Signal**: If you are using the [IDE Integration](/users/ide-integration/ide-integration), the CLI first asks the IDE if the workspace is trusted. The IDE's response takes highest priority.
1. **IDE Trust Signal**: If you are using the [IDE Integration](../ide-integration/ide-integration), the CLI first asks the IDE if the workspace is trusted. The IDE's response takes highest priority.

2. **Local Trust File**: If the IDE is not connected, the CLI checks the central `~/.qwen/trustedFolders.json` file.

@@ -1,3 +1,5 @@
# Approval Mode

Qwen Code offers three distinct permission modes that allow you to flexibly control how AI interacts with your code and system based on task complexity and risk level.

## Permission Modes Comparison

@@ -203,7 +203,7 @@ Key command-line options for headless usage:
| `--continue` | Resume the most recent session for this project | `qwen --continue -p "Pick up where we left off"` |
| `--resume [sessionId]` | Resume a specific session (or choose interactively) | `qwen --resume 123e... -p "Finish the refactor"` |

For complete details on all available configuration options, settings files, and environment variables, see the [Configuration Guide](/users/configuration/settings).
For complete details on all available configuration options, settings files, and environment variables, see the [Configuration Guide](../configuration/settings).

## Examples

@@ -276,7 +276,7 @@ tail -5 usage.log

## Resources

- [CLI Configuration](/users/configuration/settings#command-line-arguments) - Complete configuration guide
- [Authentication](/users/configuration/settings#environment-variables-for-api-access) - Setup authentication
- [Commands](/users/reference/cli-reference) - Interactive commands reference
- [Tutorials](/users/quickstart) - Step-by-step automation guides
- [CLI Configuration](../configuration/settings#command-line-arguments) - Complete configuration guide
- [Authentication](../configuration/settings#environment-variables-for-api-access) - Setup authentication
- [Commands](../features/commands) - Interactive commands reference
- [Tutorials](../quickstart) - Step-by-step automation guides

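As a rough illustration of the headless usage described above, the sketch below shells out to `qwen -p ... --output-format json` from a Node script. It relies only on the flags documented in this section and makes no assumption about the exact shape of the JSON result:

```ts
// Minimal sketch of driving Qwen Code headlessly from a script; assumes the
// `qwen` binary is on PATH. Illustrative only.
import { execFile } from 'node:child_process';

function runQwen(prompt: string): Promise<unknown> {
  return new Promise((resolve, reject) => {
    execFile(
      'qwen',
      ['-p', prompt, '--output-format', 'json'],
      { maxBuffer: 10 * 1024 * 1024 },
      (err, stdout) => {
        if (err) return reject(err);
        // `json` output is a machine-readable result emitted at the end of execution.
        resolve(JSON.parse(stdout));
      },
    );
  });
}

// Example: summarize the usage log mentioned in the Examples section above.
runQwen('Summarize the last 5 entries of usage.log').then(console.log);
```
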
@@ -12,6 +12,7 @@ With MCP servers connected, you can ask Qwen Code to:
- Automate workflows (repeatable tasks exposed as tools/prompts)

> [!tip]
>
> If you’re looking for the “one command to get started”, jump to [Quick start](#quick-start).

## Quick start
@@ -51,7 +52,8 @@ qwen mcp add --scope user --transport http my-server http://localhost:3000/mcp
```

> [!tip]
> For advanced configuration layers (system defaults/system settings and precedence rules), see [Settings](/users/configuration/settings).
>
> For advanced configuration layers (system defaults/system settings and precedence rules), see [Settings](../configuration/settings).

## Configure servers

@@ -64,6 +66,7 @@ qwen mcp add --scope user --transport http my-server http://localhost:3000/mcp
| `stdio` | Local process (scripts, CLIs, Docker) on your machine | `command`, `args` (+ optional `cwd`, `env`) |

> [!note]
>
> If a server supports both, prefer **HTTP** over **SSE**.

### Configure via `settings.json` vs `qwen mcp add`

@@ -220,6 +220,6 @@ qwen -s -p "run shell command: mount | grep workspace"

## Related documentation

- [Configuration](/users/configuration/settings): Full configuration options.
- [Commands](/users/reference/cli-reference): Available commands.
- [Troubleshooting](/users/support/troubleshooting): General troubleshooting.
- [Configuration](../configuration/settings): Full configuration options.
- [Commands](../features/commands): Available commands.
- [Troubleshooting](../support/troubleshooting): General troubleshooting.

@@ -16,16 +16,15 @@ The plugin **MUST** run a local HTTP server that implements the **Model Context
- **Endpoint:** The server should expose a single endpoint (e.g., `/mcp`) for all MCP communication.
- **Port:** The server **MUST** listen on a dynamically assigned port (i.e., listen on port `0`).

### 2. Discovery Mechanism: The Port File
### 2. Discovery Mechanism: The Lock File

For Qwen Code to connect, it needs to discover which IDE instance it's running in and what port your server is using. The plugin **MUST** facilitate this by creating a "discovery file."
For Qwen Code to connect, it needs to discover what port your server is using. The plugin **MUST** facilitate this by creating a "lock file" and setting the port environment variable.

- **How the CLI Finds the File:** The CLI determines the Process ID (PID) of the IDE it's running in by traversing the process tree. It then looks for a discovery file that contains this PID in its name.
- **File Location:** The file must be created in a specific directory: `os.tmpdir()/qwen/ide/`. Your plugin must create this directory if it doesn't exist.
- **How the CLI Finds the File:** The CLI reads the port from `QWEN_CODE_IDE_SERVER_PORT`, then reads `~/.qwen/ide/<PORT>.lock`. (Legacy fallbacks exist for older extensions; see note below.)
- **File Location:** The file must be created in a specific directory: `~/.qwen/ide/`. Your plugin must create this directory if it doesn't exist.
- **File Naming Convention:** The filename is critical and **MUST** follow the pattern:
  `qwen-code-ide-server-${PID}-${PORT}.json`
  - `${PID}`: The process ID of the parent IDE process. Your plugin must determine this PID and include it in the filename.
  - `${PORT}`: The port your MCP server is listening on.
  `<PORT>.lock`
  - `<PORT>`: The port your MCP server is listening on.
- **File Content & Workspace Validation:** The file **MUST** contain a JSON object with the following structure:

```json
@@ -33,21 +32,20 @@ For Qwen Code to connect, it needs to discover which IDE instance it's running i
  "port": 12345,
  "workspacePath": "/path/to/project1:/path/to/project2",
  "authToken": "a-very-secret-token",
  "ideInfo": {
    "name": "vscode",
    "displayName": "VS Code"
  }
  "ppid": 1234,
  "ideName": "VS Code"
}
```
- `port` (number, required): The port of the MCP server.
- `workspacePath` (string, required): A list of all open workspace root paths, delimited by the OS-specific path separator (`:` for Linux/macOS, `;` for Windows). The CLI uses this path to ensure it's running in the same project folder that's open in the IDE. If the CLI's current working directory is not a sub-directory of `workspacePath`, the connection will be rejected. Your plugin **MUST** provide the correct, absolute path(s) to the root of the open workspace(s).
- `authToken` (string, required): A secret token for securing the connection. The CLI will include this token in an `Authorization: Bearer <token>` header on all requests.
- `ideInfo` (object, required): Information about the IDE.
  - `name` (string, required): A short, lowercase identifier for the IDE (e.g., `vscode`, `jetbrains`).
  - `displayName` (string, required): A user-friendly name for the IDE (e.g., `VS Code`, `JetBrains IDE`).
- `ppid` (number, required): The parent process ID of the IDE process.
- `ideName` (string, required): A user-friendly name for the IDE (e.g., `VS Code`, `JetBrains IDE`).

- **Authentication:** To secure the connection, the plugin **MUST** generate a unique, secret token and include it in the discovery file. The CLI will then include this token in the `Authorization` header for all requests to the MCP server (e.g., `Authorization: Bearer a-very-secret-token`). Your server **MUST** validate this token on every request and reject any that are unauthorized.
- **Tie-Breaking with Environment Variables (Recommended):** For the most reliable experience, your plugin **SHOULD** both create the discovery file and set the `QWEN_CODE_IDE_SERVER_PORT` environment variable in the integrated terminal. The file serves as the primary discovery mechanism, but the environment variable is crucial for tie-breaking. If a user has multiple IDE windows open for the same workspace, the CLI uses the `QWEN_CODE_IDE_SERVER_PORT` variable to identify and connect to the correct window's server.
- **Environment Variables (Required):** Your plugin **MUST** set `QWEN_CODE_IDE_SERVER_PORT` in the integrated terminal so the CLI can locate the correct `<PORT>.lock` file.

**Legacy note:** For extensions older than v0.5.1, Qwen Code may fall back to reading JSON files in the system temp directory named `qwen-code-ide-server-<PID>.json` or `qwen-code-ide-server-<PORT>.json`. New integrations should not rely on these legacy files.

## II. The Context Interface

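Putting the lock-file requirements together, a plugin's discovery step might look roughly like the following TypeScript sketch; the helper name and the `ideName` value are illustrative, while the file location and shape follow the spec text above:

```ts
// Sketch of the plugin-side discovery flow: write ~/.qwen/ide/<PORT>.lock and
// remember to export QWEN_CODE_IDE_SERVER_PORT into the integrated terminal.
import * as crypto from 'node:crypto';
import * as fs from 'node:fs';
import * as os from 'node:os';
import * as path from 'node:path';

function writeIdeLockFile(port: number, workspacePaths: string[]): string {
  const lockDir = path.join(os.homedir(), '.qwen', 'ide');
  fs.mkdirSync(lockDir, { recursive: true });

  const authToken = crypto.randomUUID(); // secret the CLI must echo back as a Bearer token
  const lockFile = path.join(lockDir, `${port}.lock`);
  fs.writeFileSync(
    lockFile,
    JSON.stringify({
      port,
      workspacePath: workspacePaths.join(path.delimiter), // ':' on Linux/macOS, ';' on Windows
      authToken,
      ppid: process.ppid, // parent IDE process ID
      ideName: 'VS Code',
    }),
  );
  // The plugin must also set QWEN_CODE_IDE_SERVER_PORT=<port> in the integrated
  // terminal so the CLI knows which lock file to read.
  return authToken;
}
```
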
@@ -2,7 +2,7 @@

Qwen Code can integrate with your IDE to provide a more seamless and context-aware experience. This integration allows the CLI to understand your workspace better and enables powerful features like native in-editor diffing.

Currently, the only supported IDE is [Visual Studio Code](https://code.visualstudio.com/) and other editors that support VS Code extensions. To build support for other editors, see the [IDE Companion Extension Spec](/users/ide-integration/ide-companion-spec).
Currently, the only supported IDE is [Visual Studio Code](https://code.visualstudio.com/) and other editors that support VS Code extensions. To build support for other editors, see the [IDE Companion Extension Spec](../ide-integration/ide-companion-spec).

## Features

@@ -6,41 +6,14 @@
|
||||
|
||||
Use it to perform GitHub pull request reviews, triage issues, perform code analysis and modification, and more using [Qwen Code] conversationally (e.g., `@qwencoder fix this issue`) directly inside your GitHub repositories.
|
||||
|
||||
- [qwen-code-action](#qwen-code-action)
|
||||
- [Overview](#overview)
|
||||
- [Features](#features)
|
||||
- [Quick Start](#quick-start)
|
||||
- [1. Get a Qwen API Key](#1-get-a-qwen-api-key)
|
||||
- [2. Add it as a GitHub Secret](#2-add-it-as-a-github-secret)
|
||||
- [3. Update your .gitignore](#3-update-your-gitignore)
|
||||
- [4. Choose a Workflow](#4-choose-a-workflow)
|
||||
- [5. Try it out](#5-try-it-out)
|
||||
- [Workflows](#workflows)
|
||||
- [Qwen Code Dispatch](#qwen-code-dispatch)
|
||||
- [Issue Triage](#issue-triage)
|
||||
- [Pull Request Review](#pull-request-review)
|
||||
- [Qwen Code CLI Assistant](#qwen-code-cli-assistant)
|
||||
- [Configuration](#configuration)
|
||||
- [Inputs](#inputs)
|
||||
- [Outputs](#outputs)
|
||||
- [Repository Variables](#repository-variables)
|
||||
- [Secrets](#secrets)
|
||||
- [Authentication](#authentication)
|
||||
- [GitHub Authentication](#github-authentication)
|
||||
- [Extensions](#extensions)
|
||||
- [Best Practices](#best-practices)
|
||||
- [Customization](#customization)
|
||||
- [Contributing](#contributing)
|
||||
|
||||
## Features
|
||||
|
||||
- **Automation**: Trigger workflows based on events (e.g. issue opening) or schedules (e.g. nightly).
|
||||
- **On-demand Collaboration**: Trigger workflows in issue and pull request
|
||||
comments by mentioning the [Qwen Code CLI] (e.g., `@qwencoder /review`).
|
||||
- **Extensible with Tools**: Leverage [Qwen Code] models' tool-calling capabilities to
|
||||
interact with other CLIs like the [GitHub CLI] (`gh`).
|
||||
comments by mentioning the [Qwen Code CLI](./features/commands) (e.g., `@qwencoder /review`).
|
||||
- **Extensible with Tools**: Leverage [Qwen Code](../developers/tools/introduction.md) models' tool-calling capabilities to interact with other CLIs like the [GitHub CLI] (`gh`).
|
||||
- **Customizable**: Use a `QWEN.md` file in your repository to provide
|
||||
project-specific instructions and context to [Qwen Code CLI].
|
||||
project-specific instructions and context to [Qwen Code CLI](./features/commands).
|
||||
|
||||
## Quick Start
|
||||
|
||||
@@ -48,7 +21,7 @@ Get started with Qwen Code CLI in your repository in just a few minutes:
|
||||
|
||||
### 1. Get a Qwen API Key
|
||||
|
||||
Obtain your API key from [DashScope] (Alibaba Cloud's AI platform)
|
||||
Obtain your API key from [DashScope](https://help.aliyun.com/zh/model-studio/qwen-code) (Alibaba Cloud's AI platform)
|
||||
|
||||
### 2. Add it as a GitHub Secret
|
||||
|
||||
@@ -90,7 +63,7 @@ You have two options to set up a workflow:
|
||||
|
||||
**Option B: Manually copy workflows**
|
||||
|
||||
1. Copy the pre-built workflows from the [`examples/workflows`](./examples/workflows) directory to your repository's `.github/workflows` directory. Note: the `qwen-dispatch.yml` workflow must also be copied, which triggers the workflows to run.
|
||||
1. Copy the pre-built workflows from the [`examples/workflows`](./common-workflow) directory to your repository's `.github/workflows` directory. Note: the `qwen-dispatch.yml` workflow must also be copied, which triggers the workflows to run.
|
||||
|
||||
### 5. Try it out
|
||||
|
||||
@@ -119,30 +92,19 @@ This action provides several pre-built workflows for different use cases. Each w
|
||||
|
||||
### Qwen Code Dispatch
|
||||
|
||||
This workflow acts as a central dispatcher for Qwen Code CLI, routing requests to
|
||||
the appropriate workflow based on the triggering event and the command provided
|
||||
in the comment. For a detailed guide on how to set up the dispatch workflow, go
|
||||
to the
|
||||
[Qwen Code Dispatch workflow documentation](./examples/workflows/qwen-dispatch).
|
||||
This workflow acts as a central dispatcher for Qwen Code CLI, routing requests to the appropriate workflow based on the triggering event and the command provided in the comment. For a detailed guide on how to set up the dispatch workflow, go to the [Qwen Code Dispatch workflow documentation](./common-workflow).
|
||||
|
||||
### Issue Triage
|
||||
|
||||
This action can be used to triage GitHub Issues automatically or on a schedule.
|
||||
For a detailed guide on how to set up the issue triage system, go to the
|
||||
[GitHub Issue Triage workflow documentation](./examples/workflows/issue-triage).
|
||||
This action can be used to triage GitHub Issues automatically or on a schedule. For a detailed guide on how to set up the issue triage system, go to the [GitHub Issue Triage workflow documentation](./examples/workflows/issue-triage).
|
||||
|
||||
### Pull Request Review
|
||||
|
||||
This action can be used to automatically review pull requests when they are
|
||||
opened. For a detailed guide on how to set up the pull request review system,
|
||||
go to the [GitHub PR Review workflow documentation](./examples/workflows/pr-review).
|
||||
This action can be used to automatically review pull requests when they are opened. For a detailed guide on how to set up the pull request review system, go to the [GitHub PR Review workflow documentation](./common-workflow).
|
||||
|
||||
### Qwen Code CLI Assistant
|
||||
|
||||
This type of action can be used to invoke a general-purpose, conversational Qwen Code
|
||||
AI assistant within the pull requests and issues to perform a wide range of
|
||||
tasks. For a detailed guide on how to set up the general-purpose Qwen Code CLI workflow,
|
||||
go to the [Qwen Code Assistant workflow documentation](./examples/workflows/qwen-assistant).
|
||||
This type of action can be used to invoke a general-purpose, conversational Qwen Code AI assistant within the pull requests and issues to perform a wide range of tasks. For a detailed guide on how to set up the general-purpose Qwen Code CLI workflow, go to the [Qwen Code Assistant workflow documentation](./common-workflow).
|
||||
|
||||
## Configuration
|
||||
|
||||
@@ -222,8 +184,7 @@ To add a secret:
|
||||
2. Enter the secret name and value.
|
||||
3. Save.
|
||||
|
||||
For more information, refer to the
|
||||
[official GitHub documentation on creating and using encrypted secrets][secrets].
|
||||
For more information, refer to the [official GitHub documentation on creating and using encrypted secrets][secrets].
|
||||
|
||||
## Authentication
|
||||
|
||||
@@ -239,7 +200,7 @@ You can authenticate with GitHub in two ways:
|
||||
authentication, we recommend creating a custom GitHub App.
|
||||
|
||||
For detailed setup instructions for both Qwen and GitHub authentication, go to the
|
||||
[**Authentication documentation**](./docs/authentication.md).
|
||||
[**Authentication documentation**](./configuration/auth).
|
||||
|
||||
## Extensions
|
||||
|
||||
@@ -247,7 +208,7 @@ The Qwen Code CLI can be extended with additional functionality through extensio
|
||||
These extensions are installed from source from their GitHub repositories.
|
||||
|
||||
For detailed instructions on how to set up and configure extensions, go to the
|
||||
[Extensions documentation](./docs/extensions.md).
|
||||
[Extensions documentation](../developers/extensions/extension).
|
||||
|
||||
## Best Practices
|
||||
|
||||
@@ -258,20 +219,18 @@ Key recommendations include:
|
||||
- **Securing Your Repository:** Implementing branch and tag protection, and restricting pull request approvers.
|
||||
- **Monitoring and Auditing:** Regularly reviewing action logs and enabling OpenTelemetry for deeper insights into performance and behavior.
|
||||
|
||||
For a comprehensive guide on securing your repository and workflows, please refer to our [**Best Practices documentation**](./docs/best-practices.md).
|
||||
For a comprehensive guide on securing your repository and workflows, please refer to our [**Best Practices documentation**](./common-workflow).
|
||||
|
||||
## Customization
|
||||
|
||||
Create a [QWEN.md] file in the root of your repository to provide
|
||||
project-specific context and instructions to [Qwen Code CLI]. This is useful for defining
|
||||
Create a QWEN.md file in the root of your repository to provide
|
||||
project-specific context and instructions to [Qwen Code CLI](./common-workflow). This is useful for defining
|
||||
coding conventions, architectural patterns, or other guidelines the model should
|
||||
follow for a given repository.
|
||||
|
||||
## Contributing
|
||||
|
||||
Contributions are welcome! Check out the Qwen Code CLI
|
||||
[**Contributing Guide**](./CONTRIBUTING.md) for more details on how to get
|
||||
started.
|
||||
Contributions are welcome! Check out the Qwen Code CLI **Contributing Guide** for more details on how to get started.
|
||||
|
||||
[secrets]: https://docs.github.com/en/actions/security-guides/using-secrets-in-github-actions
|
||||
[Qwen Code]: https://github.com/QwenLM/qwen-code

@@ -4,7 +4,7 @@

<br/>

<video src="https://cloud.video.taobao.com/vod/JnvYMhUia2EKFAaiuErqNpzWE9mz3odG76vArAHNg94.mp4" controls width="800">
<video src="https://cloud.video.taobao.com/vod/IKKwfM-kqNI3OJjM_U8uMCSMAoeEcJhs6VNCQmZxUfk.mp4" controls width="800">
Your browser does not support the video tag.
</video>

@@ -7,7 +7,7 @@
### Features

- **Native agent experience**: Integrated AI assistant panel within Zed's interface
- **Agent Control Protocol**: Full support for ACP enabling advanced IDE interactions
- **Agent Client Protocol**: Full support for ACP enabling advanced IDE interactions
- **File management**: @-mention files to add them to the conversation context
- **Conversation history**: Access to past conversations within Zed

@@ -36,13 +36,13 @@ Select **Qwen OAuth (Free)** authentication and follow the prompts to log in. Th
what does this project do?
```


You'll be prompted to log in on first use. That's it! [Continue with Quickstart (5 mins) →](/users/quickstart)
You'll be prompted to log in on first use. That's it! [Continue with Quickstart (5 mins) →](./quickstart)

> [!tip]
>
> See [troubleshooting](/users/support/troubleshooting) if you hit issues.
> See [troubleshooting](./support/troubleshooting) if you hit issues.

> [!note]
>
@@ -52,11 +52,11 @@ You'll be prompted to log in on first use. That's it! [Continue with Quickstart

- **Build features from descriptions**: Tell Qwen Code what you want to build in plain language. It will make a plan, write the code, and ensure it works.
- **Debug and fix issues**: Describe a bug or paste an error message. Qwen Code will analyze your codebase, identify the problem, and implement a fix.
- **Navigate any codebase**: Ask anything about your team's codebase, and get a thoughtful answer back. Qwen Code maintains awareness of your entire project structure, can find up-to-date information from the web, and with [MCP](/users/features/mcp) can pull from external datasources like Google Drive, Figma, and Slack.
- **Navigate any codebase**: Ask anything about your team's codebase, and get a thoughtful answer back. Qwen Code maintains awareness of your entire project structure, can find up-to-date information from the web, and with [MCP](./features/mcp) can pull from external datasources like Google Drive, Figma, and Slack.
- **Automate tedious tasks**: Fix fiddly lint issues, resolve merge conflicts, and write release notes. Do all this in a single command from your developer machines, or automatically in CI.

## Why developers love Qwen Code

- **Works in your terminal**: Not another chat window. Not another IDE. Qwen Code meets you where you already work, with the tools you already love.
- **Takes action**: Qwen Code can directly edit files, run commands, and create commits. Need more? [MCP](/users/features/mcp) lets Qwen Code read your design docs in Google Drive, update your tickets in Jira, or use _your_ custom developer tooling.
- **Takes action**: Qwen Code can directly edit files, run commands, and create commits. Need more? [MCP](./features/mcp) lets Qwen Code read your design docs in Google Drive, update your tickets in Jira, or use _your_ custom developer tooling.
- **Unix philosophy**: Qwen Code is composable and scriptable. `tail -f app.log | qwen -p "Slack me if you see any anomalies appear in this log stream"` _works_. Your CI can run `qwen -p "If there are new text strings, translate them into French and raise a PR for @lang-fr-team to review"`.

@@ -206,7 +206,7 @@ Here are the most important commands for daily use:
| → `output [language]` | Set LLM output language | `/language output Chinese` |
| `/quit` | Exit Qwen Code immediately | `/quit` or `/exit` |

See the [CLI reference](/users/reference/cli-reference) for a complete list of commands.
See the [CLI reference](./features/commands) for a complete list of commands.

## Pro tips for beginners

@@ -225,9 +225,9 @@ See the [CLI reference](/users/reference/cli-reference) for a complete list of c
3. build a webpage that allows users to see and edit their information
```

**Let Claude explore first**
**Let Qwen Code explore first**

- Before making changes, let Claude understand your code:
- Before making changes, let Qwen Code understand your code:

```
analyze the database schema

@@ -23,7 +23,7 @@ When you authenticate using your qwen.ai account, these Terms of Service and Pri
- **Terms of Service:** Your use is governed by the [Qwen Terms of Service](https://qwen.ai/termsservice).
- **Privacy Notice:** The collection and use of your data is described in the [Qwen Privacy Policy](https://qwen.ai/privacypolicy).

For details about authentication setup, quotas, and supported features, see [Authentication Setup](/users/configuration/settings).
For details about authentication setup, quotas, and supported features, see [Authentication Setup](../configuration/settings).

## 2. If you are using OpenAI-Compatible API Authentication

@@ -37,7 +37,7 @@ Qwen Code supports various OpenAI-compatible providers. Please refer to your spe

## Usage Statistics and Telemetry

Qwen Code may collect anonymous usage statistics and [telemetry](/developers/development/telemetry) data to improve the user experience and product quality. This data collection is optional and can be controlled through configuration settings.
Qwen Code may collect anonymous usage statistics and [telemetry](../../developers/development/telemetry) data to improve the user experience and product quality. This data collection is optional and can be controlled through configuration settings.

### What Data is Collected

@@ -91,4 +91,4 @@ You can switch between Qwen OAuth and OpenAI-compatible API authentication at an
2. **Within the CLI**: Use the `/auth` command to reconfigure your authentication method
3. **Environment variables**: Set up `.env` files for automatic OpenAI-compatible API authentication

For detailed instructions, see the [Authentication Setup](/users/configuration/settings#environment-variables-for-api-access) documentation.
For detailed instructions, see the [Authentication Setup](../configuration/settings#environment-variables-for-api-access) documentation.

@@ -31,7 +31,7 @@ This guide provides solutions to common issues and debugging tips, including top
1. In your home directory: `~/.qwen/settings.json`.
2. In your project's root directory: `./.qwen/settings.json`.

Refer to [Qwen Code Configuration](/users/configuration/settings) for more details.
Refer to [Qwen Code Configuration](../configuration/settings) for more details.

- **Q: Why don't I see cached token counts in my stats output?**
  - A: Cached token information is only displayed when cached tokens are being used. This feature is available for API key users (Qwen API key or Google Cloud Vertex AI) but not for OAuth users (such as Google Personal/Enterprise accounts like Google Gmail or Google Workspace, respectively). This is because the Qwen Code Assist API does not support cached content creation. You can still view your total token usage using the `/stats` command.
@@ -59,7 +59,7 @@ This guide provides solutions to common issues and debugging tips, including top

- **Error: "Operation not permitted", "Permission denied", or similar.**
  - **Cause:** When sandboxing is enabled, Qwen Code may attempt operations that are restricted by your sandbox configuration, such as writing outside the project directory or system temp directory.
  - **Solution:** Refer to the [Configuration: Sandboxing](/users/features/sandbox) documentation for more information, including how to customize your sandbox configuration.
  - **Solution:** Refer to the [Configuration: Sandboxing](../features/sandbox) documentation for more information, including how to customize your sandbox configuration.

- **Qwen Code is not running in interactive mode in "CI" environments**
  - **Issue:** Qwen Code does not enter interactive mode (no prompt appears) if an environment variable starting with `CI_` (e.g. `CI_TOKEN`) is set. This is because the `is-in-ci` package, used by the underlying UI framework, detects these variables and assumes a non-interactive CI environment.

package-lock.json (generated, 2090 lines changed): file diff suppressed because it is too large.
@@ -1,6 +1,6 @@
{
  "name": "@qwen-code/qwen-code",
  "version": "0.5.1",
  "version": "0.6.0",
  "engines": {
    "node": ">=20.0.0"
  },
@@ -13,14 +13,11 @@
    "url": "git+https://github.com/QwenLM/qwen-code.git"
  },
  "config": {
    "sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.5.1"
    "sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.6.0"
  },
  "scripts": {
    "start": "cross-env node scripts/start.js",
    "debug": "cross-env DEBUG=1 node --inspect-brk scripts/start.js",
    "auth:npm": "npx google-artifactregistry-auth",
    "auth:docker": "gcloud auth configure-docker us-west1-docker.pkg.dev",
    "auth": "npm run auth:npm && npm run auth:docker",
    "generate": "node scripts/generate-git-commit-info.js",
    "build": "node scripts/build.js",
    "build-and-start": "npm run build && npm run start",
@@ -95,7 +92,6 @@
    "eslint-plugin-react-hooks": "^5.2.0",
    "glob": "^10.5.0",
    "globals": "^16.0.0",
    "google-artifactregistry-auth": "^3.4.0",
    "husky": "^9.1.7",
    "json": "^11.0.0",
    "lint-staged": "^16.1.6",

@@ -1,6 +1,6 @@
 {
   "name": "@qwen-code/qwen-code",
-  "version": "0.5.1",
+  "version": "0.6.0",
   "description": "Qwen Code",
   "repository": {
     "type": "git",
@@ -33,13 +33,13 @@
     "dist"
   ],
   "config": {
-    "sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.5.1"
+    "sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.6.0"
   },
   "dependencies": {
-    "@google/genai": "1.16.0",
+    "@google/genai": "1.30.0",
     "@iarna/toml": "^2.2.5",
     "@qwen-code/qwen-code-core": "file:../core",
-    "@modelcontextprotocol/sdk": "^1.15.1",
+    "@modelcontextprotocol/sdk": "^1.25.1",
     "@types/update-notifier": "^6.0.8",
     "ansi-regex": "^6.2.2",
     "command-exists": "^1.2.9",
@@ -64,7 +64,7 @@
     "strip-ansi": "^7.1.0",
     "strip-json-comments": "^3.1.1",
     "tar": "^7.5.2",
-    "undici": "^7.10.0",
+    "undici": "^6.22.0",
     "extract-zip": "^2.0.1",
     "update-notifier": "^7.3.1",
     "wrap-ansi": "9.0.2",
@@ -26,5 +26,23 @@ export function validateAuthMethod(authMethod: string): string | null {
     return null;
   }

+  if (authMethod === AuthType.USE_GEMINI) {
+    const hasApiKey = process.env['GEMINI_API_KEY'];
+    if (!hasApiKey) {
+      return 'GEMINI_API_KEY environment variable not found. Please set it in your .env file or environment variables.';
+    }
+    return null;
+  }
+
+  if (authMethod === AuthType.USE_VERTEX_AI) {
+    const hasApiKey = process.env['GOOGLE_API_KEY'];
+    if (!hasApiKey) {
+      return 'GOOGLE_API_KEY environment variable not found. Please set it in your .env file or environment variables.';
+    }
+
+    process.env['GOOGLE_GENAI_USE_VERTEXAI'] = 'true';
+    return null;
+  }
+
   return 'Invalid auth method selected.';
 }
@@ -206,6 +206,18 @@ describe('parseArguments', () => {
|
||||
expect(argv.prompt).toBeUndefined();
|
||||
});
|
||||
|
||||
it('should allow -r flag as alias for --resume', async () => {
|
||||
process.argv = ['node', 'script.js', '-r', 'session-123'];
|
||||
const argv = await parseArguments({} as Settings);
|
||||
expect(argv.resume).toBe('session-123');
|
||||
});
|
||||
|
||||
it('should allow -c flag as alias for --continue', async () => {
|
||||
process.argv = ['node', 'script.js', '-c'];
|
||||
const argv = await parseArguments({} as Settings);
|
||||
expect(argv.continue).toBe(true);
|
||||
});
|
||||
|
||||
it('should convert positional query argument to prompt by default', async () => {
|
||||
process.argv = ['node', 'script.js', 'Hi Gemini'];
|
||||
const argv = await parseArguments({} as Settings);
|
||||
|
||||
@@ -299,7 +299,6 @@ export async function parseArguments(settings: Settings): Promise<CliArgs> {
|
||||
'Set the approval mode: plan (plan only), default (prompt for approval), auto-edit (auto-approve edit tools), yolo (auto-approve all tools)',
|
||||
})
|
||||
.option('checkpointing', {
|
||||
alias: 'c',
|
||||
type: 'boolean',
|
||||
description: 'Enables checkpointing of file edits',
|
||||
default: false,
|
||||
@@ -422,12 +421,14 @@ export async function parseArguments(settings: Settings): Promise<CliArgs> {
|
||||
default: false,
|
||||
})
|
||||
.option('continue', {
|
||||
alias: 'c',
|
||||
type: 'boolean',
|
||||
description:
|
||||
'Resume the most recent session for the current project.',
|
||||
default: false,
|
||||
})
|
||||
.option('resume', {
|
||||
alias: 'r',
|
||||
type: 'string',
|
||||
description:
|
||||
'Resume a specific session by its ID. Use without an ID to show session picker.',
|
||||
@@ -459,7 +460,12 @@ export async function parseArguments(settings: Settings): Promise<CliArgs> {
     })
     .option('auth-type', {
       type: 'string',
-      choices: [AuthType.USE_OPENAI, AuthType.QWEN_OAUTH],
+      choices: [
+        AuthType.USE_OPENAI,
+        AuthType.QWEN_OAUTH,
+        AuthType.USE_GEMINI,
+        AuthType.USE_VERTEX_AI,
+      ],
       description: 'Authentication type',
     })
     .deprecateOption(
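With the two extra choices accepted by `--auth-type` above, a Gemini-key invocation could look roughly like the following. This is a hedged sketch: the enum string value `gemini-api-key` is inferred from the mocked config fixtures elsewhere in this diff, and the `-p`/prompt usage is illustrative rather than taken from this change.

```bash
# Assumes AuthType.USE_GEMINI serializes to "gemini-api-key" and that the key is set in the environment.
export GEMINI_API_KEY="your-gemini-api-key"
qwen --auth-type gemini-api-key -p "Summarize the open TODOs in this repo"
```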
@@ -56,6 +56,17 @@ vi.mock('simple-git', () => ({
|
||||
}),
|
||||
}));
|
||||
|
||||
vi.mock('./extensions/github.js', async (importOriginal) => {
|
||||
const actual =
|
||||
await importOriginal<typeof import('./extensions/github.js')>();
|
||||
return {
|
||||
...actual,
|
||||
downloadFromGitHubRelease: vi
|
||||
.fn()
|
||||
.mockRejectedValue(new Error('Mocked GitHub release download failure')),
|
||||
};
|
||||
});
|
||||
|
||||
vi.mock('os', async (importOriginal) => {
|
||||
const mockedOs = await importOriginal<typeof os>();
|
||||
return {
|
||||
|
||||
@@ -41,6 +41,17 @@ vi.mock('simple-git', () => ({
|
||||
}),
|
||||
}));
|
||||
|
||||
vi.mock('../extensions/github.js', async (importOriginal) => {
|
||||
const actual =
|
||||
await importOriginal<typeof import('../extensions/github.js')>();
|
||||
return {
|
||||
...actual,
|
||||
downloadFromGitHubRelease: vi
|
||||
.fn()
|
||||
.mockRejectedValue(new Error('Mocked GitHub release download failure')),
|
||||
};
|
||||
});
|
||||
|
||||
vi.mock('os', async (importOriginal) => {
|
||||
const mockedOs = await importOriginal<typeof os>();
|
||||
return {
|
||||
|
||||
@@ -659,6 +659,22 @@ const SETTINGS_SCHEMA = {
           childKey: 'disableCacheControl',
           showInDialog: true,
         },
+        schemaCompliance: {
+          type: 'enum',
+          label: 'Tool Schema Compliance',
+          category: 'Generation Configuration',
+          requiresRestart: false,
+          default: 'auto',
+          description:
+            'The compliance mode for tool schemas sent to the model. Use "openapi_30" for strict OpenAPI 3.0 compatibility (e.g., for Gemini).',
+          parentKey: 'generationConfig',
+          childKey: 'schemaCompliance',
+          showInDialog: true,
+          options: [
+            { value: 'auto', label: 'Auto (Default)' },
+            { value: 'openapi_30', label: 'OpenAPI 3.0 Strict' },
+          ],
+        },
       },
     },
   },
@@ -4,13 +4,8 @@
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
import type { Config } from '@qwen-code/qwen-code-core';
|
||||
import {
|
||||
AuthType,
|
||||
getOauthClient,
|
||||
InputFormat,
|
||||
logUserPrompt,
|
||||
} from '@qwen-code/qwen-code-core';
|
||||
import type { Config, AuthType } from '@qwen-code/qwen-code-core';
|
||||
import { InputFormat, logUserPrompt } from '@qwen-code/qwen-code-core';
|
||||
import { render } from 'ink';
|
||||
import dns from 'node:dns';
|
||||
import os from 'node:os';
|
||||
@@ -399,15 +394,6 @@ export async function main() {
|
||||
initializationResult = await initializeApp(config, settings);
|
||||
}
|
||||
|
||||
if (
|
||||
settings.merged.security?.auth?.selectedType ===
|
||||
AuthType.LOGIN_WITH_GOOGLE &&
|
||||
config.isBrowserLaunchSuppressed()
|
||||
) {
|
||||
// Do oauth before app renders to make copying the link possible.
|
||||
await getOauthClient(settings.merged.security.auth.selectedType, config);
|
||||
}
|
||||
|
||||
if (config.getExperimentalZedIntegration()) {
|
||||
return runAcpAgent(config, settings, extensions, argv);
|
||||
}
|
||||
|
||||
@@ -310,6 +310,7 @@ export default {
|
||||
'Tool Output Truncation Lines': 'Tool Output Truncation Lines',
|
||||
'Folder Trust': 'Folder Trust',
|
||||
'Vision Model Preview': 'Vision Model Preview',
|
||||
'Tool Schema Compliance': 'Tool Schema Compliance',
|
||||
// Settings enum options
|
||||
'Auto (detect from system)': 'Auto (detect from system)',
|
||||
Text: 'Text',
|
||||
|
||||
@@ -300,6 +300,7 @@ export default {
|
||||
'Tool Output Truncation Lines': '工具输出截断行数',
|
||||
'Folder Trust': '文件夹信任',
|
||||
'Vision Model Preview': '视觉模型预览',
|
||||
'Tool Schema Compliance': '工具 Schema 兼容性',
|
||||
// Settings enum options
|
||||
'Auto (detect from system)': '自动(从系统检测)',
|
||||
Text: '文本',
|
||||
|
||||
@@ -610,8 +610,6 @@ export abstract class BaseJsonOutputAdapter {
|
||||
const errorText = parseAndFormatApiError(
|
||||
event.value.error,
|
||||
this.config.getContentGeneratorConfig()?.authType,
|
||||
undefined,
|
||||
this.config.getModel(),
|
||||
);
|
||||
this.appendText(state, errorText, null);
|
||||
break;
|
||||
|
||||
@@ -221,8 +221,6 @@ export async function runNonInteractive(
|
||||
const errorText = parseAndFormatApiError(
|
||||
event.value.error,
|
||||
config.getContentGeneratorConfig()?.authType,
|
||||
undefined,
|
||||
config.getModel(),
|
||||
);
|
||||
process.stderr.write(`${errorText}\n`);
|
||||
}
|
||||
|
||||
@@ -28,7 +28,7 @@ const mockPrompt = {
|
||||
{ name: 'trail', required: false, description: "The animal's trail." },
|
||||
],
|
||||
invoke: vi.fn().mockResolvedValue({
|
||||
messages: [{ content: { text: 'Hello, world!' } }],
|
||||
messages: [{ content: { type: 'text', text: 'Hello, world!' } }],
|
||||
}),
|
||||
};
|
||||
|
||||
|
||||
@@ -123,7 +123,10 @@ export class McpPromptLoader implements ICommandLoader {
|
||||
};
|
||||
}
|
||||
|
||||
if (!result.messages?.[0]?.content?.['text']) {
|
||||
const firstMessage = result.messages?.[0];
|
||||
const content = firstMessage?.content;
|
||||
|
||||
if (content?.type !== 'text') {
|
||||
return {
|
||||
type: 'message',
|
||||
messageType: 'error',
|
||||
@@ -134,7 +137,7 @@ export class McpPromptLoader implements ICommandLoader {
|
||||
|
||||
return {
|
||||
type: 'submit_prompt',
|
||||
content: JSON.stringify(result.messages[0].content.text),
|
||||
content: JSON.stringify(content.text),
|
||||
};
|
||||
} catch (error) {
|
||||
return {
|
||||
|
||||
@@ -23,7 +23,6 @@ import {
|
||||
} from '@qwen-code/qwen-code-core';
|
||||
import type { LoadedSettings } from '../config/settings.js';
|
||||
import type { InitializationResult } from '../core/initializer.js';
|
||||
import { useQuotaAndFallback } from './hooks/useQuotaAndFallback.js';
|
||||
import { UIStateContext, type UIState } from './contexts/UIStateContext.js';
|
||||
import {
|
||||
UIActionsContext,
|
||||
@@ -56,7 +55,6 @@ vi.mock('./App.js', () => ({
|
||||
App: TestContextConsumer,
|
||||
}));
|
||||
|
||||
vi.mock('./hooks/useQuotaAndFallback.js');
|
||||
vi.mock('./hooks/useHistoryManager.js');
|
||||
vi.mock('./hooks/useThemeCommand.js');
|
||||
vi.mock('./auth/useAuth.js');
|
||||
@@ -122,7 +120,6 @@ describe('AppContainer State Management', () => {
|
||||
let mockInitResult: InitializationResult;
|
||||
|
||||
// Create typed mocks for all hooks
|
||||
const mockedUseQuotaAndFallback = useQuotaAndFallback as Mock;
|
||||
const mockedUseHistory = useHistory as Mock;
|
||||
const mockedUseThemeCommand = useThemeCommand as Mock;
|
||||
const mockedUseAuthCommand = useAuthCommand as Mock;
|
||||
@@ -164,10 +161,6 @@ describe('AppContainer State Management', () => {
|
||||
capturedUIActions = null!;
|
||||
|
||||
// **Provide a default return value for EVERY mocked hook.**
|
||||
mockedUseQuotaAndFallback.mockReturnValue({
|
||||
proQuotaRequest: null,
|
||||
handleProQuotaChoice: vi.fn(),
|
||||
});
|
||||
mockedUseHistory.mockReturnValue({
|
||||
history: [],
|
||||
addItem: vi.fn(),
|
||||
@@ -567,75 +560,6 @@ describe('AppContainer State Management', () => {
|
||||
});
|
||||
});
|
||||
|
||||
describe('Quota and Fallback Integration', () => {
|
||||
it('passes a null proQuotaRequest to UIStateContext by default', () => {
|
||||
// The default mock from beforeEach already sets proQuotaRequest to null
|
||||
render(
|
||||
<AppContainer
|
||||
config={mockConfig}
|
||||
settings={mockSettings}
|
||||
version="1.0.0"
|
||||
initializationResult={mockInitResult}
|
||||
/>,
|
||||
);
|
||||
|
||||
// Assert that the context value is as expected
|
||||
expect(capturedUIState.proQuotaRequest).toBeNull();
|
||||
});
|
||||
|
||||
it('passes a valid proQuotaRequest to UIStateContext when provided by the hook', () => {
|
||||
// Arrange: Create a mock request object that a UI dialog would receive
|
||||
const mockRequest = {
|
||||
failedModel: 'gemini-pro',
|
||||
fallbackModel: 'gemini-flash',
|
||||
resolve: vi.fn(),
|
||||
};
|
||||
mockedUseQuotaAndFallback.mockReturnValue({
|
||||
proQuotaRequest: mockRequest,
|
||||
handleProQuotaChoice: vi.fn(),
|
||||
});
|
||||
|
||||
// Act: Render the container
|
||||
render(
|
||||
<AppContainer
|
||||
config={mockConfig}
|
||||
settings={mockSettings}
|
||||
version="1.0.0"
|
||||
initializationResult={mockInitResult}
|
||||
/>,
|
||||
);
|
||||
|
||||
// Assert: The mock request is correctly passed through the context
|
||||
expect(capturedUIState.proQuotaRequest).toEqual(mockRequest);
|
||||
});
|
||||
|
||||
it('passes the handleProQuotaChoice function to UIActionsContext', () => {
|
||||
// Arrange: Create a mock handler function
|
||||
const mockHandler = vi.fn();
|
||||
mockedUseQuotaAndFallback.mockReturnValue({
|
||||
proQuotaRequest: null,
|
||||
handleProQuotaChoice: mockHandler,
|
||||
});
|
||||
|
||||
// Act: Render the container
|
||||
render(
|
||||
<AppContainer
|
||||
config={mockConfig}
|
||||
settings={mockSettings}
|
||||
version="1.0.0"
|
||||
initializationResult={mockInitResult}
|
||||
/>,
|
||||
);
|
||||
|
||||
// Assert: The action in the context is the mock handler we provided
|
||||
expect(capturedUIActions.handleProQuotaChoice).toBe(mockHandler);
|
||||
|
||||
// You can even verify that the plumbed function is callable
|
||||
capturedUIActions.handleProQuotaChoice('auth');
|
||||
expect(mockHandler).toHaveBeenCalledWith('auth');
|
||||
});
|
||||
});
|
||||
|
||||
describe('Terminal Title Update Feature', () => {
|
||||
beforeEach(() => {
|
||||
// Reset mock stdout for each test
|
||||
|
||||
@@ -32,7 +32,6 @@ import {
|
||||
type Config,
|
||||
type IdeInfo,
|
||||
type IdeContext,
|
||||
type UserTierId,
|
||||
DEFAULT_GEMINI_FLASH_MODEL,
|
||||
IdeClient,
|
||||
ideContextStore,
|
||||
@@ -48,7 +47,6 @@ import { useHistory } from './hooks/useHistoryManager.js';
|
||||
import { useMemoryMonitor } from './hooks/useMemoryMonitor.js';
|
||||
import { useThemeCommand } from './hooks/useThemeCommand.js';
|
||||
import { useAuthCommand } from './auth/useAuth.js';
|
||||
import { useQuotaAndFallback } from './hooks/useQuotaAndFallback.js';
|
||||
import { useEditorSettings } from './hooks/useEditorSettings.js';
|
||||
import { useSettingsCommand } from './hooks/useSettingsCommand.js';
|
||||
import { useModelCommand } from './hooks/useModelCommand.js';
|
||||
@@ -192,8 +190,6 @@ export const AppContainer = (props: AppContainerProps) => {
|
||||
|
||||
const [currentModel, setCurrentModel] = useState(getEffectiveModel());
|
||||
|
||||
const [userTier] = useState<UserTierId | undefined>(undefined);
|
||||
|
||||
const [isConfigInitialized, setConfigInitialized] = useState(false);
|
||||
|
||||
const [userMessages, setUserMessages] = useState<string[]>([]);
|
||||
@@ -367,14 +363,6 @@ export const AppContainer = (props: AppContainerProps) => {
|
||||
cancelAuthentication,
|
||||
} = useAuthCommand(settings, config, historyManager.addItem);
|
||||
|
||||
const { proQuotaRequest, handleProQuotaChoice } = useQuotaAndFallback({
|
||||
config,
|
||||
historyManager,
|
||||
userTier,
|
||||
setAuthState,
|
||||
setModelSwitchedFromQuotaError,
|
||||
});
|
||||
|
||||
useInitializationAuthError(initializationResult.authError, onAuthError);
|
||||
|
||||
// Sync user tier from config when authentication changes
|
||||
@@ -752,8 +740,7 @@ export const AppContainer = (props: AppContainerProps) => {
|
||||
!initError &&
|
||||
!isProcessing &&
|
||||
(streamingState === StreamingState.Idle ||
|
||||
streamingState === StreamingState.Responding) &&
|
||||
!proQuotaRequest;
|
||||
streamingState === StreamingState.Responding);
|
||||
|
||||
const [controlsHeight, setControlsHeight] = useState(0);
|
||||
|
||||
@@ -1206,7 +1193,6 @@ export const AppContainer = (props: AppContainerProps) => {
|
||||
isAuthenticating ||
|
||||
isEditorDialogOpen ||
|
||||
showIdeRestartPrompt ||
|
||||
!!proQuotaRequest ||
|
||||
isSubagentCreateDialogOpen ||
|
||||
isAgentsManagerDialogOpen ||
|
||||
isApprovalModeDialogOpen ||
|
||||
@@ -1277,8 +1263,6 @@ export const AppContainer = (props: AppContainerProps) => {
|
||||
showWorkspaceMigrationDialog,
|
||||
workspaceExtensions,
|
||||
currentModel,
|
||||
userTier,
|
||||
proQuotaRequest,
|
||||
contextFileNames,
|
||||
errorCount,
|
||||
availableTerminalHeight,
|
||||
@@ -1367,8 +1351,6 @@ export const AppContainer = (props: AppContainerProps) => {
|
||||
showAutoAcceptIndicator,
|
||||
showWorkspaceMigrationDialog,
|
||||
workspaceExtensions,
|
||||
userTier,
|
||||
proQuotaRequest,
|
||||
contextFileNames,
|
||||
errorCount,
|
||||
availableTerminalHeight,
|
||||
@@ -1430,7 +1412,6 @@ export const AppContainer = (props: AppContainerProps) => {
|
||||
handleClearScreen,
|
||||
onWorkspaceMigrationDialogOpen,
|
||||
onWorkspaceMigrationDialogClose,
|
||||
handleProQuotaChoice,
|
||||
// Vision switch dialog
|
||||
handleVisionSwitchSelect,
|
||||
// Welcome back dialog
|
||||
@@ -1468,7 +1449,6 @@ export const AppContainer = (props: AppContainerProps) => {
|
||||
handleClearScreen,
|
||||
onWorkspaceMigrationDialogOpen,
|
||||
onWorkspaceMigrationDialogClose,
|
||||
handleProQuotaChoice,
|
||||
handleVisionSwitchSelect,
|
||||
handleWelcomeBackSelection,
|
||||
handleWelcomeBackClose,
|
||||
|
||||
@@ -168,7 +168,7 @@ describe('AuthDialog', () => {
|
||||
|
||||
it('should not show the GEMINI_API_KEY message if QWEN_DEFAULT_AUTH_TYPE is set to something else', () => {
|
||||
process.env['GEMINI_API_KEY'] = 'foobar';
|
||||
process.env['QWEN_DEFAULT_AUTH_TYPE'] = AuthType.LOGIN_WITH_GOOGLE;
|
||||
process.env['QWEN_DEFAULT_AUTH_TYPE'] = AuthType.USE_OPENAI;
|
||||
|
||||
const settings: LoadedSettings = new LoadedSettings(
|
||||
{
|
||||
@@ -212,7 +212,7 @@ describe('AuthDialog', () => {
|
||||
|
||||
it('should show the GEMINI_API_KEY message if QWEN_DEFAULT_AUTH_TYPE is set to use api key', () => {
|
||||
process.env['GEMINI_API_KEY'] = 'foobar';
|
||||
process.env['QWEN_DEFAULT_AUTH_TYPE'] = AuthType.USE_GEMINI;
|
||||
process.env['QWEN_DEFAULT_AUTH_TYPE'] = AuthType.USE_OPENAI;
|
||||
|
||||
const settings: LoadedSettings = new LoadedSettings(
|
||||
{
|
||||
@@ -504,12 +504,12 @@ describe('AuthDialog', () => {
|
||||
},
|
||||
{
|
||||
settings: {
|
||||
security: { auth: { selectedType: AuthType.LOGIN_WITH_GOOGLE } },
|
||||
security: { auth: { selectedType: AuthType.USE_OPENAI } },
|
||||
ui: { customThemes: {} },
|
||||
mcpServers: {},
|
||||
},
|
||||
originalSettings: {
|
||||
security: { auth: { selectedType: AuthType.LOGIN_WITH_GOOGLE } },
|
||||
security: { auth: { selectedType: AuthType.USE_OPENAI } },
|
||||
ui: { customThemes: {} },
|
||||
mcpServers: {},
|
||||
},
|
||||
|
||||
@@ -225,16 +225,24 @@ export const useAuthCommand = (
|
||||
const defaultAuthType = process.env['QWEN_DEFAULT_AUTH_TYPE'];
|
||||
if (
|
||||
defaultAuthType &&
|
||||
![AuthType.QWEN_OAUTH, AuthType.USE_OPENAI].includes(
|
||||
defaultAuthType as AuthType,
|
||||
)
|
||||
![
|
||||
AuthType.QWEN_OAUTH,
|
||||
AuthType.USE_OPENAI,
|
||||
AuthType.USE_GEMINI,
|
||||
AuthType.USE_VERTEX_AI,
|
||||
].includes(defaultAuthType as AuthType)
|
||||
) {
|
||||
onAuthError(
|
||||
t(
|
||||
'Invalid QWEN_DEFAULT_AUTH_TYPE value: "{{value}}". Valid values are: {{validValues}}',
|
||||
{
|
||||
value: defaultAuthType,
|
||||
validValues: [AuthType.QWEN_OAUTH, AuthType.USE_OPENAI].join(', '),
|
||||
validValues: [
|
||||
AuthType.QWEN_OAUTH,
|
||||
AuthType.USE_OPENAI,
|
||||
AuthType.USE_GEMINI,
|
||||
AuthType.USE_VERTEX_AI,
|
||||
].join(', '),
|
||||
},
|
||||
),
|
||||
);
|
||||
|
||||
@@ -15,7 +15,6 @@ vi.mock('@qwen-code/qwen-code-core', async (importOriginal) => {
|
||||
const original = await importOriginal<typeof core>();
|
||||
return {
|
||||
...original,
|
||||
getOauthClient: vi.fn(original.getOauthClient),
|
||||
getIdeInstaller: vi.fn(original.getIdeInstaller),
|
||||
IdeClient: {
|
||||
getInstance: vi.fn(),
|
||||
|
||||
@@ -17,7 +17,6 @@ import { AuthDialog } from '../auth/AuthDialog.js';
|
||||
import { OpenAIKeyPrompt } from './OpenAIKeyPrompt.js';
|
||||
import { EditorSettingsDialog } from './EditorSettingsDialog.js';
|
||||
import { WorkspaceMigrationDialog } from './WorkspaceMigrationDialog.js';
|
||||
import { ProQuotaDialog } from './ProQuotaDialog.js';
|
||||
import { PermissionsModifyTrustDialog } from './PermissionsModifyTrustDialog.js';
|
||||
import { ModelDialog } from './ModelDialog.js';
|
||||
import { ApprovalModeDialog } from './ApprovalModeDialog.js';
|
||||
@@ -87,15 +86,6 @@ export const DialogManager = ({
|
||||
/>
|
||||
);
|
||||
}
|
||||
if (uiState.proQuotaRequest) {
|
||||
return (
|
||||
<ProQuotaDialog
|
||||
failedModel={uiState.proQuotaRequest.failedModel}
|
||||
fallbackModel={uiState.proQuotaRequest.fallbackModel}
|
||||
onChoice={uiActions.handleProQuotaChoice}
|
||||
/>
|
||||
);
|
||||
}
|
||||
if (uiState.shouldShowIdePrompt) {
|
||||
return (
|
||||
<IdeIntegrationNudge
|
||||
|
||||
@@ -1,91 +0,0 @@
|
||||
/**
|
||||
* @license
|
||||
* Copyright 2025 Google LLC
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
import { render } from 'ink-testing-library';
|
||||
import { describe, it, expect, vi, beforeEach, type Mock } from 'vitest';
|
||||
import { ProQuotaDialog } from './ProQuotaDialog.js';
|
||||
import { RadioButtonSelect } from './shared/RadioButtonSelect.js';
|
||||
|
||||
// Mock the child component to make it easier to test the parent
|
||||
vi.mock('./shared/RadioButtonSelect.js', () => ({
|
||||
RadioButtonSelect: vi.fn(),
|
||||
}));
|
||||
|
||||
describe('ProQuotaDialog', () => {
|
||||
beforeEach(() => {
|
||||
vi.clearAllMocks();
|
||||
});
|
||||
|
||||
it('should render with correct title and options', () => {
|
||||
const { lastFrame } = render(
|
||||
<ProQuotaDialog
|
||||
failedModel="gemini-2.5-pro"
|
||||
fallbackModel="gemini-2.5-flash"
|
||||
onChoice={() => {}}
|
||||
/>,
|
||||
);
|
||||
|
||||
const output = lastFrame();
|
||||
expect(output).toContain('Pro quota limit reached for gemini-2.5-pro.');
|
||||
|
||||
// Check that RadioButtonSelect was called with the correct items
|
||||
expect(RadioButtonSelect).toHaveBeenCalledWith(
|
||||
expect.objectContaining({
|
||||
items: [
|
||||
{
|
||||
label: 'Change auth (executes the /auth command)',
|
||||
value: 'auth',
|
||||
key: 'auth',
|
||||
},
|
||||
{
|
||||
label: `Continue with gemini-2.5-flash`,
|
||||
value: 'continue',
|
||||
key: 'continue',
|
||||
},
|
||||
],
|
||||
}),
|
||||
undefined,
|
||||
);
|
||||
});
|
||||
|
||||
it('should call onChoice with "auth" when "Change auth" is selected', () => {
|
||||
const mockOnChoice = vi.fn();
|
||||
render(
|
||||
<ProQuotaDialog
|
||||
failedModel="gemini-2.5-pro"
|
||||
fallbackModel="gemini-2.5-flash"
|
||||
onChoice={mockOnChoice}
|
||||
/>,
|
||||
);
|
||||
|
||||
// Get the onSelect function passed to RadioButtonSelect
|
||||
const onSelect = (RadioButtonSelect as Mock).mock.calls[0][0].onSelect;
|
||||
|
||||
// Simulate the selection
|
||||
onSelect('auth');
|
||||
|
||||
expect(mockOnChoice).toHaveBeenCalledWith('auth');
|
||||
});
|
||||
|
||||
it('should call onChoice with "continue" when "Continue with flash" is selected', () => {
|
||||
const mockOnChoice = vi.fn();
|
||||
render(
|
||||
<ProQuotaDialog
|
||||
failedModel="gemini-2.5-pro"
|
||||
fallbackModel="gemini-2.5-flash"
|
||||
onChoice={mockOnChoice}
|
||||
/>,
|
||||
);
|
||||
|
||||
// Get the onSelect function passed to RadioButtonSelect
|
||||
const onSelect = (RadioButtonSelect as Mock).mock.calls[0][0].onSelect;
|
||||
|
||||
// Simulate the selection
|
||||
onSelect('continue');
|
||||
|
||||
expect(mockOnChoice).toHaveBeenCalledWith('continue');
|
||||
});
|
||||
});
|
||||
@@ -1,55 +0,0 @@
|
||||
/**
|
||||
* @license
|
||||
* Copyright 2025 Google LLC
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
import type React from 'react';
|
||||
import { Box, Text } from 'ink';
|
||||
import { RadioButtonSelect } from './shared/RadioButtonSelect.js';
|
||||
import { theme } from '../semantic-colors.js';
|
||||
import { t } from '../../i18n/index.js';
|
||||
|
||||
interface ProQuotaDialogProps {
|
||||
failedModel: string;
|
||||
fallbackModel: string;
|
||||
onChoice: (choice: 'auth' | 'continue') => void;
|
||||
}
|
||||
|
||||
export function ProQuotaDialog({
|
||||
failedModel,
|
||||
fallbackModel,
|
||||
onChoice,
|
||||
}: ProQuotaDialogProps): React.JSX.Element {
|
||||
const items = [
|
||||
{
|
||||
label: t('Change auth (executes the /auth command)'),
|
||||
value: 'auth' as const,
|
||||
key: 'auth',
|
||||
},
|
||||
{
|
||||
label: t('Continue with {{model}}', { model: fallbackModel }),
|
||||
value: 'continue' as const,
|
||||
key: 'continue',
|
||||
},
|
||||
];
|
||||
|
||||
const handleSelect = (choice: 'auth' | 'continue') => {
|
||||
onChoice(choice);
|
||||
};
|
||||
|
||||
return (
|
||||
<Box borderStyle="round" flexDirection="column" paddingX={1}>
|
||||
<Text bold color={theme.status.warning}>
|
||||
{t('Pro quota limit reached for {{model}}.', { model: failedModel })}
|
||||
</Text>
|
||||
<Box marginTop={1}>
|
||||
<RadioButtonSelect
|
||||
items={items}
|
||||
initialIndex={1}
|
||||
onSelect={handleSelect}
|
||||
/>
|
||||
</Box>
|
||||
</Box>
|
||||
);
|
||||
}
|
||||
@@ -55,7 +55,6 @@ export interface UIActions {
|
||||
handleClearScreen: () => void;
|
||||
onWorkspaceMigrationDialogOpen: () => void;
|
||||
onWorkspaceMigrationDialogClose: () => void;
|
||||
handleProQuotaChoice: (choice: 'auth' | 'continue') => void;
|
||||
// Vision switch dialog
|
||||
handleVisionSwitchSelect: (outcome: VisionSwitchOutcome) => void;
|
||||
// Welcome back dialog
|
||||
|
||||
@@ -22,21 +22,13 @@ import type {
|
||||
AuthType,
|
||||
IdeContext,
|
||||
ApprovalMode,
|
||||
UserTierId,
|
||||
IdeInfo,
|
||||
FallbackIntent,
|
||||
} from '@qwen-code/qwen-code-core';
|
||||
import type { DOMElement } from 'ink';
|
||||
import type { SessionStatsState } from '../contexts/SessionContext.js';
|
||||
import type { ExtensionUpdateState } from '../state/extensions.js';
|
||||
import type { UpdateObject } from '../utils/updateCheck.js';
|
||||
|
||||
export interface ProQuotaDialogRequest {
|
||||
failedModel: string;
|
||||
fallbackModel: string;
|
||||
resolve: (intent: FallbackIntent) => void;
|
||||
}
|
||||
|
||||
import { type UseHistoryManagerReturn } from '../hooks/useHistoryManager.js';
|
||||
import { type RestartReason } from '../hooks/useIdeTrustListener.js';
|
||||
|
||||
@@ -99,8 +91,6 @@ export interface UIState {
|
||||
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
||||
workspaceExtensions: any[]; // Extension[]
|
||||
// Quota-related state
|
||||
userTier: UserTierId | undefined;
|
||||
proQuotaRequest: ProQuotaDialogRequest | null;
|
||||
currentModel: string;
|
||||
contextFileNames: string[];
|
||||
errorCount: number;
|
||||
|
||||
@@ -1323,7 +1323,7 @@ describe('useGeminiStream', () => {
|
||||
it('should call parseAndFormatApiError with the correct authType on stream initialization failure', async () => {
|
||||
// 1. Setup
|
||||
const mockError = new Error('Rate limit exceeded');
|
||||
const mockAuthType = AuthType.LOGIN_WITH_GOOGLE;
|
||||
const mockAuthType = AuthType.USE_VERTEX_AI;
|
||||
mockParseAndFormatApiError.mockClear();
|
||||
mockSendMessageStream.mockReturnValue(
|
||||
(async function* () {
|
||||
@@ -1374,9 +1374,6 @@ describe('useGeminiStream', () => {
|
||||
expect(mockParseAndFormatApiError).toHaveBeenCalledWith(
|
||||
'Rate limit exceeded',
|
||||
mockAuthType,
|
||||
undefined,
|
||||
'gemini-2.5-pro',
|
||||
'gemini-2.5-flash',
|
||||
);
|
||||
});
|
||||
});
|
||||
@@ -2493,9 +2490,6 @@ describe('useGeminiStream', () => {
|
||||
expect(mockParseAndFormatApiError).toHaveBeenCalledWith(
|
||||
{ message: 'Test error' },
|
||||
expect.any(String),
|
||||
undefined,
|
||||
'gemini-2.5-pro',
|
||||
'gemini-2.5-flash',
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
@@ -26,7 +26,6 @@ import {
|
||||
GitService,
|
||||
UnauthorizedError,
|
||||
UserPromptEvent,
|
||||
DEFAULT_GEMINI_FLASH_MODEL,
|
||||
logConversationFinishedEvent,
|
||||
ConversationFinishedEvent,
|
||||
ApprovalMode,
|
||||
@@ -600,9 +599,6 @@ export const useGeminiStream = (
|
||||
text: parseAndFormatApiError(
|
||||
eventValue.error,
|
||||
config.getContentGeneratorConfig()?.authType,
|
||||
undefined,
|
||||
config.getModel(),
|
||||
DEFAULT_GEMINI_FLASH_MODEL,
|
||||
),
|
||||
},
|
||||
userMessageTimestamp,
|
||||
@@ -654,6 +650,9 @@ export const useGeminiStream = (
|
||||
'Response stopped due to image safety violations.',
|
||||
[FinishReason.UNEXPECTED_TOOL_CALL]:
|
||||
'Response stopped due to unexpected tool call.',
|
||||
[FinishReason.IMAGE_PROHIBITED_CONTENT]:
|
||||
'Response stopped due to image prohibited content.',
|
||||
[FinishReason.NO_IMAGE]: 'Response stopped due to no image.',
|
||||
};
|
||||
|
||||
const message = finishReasonMessages[finishReason];
|
||||
@@ -770,11 +769,17 @@ export const useGeminiStream = (
|
||||
for await (const event of stream) {
|
||||
switch (event.type) {
|
||||
case ServerGeminiEventType.Thought:
|
||||
thoughtBuffer = handleThoughtEvent(
|
||||
event.value,
|
||||
thoughtBuffer,
|
||||
userMessageTimestamp,
|
||||
);
|
||||
// If the thought has a subject, it's a discrete status update rather than
|
||||
// a streamed textual thought, so we update the thought state directly.
|
||||
if (event.value.subject) {
|
||||
setThought(event.value);
|
||||
} else {
|
||||
thoughtBuffer = handleThoughtEvent(
|
||||
event.value,
|
||||
thoughtBuffer,
|
||||
userMessageTimestamp,
|
||||
);
|
||||
}
|
||||
break;
|
||||
case ServerGeminiEventType.Content:
|
||||
geminiMessageBuffer = handleContentEvent(
|
||||
@@ -845,6 +850,7 @@ export const useGeminiStream = (
|
||||
handleMaxSessionTurnsEvent,
|
||||
handleSessionTokenLimitExceededEvent,
|
||||
handleCitationEvent,
|
||||
setThought,
|
||||
],
|
||||
);
|
||||
|
||||
@@ -987,9 +993,6 @@ export const useGeminiStream = (
|
||||
text: parseAndFormatApiError(
|
||||
getErrorMessage(error) || 'Unknown error',
|
||||
config.getContentGeneratorConfig()?.authType,
|
||||
undefined,
|
||||
config.getModel(),
|
||||
DEFAULT_GEMINI_FLASH_MODEL,
|
||||
),
|
||||
},
|
||||
userMessageTimestamp,
|
||||
|
||||
@@ -1,391 +0,0 @@
|
||||
/**
|
||||
* @license
|
||||
* Copyright 2025 Google LLC
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
import {
|
||||
vi,
|
||||
describe,
|
||||
it,
|
||||
expect,
|
||||
beforeEach,
|
||||
afterEach,
|
||||
type Mock,
|
||||
} from 'vitest';
|
||||
import { act, renderHook } from '@testing-library/react';
|
||||
import {
|
||||
type Config,
|
||||
type FallbackModelHandler,
|
||||
UserTierId,
|
||||
AuthType,
|
||||
isGenericQuotaExceededError,
|
||||
isProQuotaExceededError,
|
||||
makeFakeConfig,
|
||||
} from '@qwen-code/qwen-code-core';
|
||||
import { useQuotaAndFallback } from './useQuotaAndFallback.js';
|
||||
import type { UseHistoryManagerReturn } from './useHistoryManager.js';
|
||||
import { AuthState, MessageType } from '../types.js';
|
||||
|
||||
// Mock the error checking functions from the core package to control test scenarios
|
||||
vi.mock('@qwen-code/qwen-code-core', async (importOriginal) => {
|
||||
const original =
|
||||
await importOriginal<typeof import('@qwen-code/qwen-code-core')>();
|
||||
return {
|
||||
...original,
|
||||
isGenericQuotaExceededError: vi.fn(),
|
||||
isProQuotaExceededError: vi.fn(),
|
||||
};
|
||||
});
|
||||
|
||||
// Use a type alias for SpyInstance as it's not directly exported
|
||||
type SpyInstance = ReturnType<typeof vi.spyOn>;
|
||||
|
||||
describe('useQuotaAndFallback', () => {
|
||||
let mockConfig: Config;
|
||||
let mockHistoryManager: UseHistoryManagerReturn;
|
||||
let mockSetAuthState: Mock;
|
||||
let mockSetModelSwitchedFromQuotaError: Mock;
|
||||
let setFallbackHandlerSpy: SpyInstance;
|
||||
|
||||
const mockedIsGenericQuotaExceededError = isGenericQuotaExceededError as Mock;
|
||||
const mockedIsProQuotaExceededError = isProQuotaExceededError as Mock;
|
||||
|
||||
beforeEach(() => {
|
||||
mockConfig = makeFakeConfig();
|
||||
|
||||
// Spy on the method that requires the private field and mock its return.
|
||||
// This is cleaner than modifying the config class for tests.
|
||||
vi.spyOn(mockConfig, 'getContentGeneratorConfig').mockReturnValue({
|
||||
model: 'test-model',
|
||||
authType: AuthType.LOGIN_WITH_GOOGLE,
|
||||
});
|
||||
|
||||
mockHistoryManager = {
|
||||
addItem: vi.fn(),
|
||||
history: [],
|
||||
updateItem: vi.fn(),
|
||||
clearItems: vi.fn(),
|
||||
loadHistory: vi.fn(),
|
||||
};
|
||||
mockSetAuthState = vi.fn();
|
||||
mockSetModelSwitchedFromQuotaError = vi.fn();
|
||||
|
||||
setFallbackHandlerSpy = vi.spyOn(mockConfig, 'setFallbackModelHandler');
|
||||
vi.spyOn(mockConfig, 'setQuotaErrorOccurred');
|
||||
|
||||
mockedIsGenericQuotaExceededError.mockReturnValue(false);
|
||||
mockedIsProQuotaExceededError.mockReturnValue(false);
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
vi.clearAllMocks();
|
||||
});
|
||||
|
||||
it('should register a fallback handler on initialization', () => {
|
||||
renderHook(() =>
|
||||
useQuotaAndFallback({
|
||||
config: mockConfig,
|
||||
historyManager: mockHistoryManager,
|
||||
userTier: UserTierId.FREE,
|
||||
setAuthState: mockSetAuthState,
|
||||
setModelSwitchedFromQuotaError: mockSetModelSwitchedFromQuotaError,
|
||||
}),
|
||||
);
|
||||
|
||||
expect(setFallbackHandlerSpy).toHaveBeenCalledTimes(1);
|
||||
expect(setFallbackHandlerSpy.mock.calls[0][0]).toBeInstanceOf(Function);
|
||||
});
|
||||
|
||||
describe('Fallback Handler Logic', () => {
|
||||
// Helper function to render the hook and extract the registered handler
|
||||
const getRegisteredHandler = (
|
||||
userTier: UserTierId = UserTierId.FREE,
|
||||
): FallbackModelHandler => {
|
||||
renderHook(
|
||||
(props) =>
|
||||
useQuotaAndFallback({
|
||||
config: mockConfig,
|
||||
historyManager: mockHistoryManager,
|
||||
userTier: props.userTier,
|
||||
setAuthState: mockSetAuthState,
|
||||
setModelSwitchedFromQuotaError: mockSetModelSwitchedFromQuotaError,
|
||||
}),
|
||||
{ initialProps: { userTier } },
|
||||
);
|
||||
return setFallbackHandlerSpy.mock.calls[0][0] as FallbackModelHandler;
|
||||
};
|
||||
|
||||
it('should return null and take no action if already in fallback mode', async () => {
|
||||
vi.spyOn(mockConfig, 'isInFallbackMode').mockReturnValue(true);
|
||||
const handler = getRegisteredHandler();
|
||||
const result = await handler('gemini-pro', 'gemini-flash', new Error());
|
||||
|
||||
expect(result).toBeNull();
|
||||
expect(mockHistoryManager.addItem).not.toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it('should return null and take no action if authType is not LOGIN_WITH_GOOGLE', async () => {
|
||||
// Override the default mock from beforeEach for this specific test
|
||||
vi.spyOn(mockConfig, 'getContentGeneratorConfig').mockReturnValue({
|
||||
model: 'test-model',
|
||||
authType: AuthType.USE_GEMINI,
|
||||
});
|
||||
|
||||
const handler = getRegisteredHandler();
|
||||
const result = await handler('gemini-pro', 'gemini-flash', new Error());
|
||||
|
||||
expect(result).toBeNull();
|
||||
expect(mockHistoryManager.addItem).not.toHaveBeenCalled();
|
||||
});
|
||||
|
||||
describe('Automatic Fallback Scenarios', () => {
|
||||
const testCases = [
|
||||
{
|
||||
errorType: 'generic',
|
||||
tier: UserTierId.FREE,
|
||||
expectedMessageSnippets: [
|
||||
'Automatically switching from model-A to model-B',
|
||||
'upgrade to a Gemini Code Assist Standard or Enterprise plan',
|
||||
],
|
||||
},
|
||||
{
|
||||
errorType: 'generic',
|
||||
tier: UserTierId.STANDARD, // Paid tier
|
||||
expectedMessageSnippets: [
|
||||
'Automatically switching from model-A to model-B',
|
||||
'switch to using a paid API key from AI Studio',
|
||||
],
|
||||
},
|
||||
{
|
||||
errorType: 'other',
|
||||
tier: UserTierId.FREE,
|
||||
expectedMessageSnippets: [
|
||||
'Automatically switching from model-A to model-B for faster responses',
|
||||
'upgrade to a Gemini Code Assist Standard or Enterprise plan',
|
||||
],
|
||||
},
|
||||
{
|
||||
errorType: 'other',
|
||||
tier: UserTierId.LEGACY, // Paid tier
|
||||
expectedMessageSnippets: [
|
||||
'Automatically switching from model-A to model-B for faster responses',
|
||||
'switch to using a paid API key from AI Studio',
|
||||
],
|
||||
},
|
||||
];
|
||||
|
||||
for (const { errorType, tier, expectedMessageSnippets } of testCases) {
|
||||
it(`should handle ${errorType} error for ${tier} tier correctly`, async () => {
|
||||
mockedIsGenericQuotaExceededError.mockReturnValue(
|
||||
errorType === 'generic',
|
||||
);
|
||||
|
||||
const handler = getRegisteredHandler(tier);
|
||||
const result = await handler(
|
||||
'model-A',
|
||||
'model-B',
|
||||
new Error('quota exceeded'),
|
||||
);
|
||||
|
||||
// Automatic fallbacks should return 'stop'
|
||||
expect(result).toBe('stop');
|
||||
|
||||
expect(mockHistoryManager.addItem).toHaveBeenCalledWith(
|
||||
expect.objectContaining({ type: MessageType.INFO }),
|
||||
expect.any(Number),
|
||||
);
|
||||
|
||||
const message = (mockHistoryManager.addItem as Mock).mock.calls[0][0]
|
||||
.text;
|
||||
for (const snippet of expectedMessageSnippets) {
|
||||
expect(message).toContain(snippet);
|
||||
}
|
||||
|
||||
expect(mockSetModelSwitchedFromQuotaError).toHaveBeenCalledWith(true);
|
||||
expect(mockConfig.setQuotaErrorOccurred).toHaveBeenCalledWith(true);
|
||||
});
|
||||
}
|
||||
});
|
||||
|
||||
describe('Interactive Fallback (Pro Quota Error)', () => {
|
||||
beforeEach(() => {
|
||||
mockedIsProQuotaExceededError.mockReturnValue(true);
|
||||
});
|
||||
|
||||
it('should set an interactive request and wait for user choice', async () => {
|
||||
const { result } = renderHook(() =>
|
||||
useQuotaAndFallback({
|
||||
config: mockConfig,
|
||||
historyManager: mockHistoryManager,
|
||||
userTier: UserTierId.FREE,
|
||||
setAuthState: mockSetAuthState,
|
||||
setModelSwitchedFromQuotaError: mockSetModelSwitchedFromQuotaError,
|
||||
}),
|
||||
);
|
||||
|
||||
const handler = setFallbackHandlerSpy.mock
|
||||
.calls[0][0] as FallbackModelHandler;
|
||||
|
||||
// Call the handler but do not await it, to check the intermediate state
|
||||
const promise = handler(
|
||||
'gemini-pro',
|
||||
'gemini-flash',
|
||||
new Error('pro quota'),
|
||||
);
|
||||
|
||||
await act(async () => {});
|
||||
|
||||
// The hook should now have a pending request for the UI to handle
|
||||
expect(result.current.proQuotaRequest).not.toBeNull();
|
||||
expect(result.current.proQuotaRequest?.failedModel).toBe('gemini-pro');
|
||||
|
||||
// Simulate the user choosing to continue with the fallback model
|
||||
act(() => {
|
||||
result.current.handleProQuotaChoice('continue');
|
||||
});
|
||||
|
||||
// The original promise from the handler should now resolve
|
||||
const intent = await promise;
|
||||
expect(intent).toBe('retry');
|
||||
|
||||
// The pending request should be cleared from the state
|
||||
expect(result.current.proQuotaRequest).toBeNull();
|
||||
});
|
||||
|
||||
it('should handle race conditions by stopping subsequent requests', async () => {
|
||||
const { result } = renderHook(() =>
|
||||
useQuotaAndFallback({
|
||||
config: mockConfig,
|
||||
historyManager: mockHistoryManager,
|
||||
userTier: UserTierId.FREE,
|
||||
setAuthState: mockSetAuthState,
|
||||
setModelSwitchedFromQuotaError: mockSetModelSwitchedFromQuotaError,
|
||||
}),
|
||||
);
|
||||
|
||||
const handler = setFallbackHandlerSpy.mock
|
||||
.calls[0][0] as FallbackModelHandler;
|
||||
|
||||
const promise1 = handler(
|
||||
'gemini-pro',
|
||||
'gemini-flash',
|
||||
new Error('pro quota 1'),
|
||||
);
|
||||
await act(async () => {});
|
||||
|
||||
const firstRequest = result.current.proQuotaRequest;
|
||||
expect(firstRequest).not.toBeNull();
|
||||
|
||||
const result2 = await handler(
|
||||
'gemini-pro',
|
||||
'gemini-flash',
|
||||
new Error('pro quota 2'),
|
||||
);
|
||||
|
||||
// The lock should have stopped the second request
|
||||
expect(result2).toBe('stop');
|
||||
expect(result.current.proQuotaRequest).toBe(firstRequest);
|
||||
|
||||
act(() => {
|
||||
result.current.handleProQuotaChoice('continue');
|
||||
});
|
||||
|
||||
const intent1 = await promise1;
|
||||
expect(intent1).toBe('retry');
|
||||
expect(result.current.proQuotaRequest).toBeNull();
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
describe('handleProQuotaChoice', () => {
|
||||
beforeEach(() => {
|
||||
mockedIsProQuotaExceededError.mockReturnValue(true);
|
||||
});
|
||||
|
||||
it('should do nothing if there is no pending pro quota request', () => {
|
||||
const { result } = renderHook(() =>
|
||||
useQuotaAndFallback({
|
||||
config: mockConfig,
|
||||
historyManager: mockHistoryManager,
|
||||
userTier: UserTierId.FREE,
|
||||
setAuthState: mockSetAuthState,
|
||||
setModelSwitchedFromQuotaError: mockSetModelSwitchedFromQuotaError,
|
||||
}),
|
||||
);
|
||||
|
||||
act(() => {
|
||||
result.current.handleProQuotaChoice('auth');
|
||||
});
|
||||
|
||||
expect(mockSetAuthState).not.toHaveBeenCalled();
|
||||
expect(mockHistoryManager.addItem).not.toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it('should resolve intent to "auth" and trigger auth state update', async () => {
|
||||
const { result } = renderHook(() =>
|
||||
useQuotaAndFallback({
|
||||
config: mockConfig,
|
||||
historyManager: mockHistoryManager,
|
||||
userTier: UserTierId.FREE,
|
||||
setAuthState: mockSetAuthState,
|
||||
setModelSwitchedFromQuotaError: mockSetModelSwitchedFromQuotaError,
|
||||
}),
|
||||
);
|
||||
|
||||
const handler = setFallbackHandlerSpy.mock
|
||||
.calls[0][0] as FallbackModelHandler;
|
||||
const promise = handler(
|
||||
'gemini-pro',
|
||||
'gemini-flash',
|
||||
new Error('pro quota'),
|
||||
);
|
||||
await act(async () => {}); // Allow state to update
|
||||
|
||||
act(() => {
|
||||
result.current.handleProQuotaChoice('auth');
|
||||
});
|
||||
|
||||
const intent = await promise;
|
||||
expect(intent).toBe('auth');
|
||||
expect(mockSetAuthState).toHaveBeenCalledWith(AuthState.Updating);
|
||||
expect(result.current.proQuotaRequest).toBeNull();
|
||||
});
|
||||
|
||||
it('should resolve intent to "retry" and add info message on continue', async () => {
|
||||
const { result } = renderHook(() =>
|
||||
useQuotaAndFallback({
|
||||
config: mockConfig,
|
||||
historyManager: mockHistoryManager,
|
||||
userTier: UserTierId.FREE,
|
||||
setAuthState: mockSetAuthState,
|
||||
setModelSwitchedFromQuotaError: mockSetModelSwitchedFromQuotaError,
|
||||
}),
|
||||
);
|
||||
|
||||
const handler = setFallbackHandlerSpy.mock
|
||||
.calls[0][0] as FallbackModelHandler;
|
||||
// The first `addItem` call is for the initial quota error message
|
||||
const promise = handler(
|
||||
'gemini-pro',
|
||||
'gemini-flash',
|
||||
new Error('pro quota'),
|
||||
);
|
||||
await act(async () => {}); // Allow state to update
|
||||
|
||||
act(() => {
|
||||
result.current.handleProQuotaChoice('continue');
|
||||
});
|
||||
|
||||
const intent = await promise;
|
||||
expect(intent).toBe('retry');
|
||||
expect(result.current.proQuotaRequest).toBeNull();
|
||||
|
||||
// Check for the second "Switched to fallback model" message
|
||||
expect(mockHistoryManager.addItem).toHaveBeenCalledTimes(2);
|
||||
const lastCall = (mockHistoryManager.addItem as Mock).mock.calls[1][0];
|
||||
expect(lastCall.type).toBe(MessageType.INFO);
|
||||
expect(lastCall.text).toContain('Switched to fallback model.');
|
||||
});
|
||||
});
|
||||
});
|
||||
@@ -1,175 +0,0 @@
|
||||
/**
|
||||
* @license
|
||||
* Copyright 2025 Google LLC
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
import {
|
||||
AuthType,
|
||||
type Config,
|
||||
type FallbackModelHandler,
|
||||
type FallbackIntent,
|
||||
isGenericQuotaExceededError,
|
||||
isProQuotaExceededError,
|
||||
UserTierId,
|
||||
} from '@qwen-code/qwen-code-core';
|
||||
import { useCallback, useEffect, useRef, useState } from 'react';
|
||||
import { type UseHistoryManagerReturn } from './useHistoryManager.js';
|
||||
import { AuthState, MessageType } from '../types.js';
|
||||
import { type ProQuotaDialogRequest } from '../contexts/UIStateContext.js';
|
||||
|
||||
interface UseQuotaAndFallbackArgs {
|
||||
config: Config;
|
||||
historyManager: UseHistoryManagerReturn;
|
||||
userTier: UserTierId | undefined;
|
||||
setAuthState: (state: AuthState) => void;
|
||||
setModelSwitchedFromQuotaError: (value: boolean) => void;
|
||||
}
|
||||
|
||||
export function useQuotaAndFallback({
|
||||
config,
|
||||
historyManager,
|
||||
userTier,
|
||||
setAuthState,
|
||||
setModelSwitchedFromQuotaError,
|
||||
}: UseQuotaAndFallbackArgs) {
|
||||
const [proQuotaRequest, setProQuotaRequest] =
|
||||
useState<ProQuotaDialogRequest | null>(null);
|
||||
const isDialogPending = useRef(false);
|
||||
|
||||
// Set up Flash fallback handler
|
||||
useEffect(() => {
|
||||
const fallbackHandler: FallbackModelHandler = async (
|
||||
failedModel,
|
||||
fallbackModel,
|
||||
error,
|
||||
): Promise<FallbackIntent | null> => {
|
||||
if (config.isInFallbackMode()) {
|
||||
return null;
|
||||
}
|
||||
|
||||
// Fallbacks are currently only handled for OAuth users.
|
||||
const contentGeneratorConfig = config.getContentGeneratorConfig();
|
||||
if (
|
||||
!contentGeneratorConfig ||
|
||||
contentGeneratorConfig.authType !== AuthType.LOGIN_WITH_GOOGLE
|
||||
) {
|
||||
return null;
|
||||
}
|
||||
|
||||
// Use actual user tier if available; otherwise, default to FREE tier behavior (safe default)
|
||||
const isPaidTier =
|
||||
userTier === UserTierId.LEGACY || userTier === UserTierId.STANDARD;
|
||||
|
||||
let message: string;
|
||||
|
||||
if (error && isProQuotaExceededError(error)) {
|
||||
// Pro Quota specific messages (Interactive)
|
||||
if (isPaidTier) {
|
||||
message = `⚡ You have reached your daily ${failedModel} quota limit.
|
||||
⚡ You can choose to authenticate with a paid API key or continue with the fallback model.
|
||||
⚡ To continue accessing the ${failedModel} model today, consider using /auth to switch to using a paid API key from AI Studio at https://aistudio.google.com/apikey`;
|
||||
} else {
|
||||
message = `⚡ You have reached your daily ${failedModel} quota limit.
|
||||
⚡ You can choose to authenticate with a paid API key or continue with the fallback model.
|
||||
⚡ To increase your limits, upgrade to a Gemini Code Assist Standard or Enterprise plan with higher limits at https://goo.gle/set-up-gemini-code-assist
|
||||
⚡ Or you can utilize a Gemini API Key. See: https://goo.gle/gemini-cli-docs-auth#gemini-api-key
|
||||
⚡ You can switch authentication methods by typing /auth`;
|
||||
}
|
||||
} else if (error && isGenericQuotaExceededError(error)) {
|
||||
// Generic Quota (Automatic fallback)
|
||||
const actionMessage = `⚡ You have reached your daily quota limit.\n⚡ Automatically switching from ${failedModel} to ${fallbackModel} for the remainder of this session.`;
|
||||
|
||||
if (isPaidTier) {
|
||||
message = `${actionMessage}
|
||||
⚡ To continue accessing the ${failedModel} model today, consider using /auth to switch to using a paid API key from AI Studio at https://aistudio.google.com/apikey`;
|
||||
} else {
|
||||
message = `${actionMessage}
|
||||
⚡ To increase your limits, upgrade to a Gemini Code Assist Standard or Enterprise plan with higher limits at https://goo.gle/set-up-gemini-code-assist
|
||||
⚡ Or you can utilize a Gemini API Key. See: https://goo.gle/gemini-cli-docs-auth#gemini-api-key
|
||||
⚡ You can switch authentication methods by typing /auth`;
|
||||
}
|
||||
} else {
|
||||
// Consecutive 429s or other errors (Automatic fallback)
|
||||
const actionMessage = `⚡ Automatically switching from ${failedModel} to ${fallbackModel} for faster responses for the remainder of this session.`;
|
||||
|
||||
if (isPaidTier) {
|
||||
message = `${actionMessage}
|
||||
⚡ Possible reasons for this are that you have received multiple consecutive capacity errors or you have reached your daily ${failedModel} quota limit
|
||||
⚡ To continue accessing the ${failedModel} model today, consider using /auth to switch to using a paid API key from AI Studio at https://aistudio.google.com/apikey`;
|
||||
} else {
|
||||
message = `${actionMessage}
|
||||
⚡ Possible reasons for this are that you have received multiple consecutive capacity errors or you have reached your daily ${failedModel} quota limit
|
||||
⚡ To increase your limits, upgrade to a Gemini Code Assist Standard or Enterprise plan with higher limits at https://goo.gle/set-up-gemini-code-assist
|
||||
⚡ Or you can utilize a Gemini API Key. See: https://goo.gle/gemini-cli-docs-auth#gemini-api-key
|
||||
⚡ You can switch authentication methods by typing /auth`;
|
||||
}
|
||||
}
|
||||
|
||||
// Add message to UI history
|
||||
historyManager.addItem(
|
||||
{
|
||||
type: MessageType.INFO,
|
||||
text: message,
|
||||
},
|
||||
Date.now(),
|
||||
);
|
||||
|
||||
setModelSwitchedFromQuotaError(true);
|
||||
config.setQuotaErrorOccurred(true);
|
||||
|
||||
// Interactive Fallback for Pro quota
|
||||
if (error && isProQuotaExceededError(error)) {
|
||||
if (isDialogPending.current) {
|
||||
return 'stop'; // A dialog is already active, so just stop this request.
|
||||
}
|
||||
isDialogPending.current = true;
|
||||
|
||||
const intent: FallbackIntent = await new Promise<FallbackIntent>(
|
||||
(resolve) => {
|
||||
setProQuotaRequest({
|
||||
failedModel,
|
||||
fallbackModel,
|
||||
resolve,
|
||||
});
|
||||
},
|
||||
);
|
||||
|
||||
return intent;
|
||||
}
|
||||
|
||||
return 'stop';
|
||||
};
|
||||
|
||||
config.setFallbackModelHandler(fallbackHandler);
|
||||
}, [config, historyManager, userTier, setModelSwitchedFromQuotaError]);
|
||||
|
||||
const handleProQuotaChoice = useCallback(
|
||||
(choice: 'auth' | 'continue') => {
|
||||
if (!proQuotaRequest) return;
|
||||
|
||||
const intent: FallbackIntent = choice === 'auth' ? 'auth' : 'retry';
|
||||
proQuotaRequest.resolve(intent);
|
||||
setProQuotaRequest(null);
|
||||
isDialogPending.current = false; // Reset the flag here
|
||||
|
||||
if (choice === 'auth') {
|
||||
setAuthState(AuthState.Updating);
|
||||
} else {
|
||||
historyManager.addItem(
|
||||
{
|
||||
type: MessageType.INFO,
|
||||
text: 'Switched to fallback model. Tip: Press Ctrl+P (or Up Arrow) to recall your previous prompt and submit it again if you wish.',
|
||||
},
|
||||
Date.now(),
|
||||
);
|
||||
}
|
||||
},
|
||||
[proQuotaRequest, setAuthState, historyManager],
|
||||
);
|
||||
|
||||
return {
|
||||
proQuotaRequest,
|
||||
handleProQuotaChoice,
|
||||
};
|
||||
}
|
||||
@@ -411,7 +411,7 @@ describe('useQwenAuth', () => {
|
||||
expect(geminiResult.current.qwenAuthState.authStatus).toBe('idle');
|
||||
|
||||
const { result: oauthResult } = renderHook(() =>
|
||||
useQwenAuth(AuthType.LOGIN_WITH_GOOGLE, true),
|
||||
useQwenAuth(AuthType.USE_OPENAI, true),
|
||||
);
|
||||
expect(oauthResult.current.qwenAuthState.authStatus).toBe('idle');
|
||||
});
|
||||
|
||||
@@ -62,7 +62,7 @@ const mockConfig = {
|
||||
getAllowedTools: vi.fn(() => []),
|
||||
getContentGeneratorConfig: () => ({
|
||||
model: 'test-model',
|
||||
authType: 'oauth-personal',
|
||||
authType: 'gemini-api-key',
|
||||
}),
|
||||
getUseSmartEdit: () => false,
|
||||
getUseModelRouter: () => false,
|
||||
|
||||
@@ -76,6 +76,105 @@ describe('getGitHubRepoInfo', async () => {
|
||||
);
|
||||
expect(getGitHubRepoInfo()).toEqual({ owner: 'owner', repo: 'repo' });
|
||||
});
|
||||
|
||||
// Tests for credential formats
|
||||
|
||||
it('returns the owner and repo for URL with classic PAT token (ghp_)', async () => {
|
||||
vi.mocked(child_process.execSync).mockReturnValueOnce(
|
||||
'https://ghp_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx@github.com/owner/repo.git',
|
||||
);
|
||||
expect(getGitHubRepoInfo()).toEqual({ owner: 'owner', repo: 'repo' });
|
||||
});
|
||||
|
||||
it('returns the owner and repo for URL with fine-grained PAT token (github_pat_)', async () => {
|
||||
vi.mocked(child_process.execSync).mockReturnValueOnce(
|
||||
'https://github_pat_xxxxxxxxxxxxxxxxxxxxxx_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx@github.com/owner/repo.git',
|
||||
);
|
||||
expect(getGitHubRepoInfo()).toEqual({ owner: 'owner', repo: 'repo' });
|
||||
});
|
||||
|
||||
it('returns the owner and repo for URL with username:password format', async () => {
|
||||
vi.mocked(child_process.execSync).mockReturnValueOnce(
|
||||
'https://username:password@github.com/owner/repo.git',
|
||||
);
|
||||
expect(getGitHubRepoInfo()).toEqual({ owner: 'owner', repo: 'repo' });
|
||||
});
|
||||
|
||||
it('returns the owner and repo for URL with OAuth token (oauth2:token)', async () => {
|
||||
vi.mocked(child_process.execSync).mockReturnValueOnce(
|
||||
'https://oauth2:gho_xxxxxxxxxxxx@github.com/owner/repo.git',
|
||||
);
|
||||
expect(getGitHubRepoInfo()).toEqual({ owner: 'owner', repo: 'repo' });
|
||||
});
|
||||
|
||||
it('returns the owner and repo for URL with GitHub Actions token (x-access-token)', async () => {
|
||||
vi.mocked(child_process.execSync).mockReturnValueOnce(
|
||||
'https://x-access-token:ghs_xxxxxxxxxxxx@github.com/owner/repo.git',
|
||||
);
|
||||
expect(getGitHubRepoInfo()).toEqual({ owner: 'owner', repo: 'repo' });
|
||||
});
|
||||
|
||||
// Tests for case insensitivity
|
||||
|
||||
it('returns the owner and repo for URL with uppercase GITHUB.COM', async () => {
|
||||
vi.mocked(child_process.execSync).mockReturnValueOnce(
|
||||
'https://GITHUB.COM/owner/repo.git',
|
||||
);
|
||||
expect(getGitHubRepoInfo()).toEqual({ owner: 'owner', repo: 'repo' });
|
||||
});
|
||||
|
||||
it('returns the owner and repo for URL with mixed case GitHub.Com', async () => {
|
||||
vi.mocked(child_process.execSync).mockReturnValueOnce(
|
||||
'https://GitHub.Com/owner/repo.git',
|
||||
);
|
||||
expect(getGitHubRepoInfo()).toEqual({ owner: 'owner', repo: 'repo' });
|
||||
});
|
||||
|
||||
// Tests for SSH format
|
||||
|
||||
it('returns the owner and repo for SSH URL', async () => {
|
||||
vi.mocked(child_process.execSync).mockReturnValueOnce(
|
||||
'git@github.com:owner/repo.git',
|
||||
);
|
||||
expect(getGitHubRepoInfo()).toEqual({ owner: 'owner', repo: 'repo' });
|
||||
});
|
||||
|
||||
it('throws for non-GitHub SSH URL', async () => {
|
||||
vi.mocked(child_process.execSync).mockReturnValueOnce(
|
||||
'git@gitlab.com:owner/repo.git',
|
||||
);
|
||||
expect(() => {
|
||||
getGitHubRepoInfo();
|
||||
}).toThrowError(/Owner & repo could not be extracted from remote URL/);
|
||||
});
|
||||
|
||||
// Tests for edge cases
|
||||
|
||||
it('returns the owner and repo for URL without .git suffix', async () => {
|
||||
vi.mocked(child_process.execSync).mockReturnValueOnce(
|
||||
'https://github.com/owner/repo',
|
||||
);
|
||||
expect(getGitHubRepoInfo()).toEqual({ owner: 'owner', repo: 'repo' });
|
||||
});
|
||||
|
||||
it('throws for non-GitHub HTTPS URL', async () => {
|
||||
vi.mocked(child_process.execSync).mockReturnValueOnce(
|
||||
'https://gitlab.com/owner/repo.git',
|
||||
);
|
||||
expect(() => {
|
||||
getGitHubRepoInfo();
|
||||
}).toThrowError(/Owner & repo could not be extracted from remote URL/);
|
||||
});
|
||||
|
||||
it('handles repo names containing .git substring', async () => {
|
||||
vi.mocked(child_process.execSync).mockReturnValueOnce(
|
||||
'https://github.com/owner/my.git.repo.git',
|
||||
);
|
||||
expect(getGitHubRepoInfo()).toEqual({
|
||||
owner: 'owner',
|
||||
repo: 'my.git.repo',
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
describe('getGitRepoRoot', async () => {
|
||||
|
||||
@@ -103,17 +103,38 @@ export function getGitHubRepoInfo(): { owner: string; repo: string } {
|
||||
encoding: 'utf-8',
|
||||
}).trim();
|
||||
|
||||
// Matches either https://github.com/owner/repo.git or git@github.com:owner/repo.git
|
||||
const match = remoteUrl.match(
|
||||
/(?:https?:\/\/|git@)github\.com(?::|\/)([^/]+)\/([^/]+?)(?:\.git)?$/,
|
||||
);
|
||||
|
||||
// If the regex fails match, throw an error.
|
||||
if (!match || !match[1] || !match[2]) {
|
||||
// Handle SCP-style SSH URLs (git@github.com:owner/repo.git)
|
||||
let urlToParse = remoteUrl;
|
||||
if (remoteUrl.startsWith('git@github.com:')) {
|
||||
urlToParse = remoteUrl.replace('git@github.com:', '');
|
||||
} else if (remoteUrl.startsWith('git@')) {
|
||||
// SSH URL for a different provider (GitLab, Bitbucket, etc.)
|
||||
throw new Error(
|
||||
`Owner & repo could not be extracted from remote URL: ${remoteUrl}`,
|
||||
);
|
||||
}
|
||||
|
||||
return { owner: match[1], repo: match[2] };
|
||||
let parsedUrl: URL;
|
||||
try {
|
||||
parsedUrl = new URL(urlToParse, 'https://github.com');
|
||||
} catch {
|
||||
throw new Error(
|
||||
`Owner & repo could not be extracted from remote URL: ${remoteUrl}`,
|
||||
);
|
||||
}
|
||||
|
||||
if (parsedUrl.host !== 'github.com') {
|
||||
throw new Error(
|
||||
`Owner & repo could not be extracted from remote URL: ${remoteUrl}`,
|
||||
);
|
||||
}
|
||||
|
||||
const parts = parsedUrl.pathname.split('/').filter((part) => part !== '');
|
||||
if (parts.length !== 2 || !parts[0] || !parts[1]) {
|
||||
throw new Error(
|
||||
`Owner & repo could not be extracted from remote URL: ${remoteUrl}`,
|
||||
);
|
||||
}
|
||||
|
||||
return { owner: parts[0], repo: parts[1].replace(/\.git$/, '') };
|
||||
}
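// Illustrative sketch (assumed behavior of the URL-based parsing above, mirroring the
// tests earlier in this file; values are inferred, not captured from a run):
//
//   'git@github.com:owner/repo.git'            -> SCP prefix stripped, parsed as a GitHub path
//                                                 -> { owner: 'owner', repo: 'repo' }
//   'https://GitHub.Com/owner/repo.git'        -> the URL parser lowercases the host to 'github.com'
//                                                 -> { owner: 'owner', repo: 'repo' }
//   'https://github.com/owner/my.git.repo.git' -> only the trailing '.git' is removed
//                                                 -> { owner: 'owner', repo: 'my.git.repo' }
//   'https://gitlab.com/owner/repo.git'        -> host !== 'github.com'
//                                                 -> throws 'Owner & repo could not be extracted from remote URL: ...'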
|
||||
|
||||
@@ -21,6 +21,13 @@ function getAuthTypeFromEnv(): AuthType | undefined {
|
||||
return AuthType.QWEN_OAUTH;
|
||||
}
|
||||
|
||||
if (process.env['GEMINI_API_KEY']) {
|
||||
return AuthType.USE_GEMINI;
|
||||
}
|
||||
if (process.env['GOOGLE_API_KEY']) {
|
||||
return AuthType.USE_VERTEX_AI;
|
||||
}
|
||||
|
||||
return undefined;
|
||||
}
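// Minimal usage sketch (assumed from the order of the checks above): the environment
// variables are consulted in sequence, so GEMINI_API_KEY wins over GOOGLE_API_KEY when
// both are set.
//
//   process.env['GEMINI_API_KEY'] = 'x';
//   process.env['GOOGLE_API_KEY'] = 'y';
//   getAuthTypeFromEnv(); // -> AuthType.USE_GEMINI
//
//   delete process.env['GEMINI_API_KEY'];
//   getAuthTypeFromEnv(); // -> AuthType.USE_VERTEX_AI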
|
||||
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "@qwen-code/qwen-code-core",
|
||||
"version": "0.5.1",
|
||||
"version": "0.6.0",
|
||||
"description": "Qwen Code Core",
|
||||
"repository": {
|
||||
"type": "git",
|
||||
@@ -23,8 +23,8 @@
|
||||
"scripts/postinstall.js"
|
||||
],
|
||||
"dependencies": {
|
||||
"@google/genai": "1.16.0",
|
||||
"@modelcontextprotocol/sdk": "^1.11.0",
|
||||
"@google/genai": "1.30.0",
|
||||
"@modelcontextprotocol/sdk": "^1.25.1",
|
||||
"@opentelemetry/api": "^1.9.0",
|
||||
"async-mutex": "^0.5.0",
|
||||
"@opentelemetry/exporter-logs-otlp-grpc": "^0.203.0",
|
||||
@@ -34,7 +34,6 @@
|
||||
"@opentelemetry/exporter-trace-otlp-grpc": "^0.203.0",
|
||||
"@opentelemetry/exporter-trace-otlp-http": "^0.203.0",
|
||||
"@opentelemetry/instrumentation-http": "^0.203.0",
|
||||
"@opentelemetry/resource-detector-gcp": "^0.40.0",
|
||||
"@opentelemetry/sdk-node": "^0.203.0",
|
||||
"@types/html-to-text": "^9.0.4",
|
||||
"@xterm/headless": "5.5.0",
|
||||
@@ -48,7 +47,7 @@
|
||||
"fdir": "^6.4.6",
|
||||
"fzf": "^0.5.2",
|
||||
"glob": "^10.5.0",
|
||||
"google-auth-library": "^9.11.0",
|
||||
"google-auth-library": "^10.5.0",
|
||||
"html-to-text": "^9.0.5",
|
||||
"https-proxy-agent": "^7.0.6",
|
||||
"ignore": "^7.0.0",
|
||||
@@ -63,7 +62,7 @@
|
||||
"simple-git": "^3.28.0",
|
||||
"strip-ansi": "^7.1.0",
|
||||
"tiktoken": "^1.0.21",
|
||||
"undici": "^7.10.0",
|
||||
"undici": "^6.22.0",
|
||||
"uuid": "^9.0.1",
|
||||
"ws": "^8.18.0"
|
||||
},
|
||||
|
||||
@@ -1,54 +0,0 @@
|
||||
/**
|
||||
* @license
|
||||
* Copyright 2025 Google LLC
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
import type { ContentGenerator } from '../core/contentGenerator.js';
|
||||
import { AuthType } from '../core/contentGenerator.js';
|
||||
import { getOauthClient } from './oauth2.js';
|
||||
import { setupUser } from './setup.js';
|
||||
import type { HttpOptions } from './server.js';
|
||||
import { CodeAssistServer } from './server.js';
|
||||
import type { Config } from '../config/config.js';
|
||||
import { LoggingContentGenerator } from '../core/loggingContentGenerator.js';
|
||||
|
||||
export async function createCodeAssistContentGenerator(
|
||||
httpOptions: HttpOptions,
|
||||
authType: AuthType,
|
||||
config: Config,
|
||||
sessionId?: string,
|
||||
): Promise<ContentGenerator> {
|
||||
if (
|
||||
authType === AuthType.LOGIN_WITH_GOOGLE ||
|
||||
authType === AuthType.CLOUD_SHELL
|
||||
) {
|
||||
const authClient = await getOauthClient(authType, config);
|
||||
const userData = await setupUser(authClient);
|
||||
return new CodeAssistServer(
|
||||
authClient,
|
||||
userData.projectId,
|
||||
httpOptions,
|
||||
sessionId,
|
||||
userData.userTier,
|
||||
);
|
||||
}
|
||||
|
||||
throw new Error(`Unsupported authType: ${authType}`);
|
||||
}
|
||||
|
||||
export function getCodeAssistServer(
|
||||
config: Config,
|
||||
): CodeAssistServer | undefined {
|
||||
let server = config.getContentGenerator();
|
||||
|
||||
// Unwrap LoggingContentGenerator if present
|
||||
if (server instanceof LoggingContentGenerator) {
|
||||
server = server.getWrapped();
|
||||
}
|
||||
|
||||
if (!(server instanceof CodeAssistServer)) {
|
||||
return undefined;
|
||||
}
|
||||
return server;
|
||||
}
|
||||
@@ -1,456 +0,0 @@
|
||||
/**
|
||||
* @license
|
||||
* Copyright 2025 Google LLC
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
import { describe, it, expect } from 'vitest';
|
||||
import type { CaGenerateContentResponse } from './converter.js';
|
||||
import {
|
||||
toGenerateContentRequest,
|
||||
fromGenerateContentResponse,
|
||||
toContents,
|
||||
} from './converter.js';
|
||||
import type {
|
||||
ContentListUnion,
|
||||
GenerateContentParameters,
|
||||
} from '@google/genai';
|
||||
import {
|
||||
GenerateContentResponse,
|
||||
FinishReason,
|
||||
BlockedReason,
|
||||
type Part,
|
||||
} from '@google/genai';
|
||||
|
||||
describe('converter', () => {
|
||||
describe('toCodeAssistRequest', () => {
|
||||
it('should convert a simple request with project', () => {
|
||||
const genaiReq: GenerateContentParameters = {
|
||||
model: 'gemini-pro',
|
||||
contents: [{ role: 'user', parts: [{ text: 'Hello' }] }],
|
||||
};
|
||||
const codeAssistReq = toGenerateContentRequest(
|
||||
genaiReq,
|
||||
'my-prompt',
|
||||
'my-project',
|
||||
'my-session',
|
||||
);
|
||||
expect(codeAssistReq).toEqual({
|
||||
model: 'gemini-pro',
|
||||
project: 'my-project',
|
||||
request: {
|
||||
contents: [{ role: 'user', parts: [{ text: 'Hello' }] }],
|
||||
systemInstruction: undefined,
|
||||
cachedContent: undefined,
|
||||
tools: undefined,
|
||||
toolConfig: undefined,
|
||||
labels: undefined,
|
||||
safetySettings: undefined,
|
||||
generationConfig: undefined,
|
||||
session_id: 'my-session',
|
||||
},
|
||||
user_prompt_id: 'my-prompt',
|
||||
});
|
||||
});
|
||||
|
||||
it('should convert a request without a project', () => {
|
||||
const genaiReq: GenerateContentParameters = {
|
||||
model: 'gemini-pro',
|
||||
contents: [{ role: 'user', parts: [{ text: 'Hello' }] }],
|
||||
};
|
||||
const codeAssistReq = toGenerateContentRequest(
|
||||
genaiReq,
|
||||
'my-prompt',
|
||||
undefined,
|
||||
'my-session',
|
||||
);
|
||||
expect(codeAssistReq).toEqual({
|
||||
model: 'gemini-pro',
|
||||
project: undefined,
|
||||
request: {
|
||||
contents: [{ role: 'user', parts: [{ text: 'Hello' }] }],
|
||||
systemInstruction: undefined,
|
||||
cachedContent: undefined,
|
||||
tools: undefined,
|
||||
toolConfig: undefined,
|
||||
labels: undefined,
|
||||
safetySettings: undefined,
|
||||
generationConfig: undefined,
|
||||
session_id: 'my-session',
|
||||
},
|
||||
user_prompt_id: 'my-prompt',
|
||||
});
|
||||
});
|
||||
|
||||
it('should convert a request with sessionId', () => {
|
||||
const genaiReq: GenerateContentParameters = {
|
||||
model: 'gemini-pro',
|
||||
contents: [{ role: 'user', parts: [{ text: 'Hello' }] }],
|
||||
};
|
||||
const codeAssistReq = toGenerateContentRequest(
|
||||
genaiReq,
|
||||
'my-prompt',
|
||||
'my-project',
|
||||
'session-123',
|
||||
);
|
||||
expect(codeAssistReq).toEqual({
|
||||
model: 'gemini-pro',
|
||||
project: 'my-project',
|
||||
request: {
|
||||
contents: [{ role: 'user', parts: [{ text: 'Hello' }] }],
|
||||
systemInstruction: undefined,
|
||||
cachedContent: undefined,
|
||||
tools: undefined,
|
||||
toolConfig: undefined,
|
||||
labels: undefined,
|
||||
safetySettings: undefined,
|
||||
generationConfig: undefined,
|
||||
session_id: 'session-123',
|
||||
},
|
||||
user_prompt_id: 'my-prompt',
|
||||
});
|
||||
});
|
||||
|
||||
it('should handle string content', () => {
|
||||
const genaiReq: GenerateContentParameters = {
|
||||
model: 'gemini-pro',
|
||||
contents: 'Hello',
|
||||
};
|
||||
const codeAssistReq = toGenerateContentRequest(
|
||||
genaiReq,
|
||||
'my-prompt',
|
||||
'my-project',
|
||||
'my-session',
|
||||
);
|
||||
expect(codeAssistReq.request.contents).toEqual([
|
||||
{ role: 'user', parts: [{ text: 'Hello' }] },
|
||||
]);
|
||||
});
|
||||
|
||||
it('should handle Part[] content', () => {
|
||||
const genaiReq: GenerateContentParameters = {
|
||||
model: 'gemini-pro',
|
||||
contents: [{ text: 'Hello' }, { text: 'World' }],
|
||||
};
|
||||
const codeAssistReq = toGenerateContentRequest(
|
||||
genaiReq,
|
||||
'my-prompt',
|
||||
'my-project',
|
||||
'my-session',
|
||||
);
|
||||
expect(codeAssistReq.request.contents).toEqual([
|
||||
{ role: 'user', parts: [{ text: 'Hello' }] },
|
||||
{ role: 'user', parts: [{ text: 'World' }] },
|
||||
]);
|
||||
});
|
||||
|
||||
it('should handle system instructions', () => {
|
||||
const genaiReq: GenerateContentParameters = {
|
||||
model: 'gemini-pro',
|
||||
contents: 'Hello',
|
||||
config: {
|
||||
systemInstruction: 'You are a helpful assistant.',
|
||||
},
|
||||
};
|
||||
const codeAssistReq = toGenerateContentRequest(
|
||||
genaiReq,
|
||||
'my-prompt',
|
||||
'my-project',
|
||||
'my-session',
|
||||
);
|
||||
expect(codeAssistReq.request.systemInstruction).toEqual({
|
||||
role: 'user',
|
||||
parts: [{ text: 'You are a helpful assistant.' }],
|
||||
});
|
||||
});
|
||||
|
||||
it('should handle generation config', () => {
|
||||
const genaiReq: GenerateContentParameters = {
|
||||
model: 'gemini-pro',
|
||||
contents: 'Hello',
|
||||
config: {
|
||||
temperature: 0.8,
|
||||
topK: 40,
|
||||
},
|
||||
};
|
||||
const codeAssistReq = toGenerateContentRequest(
|
||||
genaiReq,
|
||||
'my-prompt',
|
||||
'my-project',
|
||||
'my-session',
|
||||
);
|
||||
expect(codeAssistReq.request.generationConfig).toEqual({
|
||||
temperature: 0.8,
|
||||
topK: 40,
|
||||
});
|
||||
});
|
||||
|
||||
it('should handle all generation config fields', () => {
|
||||
const genaiReq: GenerateContentParameters = {
|
||||
model: 'gemini-pro',
|
||||
contents: 'Hello',
|
||||
config: {
|
||||
temperature: 0.1,
|
||||
topP: 0.2,
|
||||
topK: 3,
|
||||
candidateCount: 4,
|
||||
maxOutputTokens: 5,
|
||||
stopSequences: ['a'],
|
||||
responseLogprobs: true,
|
||||
logprobs: 6,
|
||||
presencePenalty: 0.7,
|
||||
frequencyPenalty: 0.8,
|
||||
seed: 9,
|
||||
responseMimeType: 'application/json',
|
||||
},
|
||||
};
|
||||
const codeAssistReq = toGenerateContentRequest(
|
||||
genaiReq,
|
||||
'my-prompt',
|
||||
'my-project',
|
||||
'my-session',
|
||||
);
|
||||
expect(codeAssistReq.request.generationConfig).toEqual({
|
||||
temperature: 0.1,
|
||||
topP: 0.2,
|
||||
topK: 3,
|
||||
candidateCount: 4,
|
||||
maxOutputTokens: 5,
|
||||
stopSequences: ['a'],
|
||||
responseLogprobs: true,
|
||||
logprobs: 6,
|
||||
presencePenalty: 0.7,
|
||||
frequencyPenalty: 0.8,
|
||||
seed: 9,
|
||||
responseMimeType: 'application/json',
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
describe('fromCodeAssistResponse', () => {
|
||||
it('should convert a simple response', () => {
|
||||
const codeAssistRes: CaGenerateContentResponse = {
|
||||
response: {
|
||||
candidates: [
|
||||
{
|
||||
index: 0,
|
||||
content: {
|
||||
role: 'model',
|
||||
parts: [{ text: 'Hi there!' }],
|
||||
},
|
||||
finishReason: FinishReason.STOP,
|
||||
safetyRatings: [],
|
||||
},
|
||||
],
|
||||
},
|
||||
};
|
||||
const genaiRes = fromGenerateContentResponse(codeAssistRes);
|
||||
expect(genaiRes).toBeInstanceOf(GenerateContentResponse);
|
||||
expect(genaiRes.candidates).toEqual(codeAssistRes.response.candidates);
|
||||
});
|
||||
|
||||
it('should handle prompt feedback and usage metadata', () => {
|
||||
const codeAssistRes: CaGenerateContentResponse = {
|
||||
response: {
|
||||
candidates: [],
|
||||
promptFeedback: {
|
||||
blockReason: BlockedReason.SAFETY,
|
||||
safetyRatings: [],
|
||||
},
|
||||
usageMetadata: {
|
||||
promptTokenCount: 10,
|
||||
candidatesTokenCount: 20,
|
||||
totalTokenCount: 30,
|
||||
},
|
||||
},
|
||||
};
|
||||
const genaiRes = fromGenerateContentResponse(codeAssistRes);
|
||||
expect(genaiRes.promptFeedback).toEqual(
|
||||
codeAssistRes.response.promptFeedback,
|
||||
);
|
||||
expect(genaiRes.usageMetadata).toEqual(
|
||||
codeAssistRes.response.usageMetadata,
|
||||
);
|
||||
});
|
||||
|
||||
it('should handle automatic function calling history', () => {
|
||||
const codeAssistRes: CaGenerateContentResponse = {
|
||||
response: {
|
||||
candidates: [],
|
||||
automaticFunctionCallingHistory: [
|
||||
{
|
||||
role: 'model',
|
||||
parts: [
|
||||
{
|
||||
functionCall: {
|
||||
name: 'test_function',
|
||||
args: {
|
||||
foo: 'bar',
|
||||
},
|
||||
},
|
||||
},
|
||||
],
|
||||
},
|
||||
],
|
||||
},
|
||||
};
|
||||
const genaiRes = fromGenerateContentResponse(codeAssistRes);
|
||||
expect(genaiRes.automaticFunctionCallingHistory).toEqual(
|
||||
codeAssistRes.response.automaticFunctionCallingHistory,
|
||||
);
|
||||
});
|
||||
|
||||
it('should handle modelVersion', () => {
|
||||
const codeAssistRes: CaGenerateContentResponse = {
|
||||
response: {
|
||||
candidates: [],
|
||||
modelVersion: 'qwen3-coder-plus',
|
||||
},
|
||||
};
|
||||
const genaiRes = fromGenerateContentResponse(codeAssistRes);
|
||||
expect(genaiRes.modelVersion).toEqual('qwen3-coder-plus');
|
||||
});
|
||||
});
|
||||
|
||||
describe('toContents', () => {
|
||||
it('should handle Content', () => {
|
||||
const content: ContentListUnion = {
|
||||
role: 'user',
|
||||
parts: [{ text: 'hello' }],
|
||||
};
|
||||
expect(toContents(content)).toEqual([
|
||||
{ role: 'user', parts: [{ text: 'hello' }] },
|
||||
]);
|
||||
});
|
||||
|
||||
it('should handle array of Contents', () => {
|
||||
const contents: ContentListUnion = [
|
||||
{ role: 'user', parts: [{ text: 'hello' }] },
|
||||
{ role: 'model', parts: [{ text: 'hi' }] },
|
||||
];
|
||||
expect(toContents(contents)).toEqual([
|
||||
{ role: 'user', parts: [{ text: 'hello' }] },
|
||||
{ role: 'model', parts: [{ text: 'hi' }] },
|
||||
]);
|
||||
});
|
||||
|
||||
it('should handle Part', () => {
|
||||
const part: ContentListUnion = { text: 'a part' };
|
||||
expect(toContents(part)).toEqual([
|
||||
{ role: 'user', parts: [{ text: 'a part' }] },
|
||||
]);
|
||||
});
|
||||
|
||||
it('should handle array of Parts', () => {
|
||||
const parts = [{ text: 'part 1' }, 'part 2'];
|
||||
expect(toContents(parts)).toEqual([
|
||||
{ role: 'user', parts: [{ text: 'part 1' }] },
|
||||
{ role: 'user', parts: [{ text: 'part 2' }] },
|
||||
]);
|
||||
});
|
||||
|
||||
it('should handle string', () => {
|
||||
const str: ContentListUnion = 'a string';
|
||||
expect(toContents(str)).toEqual([
|
||||
{ role: 'user', parts: [{ text: 'a string' }] },
|
||||
]);
|
||||
});
|
||||
|
||||
it('should handle array of strings', () => {
|
||||
const strings: ContentListUnion = ['string 1', 'string 2'];
|
||||
expect(toContents(strings)).toEqual([
|
||||
{ role: 'user', parts: [{ text: 'string 1' }] },
|
||||
{ role: 'user', parts: [{ text: 'string 2' }] },
|
||||
]);
|
||||
});
|
||||
|
||||
it('should convert thought parts to text parts for API compatibility', () => {
|
||||
const contentWithThought: ContentListUnion = {
|
||||
role: 'model',
|
||||
parts: [
|
||||
{ text: 'regular text' },
|
||||
{ thought: 'thinking about the problem' } as Part & {
|
||||
thought: string;
|
||||
},
|
||||
{ text: 'more text' },
|
||||
],
|
||||
};
|
||||
expect(toContents(contentWithThought)).toEqual([
|
||||
{
|
||||
role: 'model',
|
||||
parts: [
|
||||
{ text: 'regular text' },
|
||||
{ text: '[Thought: thinking about the problem]' },
|
||||
{ text: 'more text' },
|
||||
],
|
||||
},
|
||||
]);
|
||||
});
|
||||
|
||||
it('should combine text and thought for text parts with thoughts', () => {
|
||||
const contentWithTextAndThought: ContentListUnion = {
|
||||
role: 'model',
|
||||
parts: [
|
||||
{
|
||||
text: 'Here is my response',
|
||||
thought: 'I need to be careful here',
|
||||
} as Part & { thought: string },
|
||||
],
|
||||
};
|
||||
expect(toContents(contentWithTextAndThought)).toEqual([
|
||||
{
|
||||
role: 'model',
|
||||
parts: [
|
||||
{
|
||||
text: 'Here is my response\n[Thought: I need to be careful here]',
|
||||
},
|
||||
],
|
||||
},
|
||||
]);
|
||||
});
|
||||
|
||||
it('should preserve non-thought properties while removing thought', () => {
|
||||
const contentWithComplexPart: ContentListUnion = {
|
||||
role: 'model',
|
||||
parts: [
|
||||
{
|
||||
functionCall: { name: 'calculate', args: { x: 5, y: 10 } },
|
||||
thought: 'Performing calculation',
|
||||
} as Part & { thought: string },
|
||||
],
|
||||
};
|
||||
expect(toContents(contentWithComplexPart)).toEqual([
|
||||
{
|
||||
role: 'model',
|
||||
parts: [
|
||||
{
|
||||
functionCall: { name: 'calculate', args: { x: 5, y: 10 } },
|
||||
},
|
||||
],
|
||||
},
|
||||
]);
|
||||
});
|
||||
|
||||
it('should convert invalid text content to valid text part with thought', () => {
|
||||
const contentWithInvalidText: ContentListUnion = {
|
||||
role: 'model',
|
||||
parts: [
|
||||
{
|
||||
text: 123, // Invalid - should be string
|
||||
thought: 'Processing number',
|
||||
} as Part & { thought: string; text: number },
|
||||
],
|
||||
};
|
||||
expect(toContents(contentWithInvalidText)).toEqual([
|
||||
{
|
||||
role: 'model',
|
||||
parts: [
|
||||
{
|
||||
text: '123\n[Thought: Processing number]',
|
||||
},
|
||||
],
|
||||
},
|
||||
]);
|
||||
});
|
||||
});
|
||||
});
|
||||
@@ -1,285 +0,0 @@
|
||||
/**
|
||||
* @license
|
||||
* Copyright 2025 Google LLC
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
import type {
|
||||
Content,
|
||||
ContentListUnion,
|
||||
ContentUnion,
|
||||
GenerateContentConfig,
|
||||
GenerateContentParameters,
|
||||
CountTokensParameters,
|
||||
CountTokensResponse,
|
||||
GenerationConfigRoutingConfig,
|
||||
MediaResolution,
|
||||
Candidate,
|
||||
ModelSelectionConfig,
|
||||
GenerateContentResponsePromptFeedback,
|
||||
GenerateContentResponseUsageMetadata,
|
||||
Part,
|
||||
SafetySetting,
|
||||
PartUnion,
|
||||
SpeechConfigUnion,
|
||||
ThinkingConfig,
|
||||
ToolListUnion,
|
||||
ToolConfig,
|
||||
} from '@google/genai';
|
||||
import { GenerateContentResponse } from '@google/genai';
|
||||
|
||||
export interface CAGenerateContentRequest {
|
||||
model: string;
|
||||
project?: string;
|
||||
user_prompt_id?: string;
|
||||
request: VertexGenerateContentRequest;
|
||||
}
|
||||
|
||||
interface VertexGenerateContentRequest {
|
||||
contents: Content[];
|
||||
systemInstruction?: Content;
|
||||
cachedContent?: string;
|
||||
tools?: ToolListUnion;
|
||||
toolConfig?: ToolConfig;
|
||||
labels?: Record<string, string>;
|
||||
safetySettings?: SafetySetting[];
|
||||
generationConfig?: VertexGenerationConfig;
|
||||
session_id?: string;
|
||||
}
|
||||
|
||||
interface VertexGenerationConfig {
|
||||
temperature?: number;
|
||||
topP?: number;
|
||||
topK?: number;
|
||||
candidateCount?: number;
|
||||
maxOutputTokens?: number;
|
||||
stopSequences?: string[];
|
||||
responseLogprobs?: boolean;
|
||||
logprobs?: number;
|
||||
presencePenalty?: number;
|
||||
frequencyPenalty?: number;
|
||||
seed?: number;
|
||||
responseMimeType?: string;
|
||||
responseJsonSchema?: unknown;
|
||||
responseSchema?: unknown;
|
||||
routingConfig?: GenerationConfigRoutingConfig;
|
||||
modelSelectionConfig?: ModelSelectionConfig;
|
||||
responseModalities?: string[];
|
||||
mediaResolution?: MediaResolution;
|
||||
speechConfig?: SpeechConfigUnion;
|
||||
audioTimestamp?: boolean;
|
||||
thinkingConfig?: ThinkingConfig;
|
||||
}
|
||||
|
||||
export interface CaGenerateContentResponse {
|
||||
response: VertexGenerateContentResponse;
|
||||
}
|
||||
|
||||
interface VertexGenerateContentResponse {
|
||||
candidates: Candidate[];
|
||||
automaticFunctionCallingHistory?: Content[];
|
||||
promptFeedback?: GenerateContentResponsePromptFeedback;
|
||||
usageMetadata?: GenerateContentResponseUsageMetadata;
|
||||
modelVersion?: string;
|
||||
}
|
||||
|
||||
export interface CaCountTokenRequest {
|
||||
request: VertexCountTokenRequest;
|
||||
}
|
||||
|
||||
interface VertexCountTokenRequest {
|
||||
model: string;
|
||||
contents: Content[];
|
||||
}
|
||||
|
||||
export interface CaCountTokenResponse {
|
||||
totalTokens: number;
|
||||
}
|
||||
|
||||
export function toCountTokenRequest(
|
||||
req: CountTokensParameters,
|
||||
): CaCountTokenRequest {
|
||||
return {
|
||||
request: {
|
||||
model: 'models/' + req.model,
|
||||
contents: toContents(req.contents),
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
export function fromCountTokenResponse(
|
||||
res: CaCountTokenResponse,
|
||||
): CountTokensResponse {
|
||||
return {
|
||||
totalTokens: res.totalTokens,
|
||||
};
|
||||
}
|
||||
|
||||
export function toGenerateContentRequest(
|
||||
req: GenerateContentParameters,
|
||||
userPromptId: string,
|
||||
project?: string,
|
||||
sessionId?: string,
|
||||
): CAGenerateContentRequest {
|
||||
return {
|
||||
model: req.model,
|
||||
project,
|
||||
user_prompt_id: userPromptId,
|
||||
request: toVertexGenerateContentRequest(req, sessionId),
|
||||
};
|
||||
}
|
||||
|
||||
export function fromGenerateContentResponse(
|
||||
res: CaGenerateContentResponse,
|
||||
): GenerateContentResponse {
|
||||
const inres = res.response;
|
||||
const out = new GenerateContentResponse();
|
||||
out.candidates = inres.candidates;
|
||||
out.automaticFunctionCallingHistory = inres.automaticFunctionCallingHistory;
|
||||
out.promptFeedback = inres.promptFeedback;
|
||||
out.usageMetadata = inres.usageMetadata;
|
||||
out.modelVersion = inres.modelVersion;
|
||||
return out;
|
||||
}
|
||||
|
||||
function toVertexGenerateContentRequest(
|
||||
req: GenerateContentParameters,
|
||||
sessionId?: string,
|
||||
): VertexGenerateContentRequest {
|
||||
return {
|
||||
contents: toContents(req.contents),
|
||||
systemInstruction: maybeToContent(req.config?.systemInstruction),
|
||||
cachedContent: req.config?.cachedContent,
|
||||
tools: req.config?.tools,
|
||||
toolConfig: req.config?.toolConfig,
|
||||
labels: req.config?.labels,
|
||||
safetySettings: req.config?.safetySettings,
|
||||
generationConfig: toVertexGenerationConfig(req.config),
|
||||
session_id: sessionId,
|
||||
};
|
||||
}
|
||||
|
||||
export function toContents(contents: ContentListUnion): Content[] {
|
||||
if (Array.isArray(contents)) {
|
||||
// it's a Content[] or a PartsUnion[]
|
||||
return contents.map(toContent);
|
||||
}
|
||||
// it's a Content or a PartsUnion
|
||||
return [toContent(contents)];
|
||||
}
|
||||
|
||||
function maybeToContent(content?: ContentUnion): Content | undefined {
|
||||
if (!content) {
|
||||
return undefined;
|
||||
}
|
||||
return toContent(content);
|
||||
}
|
||||
|
||||
function toContent(content: ContentUnion): Content {
|
||||
if (Array.isArray(content)) {
|
||||
// it's a PartsUnion[]
|
||||
return {
|
||||
role: 'user',
|
||||
parts: toParts(content),
|
||||
};
|
||||
}
|
||||
if (typeof content === 'string') {
|
||||
// it's a string
|
||||
return {
|
||||
role: 'user',
|
||||
parts: [{ text: content }],
|
||||
};
|
||||
}
|
||||
if ('parts' in content) {
|
||||
// it's a Content - process parts to handle thought filtering
|
||||
return {
|
||||
...content,
|
||||
parts: content.parts
|
||||
? toParts(content.parts.filter((p) => p != null))
|
||||
: [],
|
||||
};
|
||||
}
|
||||
// it's a Part
|
||||
return {
|
||||
role: 'user',
|
||||
parts: [toPart(content as Part)],
|
||||
};
|
||||
}
|
||||
|
||||
export function toParts(parts: PartUnion[]): Part[] {
|
||||
return parts.map(toPart);
|
||||
}
|
||||
|
||||
function toPart(part: PartUnion): Part {
|
||||
if (typeof part === 'string') {
|
||||
// it's a string
|
||||
return { text: part };
|
||||
}
|
||||
|
||||
// Handle thought parts for CountToken API compatibility
|
||||
// The CountToken API expects parts to have certain required "oneof" fields initialized,
|
||||
// but thought parts don't conform to this schema and cause API failures
|
||||
if ('thought' in part && part.thought) {
|
||||
const thoughtText = `[Thought: ${part.thought}]`;
|
||||
|
||||
const newPart = { ...part };
|
||||
delete (newPart as Record<string, unknown>)['thought'];
|
||||
|
||||
const hasApiContent =
|
||||
'functionCall' in newPart ||
|
||||
'functionResponse' in newPart ||
|
||||
'inlineData' in newPart ||
|
||||
'fileData' in newPart;
|
||||
|
||||
if (hasApiContent) {
|
||||
// It's a functionCall or other non-text part. Just strip the thought.
|
||||
return newPart;
|
||||
}
|
||||
|
||||
// If no other valid API content, this must be a text part.
|
||||
// Combine existing text (if any) with the thought, preserving other properties.
|
||||
const text = (newPart as { text?: unknown }).text;
|
||||
const existingText = text ? String(text) : '';
|
||||
const combinedText = existingText
|
||||
? `${existingText}\n${thoughtText}`
|
||||
: thoughtText;
|
||||
|
||||
return {
|
||||
...newPart,
|
||||
text: combinedText,
|
||||
};
|
||||
}
|
||||
|
||||
return part;
|
||||
}
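// Worked examples (a sketch of the thought handling above, consistent with the converter tests):
//
//   toPart({ thought: 'planning' })
//     -> { text: '[Thought: planning]' }
//   toPart({ text: 'answer', thought: 'planning' })
//     -> { text: 'answer\n[Thought: planning]' }
//   toPart({ functionCall: { name: 'calc', args: {} }, thought: 'planning' })
//     -> { functionCall: { name: 'calc', args: {} } }   // thought is stripped, other content kept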
|
||||
|
||||
function toVertexGenerationConfig(
|
||||
config?: GenerateContentConfig,
|
||||
): VertexGenerationConfig | undefined {
|
||||
if (!config) {
|
||||
return undefined;
|
||||
}
|
||||
return {
|
||||
temperature: config.temperature,
|
||||
topP: config.topP,
|
||||
topK: config.topK,
|
||||
candidateCount: config.candidateCount,
|
||||
maxOutputTokens: config.maxOutputTokens,
|
||||
stopSequences: config.stopSequences,
|
||||
responseLogprobs: config.responseLogprobs,
|
||||
logprobs: config.logprobs,
|
||||
presencePenalty: config.presencePenalty,
|
||||
frequencyPenalty: config.frequencyPenalty,
|
||||
seed: config.seed,
|
||||
responseMimeType: config.responseMimeType,
|
||||
responseSchema: config.responseSchema,
|
||||
responseJsonSchema: config.responseJsonSchema,
|
||||
routingConfig: config.routingConfig,
|
||||
modelSelectionConfig: config.modelSelectionConfig,
|
||||
responseModalities: config.responseModalities,
|
||||
mediaResolution: config.mediaResolution,
|
||||
speechConfig: config.speechConfig,
|
||||
audioTimestamp: config.audioTimestamp,
|
||||
thinkingConfig: config.thinkingConfig,
|
||||
};
|
||||
}
|
||||
@@ -1,217 +0,0 @@
|
||||
/**
|
||||
* @license
|
||||
* Copyright 2025 Google LLC
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
import { type Credentials } from 'google-auth-library';
|
||||
import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
|
||||
import { OAuthCredentialStorage } from './oauth-credential-storage.js';
|
||||
import type { OAuthCredentials } from '../mcp/token-storage/types.js';
|
||||
|
||||
import * as path from 'node:path';
|
||||
import * as os from 'node:os';
|
||||
import { promises as fs } from 'node:fs';
|
||||
|
||||
// Mock external dependencies
|
||||
const mockHybridTokenStorage = vi.hoisted(() => ({
|
||||
getCredentials: vi.fn(),
|
||||
setCredentials: vi.fn(),
|
||||
deleteCredentials: vi.fn(),
|
||||
}));
|
||||
vi.mock('../mcp/token-storage/hybrid-token-storage.js', () => ({
|
||||
HybridTokenStorage: vi.fn(() => mockHybridTokenStorage),
|
||||
}));
|
||||
vi.mock('node:fs', () => ({
|
||||
promises: {
|
||||
readFile: vi.fn(),
|
||||
rm: vi.fn(),
|
||||
},
|
||||
}));
|
||||
vi.mock('node:os');
|
||||
vi.mock('node:path');
|
||||
|
||||
describe('OAuthCredentialStorage', () => {
|
||||
const mockCredentials: Credentials = {
|
||||
access_token: 'mock_access_token',
|
||||
refresh_token: 'mock_refresh_token',
|
||||
expiry_date: Date.now() + 3600 * 1000,
|
||||
token_type: 'Bearer',
|
||||
scope: 'email profile',
|
||||
};
|
||||
|
||||
const mockMcpCredentials: OAuthCredentials = {
|
||||
serverName: 'main-account',
|
||||
token: {
|
||||
accessToken: 'mock_access_token',
|
||||
refreshToken: 'mock_refresh_token',
|
||||
tokenType: 'Bearer',
|
||||
scope: 'email profile',
|
||||
expiresAt: mockCredentials.expiry_date!,
|
||||
},
|
||||
updatedAt: expect.any(Number),
|
||||
};
|
||||
|
||||
const oldFilePath = '/mock/home/.qwen/oauth.json';
|
||||
|
||||
beforeEach(() => {
|
||||
vi.spyOn(mockHybridTokenStorage, 'getCredentials').mockResolvedValue(null);
|
||||
vi.spyOn(mockHybridTokenStorage, 'setCredentials').mockResolvedValue(
|
||||
undefined,
|
||||
);
|
||||
vi.spyOn(mockHybridTokenStorage, 'deleteCredentials').mockResolvedValue(
|
||||
undefined,
|
||||
);
|
||||
|
||||
vi.spyOn(fs, 'readFile').mockRejectedValue(new Error('File not found'));
|
||||
vi.spyOn(fs, 'rm').mockResolvedValue(undefined);
|
||||
|
||||
vi.spyOn(os, 'homedir').mockReturnValue('/mock/home');
|
||||
vi.spyOn(path, 'join').mockReturnValue(oldFilePath);
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
vi.restoreAllMocks();
|
||||
});
|
||||
|
||||
describe('loadCredentials', () => {
|
||||
it('should load credentials from HybridTokenStorage if available', async () => {
|
||||
vi.spyOn(mockHybridTokenStorage, 'getCredentials').mockResolvedValue(
|
||||
mockMcpCredentials,
|
||||
);
|
||||
|
||||
const result = await OAuthCredentialStorage.loadCredentials();
|
||||
|
||||
expect(mockHybridTokenStorage.getCredentials).toHaveBeenCalledWith(
|
||||
'main-account',
|
||||
);
|
||||
expect(result).toEqual(mockCredentials);
|
||||
});
|
||||
|
||||
it('should fallback to migrateFromFileStorage if no credentials in HybridTokenStorage', async () => {
|
||||
vi.spyOn(mockHybridTokenStorage, 'getCredentials').mockResolvedValue(
|
||||
null,
|
||||
);
|
||||
vi.spyOn(fs, 'readFile').mockResolvedValue(
|
||||
JSON.stringify(mockCredentials),
|
||||
);
|
||||
|
||||
const result = await OAuthCredentialStorage.loadCredentials();
|
||||
|
||||
expect(mockHybridTokenStorage.getCredentials).toHaveBeenCalledWith(
|
||||
'main-account',
|
||||
);
|
||||
expect(fs.readFile).toHaveBeenCalledWith(oldFilePath, 'utf-8');
|
||||
expect(mockHybridTokenStorage.setCredentials).toHaveBeenCalled(); // Verify credentials were saved
|
||||
expect(fs.rm).toHaveBeenCalledWith(oldFilePath, { force: true }); // Verify old file was removed
|
||||
expect(result).toEqual(mockCredentials);
|
||||
});
|
||||
|
||||
it('should return null if no credentials found and no old file to migrate', async () => {
|
||||
vi.spyOn(fs, 'readFile').mockRejectedValue({
|
||||
message: 'File not found',
|
||||
code: 'ENOENT',
|
||||
});
|
||||
|
||||
const result = await OAuthCredentialStorage.loadCredentials();
|
||||
|
||||
expect(result).toBeNull();
|
||||
});
|
||||
|
||||
it('should throw an error if loading fails', async () => {
|
||||
vi.spyOn(mockHybridTokenStorage, 'getCredentials').mockRejectedValue(
|
||||
new Error('Loading error'),
|
||||
);
|
||||
|
||||
await expect(OAuthCredentialStorage.loadCredentials()).rejects.toThrow(
|
||||
'Failed to load OAuth credentials',
|
||||
);
|
||||
});
|
||||
|
||||
it('should throw an error if read file fails', async () => {
|
||||
vi.spyOn(mockHybridTokenStorage, 'getCredentials').mockResolvedValue(
|
||||
null,
|
||||
);
|
||||
vi.spyOn(fs, 'readFile').mockRejectedValue(
|
||||
new Error('Permission denied'),
|
||||
);
|
||||
|
||||
await expect(OAuthCredentialStorage.loadCredentials()).rejects.toThrow(
|
||||
'Failed to load OAuth credentials',
|
||||
);
|
||||
});
|
||||
|
||||
it('should not throw error if migration file removal failed', async () => {
|
||||
vi.spyOn(mockHybridTokenStorage, 'getCredentials').mockResolvedValue(
|
||||
null,
|
||||
);
|
||||
vi.spyOn(fs, 'readFile').mockResolvedValue(
|
||||
JSON.stringify(mockCredentials),
|
||||
);
|
||||
vi.spyOn(OAuthCredentialStorage, 'saveCredentials').mockResolvedValue(
|
||||
undefined,
|
||||
);
|
||||
vi.spyOn(fs, 'rm').mockRejectedValue(new Error('Deletion failed'));
|
||||
|
||||
const result = await OAuthCredentialStorage.loadCredentials();
|
||||
|
||||
expect(result).toEqual(mockCredentials);
|
||||
});
|
||||
});
|
||||
|
||||
describe('saveCredentials', () => {
|
||||
it('should save credentials to HybridTokenStorage', async () => {
|
||||
await OAuthCredentialStorage.saveCredentials(mockCredentials);
|
||||
|
||||
expect(mockHybridTokenStorage.setCredentials).toHaveBeenCalledWith(
|
||||
mockMcpCredentials,
|
||||
);
|
||||
});
|
||||
|
||||
it('should throw an error if access_token is missing', async () => {
|
||||
const invalidCredentials: Credentials = {
|
||||
...mockCredentials,
|
||||
access_token: undefined,
|
||||
};
|
||||
await expect(
|
||||
OAuthCredentialStorage.saveCredentials(invalidCredentials),
|
||||
).rejects.toThrow(
|
||||
'Attempted to save credentials without an access token.',
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
describe('clearCredentials', () => {
|
||||
it('should delete credentials from HybridTokenStorage', async () => {
|
||||
await OAuthCredentialStorage.clearCredentials();
|
||||
|
||||
expect(mockHybridTokenStorage.deleteCredentials).toHaveBeenCalledWith(
|
||||
'main-account',
|
||||
);
|
||||
});
|
||||
|
||||
it('should attempt to remove the old file-based storage', async () => {
|
||||
await OAuthCredentialStorage.clearCredentials();
|
||||
|
||||
expect(fs.rm).toHaveBeenCalledWith(oldFilePath, { force: true });
|
||||
});
|
||||
|
||||
it('should not throw an error if deleting old file fails', async () => {
|
||||
vi.spyOn(fs, 'rm').mockRejectedValue(new Error('File deletion failed'));
|
||||
|
||||
await expect(
|
||||
OAuthCredentialStorage.clearCredentials(),
|
||||
).resolves.toBeUndefined();
|
||||
});
|
||||
|
||||
it('should throw an error if clearing from HybridTokenStorage fails', async () => {
|
||||
vi.spyOn(mockHybridTokenStorage, 'deleteCredentials').mockRejectedValue(
|
||||
new Error('Deletion error'),
|
||||
);
|
||||
|
||||
await expect(OAuthCredentialStorage.clearCredentials()).rejects.toThrow(
|
||||
'Failed to clear OAuth credentials',
|
||||
);
|
||||
});
|
||||
});
|
||||
});
|
||||
@@ -1,130 +0,0 @@
|
||||
/**
|
||||
* @license
|
||||
* Copyright 2025 Google LLC
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
import { type Credentials } from 'google-auth-library';
|
||||
import { HybridTokenStorage } from '../mcp/token-storage/hybrid-token-storage.js';
|
||||
import { OAUTH_FILE } from '../config/storage.js';
|
||||
import type { OAuthCredentials } from '../mcp/token-storage/types.js';
|
||||
import * as path from 'node:path';
|
||||
import * as os from 'node:os';
|
||||
import { promises as fs } from 'node:fs';
|
||||
|
||||
const QWEN_DIR = '.qwen';
|
||||
const KEYCHAIN_SERVICE_NAME = 'qwen-code-oauth';
|
||||
const MAIN_ACCOUNT_KEY = 'main-account';
|
||||
|
||||
export class OAuthCredentialStorage {
|
||||
private static storage: HybridTokenStorage = new HybridTokenStorage(
|
||||
KEYCHAIN_SERVICE_NAME,
|
||||
);
|
||||
|
||||
/**
|
||||
* Load cached OAuth credentials
|
||||
*/
|
||||
static async loadCredentials(): Promise<Credentials | null> {
|
||||
try {
|
||||
const credentials = await this.storage.getCredentials(MAIN_ACCOUNT_KEY);
|
||||
|
||||
if (credentials?.token) {
|
||||
const { accessToken, refreshToken, expiresAt, tokenType, scope } =
|
||||
credentials.token;
|
||||
// Convert from OAuthCredentials format to Google Credentials format
|
||||
const googleCreds: Credentials = {
|
||||
access_token: accessToken,
|
||||
refresh_token: refreshToken || undefined,
|
||||
token_type: tokenType || undefined,
|
||||
scope: scope || undefined,
|
||||
};
|
||||
|
||||
if (expiresAt) {
|
||||
googleCreds.expiry_date = expiresAt;
|
||||
}
|
||||
|
||||
return googleCreds;
|
||||
}
|
||||
|
||||
// Fallback: Try to migrate from old file-based storage
|
||||
return await this.migrateFromFileStorage();
|
||||
} catch (error: unknown) {
|
||||
console.error(error);
|
||||
throw new Error('Failed to load OAuth credentials');
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Save OAuth credentials
|
||||
*/
|
||||
static async saveCredentials(credentials: Credentials): Promise<void> {
|
||||
if (!credentials.access_token) {
|
||||
throw new Error('Attempted to save credentials without an access token.');
|
||||
}
|
||||
|
||||
// Convert Google Credentials to OAuthCredentials format
|
||||
const mcpCredentials: OAuthCredentials = {
|
||||
serverName: MAIN_ACCOUNT_KEY,
|
||||
token: {
|
||||
accessToken: credentials.access_token,
|
||||
refreshToken: credentials.refresh_token || undefined,
|
||||
tokenType: credentials.token_type || 'Bearer',
|
||||
scope: credentials.scope || undefined,
|
||||
expiresAt: credentials.expiry_date || undefined,
|
||||
},
|
||||
updatedAt: Date.now(),
|
||||
};
|
||||
|
||||
await this.storage.setCredentials(mcpCredentials);
|
||||
}
|
||||
|
||||
/**
|
||||
* Clear cached OAuth credentials
|
||||
*/
|
||||
static async clearCredentials(): Promise<void> {
|
||||
try {
|
||||
await this.storage.deleteCredentials(MAIN_ACCOUNT_KEY);
|
||||
|
||||
// Also try to remove the old file if it exists
|
||||
const oldFilePath = path.join(os.homedir(), QWEN_DIR, OAUTH_FILE);
|
||||
await fs.rm(oldFilePath, { force: true }).catch(() => {});
|
||||
} catch (error: unknown) {
|
||||
console.error(error);
|
||||
throw new Error('Failed to clear OAuth credentials');
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Migrate credentials from old file-based storage to keychain
|
||||
*/
|
||||
private static async migrateFromFileStorage(): Promise<Credentials | null> {
|
||||
const oldFilePath = path.join(os.homedir(), QWEN_DIR, OAUTH_FILE);
|
||||
|
||||
let credsJson: string;
|
||||
try {
|
||||
credsJson = await fs.readFile(oldFilePath, 'utf-8');
|
||||
} catch (error: unknown) {
|
||||
if (
|
||||
typeof error === 'object' &&
|
||||
error !== null &&
|
||||
'code' in error &&
|
||||
error.code === 'ENOENT'
|
||||
) {
|
||||
// File doesn't exist, so no migration.
|
||||
return null;
|
||||
}
|
||||
// Other read errors should propagate.
|
||||
throw error;
|
||||
}
|
||||
|
||||
const credentials = JSON.parse(credsJson) as Credentials;
|
||||
|
||||
// Save to new storage
|
||||
await this.saveCredentials(credentials);
|
||||
|
||||
// Remove old file after successful migration
|
||||
await fs.rm(oldFilePath, { force: true }).catch(() => {});
|
||||
|
||||
return credentials;
|
||||
}
|
||||
}
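// Shape sketch (assumed from the conversions above): saveCredentials() stores a Google
// `Credentials` object under the 'main-account' key roughly as:
//
//   {
//     serverName: 'main-account',
//     token: {
//       accessToken: access_token,
//       refreshToken: refresh_token || undefined,
//       tokenType: token_type || 'Bearer',
//       scope: scope || undefined,
//       expiresAt: expiry_date || undefined,
//     },
//     updatedAt: Date.now(),
//   }
//
// loadCredentials() reverses the mapping, and falls back to migrating the legacy
// ~/.qwen/<OAUTH_FILE> file into the keychain when no entry is found.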
|
||||
File diff suppressed because it is too large
@@ -1,563 +0,0 @@
|
||||
/**
|
||||
* @license
|
||||
* Copyright 2025 Google LLC
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
import type { Credentials } from 'google-auth-library';
|
||||
import {
|
||||
CodeChallengeMethod,
|
||||
Compute,
|
||||
OAuth2Client,
|
||||
} from 'google-auth-library';
|
||||
import crypto from 'node:crypto';
|
||||
import { promises as fs } from 'node:fs';
|
||||
import * as http from 'node:http';
|
||||
import * as net from 'node:net';
|
||||
import path from 'node:path';
|
||||
import readline from 'node:readline';
|
||||
import url from 'node:url';
|
||||
import open from 'open';
|
||||
import type { Config } from '../config/config.js';
|
||||
import { Storage } from '../config/storage.js';
|
||||
import { AuthType } from '../core/contentGenerator.js';
|
||||
import { FatalAuthenticationError, getErrorMessage } from '../utils/errors.js';
|
||||
import { UserAccountManager } from '../utils/userAccountManager.js';
|
||||
import { OAuthCredentialStorage } from './oauth-credential-storage.js';
|
||||
import { FORCE_ENCRYPTED_FILE_ENV_VAR } from '../mcp/token-storage/index.js';
|
||||
|
||||
const userAccountManager = new UserAccountManager();
|
||||
|
||||
// OAuth Client ID used to initiate OAuth2Client class.
|
||||
const OAUTH_CLIENT_ID =
|
||||
'681255809395-oo8ft2oprdrnp9e3aqf6av3hmdib135j.apps.googleusercontent.com';
|
||||
|
||||
// OAuth Secret value used to initiate OAuth2Client class.
|
||||
// Note: It's ok to save this in git because this is an installed application
|
||||
// as described here: https://developers.google.com/identity/protocols/oauth2#installed
|
||||
// "The process results in a client ID and, in some cases, a client secret,
|
||||
// which you embed in the source code of your application. (In this context,
|
||||
// the client secret is obviously not treated as a secret.)"
|
||||
const OAUTH_CLIENT_SECRET = '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';
|
||||
|
||||
// OAuth Scopes for Cloud Code authorization.
|
||||
const OAUTH_SCOPE = [
|
||||
'https://www.googleapis.com/auth/cloud-platform',
|
||||
'https://www.googleapis.com/auth/userinfo.email',
|
||||
'https://www.googleapis.com/auth/userinfo.profile',
|
||||
];
|
||||
|
||||
const HTTP_REDIRECT = 301;
|
||||
const SIGN_IN_SUCCESS_URL =
|
||||
'https://developers.google.com/gemini-code-assist/auth_success_gemini';
|
||||
const SIGN_IN_FAILURE_URL =
|
||||
'https://developers.google.com/gemini-code-assist/auth_failure_gemini';
|
||||
|
||||
/**
|
||||
* An authentication URL for updating the credentials of an OAuth2Client
|
||||
* as well as a promise that will resolve when the credentials have
|
||||
* been refreshed (or which throws an error when refreshing the credentials fails).
|
||||
*/
|
||||
export interface OauthWebLogin {
|
||||
authUrl: string;
|
||||
loginCompletePromise: Promise<void>;
|
||||
}
|
||||
|
||||
const oauthClientPromises = new Map<AuthType, Promise<OAuth2Client>>();
|
||||
|
||||
function getUseEncryptedStorageFlag() {
|
||||
return process.env[FORCE_ENCRYPTED_FILE_ENV_VAR] === 'true';
|
||||
}
|
||||
|
||||
async function initOauthClient(
|
||||
authType: AuthType,
|
||||
config: Config,
|
||||
): Promise<OAuth2Client> {
|
||||
const client = new OAuth2Client({
|
||||
clientId: OAUTH_CLIENT_ID,
|
||||
clientSecret: OAUTH_CLIENT_SECRET,
|
||||
transporterOptions: {
|
||||
proxy: config.getProxy(),
|
||||
},
|
||||
});
|
||||
const useEncryptedStorage = getUseEncryptedStorageFlag();
|
||||
|
||||
if (
|
||||
process.env['GOOGLE_GENAI_USE_GCA'] &&
|
||||
process.env['GOOGLE_CLOUD_ACCESS_TOKEN']
|
||||
) {
|
||||
client.setCredentials({
|
||||
access_token: process.env['GOOGLE_CLOUD_ACCESS_TOKEN'],
|
||||
});
|
||||
await fetchAndCacheUserInfo(client);
|
||||
return client;
|
||||
}
|
||||
|
||||
client.on('tokens', async (tokens: Credentials) => {
|
||||
if (useEncryptedStorage) {
|
||||
await OAuthCredentialStorage.saveCredentials(tokens);
|
||||
} else {
|
||||
await cacheCredentials(tokens);
|
||||
}
|
||||
});
|
||||
|
||||
// If there are cached creds on disk, they always take precedence
|
||||
if (await loadCachedCredentials(client)) {
|
||||
// Found valid cached credentials.
|
||||
// Check if we need to retrieve Google Account ID or Email
|
||||
if (!userAccountManager.getCachedGoogleAccount()) {
|
||||
try {
|
||||
await fetchAndCacheUserInfo(client);
|
||||
} catch (error) {
|
||||
// Non-fatal, continue with existing auth.
|
||||
console.warn('Failed to fetch user info:', getErrorMessage(error));
|
||||
}
|
||||
}
|
||||
console.log('Loaded cached credentials.');
|
||||
return client;
|
||||
}
|
||||
|
||||
// In Google Cloud Shell, we can use Application Default Credentials (ADC)
|
||||
// provided via its metadata server to authenticate non-interactively using
|
||||
// the identity of the user logged into Cloud Shell.
|
||||
if (authType === AuthType.CLOUD_SHELL) {
|
||||
try {
|
||||
console.log("Attempting to authenticate via Cloud Shell VM's ADC.");
|
||||
const computeClient = new Compute({
|
||||
// We can leave this empty, since the metadata server will provide
|
||||
// the service account email.
|
||||
});
|
||||
await computeClient.getAccessToken();
|
||||
console.log('Authentication successful.');
|
||||
|
||||
// Do not cache creds in this case; note that the Compute client will handle its own refresh
|
||||
return computeClient;
|
||||
} catch (e) {
|
||||
throw new Error(
|
||||
`Could not authenticate using Cloud Shell credentials. Please select a different authentication method or ensure you are in a properly configured environment. Error: ${getErrorMessage(
|
||||
e,
|
||||
)}`,
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
if (config.isBrowserLaunchSuppressed()) {
|
||||
let success = false;
|
||||
const maxRetries = 2;
|
||||
for (let i = 0; !success && i < maxRetries; i++) {
|
||||
success = await authWithUserCode(client);
|
||||
if (!success) {
|
||||
console.error(
|
||||
'\nFailed to authenticate with user code.',
|
||||
i === maxRetries - 1 ? '' : 'Retrying...\n',
|
||||
);
|
||||
}
|
||||
}
|
||||
if (!success) {
|
||||
throw new FatalAuthenticationError(
|
||||
'Failed to authenticate with user code.',
|
||||
);
|
||||
}
|
||||
} else {
|
||||
const webLogin = await authWithWeb(client);
|
||||
|
||||
console.log(
|
||||
`\n\nCode Assist login required.\n` +
|
||||
`Attempting to open authentication page in your browser.\n` +
|
||||
`Otherwise navigate to:\n\n${webLogin.authUrl}\n\n`,
|
||||
);
|
||||
try {
|
||||
// Attempt to open the authentication URL in the default browser.
|
||||
// We do not use the `wait` option here because the main script's execution
|
||||
// is already paused by `loginCompletePromise`, which awaits the server callback.
|
||||
const childProcess = await open(webLogin.authUrl);
|
||||
|
||||
// IMPORTANT: Attach an error handler to the returned child process.
|
||||
// Without this, if `open` fails to spawn a process (e.g., `xdg-open` is not found
|
||||
// in a minimal Docker container), it will emit an unhandled 'error' event,
|
||||
// causing the entire Node.js process to crash.
|
||||
childProcess.on('error', (error) => {
|
||||
console.error(
|
||||
'Failed to open browser automatically. Please try running again with NO_BROWSER=true set.',
|
||||
);
|
||||
console.error('Browser error details:', getErrorMessage(error));
|
||||
});
|
||||
} catch (err) {
|
||||
console.error(
|
||||
'An unexpected error occurred while trying to open the browser:',
|
||||
getErrorMessage(err),
|
||||
'\nThis might be due to browser compatibility issues or system configuration.',
|
||||
'\nPlease try running again with NO_BROWSER=true set for manual authentication.',
|
||||
);
|
||||
throw new FatalAuthenticationError(
|
||||
`Failed to open browser: ${getErrorMessage(err)}`,
|
||||
);
|
||||
}
|
||||
console.log('Waiting for authentication...');
|
||||
|
||||
// Add timeout to prevent infinite waiting when browser tab gets stuck
|
||||
const authTimeout = 5 * 60 * 1000; // 5 minutes timeout
|
||||
const timeoutPromise = new Promise<never>((_, reject) => {
|
||||
setTimeout(() => {
|
||||
reject(
|
||||
new FatalAuthenticationError(
|
||||
'Authentication timed out after 5 minutes. The browser tab may have gotten stuck in a loading state. ' +
|
||||
'Please try again or use NO_BROWSER=true for manual authentication.',
|
||||
),
|
||||
);
|
||||
}, authTimeout);
|
||||
});
|
||||
|
||||
await Promise.race([webLogin.loginCompletePromise, timeoutPromise]);
|
||||
}
|
||||
|
||||
return client;
|
||||
}
|
||||
|
||||
export async function getOauthClient(
|
||||
authType: AuthType,
|
||||
config: Config,
|
||||
): Promise<OAuth2Client> {
|
||||
if (!oauthClientPromises.has(authType)) {
|
||||
oauthClientPromises.set(authType, initOauthClient(authType, config));
|
||||
}
|
||||
return oauthClientPromises.get(authType)!;
|
||||
}
|
||||
|
||||
async function authWithUserCode(client: OAuth2Client): Promise<boolean> {
|
||||
const redirectUri = 'https://codeassist.google.com/authcode';
|
||||
const codeVerifier = await client.generateCodeVerifierAsync();
|
||||
const state = crypto.randomBytes(32).toString('hex');
|
||||
const authUrl: string = client.generateAuthUrl({
|
||||
redirect_uri: redirectUri,
|
||||
access_type: 'offline',
|
||||
scope: OAUTH_SCOPE,
|
||||
code_challenge_method: CodeChallengeMethod.S256,
|
||||
code_challenge: codeVerifier.codeChallenge,
|
||||
state,
|
||||
});
|
||||
console.log('Please visit the following URL to authorize the application:');
|
||||
console.log('');
|
||||
console.log(authUrl);
|
||||
console.log('');
|
||||
|
||||
const code = await new Promise<string>((resolve) => {
|
||||
const rl = readline.createInterface({
|
||||
input: process.stdin,
|
||||
output: process.stdout,
|
||||
});
|
||||
rl.question('Enter the authorization code: ', (code) => {
|
||||
rl.close();
|
||||
resolve(code.trim());
|
||||
});
|
||||
});
|
||||
|
||||
if (!code) {
|
||||
console.error('Authorization code is required.');
|
||||
return false;
|
||||
}
|
||||
|
||||
try {
|
||||
const { tokens } = await client.getToken({
|
||||
code,
|
||||
codeVerifier: codeVerifier.codeVerifier,
|
||||
redirect_uri: redirectUri,
|
||||
});
|
||||
client.setCredentials(tokens);
|
||||
} catch (error) {
|
||||
console.error(
|
||||
'Failed to authenticate with authorization code:',
|
||||
getErrorMessage(error),
|
||||
);
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
async function authWithWeb(client: OAuth2Client): Promise<OauthWebLogin> {
|
||||
const port = await getAvailablePort();
|
||||
// The hostname used for the HTTP server binding (e.g., '0.0.0.0' in Docker).
|
||||
const host = process.env['OAUTH_CALLBACK_HOST'] || 'localhost';
|
||||
// The `redirectUri` sent to Google's authorization server MUST use a loopback IP literal
|
||||
// (i.e., 'localhost' or '127.0.0.1'). This is a strict security policy for credentials of
|
||||
// type 'Desktop app' or 'Web application' (when using loopback flow) to mitigate
|
||||
// authorization code interception attacks.
|
||||
const redirectUri = `http://localhost:${port}/oauth2callback`;
|
||||
const state = crypto.randomBytes(32).toString('hex');
|
||||
const authUrl = client.generateAuthUrl({
|
||||
redirect_uri: redirectUri,
|
||||
access_type: 'offline',
|
||||
scope: OAUTH_SCOPE,
|
||||
state,
|
||||
});
|
||||
|
||||
const loginCompletePromise = new Promise<void>((resolve, reject) => {
|
||||
const server = http.createServer(async (req, res) => {
|
||||
try {
|
||||
if (req.url!.indexOf('/oauth2callback') === -1) {
|
||||
res.writeHead(HTTP_REDIRECT, { Location: SIGN_IN_FAILURE_URL });
|
||||
res.end();
|
||||
reject(
|
||||
new FatalAuthenticationError(
|
||||
'OAuth callback not received. Unexpected request: ' + req.url,
|
||||
),
|
||||
);
|
||||
}
|
||||
// acquire the code from the querystring, and close the web server.
|
||||
const qs = new url.URL(req.url!, 'http://localhost:3000').searchParams;
|
||||
if (qs.get('error')) {
|
||||
res.writeHead(HTTP_REDIRECT, { Location: SIGN_IN_FAILURE_URL });
|
||||
res.end();
|
||||
|
||||
const errorCode = qs.get('error');
|
||||
const errorDescription =
|
||||
qs.get('error_description') || 'No additional details provided';
|
||||
reject(
|
||||
new FatalAuthenticationError(
|
||||
`Google OAuth error: ${errorCode}. ${errorDescription}`,
|
||||
),
|
||||
);
|
||||
} else if (qs.get('state') !== state) {
|
||||
res.end('State mismatch. Possible CSRF attack');
|
||||
|
||||
reject(
|
||||
new FatalAuthenticationError(
|
||||
'OAuth state mismatch. Possible CSRF attack or browser session issue.',
|
||||
),
|
||||
);
|
||||
} else if (qs.get('code')) {
|
||||
try {
|
||||
const { tokens } = await client.getToken({
|
||||
code: qs.get('code')!,
|
||||
redirect_uri: redirectUri,
|
||||
});
|
||||
client.setCredentials(tokens);
|
||||
|
||||
// Retrieve and cache Google Account ID during authentication
|
||||
try {
|
||||
await fetchAndCacheUserInfo(client);
|
||||
} catch (error) {
|
||||
console.warn(
|
||||
'Failed to retrieve Google Account ID during authentication:',
|
||||
getErrorMessage(error),
|
||||
);
|
||||
// Don't fail the auth flow if Google Account ID retrieval fails
|
||||
}
|
||||
|
||||
res.writeHead(HTTP_REDIRECT, { Location: SIGN_IN_SUCCESS_URL });
|
||||
res.end();
|
||||
resolve();
|
||||
} catch (error) {
|
||||
res.writeHead(HTTP_REDIRECT, { Location: SIGN_IN_FAILURE_URL });
|
||||
res.end();
|
||||
reject(
|
||||
new FatalAuthenticationError(
|
||||
`Failed to exchange authorization code for tokens: ${getErrorMessage(error)}`,
|
||||
),
|
||||
);
|
||||
}
|
||||
} else {
|
||||
reject(
|
||||
new FatalAuthenticationError(
|
||||
'No authorization code received from Google OAuth. Please try authenticating again.',
|
||||
),
|
||||
);
|
||||
}
|
||||
} catch (e) {
|
||||
// Provide more specific error message for unexpected errors during OAuth flow
|
||||
if (e instanceof FatalAuthenticationError) {
|
||||
reject(e);
|
||||
} else {
|
||||
reject(
|
||||
new FatalAuthenticationError(
|
||||
`Unexpected error during OAuth authentication: ${getErrorMessage(e)}`,
|
||||
),
|
||||
);
|
||||
}
|
||||
} finally {
|
||||
server.close();
|
||||
}
|
||||
});
|
||||
|
||||
server.listen(port, host, () => {
|
||||
// Server started successfully
|
||||
});
|
||||
|
||||
server.on('error', (err) => {
|
||||
reject(
|
||||
new FatalAuthenticationError(
|
||||
`OAuth callback server error: ${getErrorMessage(err)}`,
|
||||
),
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
return {
|
||||
authUrl,
|
||||
loginCompletePromise,
|
||||
};
|
||||
}
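// Usage sketch (an assumed container setup, based on the OAUTH_CALLBACK_HOST variable read
// above and the OAUTH_CALLBACK_PORT variable read in getAvailablePort below): bind the
// callback server on all interfaces while the Google-facing redirect URI stays on localhost,
// e.g. when running inside Docker:
//
//   export OAUTH_CALLBACK_HOST=0.0.0.0
//   export OAUTH_CALLBACK_PORT=7777
//
// The callback server then listens on 0.0.0.0:7777, while the redirect_uri sent to Google
// remains http://localhost:7777/oauth2callback, satisfying the loopback requirement noted above.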
|
||||
|
||||
export function getAvailablePort(): Promise<number> {
|
||||
return new Promise((resolve, reject) => {
|
||||
let port = 0;
|
||||
try {
|
||||
const portStr = process.env['OAUTH_CALLBACK_PORT'];
|
||||
if (portStr) {
|
||||
port = parseInt(portStr, 10);
|
||||
if (isNaN(port) || port <= 0 || port > 65535) {
|
||||
return reject(
|
||||
new Error(`Invalid value for OAUTH_CALLBACK_PORT: "${portStr}"`),
|
||||
);
|
||||
}
|
||||
return resolve(port);
|
||||
}
|
||||
const server = net.createServer();
|
||||
server.listen(0, () => {
|
||||
const address = server.address()! as net.AddressInfo;
|
||||
port = address.port;
|
||||
});
|
||||
server.on('listening', () => {
|
||||
server.close();
|
||||
server.unref();
|
||||
});
|
||||
server.on('error', (e) => reject(e));
|
||||
server.on('close', () => resolve(port));
|
||||
} catch (e) {
|
||||
reject(e);
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
async function loadCachedCredentials(client: OAuth2Client): Promise<boolean> {
|
||||
const useEncryptedStorage = getUseEncryptedStorageFlag();
|
||||
if (useEncryptedStorage) {
|
||||
const credentials = await OAuthCredentialStorage.loadCredentials();
|
||||
if (credentials) {
|
||||
client.setCredentials(credentials);
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
const pathsToTry = [
|
||||
Storage.getOAuthCredsPath(),
|
||||
process.env['GOOGLE_APPLICATION_CREDENTIALS'],
|
||||
].filter((p): p is string => !!p);
|
||||
|
||||
for (const keyFile of pathsToTry) {
|
||||
try {
|
||||
const creds = await fs.readFile(keyFile, 'utf-8');
|
||||
client.setCredentials(JSON.parse(creds));
|
||||
|
||||
// This will verify locally that the credentials look good.
|
||||
const { token } = await client.getAccessToken();
|
||||
if (!token) {
|
||||
continue;
|
||||
}
|
||||
|
||||
// This will check with the server that the token hasn't been revoked.
|
||||
await client.getTokenInfo(token);
|
||||
|
||||
return true;
|
||||
} catch (error) {
|
||||
// Log specific error for debugging, but continue trying other paths
|
||||
console.debug(
|
||||
`Failed to load credentials from ${keyFile}:`,
|
||||
getErrorMessage(error),
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
async function cacheCredentials(credentials: Credentials) {
|
||||
const filePath = Storage.getOAuthCredsPath();
|
||||
await fs.mkdir(path.dirname(filePath), { recursive: true });
|
||||
|
||||
const credString = JSON.stringify(credentials, null, 2);
|
||||
await fs.writeFile(filePath, credString, { mode: 0o600 });
|
||||
try {
|
||||
await fs.chmod(filePath, 0o600);
|
||||
} catch {
|
||||
/* empty */
|
||||
}
|
||||
}
|
||||
|
||||
export function clearOauthClientCache() {
|
||||
oauthClientPromises.clear();
|
||||
}
|
||||
|
||||
export async function clearCachedCredentialFile() {
|
||||
try {
|
||||
const useEncryptedStorage = getUseEncryptedStorageFlag();
|
||||
if (useEncryptedStorage) {
|
||||
await OAuthCredentialStorage.clearCredentials();
|
||||
} else {
|
||||
await fs.rm(Storage.getOAuthCredsPath(), { force: true });
|
||||
}
|
||||
// Clear the Google Account ID cache when credentials are cleared
|
||||
await userAccountManager.clearCachedGoogleAccount();
|
||||
// Clear the in-memory OAuth client cache to force re-authentication
|
||||
clearOauthClientCache();
|
||||
|
||||
/**
|
||||
* Also clear Qwen SharedTokenManager cache and credentials file to prevent stale credentials
|
||||
* when switching between auth types
|
||||
* TODO: We do not depend on code_assist, we'll have to build an independent auth-cleaning procedure.
|
||||
*/
|
||||
try {
|
||||
const { SharedTokenManager } = await import(
|
||||
'../qwen/sharedTokenManager.js'
|
||||
);
|
||||
const { clearQwenCredentials } = await import('../qwen/qwenOAuth2.js');
|
||||
|
||||
const sharedManager = SharedTokenManager.getInstance();
|
||||
sharedManager.clearCache();
|
||||
|
||||
await clearQwenCredentials();
|
||||
} catch (qwenError) {
|
||||
console.debug('Could not clear Qwen credentials:', qwenError);
|
||||
}
|
||||
} catch (e) {
|
||||
console.error('Failed to clear cached credentials:', e);
|
||||
}
|
||||
}
|
||||
|
||||
async function fetchAndCacheUserInfo(client: OAuth2Client): Promise<void> {
|
||||
try {
|
||||
const { token } = await client.getAccessToken();
|
||||
if (!token) {
|
||||
return;
|
||||
}
|
||||
|
||||
const response = await fetch(
|
||||
'https://www.googleapis.com/oauth2/v2/userinfo',
|
||||
{
|
||||
headers: {
|
||||
Authorization: `Bearer ${token}`,
|
||||
},
|
||||
},
|
||||
);
|
||||
|
||||
if (!response.ok) {
|
||||
console.error(
|
||||
'Failed to fetch user info:',
|
||||
response.status,
|
||||
response.statusText,
|
||||
);
|
||||
return;
|
||||
}
|
||||
|
||||
const userInfo = await response.json();
|
||||
await userAccountManager.cacheGoogleAccount(userInfo.email);
|
||||
} catch (error) {
|
||||
console.error('Error retrieving user info:', error);
|
||||
}
|
||||
}
|
||||
|
||||
// Helper to ensure test isolation
|
||||
export function resetOauthClientForTesting() {
|
||||
oauthClientPromises.clear();
|
||||
}
|
||||
@@ -1,255 +0,0 @@
|
||||
/**
|
||||
* @license
|
||||
* Copyright 2025 Google LLC
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
import { beforeEach, describe, it, expect, vi } from 'vitest';
|
||||
import { CodeAssistServer } from './server.js';
|
||||
import { OAuth2Client } from 'google-auth-library';
|
||||
import { UserTierId } from './types.js';
|
||||
|
||||
vi.mock('google-auth-library');
|
||||
|
||||
describe('CodeAssistServer', () => {
|
||||
beforeEach(() => {
|
||||
vi.resetAllMocks();
|
||||
});
|
||||
|
||||
it('should be able to be constructed', () => {
|
||||
const auth = new OAuth2Client();
|
||||
const server = new CodeAssistServer(
|
||||
auth,
|
||||
'test-project',
|
||||
{},
|
||||
'test-session',
|
||||
UserTierId.FREE,
|
||||
);
|
||||
expect(server).toBeInstanceOf(CodeAssistServer);
|
||||
});
|
||||
|
||||
it('should call the generateContent endpoint', async () => {
|
||||
const client = new OAuth2Client();
|
||||
const server = new CodeAssistServer(
|
||||
client,
|
||||
'test-project',
|
||||
{},
|
||||
'test-session',
|
||||
UserTierId.FREE,
|
||||
);
|
||||
const mockResponse = {
|
||||
response: {
|
||||
candidates: [
|
||||
{
|
||||
index: 0,
|
||||
content: {
|
||||
role: 'model',
|
||||
parts: [{ text: 'response' }],
|
||||
},
|
||||
finishReason: 'STOP',
|
||||
safetyRatings: [],
|
||||
},
|
||||
],
|
||||
},
|
||||
};
|
||||
vi.spyOn(server, 'requestPost').mockResolvedValue(mockResponse);
|
||||
|
||||
const response = await server.generateContent(
|
||||
{
|
||||
model: 'test-model',
|
||||
contents: [{ role: 'user', parts: [{ text: 'request' }] }],
|
||||
},
|
||||
'user-prompt-id',
|
||||
);
|
||||
|
||||
expect(server.requestPost).toHaveBeenCalledWith(
|
||||
'generateContent',
|
||||
expect.any(Object),
|
||||
undefined,
|
||||
);
|
||||
expect(response.candidates?.[0]?.content?.parts?.[0]?.text).toBe(
|
||||
'response',
|
||||
);
|
||||
});
|
||||
|
||||
it('should call the generateContentStream endpoint', async () => {
|
||||
const client = new OAuth2Client();
|
||||
const server = new CodeAssistServer(
|
||||
client,
|
||||
'test-project',
|
||||
{},
|
||||
'test-session',
|
||||
UserTierId.FREE,
|
||||
);
|
||||
const mockResponse = (async function* () {
|
||||
yield {
|
||||
response: {
|
||||
candidates: [
|
||||
{
|
||||
index: 0,
|
||||
content: {
|
||||
role: 'model',
|
||||
parts: [{ text: 'response' }],
|
||||
},
|
||||
finishReason: 'STOP',
|
||||
safetyRatings: [],
|
||||
},
|
||||
],
|
||||
},
|
||||
};
|
||||
})();
|
||||
vi.spyOn(server, 'requestStreamingPost').mockResolvedValue(mockResponse);
|
||||
|
||||
const stream = await server.generateContentStream(
|
||||
{
|
||||
model: 'test-model',
|
||||
contents: [{ role: 'user', parts: [{ text: 'request' }] }],
|
||||
},
|
||||
'user-prompt-id',
|
||||
);
|
||||
|
||||
for await (const res of stream) {
|
||||
expect(server.requestStreamingPost).toHaveBeenCalledWith(
|
||||
'streamGenerateContent',
|
||||
expect.any(Object),
|
||||
undefined,
|
||||
);
|
||||
expect(res.candidates?.[0]?.content?.parts?.[0]?.text).toBe('response');
|
||||
}
|
||||
});
|
||||
|
||||
it('should call the onboardUser endpoint', async () => {
|
||||
const client = new OAuth2Client();
|
||||
const server = new CodeAssistServer(
|
||||
client,
|
||||
'test-project',
|
||||
{},
|
||||
'test-session',
|
||||
UserTierId.FREE,
|
||||
);
|
||||
const mockResponse = {
|
||||
name: 'operations/123',
|
||||
done: true,
|
||||
};
|
||||
vi.spyOn(server, 'requestPost').mockResolvedValue(mockResponse);
|
||||
|
||||
const response = await server.onboardUser({
|
||||
tierId: 'test-tier',
|
||||
cloudaicompanionProject: 'test-project',
|
||||
metadata: {},
|
||||
});
|
||||
|
||||
expect(server.requestPost).toHaveBeenCalledWith(
|
||||
'onboardUser',
|
||||
expect.any(Object),
|
||||
);
|
||||
expect(response.name).toBe('operations/123');
|
||||
});
|
||||
|
||||
it('should call the loadCodeAssist endpoint', async () => {
|
||||
const client = new OAuth2Client();
|
||||
const server = new CodeAssistServer(
|
||||
client,
|
||||
'test-project',
|
||||
{},
|
||||
'test-session',
|
||||
UserTierId.FREE,
|
||||
);
|
||||
const mockResponse = {
|
||||
currentTier: {
|
||||
id: UserTierId.FREE,
|
||||
name: 'Free',
|
||||
description: 'free tier',
|
||||
},
|
||||
allowedTiers: [],
|
||||
ineligibleTiers: [],
|
||||
cloudaicompanionProject: 'projects/test',
|
||||
};
|
||||
vi.spyOn(server, 'requestPost').mockResolvedValue(mockResponse);
|
||||
|
||||
const response = await server.loadCodeAssist({
|
||||
metadata: {},
|
||||
});
|
||||
|
||||
expect(server.requestPost).toHaveBeenCalledWith(
|
||||
'loadCodeAssist',
|
||||
expect.any(Object),
|
||||
);
|
||||
expect(response).toEqual(mockResponse);
|
||||
});
|
||||
|
||||
it('should return 0 for countTokens', async () => {
|
||||
const client = new OAuth2Client();
|
||||
const server = new CodeAssistServer(
|
||||
client,
|
||||
'test-project',
|
||||
{},
|
||||
'test-session',
|
||||
UserTierId.FREE,
|
||||
);
|
||||
const mockResponse = {
|
||||
totalTokens: 100,
|
||||
};
|
||||
vi.spyOn(server, 'requestPost').mockResolvedValue(mockResponse);
|
||||
|
||||
const response = await server.countTokens({
|
||||
model: 'test-model',
|
||||
contents: [{ role: 'user', parts: [{ text: 'request' }] }],
|
||||
});
|
||||
expect(response.totalTokens).toBe(100);
|
||||
});
|
||||
|
||||
it('should throw an error for embedContent', async () => {
|
||||
const client = new OAuth2Client();
|
||||
const server = new CodeAssistServer(
|
||||
client,
|
||||
'test-project',
|
||||
{},
|
||||
'test-session',
|
||||
UserTierId.FREE,
|
||||
);
|
||||
await expect(
|
||||
server.embedContent({
|
||||
model: 'test-model',
|
||||
contents: [{ role: 'user', parts: [{ text: 'request' }] }],
|
||||
}),
|
||||
).rejects.toThrow();
|
||||
});
|
||||
|
||||
it('should handle VPC-SC errors when calling loadCodeAssist', async () => {
|
||||
const client = new OAuth2Client();
|
||||
const server = new CodeAssistServer(
|
||||
client,
|
||||
'test-project',
|
||||
{},
|
||||
'test-session',
|
||||
UserTierId.FREE,
|
||||
);
|
||||
const mockVpcScError = {
|
||||
response: {
|
||||
data: {
|
||||
error: {
|
||||
details: [
|
||||
{
|
||||
reason: 'SECURITY_POLICY_VIOLATED',
|
||||
},
|
||||
],
|
||||
},
|
||||
},
|
||||
},
|
||||
};
|
||||
vi.spyOn(server, 'requestPost').mockRejectedValue(mockVpcScError);
|
||||
|
||||
const response = await server.loadCodeAssist({
|
||||
metadata: {},
|
||||
});
|
||||
|
||||
expect(server.requestPost).toHaveBeenCalledWith(
|
||||
'loadCodeAssist',
|
||||
expect.any(Object),
|
||||
);
|
||||
expect(response).toEqual({
|
||||
currentTier: { id: UserTierId.STANDARD },
|
||||
});
|
||||
});
|
||||
});
|
||||
@@ -1,253 +0,0 @@
/**
 * @license
 * Copyright 2025 Google LLC
 * SPDX-License-Identifier: Apache-2.0
 */

import type { OAuth2Client } from 'google-auth-library';
import type {
  CodeAssistGlobalUserSettingResponse,
  GoogleRpcResponse,
  LoadCodeAssistRequest,
  LoadCodeAssistResponse,
  LongRunningOperationResponse,
  OnboardUserRequest,
  SetCodeAssistGlobalUserSettingRequest,
} from './types.js';
import type {
  CountTokensParameters,
  CountTokensResponse,
  EmbedContentParameters,
  EmbedContentResponse,
  GenerateContentParameters,
  GenerateContentResponse,
} from '@google/genai';
import * as readline from 'node:readline';
import type { ContentGenerator } from '../core/contentGenerator.js';
import { UserTierId } from './types.js';
import type {
  CaCountTokenResponse,
  CaGenerateContentResponse,
} from './converter.js';
import {
  fromCountTokenResponse,
  fromGenerateContentResponse,
  toCountTokenRequest,
  toGenerateContentRequest,
} from './converter.js';

/** HTTP options to be used in each of the requests. */
export interface HttpOptions {
  /** Additional HTTP headers to be sent with the request. */
  headers?: Record<string, string>;
}

export const CODE_ASSIST_ENDPOINT = 'https://localhost:0'; // Disable Google Code Assist API Request
export const CODE_ASSIST_API_VERSION = 'v1internal';

export class CodeAssistServer implements ContentGenerator {
  constructor(
    readonly client: OAuth2Client,
    readonly projectId?: string,
    readonly httpOptions: HttpOptions = {},
    readonly sessionId?: string,
    readonly userTier?: UserTierId,
  ) {}

  async generateContentStream(
    req: GenerateContentParameters,
    userPromptId: string,
  ): Promise<AsyncGenerator<GenerateContentResponse>> {
    const resps = await this.requestStreamingPost<CaGenerateContentResponse>(
      'streamGenerateContent',
      toGenerateContentRequest(
        req,
        userPromptId,
        this.projectId,
        this.sessionId,
      ),
      req.config?.abortSignal,
    );
    return (async function* (): AsyncGenerator<GenerateContentResponse> {
      for await (const resp of resps) {
        yield fromGenerateContentResponse(resp);
      }
    })();
  }

  async generateContent(
    req: GenerateContentParameters,
    userPromptId: string,
  ): Promise<GenerateContentResponse> {
    const resp = await this.requestPost<CaGenerateContentResponse>(
      'generateContent',
      toGenerateContentRequest(
        req,
        userPromptId,
        this.projectId,
        this.sessionId,
      ),
      req.config?.abortSignal,
    );
    return fromGenerateContentResponse(resp);
  }

  async onboardUser(
    req: OnboardUserRequest,
  ): Promise<LongRunningOperationResponse> {
    return await this.requestPost<LongRunningOperationResponse>(
      'onboardUser',
      req,
    );
  }

  async loadCodeAssist(
    req: LoadCodeAssistRequest,
  ): Promise<LoadCodeAssistResponse> {
    try {
      return await this.requestPost<LoadCodeAssistResponse>(
        'loadCodeAssist',
        req,
      );
    } catch (e) {
      if (isVpcScAffectedUser(e)) {
        return {
          currentTier: { id: UserTierId.STANDARD },
        };
      } else {
        throw e;
      }
    }
  }

  async getCodeAssistGlobalUserSetting(): Promise<CodeAssistGlobalUserSettingResponse> {
    return await this.requestGet<CodeAssistGlobalUserSettingResponse>(
      'getCodeAssistGlobalUserSetting',
    );
  }

  async setCodeAssistGlobalUserSetting(
    req: SetCodeAssistGlobalUserSettingRequest,
  ): Promise<CodeAssistGlobalUserSettingResponse> {
    return await this.requestPost<CodeAssistGlobalUserSettingResponse>(
      'setCodeAssistGlobalUserSetting',
      req,
    );
  }

  async countTokens(req: CountTokensParameters): Promise<CountTokensResponse> {
    const resp = await this.requestPost<CaCountTokenResponse>(
      'countTokens',
      toCountTokenRequest(req),
    );
    return fromCountTokenResponse(resp);
  }

  async embedContent(
    _req: EmbedContentParameters,
  ): Promise<EmbedContentResponse> {
    throw Error();
  }

  async requestPost<T>(
    method: string,
    req: object,
    signal?: AbortSignal,
  ): Promise<T> {
    const res = await this.client.request({
      url: this.getMethodUrl(method),
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
        ...this.httpOptions.headers,
      },
      responseType: 'json',
      body: JSON.stringify(req),
      signal,
    });
    return res.data as T;
  }

  async requestGet<T>(method: string, signal?: AbortSignal): Promise<T> {
    const res = await this.client.request({
      url: this.getMethodUrl(method),
      method: 'GET',
      headers: {
        'Content-Type': 'application/json',
        ...this.httpOptions.headers,
      },
      responseType: 'json',
      signal,
    });
    return res.data as T;
  }

  async requestStreamingPost<T>(
    method: string,
    req: object,
    signal?: AbortSignal,
  ): Promise<AsyncGenerator<T>> {
    const res = await this.client.request({
      url: this.getMethodUrl(method),
      method: 'POST',
      params: {
        alt: 'sse',
      },
      headers: {
        'Content-Type': 'application/json',
        ...this.httpOptions.headers,
      },
      responseType: 'stream',
      body: JSON.stringify(req),
      signal,
    });

    return (async function* (): AsyncGenerator<T> {
      const rl = readline.createInterface({
        input: res.data as NodeJS.ReadableStream,
        crlfDelay: Infinity, // Recognizes '\r\n' and '\n' as line breaks
      });

      let bufferedLines: string[] = [];
      for await (const line of rl) {
        // blank lines are used to separate JSON objects in the stream
        if (line === '') {
          if (bufferedLines.length === 0) {
            continue; // no data to yield
          }
          yield JSON.parse(bufferedLines.join('\n')) as T;
          bufferedLines = []; // Reset the buffer after yielding
        } else if (line.startsWith('data: ')) {
          bufferedLines.push(line.slice(6).trim());
        } else {
          throw new Error(`Unexpected line format in response: ${line}`);
        }
      }
    })();
  }

  getMethodUrl(method: string): string {
    const endpoint =
      process.env['CODE_ASSIST_ENDPOINT'] ?? CODE_ASSIST_ENDPOINT;
    return `${endpoint}/${CODE_ASSIST_API_VERSION}:${method}`;
  }
}

function isVpcScAffectedUser(error: unknown): boolean {
  if (error && typeof error === 'object' && 'response' in error) {
    const gaxiosError = error as {
      response?: {
        data?: unknown;
      };
    };
    const response = gaxiosError.response?.data as
      | GoogleRpcResponse
      | undefined;
    if (Array.isArray(response?.error?.details)) {
      return response.error.details.some(
        (detail) => detail.reason === 'SECURITY_POLICY_VIOLATED',
      );
    }
  }
  return false;
}
@@ -1,224 +0,0 @@
|
||||
/**
|
||||
* @license
|
||||
* Copyright 2025 Google LLC
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
|
||||
import { setupUser, ProjectIdRequiredError } from './setup.js';
|
||||
import { CodeAssistServer } from '../code_assist/server.js';
|
||||
import type { OAuth2Client } from 'google-auth-library';
|
||||
import type { GeminiUserTier } from './types.js';
|
||||
import { UserTierId } from './types.js';
|
||||
|
||||
vi.mock('../code_assist/server.js');
|
||||
|
||||
const mockPaidTier: GeminiUserTier = {
|
||||
id: UserTierId.STANDARD,
|
||||
name: 'paid',
|
||||
description: 'Paid tier',
|
||||
isDefault: true,
|
||||
};
|
||||
|
||||
const mockFreeTier: GeminiUserTier = {
|
||||
id: UserTierId.FREE,
|
||||
name: 'free',
|
||||
description: 'Free tier',
|
||||
isDefault: true,
|
||||
};
|
||||
|
||||
describe('setupUser for existing user', () => {
|
||||
let mockLoad: ReturnType<typeof vi.fn>;
|
||||
let mockOnboardUser: ReturnType<typeof vi.fn>;
|
||||
|
||||
beforeEach(() => {
|
||||
vi.resetAllMocks();
|
||||
mockLoad = vi.fn();
|
||||
mockOnboardUser = vi.fn().mockResolvedValue({
|
||||
done: true,
|
||||
response: {
|
||||
cloudaicompanionProject: {
|
||||
id: 'server-project',
|
||||
},
|
||||
},
|
||||
});
|
||||
vi.mocked(CodeAssistServer).mockImplementation(
|
||||
() =>
|
||||
({
|
||||
loadCodeAssist: mockLoad,
|
||||
onboardUser: mockOnboardUser,
|
||||
}) as unknown as CodeAssistServer,
|
||||
);
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
vi.unstubAllEnvs();
|
||||
});
|
||||
|
||||
it('should use GOOGLE_CLOUD_PROJECT when set and project from server is undefined', async () => {
|
||||
vi.stubEnv('GOOGLE_CLOUD_PROJECT', 'test-project');
|
||||
mockLoad.mockResolvedValue({
|
||||
currentTier: mockPaidTier,
|
||||
});
|
||||
await setupUser({} as OAuth2Client);
|
||||
expect(CodeAssistServer).toHaveBeenCalledWith(
|
||||
{},
|
||||
'test-project',
|
||||
{},
|
||||
'',
|
||||
undefined,
|
||||
);
|
||||
});
|
||||
|
||||
it('should ignore GOOGLE_CLOUD_PROJECT when project from server is set', async () => {
|
||||
vi.stubEnv('GOOGLE_CLOUD_PROJECT', 'test-project');
|
||||
mockLoad.mockResolvedValue({
|
||||
cloudaicompanionProject: 'server-project',
|
||||
currentTier: mockPaidTier,
|
||||
});
|
||||
const projectId = await setupUser({} as OAuth2Client);
|
||||
expect(CodeAssistServer).toHaveBeenCalledWith(
|
||||
{},
|
||||
'test-project',
|
||||
{},
|
||||
'',
|
||||
undefined,
|
||||
);
|
||||
expect(projectId).toEqual({
|
||||
projectId: 'server-project',
|
||||
userTier: 'standard-tier',
|
||||
});
|
||||
});
|
||||
|
||||
it('should throw ProjectIdRequiredError when no project ID is available', async () => {
|
||||
vi.stubEnv('GOOGLE_CLOUD_PROJECT', '');
|
||||
// And the server itself requires a project ID internally
|
||||
vi.mocked(CodeAssistServer).mockImplementation(() => {
|
||||
throw new ProjectIdRequiredError();
|
||||
});
|
||||
|
||||
await expect(setupUser({} as OAuth2Client)).rejects.toThrow(
|
||||
ProjectIdRequiredError,
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
describe('setupUser for new user', () => {
|
||||
let mockLoad: ReturnType<typeof vi.fn>;
|
||||
let mockOnboardUser: ReturnType<typeof vi.fn>;
|
||||
|
||||
beforeEach(() => {
|
||||
vi.resetAllMocks();
|
||||
mockLoad = vi.fn();
|
||||
mockOnboardUser = vi.fn().mockResolvedValue({
|
||||
done: true,
|
||||
response: {
|
||||
cloudaicompanionProject: {
|
||||
id: 'server-project',
|
||||
},
|
||||
},
|
||||
});
|
||||
vi.mocked(CodeAssistServer).mockImplementation(
|
||||
() =>
|
||||
({
|
||||
loadCodeAssist: mockLoad,
|
||||
onboardUser: mockOnboardUser,
|
||||
}) as unknown as CodeAssistServer,
|
||||
);
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
vi.unstubAllEnvs();
|
||||
});
|
||||
|
||||
it('should use GOOGLE_CLOUD_PROJECT when set and onboard a new paid user', async () => {
|
||||
vi.stubEnv('GOOGLE_CLOUD_PROJECT', 'test-project');
|
||||
mockLoad.mockResolvedValue({
|
||||
allowedTiers: [mockPaidTier],
|
||||
});
|
||||
const userData = await setupUser({} as OAuth2Client);
|
||||
expect(CodeAssistServer).toHaveBeenCalledWith(
|
||||
{},
|
||||
'test-project',
|
||||
{},
|
||||
'',
|
||||
undefined,
|
||||
);
|
||||
expect(mockLoad).toHaveBeenCalled();
|
||||
expect(mockOnboardUser).toHaveBeenCalledWith({
|
||||
tierId: 'standard-tier',
|
||||
cloudaicompanionProject: 'test-project',
|
||||
metadata: {
|
||||
ideType: 'IDE_UNSPECIFIED',
|
||||
platform: 'PLATFORM_UNSPECIFIED',
|
||||
pluginType: 'GEMINI',
|
||||
duetProject: 'test-project',
|
||||
},
|
||||
});
|
||||
expect(userData).toEqual({
|
||||
projectId: 'server-project',
|
||||
userTier: 'standard-tier',
|
||||
});
|
||||
});
|
||||
|
||||
it('should onboard a new free user when GOOGLE_CLOUD_PROJECT is not set', async () => {
|
||||
vi.stubEnv('GOOGLE_CLOUD_PROJECT', '');
|
||||
mockLoad.mockResolvedValue({
|
||||
allowedTiers: [mockFreeTier],
|
||||
});
|
||||
const userData = await setupUser({} as OAuth2Client);
|
||||
expect(CodeAssistServer).toHaveBeenCalledWith(
|
||||
{},
|
||||
undefined,
|
||||
{},
|
||||
'',
|
||||
undefined,
|
||||
);
|
||||
expect(mockLoad).toHaveBeenCalled();
|
||||
expect(mockOnboardUser).toHaveBeenCalledWith({
|
||||
tierId: 'free-tier',
|
||||
cloudaicompanionProject: undefined,
|
||||
metadata: {
|
||||
ideType: 'IDE_UNSPECIFIED',
|
||||
platform: 'PLATFORM_UNSPECIFIED',
|
||||
pluginType: 'GEMINI',
|
||||
},
|
||||
});
|
||||
expect(userData).toEqual({
|
||||
projectId: 'server-project',
|
||||
userTier: 'free-tier',
|
||||
});
|
||||
});
|
||||
|
||||
it('should use GOOGLE_CLOUD_PROJECT when onboard response has no project ID', async () => {
|
||||
vi.stubEnv('GOOGLE_CLOUD_PROJECT', 'test-project');
|
||||
mockLoad.mockResolvedValue({
|
||||
allowedTiers: [mockPaidTier],
|
||||
});
|
||||
mockOnboardUser.mockResolvedValue({
|
||||
done: true,
|
||||
response: {
|
||||
cloudaicompanionProject: undefined,
|
||||
},
|
||||
});
|
||||
const userData = await setupUser({} as OAuth2Client);
|
||||
expect(userData).toEqual({
|
||||
projectId: 'test-project',
|
||||
userTier: 'standard-tier',
|
||||
});
|
||||
});
|
||||
|
||||
it('should throw ProjectIdRequiredError when no project ID is available', async () => {
|
||||
vi.stubEnv('GOOGLE_CLOUD_PROJECT', '');
|
||||
mockLoad.mockResolvedValue({
|
||||
allowedTiers: [mockPaidTier],
|
||||
});
|
||||
mockOnboardUser.mockResolvedValue({
|
||||
done: true,
|
||||
response: {},
|
||||
});
|
||||
await expect(setupUser({} as OAuth2Client)).rejects.toThrow(
|
||||
ProjectIdRequiredError,
|
||||
);
|
||||
});
|
||||
});
|
||||
@@ -1,124 +0,0 @@
/**
 * @license
 * Copyright 2025 Google LLC
 * SPDX-License-Identifier: Apache-2.0
 */

import type {
  ClientMetadata,
  GeminiUserTier,
  LoadCodeAssistResponse,
  OnboardUserRequest,
} from './types.js';
import { UserTierId } from './types.js';
import { CodeAssistServer } from './server.js';
import type { OAuth2Client } from 'google-auth-library';

export class ProjectIdRequiredError extends Error {
  constructor() {
    super(
      'This account requires setting the GOOGLE_CLOUD_PROJECT env var. See https://goo.gle/gemini-cli-auth-docs#workspace-gca',
    );
  }
}

export interface UserData {
  projectId: string;
  userTier: UserTierId;
}

/**
 *
 * @param projectId the user's project id, if any
 * @returns the user's actual project id
 */
export async function setupUser(client: OAuth2Client): Promise<UserData> {
  const projectId = process.env['GOOGLE_CLOUD_PROJECT'] || undefined;
  const caServer = new CodeAssistServer(client, projectId, {}, '', undefined);
  const coreClientMetadata: ClientMetadata = {
    ideType: 'IDE_UNSPECIFIED',
    platform: 'PLATFORM_UNSPECIFIED',
    pluginType: 'GEMINI',
  };

  const loadRes = await caServer.loadCodeAssist({
    cloudaicompanionProject: projectId,
    metadata: {
      ...coreClientMetadata,
      duetProject: projectId,
    },
  });

  if (loadRes.currentTier) {
    if (!loadRes.cloudaicompanionProject) {
      if (projectId) {
        return {
          projectId,
          userTier: loadRes.currentTier.id,
        };
      }
      throw new ProjectIdRequiredError();
    }
    return {
      projectId: loadRes.cloudaicompanionProject,
      userTier: loadRes.currentTier.id,
    };
  }

  const tier = getOnboardTier(loadRes);

  let onboardReq: OnboardUserRequest;
  if (tier.id === UserTierId.FREE) {
    // The free tier uses a managed google cloud project. Setting a project in the `onboardUser` request causes a `Precondition Failed` error.
    onboardReq = {
      tierId: tier.id,
      cloudaicompanionProject: undefined,
      metadata: coreClientMetadata,
    };
  } else {
    onboardReq = {
      tierId: tier.id,
      cloudaicompanionProject: projectId,
      metadata: {
        ...coreClientMetadata,
        duetProject: projectId,
      },
    };
  }

  // Poll onboardUser until long running operation is complete.
  let lroRes = await caServer.onboardUser(onboardReq);
  while (!lroRes.done) {
    await new Promise((f) => setTimeout(f, 5000));
    lroRes = await caServer.onboardUser(onboardReq);
  }

  if (!lroRes.response?.cloudaicompanionProject?.id) {
    if (projectId) {
      return {
        projectId,
        userTier: tier.id,
      };
    }
    throw new ProjectIdRequiredError();
  }

  return {
    projectId: lroRes.response.cloudaicompanionProject.id,
    userTier: tier.id,
  };
}

function getOnboardTier(res: LoadCodeAssistResponse): GeminiUserTier {
  for (const tier of res.allowedTiers || []) {
    if (tier.isDefault) {
      return tier;
    }
  }
  return {
    name: '',
    description: '',
    id: UserTierId.LEGACY,
    userDefinedCloudaicompanionProject: true,
  };
}
@@ -1,201 +0,0 @@
/**
 * @license
 * Copyright 2025 Google LLC
 * SPDX-License-Identifier: Apache-2.0
 */

export interface ClientMetadata {
  ideType?: ClientMetadataIdeType;
  ideVersion?: string;
  pluginVersion?: string;
  platform?: ClientMetadataPlatform;
  updateChannel?: string;
  duetProject?: string;
  pluginType?: ClientMetadataPluginType;
  ideName?: string;
}

export type ClientMetadataIdeType =
  | 'IDE_UNSPECIFIED'
  | 'VSCODE'
  | 'INTELLIJ'
  | 'VSCODE_CLOUD_WORKSTATION'
  | 'INTELLIJ_CLOUD_WORKSTATION'
  | 'CLOUD_SHELL';
export type ClientMetadataPlatform =
  | 'PLATFORM_UNSPECIFIED'
  | 'DARWIN_AMD64'
  | 'DARWIN_ARM64'
  | 'LINUX_AMD64'
  | 'LINUX_ARM64'
  | 'WINDOWS_AMD64';
export type ClientMetadataPluginType =
  | 'PLUGIN_UNSPECIFIED'
  | 'CLOUD_CODE'
  | 'GEMINI'
  | 'AIPLUGIN_INTELLIJ'
  | 'AIPLUGIN_STUDIO';

export interface LoadCodeAssistRequest {
  cloudaicompanionProject?: string;
  metadata: ClientMetadata;
}

/**
 * Represents LoadCodeAssistResponse proto json field
 * http://google3/google/internal/cloud/code/v1internal/cloudcode.proto;l=224
 */
export interface LoadCodeAssistResponse {
  currentTier?: GeminiUserTier | null;
  allowedTiers?: GeminiUserTier[] | null;
  ineligibleTiers?: IneligibleTier[] | null;
  cloudaicompanionProject?: string | null;
}

/**
 * GeminiUserTier reflects the structure received from the CodeAssist when calling LoadCodeAssist.
 */
export interface GeminiUserTier {
  id: UserTierId;
  name?: string;
  description?: string;
  // This value is used to declare whether a given tier requires the user to configure the project setting on the IDE settings or not.
  userDefinedCloudaicompanionProject?: boolean | null;
  isDefault?: boolean;
  privacyNotice?: PrivacyNotice;
  hasAcceptedTos?: boolean;
  hasOnboardedPreviously?: boolean;
}

/**
 * Includes information specifying the reasons for a user's ineligibility for a specific tier.
 * @param reasonCode mnemonic code representing the reason for in-eligibility.
 * @param reasonMessage message to display to the user.
 * @param tierId id of the tier.
 * @param tierName name of the tier.
 */
export interface IneligibleTier {
  reasonCode: IneligibleTierReasonCode;
  reasonMessage: string;
  tierId: UserTierId;
  tierName: string;
}

/**
 * List of predefined reason codes when a tier is blocked from a specific tier.
 * https://source.corp.google.com/piper///depot/google3/google/internal/cloud/code/v1internal/cloudcode.proto;l=378
 */
export enum IneligibleTierReasonCode {
  // go/keep-sorted start
  DASHER_USER = 'DASHER_USER',
  INELIGIBLE_ACCOUNT = 'INELIGIBLE_ACCOUNT',
  NON_USER_ACCOUNT = 'NON_USER_ACCOUNT',
  RESTRICTED_AGE = 'RESTRICTED_AGE',
  RESTRICTED_NETWORK = 'RESTRICTED_NETWORK',
  UNKNOWN = 'UNKNOWN',
  UNKNOWN_LOCATION = 'UNKNOWN_LOCATION',
  UNSUPPORTED_LOCATION = 'UNSUPPORTED_LOCATION',
  // go/keep-sorted end
}
/**
 * UserTierId represents IDs returned from the Cloud Code Private API representing a user's tier
 *
 * //depot/google3/cloud/developer_experience/cloudcode/pa/service/usertier.go;l=16
 */
export enum UserTierId {
  FREE = 'free-tier',
  LEGACY = 'legacy-tier',
  STANDARD = 'standard-tier',
}

/**
 * PrivacyNotice reflects the structure received from the CodeAssist in regards to a tier
 * privacy notice.
 */
export interface PrivacyNotice {
  showNotice: boolean;
  noticeText?: string;
}

/**
 * Proto signature of OnboardUserRequest as payload to OnboardUser call
 */
export interface OnboardUserRequest {
  tierId: string | undefined;
  cloudaicompanionProject: string | undefined;
  metadata: ClientMetadata | undefined;
}

/**
 * Represents LongRunningOperation proto
 * http://google3/google/longrunning/operations.proto;rcl=698857719;l=107
 */
export interface LongRunningOperationResponse {
  name: string;
  done?: boolean;
  response?: OnboardUserResponse;
}

/**
 * Represents OnboardUserResponse proto
 * http://google3/google/internal/cloud/code/v1internal/cloudcode.proto;l=215
 */
export interface OnboardUserResponse {
  // tslint:disable-next-line:enforce-name-casing This is the name of the field in the proto.
  cloudaicompanionProject?: {
    id: string;
    name: string;
  };
}

/**
 * Status code of user license status
 * it does not strictly correspond to the proto
 * Error value is an additional value assigned to error responses from OnboardUser
 */
export enum OnboardUserStatusCode {
  Default = 'DEFAULT',
  Notice = 'NOTICE',
  Warning = 'WARNING',
  Error = 'ERROR',
}

/**
 * Status of user onboarded to gemini
 */
export interface OnboardUserStatus {
  statusCode: OnboardUserStatusCode;
  displayMessage: string;
  helpLink: HelpLinkUrl | undefined;
}

export interface HelpLinkUrl {
  description: string;
  url: string;
}

export interface SetCodeAssistGlobalUserSettingRequest {
  cloudaicompanionProject?: string;
  freeTierDataCollectionOptin: boolean;
}

export interface CodeAssistGlobalUserSettingResponse {
  cloudaicompanionProject?: string;
  freeTierDataCollectionOptin: boolean;
}

/**
 * Relevant fields that can be returned from a Google RPC response
 */
export interface GoogleRpcResponse {
  error?: {
    details?: GoogleRpcErrorInfo[];
  };
}

/**
 * Relevant fields that can be returned in the details of an error returned from GoogleRPCs
 */
interface GoogleRpcErrorInfo {
  reason?: string;
}
@@ -283,23 +283,6 @@ describe('Server Config (config.ts)', () => {
|
||||
expect(config.isInFallbackMode()).toBe(false);
|
||||
});
|
||||
|
||||
it('should strip thoughts when switching from GenAI to Vertex', async () => {
|
||||
const config = new Config(baseParams);
|
||||
|
||||
vi.mocked(createContentGeneratorConfig).mockImplementation(
|
||||
(_: Config, authType: AuthType | undefined) =>
|
||||
({ authType }) as unknown as ContentGeneratorConfig,
|
||||
);
|
||||
|
||||
await config.refreshAuth(AuthType.USE_GEMINI);
|
||||
|
||||
await config.refreshAuth(AuthType.LOGIN_WITH_GOOGLE);
|
||||
|
||||
expect(
|
||||
config.getGeminiClient().stripThoughtsFromHistory,
|
||||
).toHaveBeenCalledWith();
|
||||
});
|
||||
|
||||
it('should not strip thoughts when switching from Vertex to GenAI', async () => {
|
||||
const config = new Config(baseParams);
|
||||
|
||||
|
||||
@@ -16,6 +16,7 @@ import { ProxyAgent, setGlobalDispatcher } from 'undici';
|
||||
import type {
|
||||
ContentGenerator,
|
||||
ContentGeneratorConfig,
|
||||
AuthType,
|
||||
} from '../core/contentGenerator.js';
|
||||
import type { FallbackModelHandler } from '../fallback/types.js';
|
||||
import type { MCPOAuthConfig } from '../mcp/oauth-provider.js';
|
||||
@@ -26,7 +27,6 @@ import type { AnyToolInvocation } from '../tools/tools.js';
|
||||
import { BaseLlmClient } from '../core/baseLlmClient.js';
|
||||
import { GeminiClient } from '../core/client.js';
|
||||
import {
|
||||
AuthType,
|
||||
createContentGenerator,
|
||||
createContentGeneratorConfig,
|
||||
} from '../core/contentGenerator.js';
|
||||
@@ -684,16 +684,6 @@ export class Config {
|
||||
}
|
||||
|
||||
async refreshAuth(authMethod: AuthType, isInitialAuth?: boolean) {
|
||||
// Vertex and GenAI have incompatible encryption, and sending history with
// thoughtSignature from GenAI to Vertex will fail, so we need to strip them.
|
||||
if (
|
||||
this.contentGeneratorConfig?.authType === AuthType.USE_GEMINI &&
|
||||
authMethod === AuthType.LOGIN_WITH_GOOGLE
|
||||
) {
|
||||
// Restore the conversation history to the new client
|
||||
this.geminiClient.stripThoughtsFromHistory();
|
||||
}
|
||||
|
||||
const newContentGeneratorConfig = createContentGeneratorConfig(
|
||||
this,
|
||||
authMethod,
|
||||
|
||||
@@ -31,7 +31,7 @@ describe('Flash Model Fallback Configuration', () => {
|
||||
config as unknown as { contentGeneratorConfig: unknown }
|
||||
).contentGeneratorConfig = {
|
||||
model: DEFAULT_GEMINI_MODEL,
|
||||
authType: 'oauth-personal',
|
||||
authType: 'gemini-api-key',
|
||||
};
|
||||
});
|
||||
|
||||
|
||||
@@ -15,6 +15,7 @@ export const OAUTH_FILE = 'oauth_creds.json';
|
||||
const TMP_DIR_NAME = 'tmp';
|
||||
const BIN_DIR_NAME = 'bin';
|
||||
const PROJECT_DIR_NAME = 'projects';
|
||||
const IDE_DIR_NAME = 'ide';
|
||||
|
||||
export class Storage {
|
||||
private readonly targetDir: string;
|
||||
@@ -59,6 +60,10 @@ export class Storage {
|
||||
return path.join(Storage.getGlobalQwenDir(), TMP_DIR_NAME);
|
||||
}
|
||||
|
||||
static getGlobalIdeDir(): string {
|
||||
return path.join(Storage.getGlobalQwenDir(), IDE_DIR_NAME);
|
||||
}
|
||||
|
||||
static getGlobalBinDir(): string {
|
||||
return path.join(Storage.getGlobalQwenDir(), BIN_DIR_NAME);
|
||||
}
|
||||
|
||||
@@ -73,6 +73,7 @@ describe('OpenAIContentGenerator Timeout Handling', () => {
|
||||
}),
|
||||
buildClient: vi.fn().mockReturnValue(mockOpenAIClient),
|
||||
buildRequest: vi.fn().mockImplementation((req) => req),
|
||||
getDefaultGenerationConfig: vi.fn().mockReturnValue({}),
|
||||
};
|
||||
|
||||
// Create generator instance
|
||||
@@ -299,6 +300,7 @@ describe('OpenAIContentGenerator Timeout Handling', () => {
|
||||
}),
|
||||
buildClient: vi.fn().mockReturnValue(mockOpenAIClient),
|
||||
buildRequest: vi.fn().mockImplementation((req) => req),
|
||||
getDefaultGenerationConfig: vi.fn().mockReturnValue({}),
|
||||
};
|
||||
|
||||
new OpenAIContentGenerator(
|
||||
@@ -333,6 +335,7 @@ describe('OpenAIContentGenerator Timeout Handling', () => {
|
||||
}),
|
||||
buildClient: vi.fn().mockReturnValue(mockOpenAIClient),
|
||||
buildRequest: vi.fn().mockImplementation((req) => req),
|
||||
getDefaultGenerationConfig: vi.fn().mockReturnValue({}),
|
||||
};
|
||||
|
||||
new OpenAIContentGenerator(
|
||||
|
||||
@@ -146,13 +146,11 @@ describe('BaseLlmClient', () => {
|
||||
// Validate the parameters passed to the underlying generator
|
||||
expect(mockGenerateContent).toHaveBeenCalledTimes(1);
|
||||
expect(mockGenerateContent).toHaveBeenCalledWith(
|
||||
{
|
||||
expect.objectContaining({
|
||||
model: 'test-model',
|
||||
contents: defaultOptions.contents,
|
||||
config: {
|
||||
config: expect.objectContaining({
|
||||
abortSignal: defaultOptions.abortSignal,
|
||||
temperature: 0,
|
||||
topP: 1,
|
||||
tools: [
|
||||
{
|
||||
functionDeclarations: [
|
||||
@@ -164,9 +162,8 @@ describe('BaseLlmClient', () => {
|
||||
],
|
||||
},
|
||||
],
|
||||
// Crucial: systemInstruction should NOT be in the config object if not provided
|
||||
},
|
||||
},
|
||||
}),
|
||||
}),
|
||||
'test-prompt-id',
|
||||
);
|
||||
});
|
||||
@@ -189,7 +186,6 @@ describe('BaseLlmClient', () => {
|
||||
expect.objectContaining({
|
||||
config: expect.objectContaining({
|
||||
temperature: 0.8,
|
||||
topP: 1, // Default should remain if not overridden
|
||||
topK: 10,
|
||||
tools: expect.any(Array),
|
||||
}),
|
||||
|
||||
@@ -64,12 +64,6 @@ export interface GenerateJsonOptions {
|
||||
* A client dedicated to stateless, utility-focused LLM calls.
|
||||
*/
|
||||
export class BaseLlmClient {
|
||||
// Default configuration for utility tasks
|
||||
private readonly defaultUtilityConfig: GenerateContentConfig = {
|
||||
temperature: 0,
|
||||
topP: 1,
|
||||
};
|
||||
|
||||
constructor(
|
||||
private readonly contentGenerator: ContentGenerator,
|
||||
private readonly config: Config,
|
||||
@@ -90,7 +84,6 @@ export class BaseLlmClient {
|
||||
|
||||
const requestConfig: GenerateContentConfig = {
|
||||
abortSignal,
|
||||
...this.defaultUtilityConfig,
|
||||
...options.config,
|
||||
...(systemInstruction && { systemInstruction }),
|
||||
};
|
||||
|
||||
@@ -15,11 +15,7 @@ import {
|
||||
} from 'vitest';
|
||||
|
||||
import type { Content, GenerateContentResponse, Part } from '@google/genai';
|
||||
import {
|
||||
isThinkingDefault,
|
||||
isThinkingSupported,
|
||||
GeminiClient,
|
||||
} from './client.js';
|
||||
import { GeminiClient } from './client.js';
|
||||
import { findCompressSplitPoint } from '../services/chatCompressionService.js';
|
||||
import {
|
||||
AuthType,
|
||||
@@ -247,40 +243,6 @@ describe('findCompressSplitPoint', () => {
|
||||
});
|
||||
});
|
||||
|
||||
describe('isThinkingSupported', () => {
|
||||
it('should return true for gemini-2.5', () => {
|
||||
expect(isThinkingSupported('gemini-2.5')).toBe(true);
|
||||
});
|
||||
|
||||
it('should return true for gemini-2.5-pro', () => {
|
||||
expect(isThinkingSupported('gemini-2.5-pro')).toBe(true);
|
||||
});
|
||||
|
||||
it('should return false for other models', () => {
|
||||
expect(isThinkingSupported('gemini-1.5-flash')).toBe(false);
|
||||
expect(isThinkingSupported('some-other-model')).toBe(false);
|
||||
});
|
||||
});
|
||||
|
||||
describe('isThinkingDefault', () => {
|
||||
it('should return false for gemini-2.5-flash-lite', () => {
|
||||
expect(isThinkingDefault('gemini-2.5-flash-lite')).toBe(false);
|
||||
});
|
||||
|
||||
it('should return true for gemini-2.5', () => {
|
||||
expect(isThinkingDefault('gemini-2.5')).toBe(true);
|
||||
});
|
||||
|
||||
it('should return true for gemini-2.5-pro', () => {
|
||||
expect(isThinkingDefault('gemini-2.5-pro')).toBe(true);
|
||||
});
|
||||
|
||||
it('should return false for other models', () => {
|
||||
expect(isThinkingDefault('gemini-1.5-flash')).toBe(false);
|
||||
expect(isThinkingDefault('some-other-model')).toBe(false);
|
||||
});
|
||||
});
|
||||
|
||||
describe('Gemini Client (client.ts)', () => {
|
||||
let mockContentGenerator: ContentGenerator;
|
||||
let mockConfig: Config;
|
||||
@@ -2304,16 +2266,15 @@ ${JSON.stringify(
|
||||
);
|
||||
|
||||
expect(mockContentGenerator.generateContent).toHaveBeenCalledWith(
|
||||
{
|
||||
expect.objectContaining({
|
||||
model: DEFAULT_GEMINI_FLASH_MODEL,
|
||||
config: {
|
||||
config: expect.objectContaining({
|
||||
abortSignal,
|
||||
systemInstruction: getCoreSystemPrompt(''),
|
||||
temperature: 0.5,
|
||||
topP: 1,
|
||||
},
|
||||
}),
|
||||
contents,
|
||||
},
|
||||
}),
|
||||
'test-session-id',
|
||||
);
|
||||
});
|
||||
|
||||
@@ -15,11 +15,7 @@ import type {
|
||||
|
||||
// Config
|
||||
import { ApprovalMode, type Config } from '../config/config.js';
|
||||
import {
|
||||
DEFAULT_GEMINI_FLASH_MODEL,
|
||||
DEFAULT_GEMINI_MODEL_AUTO,
|
||||
DEFAULT_THINKING_MODE,
|
||||
} from '../config/models.js';
|
||||
import { DEFAULT_GEMINI_FLASH_MODEL } from '../config/models.js';
|
||||
|
||||
// Core modules
|
||||
import type { ContentGenerator } from './contentGenerator.js';
|
||||
@@ -78,25 +74,10 @@ import { type File, type IdeContext } from '../ide/types.js';
|
||||
// Fallback handling
|
||||
import { handleFallback } from '../fallback/handler.js';
|
||||
|
||||
export function isThinkingSupported(model: string) {
|
||||
return model.startsWith('gemini-2.5') || model === DEFAULT_GEMINI_MODEL_AUTO;
|
||||
}
|
||||
|
||||
export function isThinkingDefault(model: string) {
|
||||
if (model.startsWith('gemini-2.5-flash-lite')) {
|
||||
return false;
|
||||
}
|
||||
return model.startsWith('gemini-2.5') || model === DEFAULT_GEMINI_MODEL_AUTO;
|
||||
}
|
||||
|
||||
const MAX_TURNS = 100;
|
||||
|
||||
export class GeminiClient {
|
||||
private chat?: GeminiChat;
|
||||
private readonly generateContentConfig: GenerateContentConfig = {
|
||||
temperature: 0,
|
||||
topP: 1,
|
||||
};
|
||||
private sessionTurnCount = 0;
|
||||
|
||||
private readonly loopDetector: LoopDetectionService;
|
||||
@@ -208,20 +189,10 @@ export class GeminiClient {
|
||||
const model = this.config.getModel();
|
||||
const systemInstruction = getCoreSystemPrompt(userMemory, model);
|
||||
|
||||
const config: GenerateContentConfig = { ...this.generateContentConfig };
|
||||
|
||||
if (isThinkingSupported(model)) {
|
||||
config.thinkingConfig = {
|
||||
includeThoughts: true,
|
||||
thinkingBudget: DEFAULT_THINKING_MODE,
|
||||
};
|
||||
}
|
||||
|
||||
return new GeminiChat(
|
||||
this.config,
|
||||
{
|
||||
systemInstruction,
|
||||
...config,
|
||||
tools,
|
||||
},
|
||||
history,
|
||||
@@ -618,11 +589,6 @@ export class GeminiClient {
|
||||
): Promise<GenerateContentResponse> {
|
||||
let currentAttemptModel: string = model;
|
||||
|
||||
const configToUse: GenerateContentConfig = {
|
||||
...this.generateContentConfig,
|
||||
...generationConfig,
|
||||
};
|
||||
|
||||
try {
|
||||
const userMemory = this.config.getUserMemory();
|
||||
const finalSystemInstruction = generationConfig.systemInstruction
|
||||
@@ -631,7 +597,7 @@ export class GeminiClient {
|
||||
|
||||
const requestConfig: GenerateContentConfig = {
|
||||
abortSignal,
|
||||
...configToUse,
|
||||
...generationConfig,
|
||||
systemInstruction: finalSystemInstruction,
|
||||
};
|
||||
|
||||
@@ -672,7 +638,7 @@ export class GeminiClient {
|
||||
`Error generating content via API with model ${currentAttemptModel}.`,
|
||||
{
|
||||
requestContents: contents,
|
||||
requestConfig: configToUse,
|
||||
requestConfig: generationConfig,
|
||||
},
|
||||
'generateContent-api',
|
||||
);
|
||||
|
||||
@@ -5,42 +5,19 @@
|
||||
*/
|
||||
|
||||
import { describe, it, expect, vi } from 'vitest';
|
||||
import type { ContentGenerator } from './contentGenerator.js';
|
||||
import { createContentGenerator, AuthType } from './contentGenerator.js';
|
||||
import { createCodeAssistContentGenerator } from '../code_assist/codeAssist.js';
|
||||
import { GoogleGenAI } from '@google/genai';
|
||||
import type { Config } from '../config/config.js';
|
||||
import { LoggingContentGenerator } from './loggingContentGenerator.js';
|
||||
import { LoggingContentGenerator } from './geminiContentGenerator/loggingContentGenerator.js';
|
||||
|
||||
vi.mock('../code_assist/codeAssist.js');
|
||||
vi.mock('@google/genai');
|
||||
|
||||
const mockConfig = {
|
||||
getCliVersion: vi.fn().mockReturnValue('1.0.0'),
|
||||
} as unknown as Config;
|
||||
|
||||
describe('createContentGenerator', () => {
|
||||
it('should create a CodeAssistContentGenerator', async () => {
|
||||
const mockGenerator = {} as unknown as ContentGenerator;
|
||||
vi.mocked(createCodeAssistContentGenerator).mockResolvedValue(
|
||||
mockGenerator as never,
|
||||
);
|
||||
const generator = await createContentGenerator(
|
||||
{
|
||||
model: 'test-model',
|
||||
authType: AuthType.LOGIN_WITH_GOOGLE,
|
||||
},
|
||||
mockConfig,
|
||||
);
|
||||
expect(createCodeAssistContentGenerator).toHaveBeenCalled();
|
||||
expect(generator).toEqual(
|
||||
new LoggingContentGenerator(mockGenerator, mockConfig),
|
||||
);
|
||||
});
|
||||
|
||||
it('should create a GoogleGenAI content generator', async () => {
|
||||
it('should create a Gemini content generator', async () => {
|
||||
const mockConfig = {
|
||||
getUsageStatisticsEnabled: () => true,
|
||||
getContentGeneratorConfig: () => ({}),
|
||||
getCliVersion: () => '1.0.0',
|
||||
} as unknown as Config;
|
||||
|
||||
const mockGenerator = {
|
||||
@@ -65,17 +42,17 @@ describe('createContentGenerator', () => {
|
||||
},
|
||||
},
|
||||
});
|
||||
expect(generator).toEqual(
|
||||
new LoggingContentGenerator(
|
||||
(mockGenerator as GoogleGenAI).models,
|
||||
mockConfig,
|
||||
),
|
||||
);
|
||||
// We expect it to be a LoggingContentGenerator wrapping a GeminiContentGenerator
|
||||
expect(generator).toBeInstanceOf(LoggingContentGenerator);
|
||||
const wrapped = (generator as LoggingContentGenerator).getWrapped();
|
||||
expect(wrapped).toBeDefined();
|
||||
});
|
||||
|
||||
it('should create a GoogleGenAI content generator with client install id logging disabled', async () => {
|
||||
it('should create a Gemini content generator with client install id logging disabled', async () => {
|
||||
const mockConfig = {
|
||||
getUsageStatisticsEnabled: () => false,
|
||||
getContentGeneratorConfig: () => ({}),
|
||||
getCliVersion: () => '1.0.0',
|
||||
} as unknown as Config;
|
||||
const mockGenerator = {
|
||||
models: {},
|
||||
@@ -98,11 +75,6 @@ describe('createContentGenerator', () => {
|
||||
},
|
||||
},
|
||||
});
|
||||
expect(generator).toEqual(
|
||||
new LoggingContentGenerator(
|
||||
(mockGenerator as GoogleGenAI).models,
|
||||
mockConfig,
|
||||
),
|
||||
);
|
||||
expect(generator).toBeInstanceOf(LoggingContentGenerator);
|
||||
});
|
||||
});
|
||||
|
||||
@@ -12,15 +12,9 @@ import type {
|
||||
GenerateContentParameters,
|
||||
GenerateContentResponse,
|
||||
} from '@google/genai';
|
||||
import { GoogleGenAI } from '@google/genai';
|
||||
import { createCodeAssistContentGenerator } from '../code_assist/codeAssist.js';
|
||||
import { DEFAULT_QWEN_MODEL } from '../config/models.js';
|
||||
import type { Config } from '../config/config.js';
|
||||
|
||||
import type { UserTierId } from '../code_assist/types.js';
|
||||
import { InstallationManager } from '../utils/installationManager.js';
|
||||
import { LoggingContentGenerator } from './loggingContentGenerator.js';
|
||||
|
||||
/**
|
||||
* Interface abstracting the core functionalities for generating content and counting tokens.
|
||||
*/
|
||||
@@ -39,14 +33,12 @@ export interface ContentGenerator {
|
||||
|
||||
embedContent(request: EmbedContentParameters): Promise<EmbedContentResponse>;
|
||||
|
||||
userTier?: UserTierId;
|
||||
useSummarizedThinking(): boolean;
|
||||
}
|
||||
|
||||
export enum AuthType {
|
||||
LOGIN_WITH_GOOGLE = 'oauth-personal',
|
||||
USE_GEMINI = 'gemini-api-key',
|
||||
USE_VERTEX_AI = 'vertex-ai',
|
||||
CLOUD_SHELL = 'cloud-shell',
|
||||
USE_OPENAI = 'openai',
|
||||
QWEN_OAUTH = 'qwen-oauth',
|
||||
}
|
||||
@@ -59,12 +51,9 @@ export type ContentGeneratorConfig = {
|
||||
authType?: AuthType | undefined;
|
||||
enableOpenAILogging?: boolean;
|
||||
openAILoggingDir?: string;
|
||||
// Timeout configuration in milliseconds
|
||||
timeout?: number;
|
||||
// Maximum retries for failed requests
|
||||
maxRetries?: number;
|
||||
// Disable cache control for DashScope providers
|
||||
disableCacheControl?: boolean;
|
||||
timeout?: number; // Timeout configuration in milliseconds
|
||||
maxRetries?: number; // Maximum retries for failed requests
|
||||
disableCacheControl?: boolean; // Disable cache control for DashScope providers
|
||||
samplingParams?: {
|
||||
top_p?: number;
|
||||
top_k?: number;
|
||||
@@ -74,8 +63,13 @@ export type ContentGeneratorConfig = {
|
||||
temperature?: number;
|
||||
max_tokens?: number;
|
||||
};
|
||||
reasoning?: {
|
||||
effort?: 'low' | 'medium' | 'high';
|
||||
};
|
||||
proxy?: string | undefined;
|
||||
userAgent?: string;
|
||||
// Schema compliance mode for tool definitions
|
||||
schemaCompliance?: 'auto' | 'openapi_30';
|
||||
};
|
||||
|
||||
export function createContentGeneratorConfig(
|
||||
@@ -121,48 +115,14 @@ export async function createContentGenerator(
|
||||
gcConfig: Config,
|
||||
isInitialAuth?: boolean,
|
||||
): Promise<ContentGenerator> {
|
||||
const version = process.env['CLI_VERSION'] || process.version;
|
||||
const userAgent = `QwenCode/${version} (${process.platform}; ${process.arch})`;
|
||||
const baseHeaders: Record<string, string> = {
|
||||
'User-Agent': userAgent,
|
||||
};
|
||||
|
||||
if (
|
||||
config.authType === AuthType.LOGIN_WITH_GOOGLE ||
|
||||
config.authType === AuthType.CLOUD_SHELL
|
||||
) {
|
||||
const httpOptions = { headers: baseHeaders };
|
||||
return new LoggingContentGenerator(
|
||||
await createCodeAssistContentGenerator(
|
||||
httpOptions,
|
||||
config.authType,
|
||||
gcConfig,
|
||||
),
|
||||
gcConfig,
|
||||
);
|
||||
}
|
||||
|
||||
if (
|
||||
config.authType === AuthType.USE_GEMINI ||
|
||||
config.authType === AuthType.USE_VERTEX_AI
|
||||
) {
|
||||
let headers: Record<string, string> = { ...baseHeaders };
|
||||
if (gcConfig?.getUsageStatisticsEnabled()) {
|
||||
const installationManager = new InstallationManager();
|
||||
const installationId = installationManager.getInstallationId();
|
||||
headers = {
|
||||
...headers,
|
||||
'x-gemini-api-privileged-user-id': `${installationId}`,
|
||||
};
|
||||
}
|
||||
const httpOptions = { headers };
|
||||
|
||||
const googleGenAI = new GoogleGenAI({
|
||||
apiKey: config.apiKey === '' ? undefined : config.apiKey,
|
||||
vertexai: config.vertexai,
|
||||
httpOptions,
|
||||
});
|
||||
return new LoggingContentGenerator(googleGenAI.models, gcConfig);
|
||||
const { createGeminiContentGenerator } = await import(
|
||||
'./geminiContentGenerator/index.js'
|
||||
);
|
||||
return createGeminiContentGenerator(config, gcConfig);
|
||||
}
|
||||
|
||||
if (config.authType === AuthType.USE_OPENAI) {
|
||||
|
||||
@@ -240,7 +240,7 @@ describe('CoreToolScheduler', () => {
|
||||
getAllowedTools: () => [],
|
||||
getContentGeneratorConfig: () => ({
|
||||
model: 'test-model',
|
||||
authType: 'oauth-personal',
|
||||
authType: 'gemini-api-key',
|
||||
}),
|
||||
getShellExecutionConfig: () => ({
|
||||
terminalWidth: 90,
|
||||
@@ -318,7 +318,7 @@ describe('CoreToolScheduler', () => {
|
||||
getAllowedTools: () => [],
|
||||
getContentGeneratorConfig: () => ({
|
||||
model: 'test-model',
|
||||
authType: 'oauth-personal',
|
||||
authType: 'gemini-api-key',
|
||||
}),
|
||||
getShellExecutionConfig: () => ({
|
||||
terminalWidth: 90,
|
||||
@@ -497,7 +497,7 @@ describe('CoreToolScheduler', () => {
|
||||
getExcludeTools: () => ['write_file', 'edit', 'run_shell_command'],
|
||||
getContentGeneratorConfig: () => ({
|
||||
model: 'test-model',
|
||||
authType: 'oauth-personal',
|
||||
authType: 'gemini-api-key',
|
||||
}),
|
||||
getShellExecutionConfig: () => ({
|
||||
terminalWidth: 90,
|
||||
@@ -584,7 +584,7 @@ describe('CoreToolScheduler', () => {
|
||||
getExcludeTools: () => ['write_file', 'edit'], // Different excluded tools
|
||||
getContentGeneratorConfig: () => ({
|
||||
model: 'test-model',
|
||||
authType: 'oauth-personal',
|
||||
authType: 'gemini-api-key',
|
||||
}),
|
||||
getShellExecutionConfig: () => ({
terminalWidth: 90,
@@ -674,7 +674,7 @@ describe('CoreToolScheduler with payload', () => {
getAllowedTools: () => [],
getContentGeneratorConfig: () => ({
model: 'test-model',
authType: 'oauth-personal',
authType: 'gemini-api-key',
}),
getShellExecutionConfig: () => ({
terminalWidth: 90,
@@ -1001,7 +1001,7 @@ describe('CoreToolScheduler edit cancellation', () => {
getAllowedTools: () => [],
getContentGeneratorConfig: () => ({
model: 'test-model',
authType: 'oauth-personal',
authType: 'gemini-api-key',
}),
getShellExecutionConfig: () => ({
terminalWidth: 90,
@@ -1108,7 +1108,7 @@ describe('CoreToolScheduler YOLO mode', () => {
getAllowedTools: () => [],
getContentGeneratorConfig: () => ({
model: 'test-model',
authType: 'oauth-personal',
authType: 'gemini-api-key',
}),
getShellExecutionConfig: () => ({
terminalWidth: 90,
@@ -1258,7 +1258,7 @@ describe('CoreToolScheduler cancellation during executing with live output', ()
getApprovalMode: () => ApprovalMode.DEFAULT,
getContentGeneratorConfig: () => ({
model: 'test-model',
authType: 'oauth-personal',
authType: 'gemini-api-key',
}),
getToolRegistry: () => mockToolRegistry,
getShellExecutionConfig: () => ({
@@ -1350,7 +1350,7 @@ describe('CoreToolScheduler request queueing', () => {
getAllowedTools: () => [],
getContentGeneratorConfig: () => ({
model: 'test-model',
authType: 'oauth-personal',
authType: 'gemini-api-key',
}),
getShellExecutionConfig: () => ({
terminalWidth: 90,
@@ -1482,7 +1482,7 @@ describe('CoreToolScheduler request queueing', () => {
getToolRegistry: () => toolRegistry,
getContentGeneratorConfig: () => ({
model: 'test-model',
authType: 'oauth-personal',
authType: 'gemini-api-key',
}),
getShellExecutionConfig: () => ({
terminalWidth: 80,
@@ -1586,7 +1586,7 @@ describe('CoreToolScheduler request queueing', () => {
getAllowedTools: () => [],
getContentGeneratorConfig: () => ({
model: 'test-model',
authType: 'oauth-personal',
authType: 'gemini-api-key',
}),
getShellExecutionConfig: () => ({
terminalWidth: 90,
@@ -1854,7 +1854,7 @@ describe('CoreToolScheduler Sequential Execution', () => {
getAllowedTools: () => [],
getContentGeneratorConfig: () => ({
model: 'test-model',
authType: 'oauth-personal',
authType: 'gemini-api-key',
}),
getShellExecutionConfig: () => ({
terminalWidth: 90,
@@ -1975,7 +1975,7 @@ describe('CoreToolScheduler Sequential Execution', () => {
getAllowedTools: () => [],
getContentGeneratorConfig: () => ({
model: 'test-model',
authType: 'oauth-personal',
authType: 'gemini-api-key',
}),
getShellExecutionConfig: () => ({
terminalWidth: 90,

@@ -100,6 +100,7 @@ describe('GeminiChat', () => {
countTokens: vi.fn(),
embedContent: vi.fn(),
batchEmbedContents: vi.fn(),
useSummarizedThinking: vi.fn().mockReturnValue(false),
} as unknown as ContentGenerator;

mockHandleFallback.mockClear();
@@ -111,7 +112,7 @@ describe('GeminiChat', () => {
getUsageStatisticsEnabled: () => true,
getDebugMode: () => false,
getContentGeneratorConfig: vi.fn().mockReturnValue({
authType: 'oauth-personal', // Ensure this is set for fallback tests
authType: 'gemini-api-key', // Ensure this is set for fallback tests
model: 'test-model',
}),
getModel: vi.fn().mockReturnValue('gemini-pro'),
@@ -718,6 +719,99 @@ describe('GeminiChat', () => {
1,
);
});

it('should handle summarized thinking by conditionally including thoughts in history', async () => {
// Case 1: useSummarizedThinking is true -> thoughts NOT in history
vi.mocked(mockContentGenerator.useSummarizedThinking).mockReturnValue(
true,
);
const stream1 = (async function* () {
yield {
candidates: [
{
content: {
role: 'model',
parts: [{ thought: true, text: 'T1' }, { text: 'A1' }],
},
finishReason: 'STOP',
},
],
} as unknown as GenerateContentResponse;
})();
vi.mocked(mockContentGenerator.generateContentStream).mockResolvedValue(
stream1,
);

const res1 = await chat.sendMessageStream('m1', { message: 'h1' }, 'p1');
for await (const _ of res1);

const history1 = chat.getHistory();
expect(history1[1].parts).toEqual([{ text: 'A1' }]);

// Case 2: useSummarizedThinking is false -> thoughts ARE in history
chat.clearHistory();
vi.mocked(mockContentGenerator.useSummarizedThinking).mockReturnValue(
false,
);
const stream2 = (async function* () {
yield {
candidates: [
{
content: {
role: 'model',
parts: [{ thought: true, text: 'T2' }, { text: 'A2' }],
},
finishReason: 'STOP',
},
],
} as unknown as GenerateContentResponse;
})();
vi.mocked(mockContentGenerator.generateContentStream).mockResolvedValue(
stream2,
);

const res2 = await chat.sendMessageStream('m1', { message: 'h1' }, 'p2');
for await (const _ of res2);

const history2 = chat.getHistory();
expect(history2[1].parts).toEqual([
{ text: 'T2', thought: true },
{ text: 'A2' },
]);
});

it('should keep parts with thoughtSignature when consolidating history', async () => {
const stream = (async function* () {
yield {
candidates: [
{
content: {
role: 'model',
parts: [
{
text: 'p1',
thoughtSignature: 's1',
} as unknown as { text: string; thoughtSignature: string },
],
},
finishReason: 'STOP',
},
],
} as unknown as GenerateContentResponse;
})();
vi.mocked(mockContentGenerator.generateContentStream).mockResolvedValue(
stream,
);

const res = await chat.sendMessageStream('m1', { message: 'h1' }, 'p1');
for await (const _ of res);

const history = chat.getHistory();
expect(history[1].parts![0]).toEqual({
text: 'p1',
thoughtSignature: 's1',
});
});
});

describe('addHistory', () => {
@@ -1382,7 +1476,7 @@ describe('GeminiChat', () => {
});

it('should call handleFallback with the specific failed model and retry if handler returns true', async () => {
const authType = AuthType.LOGIN_WITH_GOOGLE;
const authType = AuthType.USE_GEMINI;
vi.mocked(mockConfig.getContentGeneratorConfig).mockReturnValue({
model: 'test-model',
authType,
@@ -1532,7 +1626,7 @@ describe('GeminiChat', () => {
});

describe('stripThoughtsFromHistory', () => {
it('should strip thought signatures', () => {
it('should strip thoughts and thought signatures, and remove empty content objects', () => {
chat.setHistory([
{
role: 'user',
@@ -1544,10 +1638,15 @@ describe('GeminiChat', () => {
{ text: 'thinking...', thought: true },
{ text: 'hi' },
{
functionCall: { name: 'test', args: {} },
},
text: 'hidden metadata',
thoughtSignature: 'abc',
} as unknown as { text: string; thoughtSignature: string },
],
},
{
role: 'model',
parts: [{ text: 'only thinking', thought: true }],
},
]);

chat.stripThoughtsFromHistory();
@@ -1559,7 +1658,7 @@ describe('GeminiChat', () => {
},
{
role: 'model',
parts: [{ text: 'hi' }, { functionCall: { name: 'test', args: {} } }],
parts: [{ text: 'hi' }, { text: 'hidden metadata' }],
},
]);
});

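The reworked stripThoughtsFromHistory expectation above boils down to a simple before/after transformation: thought-only parts disappear, thoughtSignature metadata is stripped from surviving parts, and ordinary text parts are kept. A plain-data sketch of that contract (mirroring the fixtures above, not code from this diff):

```ts
// History entry as stored before stripping (shaped like the fixture above).
const before = {
  role: 'model',
  parts: [
    { text: 'thinking...', thought: true },              // dropped entirely
    { text: 'hi' },                                       // kept unchanged
    { text: 'hidden metadata', thoughtSignature: 'abc' }, // signature removed, text kept
  ],
};

// Expected shape of the same entry after chat.stripThoughtsFromHistory():
const after = {
  role: 'model',
  parts: [{ text: 'hi' }, { text: 'hidden metadata' }],
};

// A model entry whose parts were all thoughts (e.g. [{ text: 'only thinking',
// thought: true }]) ends up with no parts and is removed from history outright.
```
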
@@ -92,6 +92,7 @@ export function isValidNonThoughtTextPart(part: Part): boolean {
return (
typeof part.text === 'string' &&
!part.thought &&
!part.thoughtSignature &&
// Technically, the model should never generate parts that have text and
// any of these but we don't trust them so check anyways.
!part.functionCall &&
@@ -109,18 +110,24 @@ function isValidContent(content: Content): boolean {
if (part === undefined || Object.keys(part).length === 0) {
return false;
}
if (
!part.thought &&
part.text !== undefined &&
part.text === '' &&
part.functionCall === undefined
) {
if (!isValidContentPart(part)) {
return false;
}
}
return true;
}

function isValidContentPart(part: Part): boolean {
const isInvalid =
!part.thought &&
!part.thoughtSignature &&
part.text !== undefined &&
part.text === '' &&
part.functionCall === undefined;

return !isInvalid;
}

/**
* Validates the history contains the correct roles.
*
@@ -448,15 +455,29 @@ export class GeminiChat {
if (!content.parts) return content;

// Filter out thought parts entirely
const filteredParts = content.parts.filter(
(part) =>
!(
const filteredParts = content.parts
.filter(
(part) =>
!(
part &&
typeof part === 'object' &&
'thought' in part &&
part.thought
),
)
.map((part) => {
if (
part &&
typeof part === 'object' &&
'thought' in part &&
part.thought
),
);
'thoughtSignature' in part
) {
const newPart = { ...part };
delete (newPart as { thoughtSignature?: string })
.thoughtSignature;
return newPart;
}
return part;
});

return {
...content,
@@ -538,11 +559,15 @@ export class GeminiChat {
yield chunk; // Yield every chunk to the UI immediately.
}

const thoughtParts = allModelParts.filter((part) => part.thought);
const thoughtText = thoughtParts
.map((part) => part.text)
.join('')
.trim();
let thoughtText = '';
// Only include thoughts if not using summarized thinking.
if (!this.config.getContentGenerator().useSummarizedThinking()) {
thoughtText = allModelParts
.filter((part) => part.thought)
.map((part) => part.text)
.join('')
.trim();
}

const contentParts = allModelParts.filter((part) => !part.thought);
const consolidatedHistoryParts: Part[] = [];
@@ -555,7 +580,7 @@ export class GeminiChat {
isValidNonThoughtTextPart(part)
) {
lastPart.text += part.text;
} else {
} else if (isValidContentPart(part)) {
consolidatedHistoryParts.push(part);
}
}

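The consolidation change above persists thought text into history only when the content generator does not report summarized thinking. A compressed sketch of that rule, using an illustrative helper rather than the actual GeminiChat internals:

```ts
import type { Part } from '@google/genai';

// One streamed model turn: a thought part followed by the visible answer.
const streamedParts: Part[] = [
  { thought: true, text: 'T1' },
  { text: 'A1' },
];

// Sketch of the rule applied during history consolidation.
function partsKeptInHistory(parts: Part[], summarizedThinking: boolean): Part[] {
  // Summarized thoughts are transient UI output and are not written back to history.
  return summarizedThinking ? parts.filter((p) => !p.thought) : parts;
}

partsKeptInHistory(streamedParts, true);  // -> [{ text: 'A1' }]
partsKeptInHistory(streamedParts, false); // -> thought and answer both kept
```
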
@@ -0,0 +1,173 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/

import { describe, it, expect, vi, beforeEach } from 'vitest';
import { GeminiContentGenerator } from './geminiContentGenerator.js';
import { GoogleGenAI } from '@google/genai';

vi.mock('@google/genai', () => {
const mockGenerateContent = vi.fn();
const mockGenerateContentStream = vi.fn();
const mockCountTokens = vi.fn();
const mockEmbedContent = vi.fn();

return {
GoogleGenAI: vi.fn().mockImplementation(() => ({
models: {
generateContent: mockGenerateContent,
generateContentStream: mockGenerateContentStream,
countTokens: mockCountTokens,
embedContent: mockEmbedContent,
},
})),
};
});

describe('GeminiContentGenerator', () => {
let generator: GeminiContentGenerator;
// eslint-disable-next-line @typescript-eslint/no-explicit-any
let mockGoogleGenAI: any;

beforeEach(() => {
vi.clearAllMocks();
generator = new GeminiContentGenerator({
apiKey: 'test-api-key',
});
mockGoogleGenAI = vi.mocked(GoogleGenAI).mock.results[0].value;
});

it('should call generateContent on the underlying model', async () => {
const request = { model: 'gemini-1.5-flash', contents: [] };
const expectedResponse = { responseId: 'test-id' };
mockGoogleGenAI.models.generateContent.mockResolvedValue(expectedResponse);

const response = await generator.generateContent(request, 'prompt-id');

expect(mockGoogleGenAI.models.generateContent).toHaveBeenCalledWith(
expect.objectContaining({
...request,
config: expect.objectContaining({
temperature: 1,
topP: 0.95,
thinkingConfig: {
includeThoughts: true,
thinkingLevel: 'THINKING_LEVEL_UNSPECIFIED',
},
}),
}),
);
expect(response).toBe(expectedResponse);
});

it('should call generateContentStream on the underlying model', async () => {
const request = { model: 'gemini-1.5-flash', contents: [] };
const mockStream = (async function* () {
yield { responseId: '1' };
})();
mockGoogleGenAI.models.generateContentStream.mockResolvedValue(mockStream);

const stream = await generator.generateContentStream(request, 'prompt-id');

expect(mockGoogleGenAI.models.generateContentStream).toHaveBeenCalledWith(
expect.objectContaining({
...request,
config: expect.objectContaining({
temperature: 1,
topP: 0.95,
thinkingConfig: {
includeThoughts: true,
thinkingLevel: 'THINKING_LEVEL_UNSPECIFIED',
},
}),
}),
);
expect(stream).toBe(mockStream);
});

it('should call countTokens on the underlying model', async () => {
const request = { model: 'gemini-1.5-flash', contents: [] };
const expectedResponse = { totalTokens: 10 };
mockGoogleGenAI.models.countTokens.mockResolvedValue(expectedResponse);

const response = await generator.countTokens(request);

expect(mockGoogleGenAI.models.countTokens).toHaveBeenCalledWith(request);
expect(response).toBe(expectedResponse);
});

it('should call embedContent on the underlying model', async () => {
const request = { model: 'embedding-model', contents: [] };
const expectedResponse = { embeddings: [] };
mockGoogleGenAI.models.embedContent.mockResolvedValue(expectedResponse);

const response = await generator.embedContent(request);

expect(mockGoogleGenAI.models.embedContent).toHaveBeenCalledWith(request);
expect(response).toBe(expectedResponse);
});

it('should prioritize contentGeneratorConfig samplingParams over request config', async () => {
const generatorWithParams = new GeminiContentGenerator({ apiKey: 'test' }, {
model: 'gemini-1.5-flash',
samplingParams: {
temperature: 0.1,
top_p: 0.2,
},
// eslint-disable-next-line @typescript-eslint/no-explicit-any
} as any);

const request = {
model: 'gemini-1.5-flash',
contents: [],
config: {
temperature: 0.9,
topP: 0.9,
},
};

await generatorWithParams.generateContent(request, 'prompt-id');

expect(mockGoogleGenAI.models.generateContent).toHaveBeenCalledWith(
expect.objectContaining({
config: expect.objectContaining({
temperature: 0.1,
topP: 0.2,
}),
}),
);
});

it('should map reasoning effort to thinkingConfig', async () => {
const generatorWithReasoning = new GeminiContentGenerator(
{ apiKey: 'test' },
{
model: 'gemini-2.5-pro',
reasoning: {
effort: 'high',
},
// eslint-disable-next-line @typescript-eslint/no-explicit-any
} as any,
);

const request = {
model: 'gemini-2.5-pro',
contents: [],
};

await generatorWithReasoning.generateContent(request, 'prompt-id');

expect(mockGoogleGenAI.models.generateContent).toHaveBeenCalledWith(
expect.objectContaining({
config: expect.objectContaining({
thinkingConfig: {
includeThoughts: true,
thinkingLevel: 'HIGH',
},
}),
}),
);
});
});
@@ -0,0 +1,144 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/

import type {
CountTokensParameters,
CountTokensResponse,
EmbedContentParameters,
EmbedContentResponse,
GenerateContentParameters,
GenerateContentResponse,
GenerateContentConfig,
ThinkingLevel,
} from '@google/genai';
import { GoogleGenAI } from '@google/genai';
import type {
ContentGenerator,
ContentGeneratorConfig,
} from '../contentGenerator.js';

/**
* A wrapper for GoogleGenAI that implements the ContentGenerator interface.
*/
export class GeminiContentGenerator implements ContentGenerator {
private readonly googleGenAI: GoogleGenAI;
private readonly contentGeneratorConfig?: ContentGeneratorConfig;

constructor(
options: {
apiKey?: string;
vertexai?: boolean;
httpOptions?: { headers: Record<string, string> };
},
contentGeneratorConfig?: ContentGeneratorConfig,
) {
this.googleGenAI = new GoogleGenAI(options);
this.contentGeneratorConfig = contentGeneratorConfig;
}

private buildSamplingParameters(
request: GenerateContentParameters,
): GenerateContentConfig {
const configSamplingParams = this.contentGeneratorConfig?.samplingParams;
const requestConfig = request.config || {};

// Helper function to get parameter value with priority: config > request > default
const getParameterValue = <T>(
configValue: T | undefined,
requestKey: keyof GenerateContentConfig,
defaultValue?: T,
): T | undefined => {
const requestValue = requestConfig[requestKey] as T | undefined;

if (configValue !== undefined) return configValue;
if (requestValue !== undefined) return requestValue;
return defaultValue;
};

return {
...requestConfig,
temperature: getParameterValue<number>(
configSamplingParams?.temperature,
'temperature',
1,
),
topP: getParameterValue<number>(
configSamplingParams?.top_p,
'topP',
0.95,
),
topK: getParameterValue<number>(configSamplingParams?.top_k, 'topK', 64),
maxOutputTokens: getParameterValue<number>(
configSamplingParams?.max_tokens,
'maxOutputTokens',
),
presencePenalty: getParameterValue<number>(
configSamplingParams?.presence_penalty,
'presencePenalty',
),
frequencyPenalty: getParameterValue<number>(
configSamplingParams?.frequency_penalty,
'frequencyPenalty',
),
thinkingConfig: getParameterValue(
this.contentGeneratorConfig?.reasoning
? {
includeThoughts: true,
thinkingLevel: (this.contentGeneratorConfig.reasoning.effort ===
'low'
? 'LOW'
: this.contentGeneratorConfig.reasoning.effort === 'high'
? 'HIGH'
: 'THINKING_LEVEL_UNSPECIFIED') as ThinkingLevel,
}
: undefined,
'thinkingConfig',
{
includeThoughts: true,
thinkingLevel: 'THINKING_LEVEL_UNSPECIFIED' as ThinkingLevel,
},
),
};
}

async generateContent(
request: GenerateContentParameters,
_userPromptId: string,
): Promise<GenerateContentResponse> {
const finalRequest = {
...request,
config: this.buildSamplingParameters(request),
};
return this.googleGenAI.models.generateContent(finalRequest);
}

async generateContentStream(
request: GenerateContentParameters,
_userPromptId: string,
): Promise<AsyncGenerator<GenerateContentResponse>> {
const finalRequest = {
...request,
config: this.buildSamplingParameters(request),
};
return this.googleGenAI.models.generateContentStream(finalRequest);
}

async countTokens(
request: CountTokensParameters,
): Promise<CountTokensResponse> {
return this.googleGenAI.models.countTokens(request);
}

async embedContent(
request: EmbedContentParameters,
): Promise<EmbedContentResponse> {
return this.googleGenAI.models.embedContent(request);
}

useSummarizedThinking(): boolean {
return true;
}
}
packages/core/src/core/geminiContentGenerator/index.test.ts (new file, 47 lines)
@@ -0,0 +1,47 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/

import { describe, it, expect, vi, beforeEach } from 'vitest';
import { createGeminiContentGenerator } from './index.js';
import { GeminiContentGenerator } from './geminiContentGenerator.js';
import { LoggingContentGenerator } from './loggingContentGenerator.js';
import type { Config } from '../../config/config.js';
import { AuthType } from '../contentGenerator.js';

vi.mock('./geminiContentGenerator.js', () => ({
GeminiContentGenerator: vi.fn().mockImplementation(() => ({})),
}));

vi.mock('./loggingContentGenerator.js', () => ({
LoggingContentGenerator: vi.fn().mockImplementation((wrapped) => wrapped),
}));

describe('createGeminiContentGenerator', () => {
let mockConfig: Config;

beforeEach(() => {
vi.clearAllMocks();
mockConfig = {
getUsageStatisticsEnabled: vi.fn().mockReturnValue(false),
getContentGeneratorConfig: vi.fn().mockReturnValue({}),
getCliVersion: vi.fn().mockReturnValue('1.0.0'),
} as unknown as Config;
});

it('should create a GeminiContentGenerator wrapped in LoggingContentGenerator', () => {
const config = {
model: 'gemini-1.5-flash',
apiKey: 'test-key',
authType: AuthType.USE_GEMINI,
};

const generator = createGeminiContentGenerator(config, mockConfig);

expect(GeminiContentGenerator).toHaveBeenCalled();
expect(LoggingContentGenerator).toHaveBeenCalled();
expect(generator).toBeDefined();
});
});
packages/core/src/core/geminiContentGenerator/index.ts (new file, 55 lines)
@@ -0,0 +1,55 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/

import { GeminiContentGenerator } from './geminiContentGenerator.js';
import type {
ContentGenerator,
ContentGeneratorConfig,
} from '../contentGenerator.js';
import type { Config } from '../../config/config.js';
import { InstallationManager } from '../../utils/installationManager.js';
import { LoggingContentGenerator } from './loggingContentGenerator.js';

export { GeminiContentGenerator } from './geminiContentGenerator.js';
export { LoggingContentGenerator } from './loggingContentGenerator.js';

/**
* Create a Gemini content generator.
*/
export function createGeminiContentGenerator(
config: ContentGeneratorConfig,
gcConfig: Config,
): ContentGenerator {
const version = process.env['CLI_VERSION'] || process.version;
const userAgent =
config.userAgent ||
`QwenCode/${version} (${process.platform}; ${process.arch})`;
const baseHeaders: Record<string, string> = {
'User-Agent': userAgent,
};

let headers: Record<string, string> = { ...baseHeaders };
if (gcConfig?.getUsageStatisticsEnabled()) {
const installationManager = new InstallationManager();
const installationId = installationManager.getInstallationId();
headers = {
...headers,
'x-gemini-api-privileged-user-id': `${installationId}`,
};
}
const httpOptions = { headers };

const geminiContentGenerator = new GeminiContentGenerator(
{
apiKey: config.apiKey === '' ? undefined : config.apiKey,
vertexai: config.vertexai,
httpOptions,
},
config,
);

return new LoggingContentGenerator(geminiContentGenerator, gcConfig);
}
@@ -13,21 +13,24 @@ import type {
GenerateContentParameters,
GenerateContentResponseUsageMetadata,
GenerateContentResponse,
ContentListUnion,
ContentUnion,
Part,
PartUnion,
} from '@google/genai';
import {
ApiRequestEvent,
ApiResponseEvent,
ApiErrorEvent,
} from '../telemetry/types.js';
import type { Config } from '../config/config.js';
} from '../../telemetry/types.js';
import type { Config } from '../../config/config.js';
import {
logApiError,
logApiRequest,
logApiResponse,
} from '../telemetry/loggers.js';
import type { ContentGenerator } from './contentGenerator.js';
import { toContents } from '../code_assist/converter.js';
import { isStructuredError } from '../utils/quotaErrorDetection.js';
} from '../../telemetry/loggers.js';
import type { ContentGenerator } from '../contentGenerator.js';
import { isStructuredError } from '../../utils/quotaErrorDetection.js';

interface StructuredError {
status: number;
@@ -112,7 +115,7 @@ export class LoggingContentGenerator implements ContentGenerator {
userPromptId: string,
): Promise<GenerateContentResponse> {
const startTime = Date.now();
this.logApiRequest(toContents(req.contents), req.model, userPromptId);
this.logApiRequest(this.toContents(req.contents), req.model, userPromptId);
try {
const response = await this.wrapped.generateContent(req, userPromptId);
const durationMs = Date.now() - startTime;
@@ -137,7 +140,7 @@ export class LoggingContentGenerator implements ContentGenerator {
userPromptId: string,
): Promise<AsyncGenerator<GenerateContentResponse>> {
const startTime = Date.now();
this.logApiRequest(toContents(req.contents), req.model, userPromptId);
this.logApiRequest(this.toContents(req.contents), req.model, userPromptId);

let stream: AsyncGenerator<GenerateContentResponse>;
try {
@@ -205,4 +208,95 @@ export class LoggingContentGenerator implements ContentGenerator {
): Promise<EmbedContentResponse> {
return this.wrapped.embedContent(req);
}

useSummarizedThinking(): boolean {
return this.wrapped.useSummarizedThinking();
}

private toContents(contents: ContentListUnion): Content[] {
if (Array.isArray(contents)) {
// it's a Content[] or a PartsUnion[]
return contents.map((c) => this.toContent(c));
}
// it's a Content or a PartsUnion
return [this.toContent(contents)];
}

private toContent(content: ContentUnion): Content {
if (Array.isArray(content)) {
// it's a PartsUnion[]
return {
role: 'user',
parts: this.toParts(content),
};
}
if (typeof content === 'string') {
// it's a string
return {
role: 'user',
parts: [{ text: content }],
};
}
if ('parts' in content) {
// it's a Content - process parts to handle thought filtering
return {
...content,
parts: content.parts
? this.toParts(content.parts.filter((p) => p != null))
: [],
};
}
// it's a Part
return {
role: 'user',
parts: [this.toPart(content as Part)],
};
}

private toParts(parts: PartUnion[]): Part[] {
return parts.map((p) => this.toPart(p));
}

private toPart(part: PartUnion): Part {
if (typeof part === 'string') {
// it's a string
return { text: part };
}

// Handle thought parts for CountToken API compatibility
// The CountToken API expects parts to have certain required "oneof" fields initialized,
// but thought parts don't conform to this schema and cause API failures
if ('thought' in part && part.thought) {
const thoughtText = `[Thought: ${part.thought}]`;

const newPart = { ...part };
delete (newPart as Record<string, unknown>)['thought'];

const hasApiContent =
'functionCall' in newPart ||
'functionResponse' in newPart ||
'inlineData' in newPart ||
'fileData' in newPart;

if (hasApiContent) {
// It's a functionCall or other non-text part. Just strip the thought.
return newPart;
}

// If no other valid API content, this must be a text part.
// Combine existing text (if any) with the thought, preserving other properties.
const text = (newPart as { text?: unknown }).text;
const existingText = text ? String(text) : '';
const combinedText = existingText
? `${existingText}\n${thoughtText}`
: thoughtText;

return {
...newPart,
text: combinedText,
};
}

return part;
}
}
@@ -47,7 +47,7 @@ describe('executeToolCall', () => {
getDebugMode: () => false,
getContentGeneratorConfig: () => ({
model: 'test-model',
authType: 'oauth-personal',
authType: 'gemini-api-key',
}),
getShellExecutionConfig: () => ({
terminalWidth: 90,

@@ -22,6 +22,10 @@ import { GenerateContentResponse, FinishReason } from '@google/genai';
import type OpenAI from 'openai';
import { safeJsonParse } from '../../utils/safeJsonParse.js';
import { StreamingToolCallParser } from './streamingToolCallParser.js';
import {
convertSchema,
type SchemaComplianceMode,
} from '../../utils/schemaConverter.js';

/**
* Extended usage type that supports both OpenAI standard format and alternative formats
@@ -80,11 +84,13 @@ interface ParsedParts {
*/
export class OpenAIContentConverter {
private model: string;
private schemaCompliance: SchemaComplianceMode;
private streamingToolCallParser: StreamingToolCallParser =
new StreamingToolCallParser();

constructor(model: string) {
constructor(model: string, schemaCompliance: SchemaComplianceMode = 'auto') {
this.model = model;
this.schemaCompliance = schemaCompliance;
}

/**
@@ -205,6 +211,10 @@ export class OpenAIContentConverter {
);
}

if (parameters) {
parameters = convertSchema(parameters, this.schemaCompliance);
}

openAITools.push({
type: 'function',
function: {

@@ -99,6 +99,7 @@ describe('OpenAIContentGenerator (Refactored)', () => {
},
} as unknown as OpenAI),
buildRequest: vi.fn().mockImplementation((req) => req),
getDefaultGenerationConfig: vi.fn().mockReturnValue({}),
};

generator = new OpenAIContentGenerator(
@@ -211,6 +212,7 @@ describe('OpenAIContentGenerator (Refactored)', () => {
},
} as unknown as OpenAI),
buildRequest: vi.fn().mockImplementation((req) => req),
getDefaultGenerationConfig: vi.fn().mockReturnValue({}),
};

const testGenerator = new TestGenerator(
@@ -277,6 +279,7 @@ describe('OpenAIContentGenerator (Refactored)', () => {
},
} as unknown as OpenAI),
buildRequest: vi.fn().mockImplementation((req) => req),
getDefaultGenerationConfig: vi.fn().mockReturnValue({}),
};

const testGenerator = new TestGenerator(

@@ -154,4 +154,8 @@ export class OpenAIContentGenerator implements ContentGenerator {
);
}
}

useSummarizedThinking(): boolean {
return false;
}
}

@@ -60,6 +60,7 @@ describe('ContentGenerationPipeline', () => {
buildClient: vi.fn().mockReturnValue(mockClient),
buildRequest: vi.fn().mockImplementation((req) => req),
buildHeaders: vi.fn().mockReturnValue({}),
getDefaultGenerationConfig: vi.fn().mockReturnValue({}),
};

// Mock telemetry service
@@ -108,7 +109,10 @@ describe('ContentGenerationPipeline', () => {
describe('constructor', () => {
it('should initialize with correct configuration', () => {
expect(mockProvider.buildClient).toHaveBeenCalled();
expect(OpenAIContentConverter).toHaveBeenCalledWith('test-model');
expect(OpenAIContentConverter).toHaveBeenCalledWith(
'test-model',
undefined,
);
});
});

@@ -34,6 +34,7 @@ export class ContentGenerationPipeline {
this.client = this.config.provider.buildClient();
this.converter = new OpenAIContentConverter(
this.contentGeneratorConfig.model,
this.contentGeneratorConfig.schemaCompliance,
);
}

@@ -282,16 +283,22 @@ export class ContentGenerationPipeline {
private buildSamplingParameters(
request: GenerateContentParameters,
): Record<string, unknown> {
const defaultSamplingParams =
this.config.provider.getDefaultGenerationConfig();
const configSamplingParams = this.contentGeneratorConfig.samplingParams;

// Helper function to get parameter value with priority: config > request > default
const getParameterValue = <T>(
configKey: keyof NonNullable<typeof configSamplingParams>,
requestKey: keyof NonNullable<typeof request.config>,
defaultValue?: T,
requestKey?: keyof NonNullable<typeof request.config>,
): T | undefined => {
const configValue = configSamplingParams?.[configKey] as T | undefined;
const requestValue = request.config?.[requestKey] as T | undefined;
const requestValue = requestKey
? (request.config?.[requestKey] as T | undefined)
: undefined;
const defaultValue = requestKey
? (defaultSamplingParams[requestKey] as T)
: undefined;

if (configValue !== undefined) return configValue;
if (requestValue !== undefined) return requestValue;
@@ -303,12 +310,8 @@ export class ContentGenerationPipeline {
key: string,
configKey: keyof NonNullable<typeof configSamplingParams>,
requestKey?: keyof NonNullable<typeof request.config>,
defaultValue?: T,
): Record<string, T> | Record<string, never> => {
const value = requestKey
? getParameterValue(configKey, requestKey, defaultValue)
: ((configSamplingParams?.[configKey] as T | undefined) ??
defaultValue);
): Record<string, T | undefined> => {
const value = getParameterValue<T>(configKey, requestKey);

return value !== undefined ? { [key]: value } : {};
};
@@ -322,10 +325,18 @@ export class ContentGenerationPipeline {
...addParameterIfDefined('max_tokens', 'max_tokens', 'maxOutputTokens'),

// Config-only parameters (no request fallback)
...addParameterIfDefined('top_k', 'top_k'),
...addParameterIfDefined('top_k', 'top_k', 'topK'),
...addParameterIfDefined('repetition_penalty', 'repetition_penalty'),
...addParameterIfDefined('presence_penalty', 'presence_penalty'),
...addParameterIfDefined('frequency_penalty', 'frequency_penalty'),
...addParameterIfDefined(
'presence_penalty',
'presence_penalty',
'presencePenalty',
),
...addParameterIfDefined(
'frequency_penalty',
'frequency_penalty',
'frequencyPenalty',
),
};

return params;
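The reworked buildSamplingParameters above resolves each sampling value from three sources in a fixed order: the configured samplingParams, then the incoming request config, then the provider's getDefaultGenerationConfig(). A standalone sketch of that precedence (illustrative helper, not the pipeline code itself):

```ts
// Precedence used for every sampling parameter: config > request > provider default.
function resolveParam<T>(
  configValue: T | undefined,
  requestValue: T | undefined,
  providerDefault: T | undefined,
): T | undefined {
  if (configValue !== undefined) return configValue;
  if (requestValue !== undefined) return requestValue;
  return providerDefault;
}

// Example with temperature, assuming a DashScope-style default of 0.7:
resolveParam(undefined, 0.2, 0.7);       // request wins over the provider default -> 0.2
resolveParam(0.1, 0.2, 0.7);             // configured samplingParams win -> 0.1
resolveParam(undefined, undefined, 0.7); // nothing else set -> 0.7
```
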
@@ -1,4 +1,5 @@
import OpenAI from 'openai';
import type { GenerateContentConfig } from '@google/genai';
import type { Config } from '../../../config/config.js';
import type { ContentGeneratorConfig } from '../../contentGenerator.js';
import { AuthType } from '../../contentGenerator.js';
@@ -141,6 +142,14 @@ export class DashScopeOpenAICompatibleProvider
};
}

getDefaultGenerationConfig(): GenerateContentConfig {
return {
temperature: 0.7,
topP: 0.8,
topK: 20,
};
}

/**
* Add cache control flag to specified message(s) for DashScope providers
*/

@@ -8,6 +8,7 @@ import type OpenAI from 'openai';
import type { Config } from '../../../config/config.js';
import type { ContentGeneratorConfig } from '../../contentGenerator.js';
import { DefaultOpenAICompatibleProvider } from './default.js';
import type { GenerateContentConfig } from '@google/genai';

export class DeepSeekOpenAICompatibleProvider extends DefaultOpenAICompatibleProvider {
constructor(
@@ -76,4 +77,10 @@ export class DeepSeekOpenAICompatibleProvider extends DefaultOpenAICompatiblePro
messages,
};
}

override getDefaultGenerationConfig(): GenerateContentConfig {
return {
temperature: 0,
};
}
}

@@ -1,4 +1,5 @@
import OpenAI from 'openai';
import type { GenerateContentConfig } from '@google/genai';
import type { Config } from '../../../config/config.js';
import type { ContentGeneratorConfig } from '../../contentGenerator.js';
import { DEFAULT_TIMEOUT, DEFAULT_MAX_RETRIES } from '../constants.js';
@@ -55,4 +56,10 @@ export class DefaultOpenAICompatibleProvider
...request, // Preserve all original parameters including sampling params
};
}

getDefaultGenerationConfig(): GenerateContentConfig {
return {
topP: 0.95,
};
}
}

@@ -1,3 +1,4 @@
import type { GenerateContentConfig } from '@google/genai';
import type OpenAI from 'openai';

// Extended types to support cache_control for DashScope
@@ -22,6 +23,7 @@ export interface OpenAICompatibleProvider {
request: OpenAI.Chat.ChatCompletionCreateParams,
userPromptId: string,
): OpenAI.Chat.ChatCompletionCreateParams;
getDefaultGenerationConfig(): GenerateContentConfig;
}

export type DashScopeRequestMetadata = {
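With getDefaultGenerationConfig() added to the OpenAICompatibleProvider interface above, each provider now declares its own baseline generation settings (DashScope, DeepSeek, and the default provider do so earlier in this diff). A minimal sketch of a hypothetical provider supplying such defaults; only the new method is shown, the rest of the interface is omitted:

```ts
import type { GenerateContentConfig } from '@google/genai';

// Hypothetical provider: illustrates only the method added by this change.
class ExampleCompatibleProvider {
  getDefaultGenerationConfig(): GenerateContentConfig {
    // Values here are placeholders, not defaults taken from the codebase.
    return {
      temperature: 0.3,
      topP: 0.9,
    };
  }
}
```
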
@@ -36,13 +36,6 @@ vi.mock('../utils/errorReporting', () => ({
reportError: vi.fn(),
}));

// Use the actual implementation from partUtils now that it's provided.
vi.mock('../utils/generateContentResponseUtilities', () => ({
getResponseText: (resp: GenerateContentResponse) =>
resp.candidates?.[0]?.content?.parts?.map((part) => part.text).join('') ||
undefined,
}));

describe('Turn', () => {
let turn: Turn;
// Define a type for the mocked Chat instance for clarity
@@ -156,6 +149,7 @@ describe('Turn', () => {
type: GeminiEventType.Thought,
value: { subject: '', description: 'reasoning...' },
},
{ type: GeminiEventType.Content, value: 'final answer' },
]);
});
Some files were not shown because too many files have changed in this diff.