Mirror of https://github.com/QwenLM/qwen-code.git, synced 2025-12-26 19:49:17 +00:00

Compare commits: `fix/integr…` → `fix-langua…` (11 commits)
| Author | SHA1 | Date |
| ------ | ---------- | ---- |
|        | 25dbe98e6e |      |
|        | e5dbd69899 |      |
|        | 101bd5f9b3 |      |
|        | 61c626b618 |      |
|        | a28278e950 |      |
|        | a8f7bab544 |      |
|        | 4ca62ba836 |      |
|        | 398a1044ce |      |
|        | f07259a7c9 |      |
|        | 4d9f25e9fe |      |
|        | 15efeb0107 |      |
54 .github/workflows/release-sdk.yml (vendored)
@@ -33,10 +33,6 @@ on:
        type: 'boolean'
        default: false

-concurrency:
-  group: '${{ github.workflow }}'
-  cancel-in-progress: false
-
jobs:
  release-sdk:
    runs-on: 'ubuntu-latest'
@@ -50,7 +46,6 @@ jobs:
      packages: 'write'
      id-token: 'write'
      issues: 'write'
-     pull-requests: 'write'
    outputs:
      RELEASE_TAG: '${{ steps.version.outputs.RELEASE_TAG }}'

@@ -168,11 +163,11 @@ jobs:
          echo "BRANCH_NAME=${BRANCH_NAME}" >> "${GITHUB_OUTPUT}"

      - name: 'Update package version'
        working-directory: 'packages/sdk-typescript'
        env:
          RELEASE_VERSION: '${{ steps.version.outputs.RELEASE_VERSION }}'
        run: |-
-          # Use npm workspaces so the root lockfile is updated consistently.
-          npm version -w @qwen-code/sdk "${RELEASE_VERSION}" --no-git-tag-version --allow-same-version
+          npm version "${RELEASE_VERSION}" --no-git-tag-version --allow-same-version

      - name: 'Commit and Conditionally Push package version'
        env:
@@ -180,7 +175,7 @@ jobs:
          IS_DRY_RUN: '${{ steps.vars.outputs.is_dry_run }}'
          RELEASE_TAG: '${{ steps.version.outputs.RELEASE_TAG }}'
        run: |-
-          git add packages/sdk-typescript/package.json package-lock.json
+          git add packages/sdk-typescript/package.json
          if git diff --staged --quiet; then
            echo "No version changes to commit"
          else
@@ -227,49 +222,6 @@ jobs:
            --notes-start-tag "sdk-typescript-${PREVIOUS_RELEASE_TAG}" \
            --generate-notes

-      - name: 'Create PR to merge release branch into main'
-        if: |-
-          ${{ steps.vars.outputs.is_dry_run == 'false' }}
-        id: 'pr'
-        env:
-          GITHUB_TOKEN: '${{ secrets.GITHUB_TOKEN }}'
-          RELEASE_BRANCH: '${{ steps.release_branch.outputs.BRANCH_NAME }}'
-          RELEASE_TAG: '${{ steps.version.outputs.RELEASE_TAG }}'
-        run: |-
-          set -euo pipefail
-
-          pr_url="$(gh pr list --head "${RELEASE_BRANCH}" --base main --json url --jq '.[0].url')"
-          if [[ -z "${pr_url}" ]]; then
-            pr_url="$(gh pr create \
-              --base main \
-              --head "${RELEASE_BRANCH}" \
-              --title "chore(release): sdk-typescript ${RELEASE_TAG}" \
-              --body "Automated release PR for sdk-typescript ${RELEASE_TAG}.")"
-          fi
-
-          echo "PR_URL=${pr_url}" >> "${GITHUB_OUTPUT}"
-
-      - name: 'Wait for CI checks to complete'
-        if: |-
-          ${{ steps.vars.outputs.is_dry_run == 'false' }}
-        env:
-          GITHUB_TOKEN: '${{ secrets.GITHUB_TOKEN }}'
-          PR_URL: '${{ steps.pr.outputs.PR_URL }}'
-        run: |-
-          set -euo pipefail
-          echo "Waiting for CI checks to complete..."
-          gh pr checks "${PR_URL}" --watch --interval 30
-
-      - name: 'Enable auto-merge for release PR'
-        if: |-
-          ${{ steps.vars.outputs.is_dry_run == 'false' }}
-        env:
-          GITHUB_TOKEN: '${{ secrets.GITHUB_TOKEN }}'
-          PR_URL: '${{ steps.pr.outputs.PR_URL }}'
-        run: |-
-          set -euo pipefail
-          gh pr merge "${PR_URL}" --merge --auto
-
      - name: 'Create Issue on Failure'
        if: |-
          ${{ failure() }}
@@ -43,7 +43,6 @@ Qwen Code uses JSON settings files for persistent configuration. There are four
In addition to a project settings file, a project's `.qwen` directory can contain other project-specific files related to Qwen Code's operation, such as:

- [Custom sandbox profiles](../features/sandbox) (e.g. `.qwen/sandbox-macos-custom.sb`, `.qwen/sandbox.Dockerfile`).
-- [Agent Skills](../features/skills) (experimental) under `.qwen/skills/` (each Skill is a directory containing a `SKILL.md`).

### Available settings in `settings.json`

@@ -381,8 +380,6 @@ Arguments passed directly when running the CLI can override other configurations
| `--telemetry-otlp-protocol` | | Sets the OTLP protocol for telemetry (`grpc` or `http`). | | Defaults to `grpc`. See [telemetry](../../developers/development/telemetry) for more information. |
| `--telemetry-log-prompts` | | Enables logging of prompts for telemetry. | | See [telemetry](../../developers/development/telemetry) for more information. |
| `--checkpointing` | | Enables [checkpointing](../features/checkpointing). | | |
| `--experimental-acp` | | Enables ACP mode (Agent Control Protocol). Useful for IDE/editor integrations like [Zed](../integration-zed). | | Experimental. |
-| `--experimental-skills` | | Enables experimental [Agent Skills](../features/skills) (registers the `skill` tool and loads Skills from `.qwen/skills/` and `~/.qwen/skills/`). | | Experimental. |
| `--extensions` | `-e` | Specifies a list of extensions to use for the session. | Extension names | If not provided, all available extensions are used. Use the special term `qwen -e none` to disable all extensions. Example: `qwen -e my-extension -e my-other-extension` |
| `--list-extensions` | `-l` | Lists all available extensions and exits. | | |
| `--proxy` | | Sets the proxy for the CLI. | Proxy URL | Example: `--proxy http://localhost:7890`. |
@@ -1,7 +1,6 @@
export default {
  commands: 'Commands',
  'sub-agents': 'SubAgents',
-  skills: 'Skills (Experimental)',
  headless: 'Headless Mode',
  checkpointing: {
    display: 'hidden',
@@ -10,4 +9,5 @@ export default {
  mcp: 'MCP',
  'token-caching': 'Token Caching',
  sandbox: 'Sandboxing',
+  language: 'i18n',
};
@@ -48,7 +48,7 @@ Commands specifically for controlling interface and output language.
| → `ui [language]` | Set UI interface language | `/language ui zh-CN` |
| → `output [language]` | Set LLM output language | `/language output Chinese` |

-- Available UI languages: `zh-CN` (Simplified Chinese), `en-US` (English)
+- Available built-in UI languages: `zh-CN` (Simplified Chinese), `en-US` (English), `ru-RU` (Russian), `de-DE` (German)
- Output language examples: `Chinese`, `English`, `Japanese`, etc.

### 1.4 Tool and Model Management
@@ -72,17 +72,16 @@ Commands for managing AI tools and models.

Commands for obtaining information and performing system settings.

-| Command | Description | Usage Examples |
-| --------------- | ----------------------------------------------- | ------------------------------------------------ |
-| `/help` | Display help information for available commands | `/help` or `/?` |
-| `/about` | Display version information | `/about` |
-| `/stats` | Display detailed statistics for current session | `/stats` |
-| `/settings` | Open settings editor | `/settings` |
-| `/auth` | Change authentication method | `/auth` |
-| `/bug` | Submit issue about Qwen Code | `/bug Button click unresponsive` |
-| `/copy` | Copy last output content to clipboard | `/copy` |
-| `/quit-confirm` | Show confirmation dialog before quitting | `/quit-confirm` (shortcut: press `Ctrl+C` twice) |
-| `/quit` | Exit Qwen Code immediately | `/quit` or `/exit` |
+| Command | Description | Usage Examples |
+| ----------- | ----------------------------------------------- | -------------------------------- |
+| `/help` | Display help information for available commands | `/help` or `/?` |
+| `/about` | Display version information | `/about` |
+| `/stats` | Display detailed statistics for current session | `/stats` |
+| `/settings` | Open settings editor | `/settings` |
+| `/auth` | Change authentication method | `/auth` |
+| `/bug` | Submit issue about Qwen Code | `/bug Button click unresponsive` |
+| `/copy` | Copy last output content to clipboard | `/copy` |
+| `/quit` | Exit Qwen Code immediately | `/quit` or `/exit` |

### 1.6 Common Shortcuts
@@ -189,20 +189,19 @@ qwen -p "Write code" --output-format stream-json --include-partial-messages | jq

Key command-line options for headless usage:

-| Option | Description | Example |
-| ---------------------------- | ------------------------------------------------------- | ------------------------------------------------------------------------ |
-| `--prompt`, `-p` | Run in headless mode | `qwen -p "query"` |
-| `--output-format`, `-o` | Specify output format (text, json, stream-json) | `qwen -p "query" --output-format json` |
-| `--input-format` | Specify input format (text, stream-json) | `qwen --input-format text --output-format stream-json` |
-| `--include-partial-messages` | Include partial messages in stream-json output | `qwen -p "query" --output-format stream-json --include-partial-messages` |
-| `--debug`, `-d` | Enable debug mode | `qwen -p "query" --debug` |
-| `--all-files`, `-a` | Include all files in context | `qwen -p "query" --all-files` |
-| `--include-directories` | Include additional directories | `qwen -p "query" --include-directories src,docs` |
-| `--yolo`, `-y` | Auto-approve all actions | `qwen -p "query" --yolo` |
-| `--approval-mode` | Set approval mode | `qwen -p "query" --approval-mode auto_edit` |
-| `--continue` | Resume the most recent session for this project | `qwen --continue -p "Pick up where we left off"` |
-| `--resume [sessionId]` | Resume a specific session (or choose interactively) | `qwen --resume 123e... -p "Finish the refactor"` |
-| `--experimental-skills` | Enable experimental Skills (registers the `skill` tool) | `qwen --experimental-skills -p "What Skills are available?"` |
+| Option | Description | Example |
+| ---------------------------- | --------------------------------------------------- | ------------------------------------------------------------------------ |
+| `--prompt`, `-p` | Run in headless mode | `qwen -p "query"` |
+| `--output-format`, `-o` | Specify output format (text, json, stream-json) | `qwen -p "query" --output-format json` |
+| `--input-format` | Specify input format (text, stream-json) | `qwen --input-format text --output-format stream-json` |
+| `--include-partial-messages` | Include partial messages in stream-json output | `qwen -p "query" --output-format stream-json --include-partial-messages` |
+| `--debug`, `-d` | Enable debug mode | `qwen -p "query" --debug` |
+| `--all-files`, `-a` | Include all files in context | `qwen -p "query" --all-files` |
+| `--include-directories` | Include additional directories | `qwen -p "query" --include-directories src,docs` |
+| `--yolo`, `-y` | Auto-approve all actions | `qwen -p "query" --yolo` |
+| `--approval-mode` | Set approval mode | `qwen -p "query" --approval-mode auto_edit` |
+| `--continue` | Resume the most recent session for this project | `qwen --continue -p "Pick up where we left off"` |
+| `--resume [sessionId]` | Resume a specific session (or choose interactively) | `qwen --resume 123e... -p "Finish the refactor"` |

For complete details on all available configuration options, settings files, and environment variables, see the [Configuration Guide](../configuration/settings).
136 docs/users/features/language.md (new file)
@@ -0,0 +1,136 @@
# Internationalization (i18n) & Language

Qwen Code is built for multilingual workflows: it supports UI localization (i18n/l10n) in the CLI, lets you choose the assistant output language, and allows custom UI language packs.

## Overview

From a user's point of view, Qwen Code's "internationalization" spans multiple layers:

| Capability / Setting | What it controls | Where stored |
| ------------------------ | ---------------------------------------------------------------------- | ---------------------------- |
| `/language ui` | Terminal UI text (menus, system messages, prompts) | `~/.qwen/settings.json` |
| `/language output` | Language the AI responds in (an output preference, not UI translation) | `~/.qwen/output-language.md` |
| Custom UI language packs | Overrides/extends built-in UI translations | `~/.qwen/locales/*.js` |

## UI Language

This is the CLI's UI localization layer (i18n/l10n): it controls the language of menus, prompts, and system messages.

### Setting the UI Language

Use the `/language ui` command:

```bash
/language ui zh-CN # Chinese
/language ui en-US # English
/language ui ru-RU # Russian
/language ui de-DE # German
```

Aliases are also supported:

```bash
/language ui zh # Chinese
/language ui en # English
/language ui ru # Russian
/language ui de # German
```

### Auto-detection

On first startup, Qwen Code detects your system locale and sets the UI language automatically.

Detection priority:

1. `QWEN_CODE_LANG` environment variable
2. `LANG` environment variable
3. System locale via the JavaScript `Intl` API
4. Default: English
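A minimal sketch of this priority chain, condensed from the `detectSystemLanguage` implementation that appears later in this compare (an illustration, not the exact shipped code):

```typescript
// Mirrors the documented detection order: QWEN_CODE_LANG, then LANG,
// then the Intl locale, then the English default.
function detectUiLanguage(): 'en' | 'zh' | 'ru' | 'de' {
  const envLang = process.env['QWEN_CODE_LANG'] || process.env['LANG'];
  if (envLang?.startsWith('zh')) return 'zh';
  if (envLang?.startsWith('en')) return 'en';
  if (envLang?.startsWith('ru')) return 'ru';
  if (envLang?.startsWith('de')) return 'de';
  try {
    const locale = Intl.DateTimeFormat().resolvedOptions().locale;
    if (locale.startsWith('zh')) return 'zh';
    if (locale.startsWith('ru')) return 'ru';
    if (locale.startsWith('de')) return 'de';
  } catch {
    // fall through to the default
  }
  return 'en';
}
```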
## LLM Output Language

The LLM output language controls which language the AI assistant responds in, regardless of the language you type your questions in.

### How It Works

The LLM output language is controlled by a rule file at `~/.qwen/output-language.md`. This file is automatically included in the LLM's context during startup, instructing it to respond in the specified language.
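A hypothetical sketch of that mechanism (the real logic lives in `initializeLlmOutputLanguage`, shown elsewhere in this compare; the exact rule wording below is an assumption):

```typescript
import * as fs from 'node:fs';
import * as os from 'node:os';
import * as path from 'node:path';

// Hypothetical sketch: create ~/.qwen/output-language.md on first run.
// The rule text Qwen Code actually writes may differ.
function ensureOutputLanguageRule(languageName: string): void {
  const ruleFile = path.join(os.homedir(), '.qwen', 'output-language.md');
  if (fs.existsSync(ruleFile)) return; // keep an existing user setting
  fs.mkdirSync(path.dirname(ruleFile), { recursive: true });
  fs.writeFileSync(ruleFile, `Always respond in ${languageName}.\n`);
}
```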
### Auto-detection

On first startup, if no `output-language.md` file exists, Qwen Code automatically creates one based on your system locale. For example:

- System locale `zh` creates a rule for Chinese responses
- System locale `en` creates a rule for English responses
- System locale `ru` creates a rule for Russian responses
- System locale `de` creates a rule for German responses

### Manual Setting

Use `/language output <language>` to change it:

```bash
/language output Chinese
/language output English
/language output Japanese
/language output German
```

Any language name works. The LLM will be instructed to respond in that language.

> [!note]
>
> After changing the output language, restart Qwen Code for the change to take effect.

### File Location

```
~/.qwen/output-language.md
```

## Configuration

### Via Settings Dialog

1. Run `/settings`
2. Find "Language" under General
3. Select your preferred UI language

### Via Environment Variable

```bash
export QWEN_CODE_LANG=zh
```

This influences auto-detection on first startup (if you haven't set a UI language and no `output-language.md` file exists yet).

## Custom Language Packs

For UI translations, you can create custom language packs in `~/.qwen/locales/`:

- Example: `~/.qwen/locales/es.js` for Spanish
- Example: `~/.qwen/locales/fr.js` for French

The user directory takes precedence over built-in translations.
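A rough sketch of how that precedence could be resolved (the loader details here are an assumption; only the precedence itself is documented behavior):

```typescript
import * as fs from 'node:fs';
import * as os from 'node:os';
import * as path from 'node:path';
import { pathToFileURL } from 'node:url';

// Assumed loader sketch: a user pack in ~/.qwen/locales/<code>.js
// overrides built-in translations key by key.
async function loadTranslations(
  code: string,
  builtIn: Record<string, string | string[]>,
): Promise<Record<string, string | string[]>> {
  const userPack = path.join(os.homedir(), '.qwen', 'locales', `${code}.js`);
  if (!fs.existsSync(userPack)) return builtIn;
  const mod = await import(pathToFileURL(userPack).href);
  return { ...builtIn, ...mod.default }; // user entries win
}
```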
> [!tip]
>
> Contributions are welcome if you'd like to improve built-in translations or add new languages.
> For a concrete example, see [PR #1238: feat(i18n): add Russian language support](https://github.com/QwenLM/qwen-code/pull/1238).

### Language Pack Format

```javascript
// ~/.qwen/locales/es.js
export default {
  Hello: 'Hola',
  Settings: 'Configuración',
  // ... more translations
};
```

## Related Commands

- `/language` - Show current language settings
- `/language ui [lang]` - Set UI language
- `/language output <language>` - Set LLM output language
- `/settings` - Open settings dialog
@@ -1,282 +0,0 @@
# Agent Skills (Experimental)

> Create, manage, and share Skills to extend Qwen Code's capabilities.

This guide shows you how to create, use, and manage Agent Skills in **Qwen Code**. Skills are modular capabilities that extend the model's effectiveness through organized folders containing instructions (and, optionally, scripts and resources).

> [!note]
>
> Skills are currently **experimental** and must be enabled with `--experimental-skills`.

## Prerequisites

- Qwen Code (a recent version)
- Run with the experimental flag enabled:

```bash
qwen --experimental-skills
```

- Basic familiarity with Qwen Code ([Quickstart](../quickstart.md))

## What are Agent Skills?

Agent Skills package expertise into discoverable capabilities. Each Skill consists of a `SKILL.md` file with instructions that the model can load when relevant, plus optional supporting files like scripts and templates.

### How Skills are invoked

Skills are **model-invoked**: the model autonomously decides when to use them based on your request and the Skill's description. This is different from slash commands, which are **user-invoked** (you explicitly type `/command`).

### Benefits

- Extend Qwen Code for your workflows
- Share expertise across your team via git
- Reduce repetitive prompting
- Compose multiple Skills for complex tasks

## Create a Skill

Skills are stored as directories containing a `SKILL.md` file.

### Personal Skills

Personal Skills are available across all your projects. Store them in `~/.qwen/skills/`:

```bash
mkdir -p ~/.qwen/skills/my-skill-name
```

Use personal Skills for:

- Your individual workflows and preferences
- Experimental Skills you're developing
- Personal productivity helpers

### Project Skills

Project Skills are shared with your team. Store them in `.qwen/skills/` within your project:

```bash
mkdir -p .qwen/skills/my-skill-name
```

Use project Skills for:

- Team workflows and conventions
- Project-specific expertise
- Shared utilities and scripts

Project Skills can be checked into git and automatically become available to teammates.

## Write `SKILL.md`

Create a `SKILL.md` file with YAML frontmatter and Markdown content:

```yaml
---
name: your-skill-name
description: Brief description of what this Skill does and when to use it
---

# Your Skill Name

## Instructions
Provide clear, step-by-step guidance for Qwen Code.

## Examples
Show concrete examples of using this Skill.
```

### Field requirements

Qwen Code currently validates that:

- `name` is a non-empty string
- `description` is a non-empty string

Recommended conventions (not strictly enforced yet; a validation sketch follows this list):

- Use lowercase letters, numbers, and hyphens in `name`
- Make `description` specific: include both **what** the Skill does and **when** to use it (key words users will naturally mention)
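A sketch of what the enforced part of that validation amounts to (illustrative only; not the project's actual implementation):

```typescript
// Checks exactly the two documented requirements: non-empty `name`
// and non-empty `description` in the SKILL.md frontmatter.
interface SkillMetadata {
  name?: unknown;
  description?: unknown;
}

function validateSkillMetadata(meta: SkillMetadata): string | null {
  if (typeof meta.name !== 'string' || meta.name.trim() === '') {
    return 'SKILL.md frontmatter must set a non-empty "name"';
  }
  if (typeof meta.description !== 'string' || meta.description.trim() === '') {
    return 'SKILL.md frontmatter must set a non-empty "description"';
  }
  return null;
}
```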
## Add supporting files

Create additional files alongside `SKILL.md`:

```text
my-skill/
├── SKILL.md (required)
├── reference.md (optional documentation)
├── examples.md (optional examples)
├── scripts/
│   └── helper.py (optional utility)
└── templates/
    └── template.txt (optional template)
```

Reference these files from `SKILL.md`:

````markdown
For advanced usage, see [reference.md](reference.md).

Run the helper script:

```bash
python scripts/helper.py input.txt
```
````

## View available Skills

When `--experimental-skills` is enabled, Qwen Code discovers Skills from:

- Personal Skills: `~/.qwen/skills/`
- Project Skills: `.qwen/skills/`

To view available Skills, ask Qwen Code directly:

```text
What Skills are available?
```

Or inspect the filesystem:

```bash
# List personal Skills
ls ~/.qwen/skills/

# List project Skills (if in a project directory)
ls .qwen/skills/

# View a specific Skill's content
cat ~/.qwen/skills/my-skill/SKILL.md
```

## Test a Skill

After creating a Skill, test it by asking questions that match its description.

Example: if your description mentions "PDF files":

```text
Can you help me extract text from this PDF?
```

The model autonomously decides to use your Skill if it matches the request; you don't need to invoke it explicitly.

## Debug a Skill

If Qwen Code doesn't use your Skill, check these common issues:

### Make the description specific

Too vague:

```yaml
description: Helps with documents
```

Specific:

```yaml
description: Extract text and tables from PDF files, fill forms, merge documents. Use when working with PDFs, forms, or document extraction.
```

### Verify the file path

- Personal Skills: `~/.qwen/skills/<skill-name>/SKILL.md`
- Project Skills: `.qwen/skills/<skill-name>/SKILL.md`

```bash
# Personal
ls ~/.qwen/skills/my-skill/SKILL.md

# Project
ls .qwen/skills/my-skill/SKILL.md
```

### Check YAML syntax

Invalid YAML prevents the Skill metadata from loading correctly.

```bash
head -n 15 SKILL.md
```

Ensure:

- The opening `---` is on line 1
- The closing `---` comes before the Markdown content
- The YAML is valid (no tabs, correct indentation)

### View errors

Run Qwen Code in debug mode to see Skill loading errors:

```bash
qwen --experimental-skills --debug
```

## Share Skills with your team

You can share Skills through project repositories:

1. Add the Skill under `.qwen/skills/`
2. Commit and push
3. Teammates pull the changes and run with `--experimental-skills`

```bash
git add .qwen/skills/
git commit -m "Add team Skill for PDF processing"
git push
```

## Update a Skill

Edit `SKILL.md` directly:

```bash
# Personal Skill
code ~/.qwen/skills/my-skill/SKILL.md

# Project Skill
code .qwen/skills/my-skill/SKILL.md
```

Changes take effect the next time you start Qwen Code. If Qwen Code is already running, restart it to load the updates.

## Remove a Skill

Delete the Skill directory:

```bash
# Personal
rm -rf ~/.qwen/skills/my-skill

# Project
rm -rf .qwen/skills/my-skill
git commit -m "Remove unused Skill"
```

## Best practices

### Keep Skills focused

One Skill should address one capability:

- Focused: "PDF form filling", "Excel analysis", "Git commit messages"
- Too broad: "Document processing" (split it into smaller Skills)

### Write clear descriptions

Help the model discover when to use Skills by including specific triggers:

```yaml
description: Analyze Excel spreadsheets, create pivot tables, and generate charts. Use when working with Excel files, spreadsheets, or .xlsx data.
```

### Test with your team

- Does the Skill activate when expected?
- Are the instructions clear?
- Are there missing examples or edge cases?
@@ -24,6 +24,8 @@ export default tseslint.config(
      '.integration-tests/**',
      'packages/**/.integration-test/**',
      'dist/**',
+      'docs-site/.next/**',
+      'docs-site/out/**',
    ],
  },
  eslint.configs.recommended,
@@ -202,8 +202,8 @@ describe('file-system', () => {
    const readAttempt = toolLogs.find(
      (log) => log.toolRequest.name === 'read_file',
    );
-    const editAttempt = toolLogs.find(
-      (log) => log.toolRequest.name === 'edit_file',
+    const writeAttempt = toolLogs.find(
+      (log) => log.toolRequest.name === 'write_file',
    );
    const successfulReplace = toolLogs.find(
      (log) => log.toolRequest.name === 'replace' && log.toolRequest.success,
@@ -226,15 +226,15 @@ describe('file-system', () => {

    // CRITICAL: Verify that no matter what the model did, it never successfully
    // wrote or replaced anything.
-    if (editAttempt) {
+    if (writeAttempt) {
      console.error(
-        'A edit_file attempt was made when no file should be written.',
+        'A write_file attempt was made when no file should be written.',
      );
      printDebugInfo(rig, result);
    }
    expect(
-      editAttempt,
-      'edit_file should not have been called',
+      writeAttempt,
+      'write_file should not have been called',
    ).toBeUndefined();

    if (successfulReplace) {

@@ -952,8 +952,7 @@ describe('Permission Control (E2E)', () => {
      TEST_TIMEOUT,
    );

-    // FIXME: This test is flaky and sometimes fails with no tool calls.
-    it.skip(
+    it(
      'should allow read-only tools without restrictions',
      async () => {
        // Create test files for the model to read
2074 package-lock.json (generated) — file diff suppressed because it is too large
@@ -18,6 +18,9 @@
  "scripts": {
    "start": "cross-env node scripts/start.js",
    "debug": "cross-env DEBUG=1 node --inspect-brk scripts/start.js",
+    "auth:npm": "npx google-artifactregistry-auth",
+    "auth:docker": "gcloud auth configure-docker us-west1-docker.pkg.dev",
+    "auth": "npm run auth:npm && npm run auth:docker",
    "generate": "node scripts/generate-git-commit-info.js",
    "build": "node scripts/build.js",
    "build-and-start": "npm run build && npm run start",
@@ -92,6 +95,7 @@
    "eslint-plugin-react-hooks": "^5.2.0",
    "glob": "^10.5.0",
    "globals": "^16.0.0",
+    "google-artifactregistry-auth": "^3.4.0",
    "husky": "^9.1.7",
    "json": "^11.0.0",
    "lint-staged": "^16.1.6",
@@ -36,10 +36,10 @@
    "sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.6.0"
  },
  "dependencies": {
-    "@google/genai": "1.30.0",
+    "@google/genai": "1.16.0",
    "@iarna/toml": "^2.2.5",
    "@qwen-code/qwen-code-core": "file:../core",
-    "@modelcontextprotocol/sdk": "^1.25.1",
+    "@modelcontextprotocol/sdk": "^1.15.1",
    "@types/update-notifier": "^6.0.8",
    "ansi-regex": "^6.2.2",
    "command-exists": "^1.2.9",
@@ -26,37 +26,5 @@ export function validateAuthMethod(authMethod: string): string | null {
    return null;
  }

-  if (authMethod === AuthType.USE_ANTHROPIC) {
-    const hasApiKey = process.env['ANTHROPIC_API_KEY'];
-    if (!hasApiKey) {
-      return 'ANTHROPIC_API_KEY environment variable not found.';
-    }
-
-    const hasBaseUrl = process.env['ANTHROPIC_BASE_URL'];
-    if (!hasBaseUrl) {
-      return 'ANTHROPIC_BASE_URL environment variable not found.';
-    }
-
-    return null;
-  }
-
-  if (authMethod === AuthType.USE_GEMINI) {
-    const hasApiKey = process.env['GEMINI_API_KEY'];
-    if (!hasApiKey) {
-      return 'GEMINI_API_KEY environment variable not found. Please set it in your .env file or environment variables.';
-    }
-    return null;
-  }
-
-  if (authMethod === AuthType.USE_VERTEX_AI) {
-    const hasApiKey = process.env['GOOGLE_API_KEY'];
-    if (!hasApiKey) {
-      return 'GOOGLE_API_KEY environment variable not found. Please set it in your .env file or environment variables.';
-    }
-
-    process.env['GOOGLE_GENAI_USE_VERTEXAI'] = 'true';
-    return null;
-  }
-
  return 'Invalid auth method selected.';
}
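For reference, a minimal sketch of how the trimmed function is typically consumed (the call site and import path are assumptions; only the `(authMethod: string) => string | null` signature comes from the hunk above):

```typescript
// Assumed usage sketch for validateAuthMethod (import path hypothetical):
// import { validateAuthMethod } from './config/auth.js';

function assertAuthConfigured(authMethod: string): void {
  const error = validateAuthMethod(authMethod);
  if (error !== null) {
    // A non-null return means the selected method is misconfigured.
    console.error(error);
    process.exit(1);
  }
}
```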
@@ -2114,14 +2114,7 @@ describe('loadCliConfig model selection', () => {
  });

  it('always prefers model from argvs', async () => {
-    process.argv = [
-      'node',
-      'script.js',
-      '--auth-type',
-      'openai',
-      '--model',
-      'qwen3-coder-plus',
-    ];
+    process.argv = ['node', 'script.js', '--model', 'qwen3-coder-plus'];
    const argv = await parseArguments({} as Settings);
    const config = await loadCliConfig(
      {
@@ -2141,14 +2134,7 @@ describe('loadCliConfig model selection', () => {
  });

  it('selects the model from argvs if provided', async () => {
-    process.argv = [
-      'node',
-      'script.js',
-      '--auth-type',
-      'openai',
-      '--model',
-      'qwen3-coder-plus',
-    ];
+    process.argv = ['node', 'script.js', '--model', 'qwen3-coder-plus'];
    const argv = await parseArguments({} as Settings);
    const config = await loadCliConfig(
      {
@@ -112,7 +112,6 @@ export interface CliArgs {
  allowedMcpServerNames: string[] | undefined;
  allowedTools: string[] | undefined;
  experimentalAcp: boolean | undefined;
-  experimentalSkills: boolean | undefined;
  extensions: string[] | undefined;
  listExtensions: boolean | undefined;
  openaiLogging: boolean | undefined;
@@ -308,11 +307,6 @@ export async function parseArguments(settings: Settings): Promise<CliArgs> {
      type: 'boolean',
      description: 'Starts the agent in ACP mode',
    })
-    .option('experimental-skills', {
-      type: 'boolean',
-      description: 'Enable experimental Skills feature',
-      default: false,
-    })
    .option('channel', {
      type: 'string',
      choices: ['VSCode', 'ACP', 'SDK', 'CI'],
@@ -466,13 +460,7 @@ export async function parseArguments(settings: Settings): Promise<CliArgs> {
    })
    .option('auth-type', {
      type: 'string',
-      choices: [
-        AuthType.USE_OPENAI,
-        AuthType.USE_ANTHROPIC,
-        AuthType.QWEN_OAUTH,
-        AuthType.USE_GEMINI,
-        AuthType.USE_VERTEX_AI,
-      ],
+      choices: [AuthType.USE_OPENAI, AuthType.QWEN_OAUTH],
      description: 'Authentication type',
    })
    .deprecateOption(
@@ -877,30 +865,11 @@ export async function loadCliConfig(
    );
  }

-  const selectedAuthType =
-    (argv.authType as AuthType | undefined) ||
-    settings.security?.auth?.selectedType;
-
-  const apiKey =
-    (selectedAuthType === AuthType.USE_OPENAI
-      ? argv.openaiApiKey ||
-        process.env['OPENAI_API_KEY'] ||
-        settings.security?.auth?.apiKey
-      : '') || '';
-  const baseUrl =
-    (selectedAuthType === AuthType.USE_OPENAI
-      ? argv.openaiBaseUrl ||
-        process.env['OPENAI_BASE_URL'] ||
-        settings.security?.auth?.baseUrl
-      : '') || '';
  const resolvedModel =
    argv.model ||
-    (selectedAuthType === AuthType.USE_OPENAI
-      ? process.env['OPENAI_MODEL'] ||
-        process.env['QWEN_MODEL'] ||
-        settings.model?.name
-      : '') ||
-    '';
+    process.env['OPENAI_MODEL'] ||
+    process.env['QWEN_MODEL'] ||
+    settings.model?.name;

  const sandboxConfig = await loadSandboxConfig(settings, argv);
  const screenReader =
@@ -982,20 +951,27 @@ export async function loadCliConfig(
      maxSessionTurns:
        argv.maxSessionTurns ?? settings.model?.maxSessionTurns ?? -1,
      experimentalZedIntegration: argv.experimentalAcp || false,
-      experimentalSkills: argv.experimentalSkills || false,
      listExtensions: argv.listExtensions || false,
      extensions: allExtensions,
      blockedMcpServers,
      noBrowser: !!process.env['NO_BROWSER'],
-      authType: selectedAuthType,
+      authType:
+        (argv.authType as AuthType | undefined) ||
+        settings.security?.auth?.selectedType,
      inputFormat,
      outputFormat,
      includePartialMessages,
      generationConfig: {
        ...(settings.model?.generationConfig || {}),
        model: resolvedModel,
-        apiKey,
-        baseUrl,
+        apiKey:
+          argv.openaiApiKey ||
+          process.env['OPENAI_API_KEY'] ||
+          settings.security?.auth?.apiKey,
+        baseUrl:
+          argv.openaiBaseUrl ||
+          process.env['OPENAI_BASE_URL'] ||
+          settings.security?.auth?.baseUrl,
        enableOpenAILogging:
          (typeof argv.openaiLogging === 'undefined'
            ? settings.model?.enableOpenAILogging
@@ -56,17 +56,6 @@ vi.mock('simple-git', () => ({
  }),
}));

-vi.mock('./extensions/github.js', async (importOriginal) => {
-  const actual =
-    await importOriginal<typeof import('./extensions/github.js')>();
-  return {
-    ...actual,
-    downloadFromGitHubRelease: vi
-      .fn()
-      .mockRejectedValue(new Error('Mocked GitHub release download failure')),
-  };
-});
-
vi.mock('os', async (importOriginal) => {
  const mockedOs = await importOriginal<typeof os>();
  return {
@@ -41,17 +41,6 @@ vi.mock('simple-git', () => ({
  }),
}));

-vi.mock('../extensions/github.js', async (importOriginal) => {
-  const actual =
-    await importOriginal<typeof import('../extensions/github.js')>();
-  return {
-    ...actual,
-    downloadFromGitHubRelease: vi
-      .fn()
-      .mockRejectedValue(new Error('Mocked GitHub release download failure')),
-  };
-});
-
vi.mock('os', async (importOriginal) => {
  const mockedOs = await importOriginal<typeof os>();
  return {
@@ -15,6 +15,7 @@ import { type LoadedSettings, SettingScope } from '../config/settings.js';
import { performInitialAuth } from './auth.js';
import { validateTheme } from './theme.js';
import { initializeI18n } from '../i18n/index.js';
+import { initializeLlmOutputLanguage } from '../ui/commands/languageCommand.js';

export interface InitializationResult {
  authError: string | null;
@@ -41,6 +42,9 @@ export async function initializeApp(
    'auto';
  await initializeI18n(languageSetting);

+  // Auto-detect and set LLM output language on first use
+  initializeLlmOutputLanguage();
+
  const authType = settings.merged.security?.auth?.selectedType;
  const authError = await performInitialAuth(config, authType);
@@ -461,7 +461,6 @@ describe('gemini.tsx main function kitty protocol', () => {
  allowedMcpServerNames: undefined,
  allowedTools: undefined,
  experimentalAcp: undefined,
-  experimentalSkills: undefined,
  extensions: undefined,
  listExtensions: undefined,
  openaiLogging: undefined,
@@ -4,8 +4,13 @@
 * SPDX-License-Identifier: Apache-2.0
 */

-import type { Config, AuthType } from '@qwen-code/qwen-code-core';
-import { InputFormat, logUserPrompt } from '@qwen-code/qwen-code-core';
+import type { Config } from '@qwen-code/qwen-code-core';
+import {
+  AuthType,
+  getOauthClient,
+  InputFormat,
+  logUserPrompt,
+} from '@qwen-code/qwen-code-core';
import { render } from 'ink';
import dns from 'node:dns';
import os from 'node:os';
@@ -394,6 +399,15 @@ export async function main() {
    initializationResult = await initializeApp(config, settings);
  }

+  if (
+    settings.merged.security?.auth?.selectedType ===
+      AuthType.LOGIN_WITH_GOOGLE &&
+    config.isBrowserLaunchSuppressed()
+  ) {
+    // Do oauth before app renders to make copying the link possible.
+    await getOauthClient(settings.merged.security.auth.selectedType, config);
+  }
+
  if (config.getExperimentalZedIntegration()) {
    return runAcpAgent(config, settings, extensions, argv);
  }
@@ -1,6 +1,6 @@
/**
 * @license
- * Copyright 2025 Qwen
+ * Copyright 2025 Qwen team
 * SPDX-License-Identifier: Apache-2.0
 */
@@ -8,15 +8,21 @@ import * as fs from 'node:fs';
import * as path from 'node:path';
import { fileURLToPath, pathToFileURL } from 'node:url';
import { homedir } from 'node:os';
+import {
+  type SupportedLanguage,
+  getLanguageNameFromLocale,
+} from './languages.js';

-export type SupportedLanguage = 'en' | 'zh' | 'ru' | string; // Allow custom language codes
+export type { SupportedLanguage };
+export { getLanguageNameFromLocale };

// State
let currentLanguage: SupportedLanguage = 'en';
-let translations: Record<string, string> = {};
+let translations: Record<string, string | string[]> = {};

// Cache
-type TranslationDict = Record<string, string>;
+type TranslationValue = string | string[];
+type TranslationDict = Record<string, TranslationValue>;
const translationCache: Record<string, TranslationDict> = {};
const loadingPromises: Record<string, Promise<TranslationDict>> = {};

@@ -52,11 +58,13 @@ export function detectSystemLanguage(): SupportedLanguage {
  if (envLang?.startsWith('zh')) return 'zh';
  if (envLang?.startsWith('en')) return 'en';
  if (envLang?.startsWith('ru')) return 'ru';
+  if (envLang?.startsWith('de')) return 'de';

  try {
    const locale = Intl.DateTimeFormat().resolvedOptions().locale;
    if (locale.startsWith('zh')) return 'zh';
    if (locale.startsWith('ru')) return 'ru';
+    if (locale.startsWith('de')) return 'de';
  } catch {
    // Fallback to default
  }
@@ -224,9 +232,25 @@ export function getCurrentLanguage(): SupportedLanguage {

export function t(key: string, params?: Record<string, string>): string {
  const translation = translations[key] ?? key;
+  if (Array.isArray(translation)) {
+    return key;
+  }
  return interpolate(translation, params);
}

+/**
+ * Get a translation that is an array of strings.
+ * @param key The translation key
+ * @returns The array of strings, or an empty array if not found or not an array
+ */
+export function ta(key: string): string[] {
+  const translation = translations[key];
+  if (Array.isArray(translation)) {
+    return translation;
+  }
+  return [];
+}
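A usage sketch for the new helper, assuming the array-valued `WITTY_LOADING_PHRASES` key that the locale files further down introduce:

```typescript
// ta() returns [] for missing or non-array keys, so guard with a fallback.
const phrases = ta('WITTY_LOADING_PHRASES');
const phrase =
  phrases.length > 0
    ? phrases[Math.floor(Math.random() * phrases.length)]
    : 'Loading...';
```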

export async function initializeI18n(
  lang?: SupportedLanguage | 'auto',
): Promise<void> {
48 packages/cli/src/i18n/languages.ts (new file)
@@ -0,0 +1,48 @@
/**
 * @license
 * Copyright 2025 Qwen team
 * SPDX-License-Identifier: Apache-2.0
 */

export type SupportedLanguage = 'en' | 'zh' | 'ru' | 'de' | string;

export interface LanguageDefinition {
  /** The internal locale code used by the i18n system (e.g., 'en', 'zh'). */
  code: SupportedLanguage;
  /** The standard name used in UI settings (e.g., 'en-US', 'zh-CN'). */
  id: string;
  /** The full English name of the language (e.g., 'English', 'Chinese'). */
  fullName: string;
}

export const SUPPORTED_LANGUAGES: readonly LanguageDefinition[] = [
  {
    code: 'en',
    id: 'en-US',
    fullName: 'English',
  },
  {
    code: 'zh',
    id: 'zh-CN',
    fullName: 'Chinese',
  },
  {
    code: 'ru',
    id: 'ru-RU',
    fullName: 'Russian',
  },
  {
    code: 'de',
    id: 'de-DE',
    fullName: 'German',
  },
];

/**
 * Maps a locale code to its English language name.
 * Used for LLM output language instructions.
 */
export function getLanguageNameFromLocale(locale: SupportedLanguage): string {
  const lang = SUPPORTED_LANGUAGES.find((l) => l.code === locale);
  return lang?.fullName || 'English';
}
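A quick usage sketch for the helpers above:

```typescript
import {
  getLanguageNameFromLocale,
  SUPPORTED_LANGUAGES,
} from './languages.js';

getLanguageNameFromLocale('ru'); // 'Russian'
getLanguageNameFromLocale('fr'); // 'English' (unknown codes fall back)
SUPPORTED_LANGUAGES.map((l) => l.id); // ['en-US', 'zh-CN', 'ru-RU', 'de-DE']
```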
@@ -102,8 +102,8 @@ export default {
  'Theme "{{themeName}}" not found.': 'Theme "{{themeName}}" not found.',
  'Theme "{{themeName}}" not found in selected scope.':
    'Theme "{{themeName}}" not found in selected scope.',
-  'clear the screen and conversation history':
-    'clear the screen and conversation history',
+  'Clear conversation history and free up context':
+    'Clear conversation history and free up context',
  'Compresses the context by replacing it with a summary.':
    'Compresses the context by replacing it with a summary.',
  'open full Qwen Code documentation in your browser':
@@ -604,9 +604,10 @@ export default {
  // ============================================================================
  // Commands - Clear
  // ============================================================================
  'Clearing terminal and resetting chat.':
    'Clearing terminal and resetting chat.',
+  'Clearing terminal.': 'Clearing terminal.',
  'Starting a new session, resetting chat, and clearing terminal.':
    'Starting a new session, resetting chat, and clearing terminal.',
  'Starting a new session and clearing.':
    'Starting a new session and clearing.',

  // ============================================================================
  // Commands - Compress
@@ -927,192 +928,138 @@ export default {
  // ============================================================================
  'Waiting for user confirmation...': 'Waiting for user confirmation...',
  '(esc to cancel, {{time}})': '(esc to cancel, {{time}})',
-  "I'm Feeling Lucky": "I'm Feeling Lucky",
-  'Shipping awesomeness... ': 'Shipping awesomeness... ',
-  'Painting the serifs back on...': 'Painting the serifs back on...',
-  'Navigating the slime mold...': 'Navigating the slime mold...',
-  'Consulting the digital spirits...': 'Consulting the digital spirits...',
-  'Reticulating splines...': 'Reticulating splines...',
-  'Warming up the AI hamsters...': 'Warming up the AI hamsters...',
-  'Asking the magic conch shell...': 'Asking the magic conch shell...',
-  'Generating witty retort...': 'Generating witty retort...',
-  'Polishing the algorithms...': 'Polishing the algorithms...',
-  "Don't rush perfection (or my code)...":

+  // ============================================================================
+  // Loading Phrases
+  // ============================================================================
+  WITTY_LOADING_PHRASES: [
+    "I'm Feeling Lucky",
+    'Shipping awesomeness... ',
+    'Painting the serifs back on...',
+    'Navigating the slime mold...',
+    'Consulting the digital spirits...',
+    'Reticulating splines...',
+    'Warming up the AI hamsters...',
+    'Asking the magic conch shell...',
+    'Generating witty retort...',
+    'Polishing the algorithms...',
+    "Don't rush perfection (or my code)...",
-  'Brewing fresh bytes...': 'Brewing fresh bytes...',
-  'Counting electrons...': 'Counting electrons...',
-  'Engaging cognitive processors...': 'Engaging cognitive processors...',
-  'Checking for syntax errors in the universe...':
+    'Brewing fresh bytes...',
+    'Counting electrons...',
+    'Engaging cognitive processors...',
+    'Checking for syntax errors in the universe...',
-  'One moment, optimizing humor...': 'One moment, optimizing humor...',
-  'Shuffling punchlines...': 'Shuffling punchlines...',
-  'Untangling neural nets...': 'Untangling neural nets...',
-  'Compiling brilliance...': 'Compiling brilliance...',
-  'Loading wit.exe...': 'Loading wit.exe...',
-  'Summoning the cloud of wisdom...': 'Summoning the cloud of wisdom...',
-  'Preparing a witty response...': 'Preparing a witty response...',
-  "Just a sec, I'm debugging reality...":
+    'One moment, optimizing humor...',
+    'Shuffling punchlines...',
+    'Untangling neural nets...',
+    'Compiling brilliance...',
+    'Loading wit.exe...',
+    'Summoning the cloud of wisdom...',
+    'Preparing a witty response...',
+    "Just a sec, I'm debugging reality...",
-  'Confuzzling the options...': 'Confuzzling the options...',
-  'Tuning the cosmic frequencies...': 'Tuning the cosmic frequencies...',
-  'Crafting a response worthy of your patience...':
+    'Confuzzling the options...',
+    'Tuning the cosmic frequencies...',
+    'Crafting a response worthy of your patience...',
-  'Compiling the 1s and 0s...': 'Compiling the 1s and 0s...',
-  'Resolving dependencies... and existential crises...':
+    'Compiling the 1s and 0s...',
+    'Resolving dependencies... and existential crises...',
-  'Defragmenting memories... both RAM and personal...':
+    'Defragmenting memories... both RAM and personal...',
-  'Rebooting the humor module...': 'Rebooting the humor module...',
-  'Caching the essentials (mostly cat memes)...':
+    'Rebooting the humor module...',
+    'Caching the essentials (mostly cat memes)...',
-  'Optimizing for ludicrous speed': 'Optimizing for ludicrous speed',
-  "Swapping bits... don't tell the bytes...":
+    'Optimizing for ludicrous speed',
+    "Swapping bits... don't tell the bytes...",
-  'Garbage collecting... be right back...':
+    'Garbage collecting... be right back...',
-  'Assembling the interwebs...': 'Assembling the interwebs...',
-  'Converting coffee into code...': 'Converting coffee into code...',
-  'Updating the syntax for reality...': 'Updating the syntax for reality...',
-  'Rewiring the synapses...': 'Rewiring the synapses...',
-  'Looking for a misplaced semicolon...':
+    'Assembling the interwebs...',
+    'Converting coffee into code...',
+    'Updating the syntax for reality...',
+    'Rewiring the synapses...',
+    'Looking for a misplaced semicolon...',
-  "Greasin' the cogs of the machine...": "Greasin' the cogs of the machine...",
-  'Pre-heating the servers...': 'Pre-heating the servers...',
-  'Calibrating the flux capacitor...': 'Calibrating the flux capacitor...',
-  'Engaging the improbability drive...': 'Engaging the improbability drive...',
-  'Channeling the Force...': 'Channeling the Force...',
-  'Aligning the stars for optimal response...':
+    "Greasin' the cogs of the machine...",
+    'Pre-heating the servers...',
+    'Calibrating the flux capacitor...',
+    'Engaging the improbability drive...',
+    'Channeling the Force...',
+    'Aligning the stars for optimal response...',
-  'So say we all...': 'So say we all...',
-  'Loading the next great idea...': 'Loading the next great idea...',
-  "Just a moment, I'm in the zone...": "Just a moment, I'm in the zone...",
-  'Preparing to dazzle you with brilliance...':
+    'So say we all...',
+    'Loading the next great idea...',
+    "Just a moment, I'm in the zone...",
+    'Preparing to dazzle you with brilliance...',
-  "Just a tick, I'm polishing my wit...":
+    "Just a tick, I'm polishing my wit...",
-  "Hold tight, I'm crafting a masterpiece...":
+    "Hold tight, I'm crafting a masterpiece...",
-  "Just a jiffy, I'm debugging the universe...":
+    "Just a jiffy, I'm debugging the universe...",
-  "Just a moment, I'm aligning the pixels...":
+    "Just a moment, I'm aligning the pixels...",
-  "Just a sec, I'm optimizing the humor...":
+    "Just a sec, I'm optimizing the humor...",
-  "Just a moment, I'm tuning the algorithms...":
+    "Just a moment, I'm tuning the algorithms...",
-  'Warp speed engaged...': 'Warp speed engaged...',
-  'Mining for more Dilithium crystals...':
+    'Warp speed engaged...',
+    'Mining for more Dilithium crystals...',
-  "Don't panic...": "Don't panic...",
-  'Following the white rabbit...': 'Following the white rabbit...',
-  'The truth is in here... somewhere...':
+    "Don't panic...",
+    'Following the white rabbit...',
+    'The truth is in here... somewhere...',
-  'Blowing on the cartridge...': 'Blowing on the cartridge...',
-  'Loading... Do a barrel roll!': 'Loading... Do a barrel roll!',
-  'Waiting for the respawn...': 'Waiting for the respawn...',
-  'Finishing the Kessel Run in less than 12 parsecs...':
+    'Blowing on the cartridge...',
+    'Loading... Do a barrel roll!',
+    'Waiting for the respawn...',
+    'Finishing the Kessel Run in less than 12 parsecs...',
-  "The cake is not a lie, it's just still loading...":
+    "The cake is not a lie, it's just still loading...",
-  'Fiddling with the character creation screen...':
+    'Fiddling with the character creation screen...',
-  "Just a moment, I'm finding the right meme...":
+    "Just a moment, I'm finding the right meme...",
-  "Pressing 'A' to continue...": "Pressing 'A' to continue...",
-  'Herding digital cats...': 'Herding digital cats...',
-  'Polishing the pixels...': 'Polishing the pixels...',
-  'Finding a suitable loading screen pun...':
+    "Pressing 'A' to continue...",
+    'Herding digital cats...',
+    'Polishing the pixels...',
+    'Finding a suitable loading screen pun...',
-  'Distracting you with this witty phrase...':
+    'Distracting you with this witty phrase...',
-  'Almost there... probably...': 'Almost there... probably...',
-  'Our hamsters are working as fast as they can...':
+    'Almost there... probably...',
+    'Our hamsters are working as fast as they can...',
-  'Giving Cloudy a pat on the head...': 'Giving Cloudy a pat on the head...',
-  'Petting the cat...': 'Petting the cat...',
-  'Rickrolling my boss...': 'Rickrolling my boss...',
-  'Never gonna give you up, never gonna let you down...':
+    'Giving Cloudy a pat on the head...',
+    'Petting the cat...',
+    'Rickrolling my boss...',
+    'Never gonna give you up, never gonna let you down...',
-  'Slapping the bass...': 'Slapping the bass...',
-  'Tasting the snozberries...': 'Tasting the snozberries...',
-  "I'm going the distance, I'm going for speed...":
+    'Slapping the bass...',
+    'Tasting the snozberries...',
+    "I'm going the distance, I'm going for speed...",
-  'Is this the real life? Is this just fantasy?...':
+    'Is this the real life? Is this just fantasy?...',
-  "I've got a good feeling about this...":
+    "I've got a good feeling about this...",
-  'Poking the bear...': 'Poking the bear...',
-  'Doing research on the latest memes...':
+    'Poking the bear...',
+    'Doing research on the latest memes...',
-  'Figuring out how to make this more witty...':
+    'Figuring out how to make this more witty...',
-  'Hmmm... let me think...': 'Hmmm... let me think...',
-  'What do you call a fish with no eyes? A fsh...':
+    'Hmmm... let me think...',
+    'What do you call a fish with no eyes? A fsh...',
-  'Why did the computer go to therapy? It had too many bytes...':
+    'Why did the computer go to therapy? It had too many bytes...',
-  "Why don't programmers like nature? It has too many bugs...":
+    "Why don't programmers like nature? It has too many bugs...",
-  'Why do programmers prefer dark mode? Because light attracts bugs...':
+    'Why do programmers prefer dark mode? Because light attracts bugs...',
-  'Why did the developer go broke? Because they used up all their cache...':
+    'Why did the developer go broke? Because they used up all their cache...',
-  "What can you do with a broken pencil? Nothing, it's pointless...":
+    "What can you do with a broken pencil? Nothing, it's pointless...",
-  'Applying percussive maintenance...': 'Applying percussive maintenance...',
-  'Searching for the correct USB orientation...':
+    'Applying percussive maintenance...',
+    'Searching for the correct USB orientation...',
-  'Ensuring the magic smoke stays inside the wires...':
+    'Ensuring the magic smoke stays inside the wires...',
-  'Rewriting in Rust for no particular reason...':
+    'Rewriting in Rust for no particular reason...',
-  'Trying to exit Vim...': 'Trying to exit Vim...',
-  'Spinning up the hamster wheel...': 'Spinning up the hamster wheel...',
-  "That's not a bug, it's an undocumented feature...":
+    'Trying to exit Vim...',
+    'Spinning up the hamster wheel...',
+    "That's not a bug, it's an undocumented feature...",
-  'Engage.': 'Engage.',
-  "I'll be back... with an answer.": "I'll be back... with an answer.",
-  'My other process is a TARDIS...': 'My other process is a TARDIS...',
-  'Communing with the machine spirit...':
+    'Engage.',
+    "I'll be back... with an answer.",
+    'My other process is a TARDIS...',
+    'Communing with the machine spirit...',
-  'Letting the thoughts marinate...': 'Letting the thoughts marinate...',
-  'Just remembered where I put my keys...':
+    'Letting the thoughts marinate...',
+    'Just remembered where I put my keys...',
-  'Pondering the orb...': 'Pondering the orb...',
-  "I've seen things you people wouldn't believe... like a user who reads loading messages.":
+    'Pondering the orb...',
+    "I've seen things you people wouldn't believe... like a user who reads loading messages.",
-  'Initiating thoughtful gaze...': 'Initiating thoughtful gaze...',
-  "What's a computer's favorite snack? Microchips.":
+    'Initiating thoughtful gaze...',
+    "What's a computer's favorite snack? Microchips.",
-  "Why do Java developers wear glasses? Because they don't C#.":
+    "Why do Java developers wear glasses? Because they don't C#.",
-  'Charging the laser... pew pew!': 'Charging the laser... pew pew!',
-  'Dividing by zero... just kidding!': 'Dividing by zero... just kidding!',
-  'Looking for an adult superviso... I mean, processing.':
+    'Charging the laser... pew pew!',
+    'Dividing by zero... just kidding!',
+    'Looking for an adult superviso... I mean, processing.',
-  'Making it go beep boop.': 'Making it go beep boop.',
-  'Buffering... because even AIs need a moment.':
+    'Making it go beep boop.',
+    'Buffering... because even AIs need a moment.',
-  'Entangling quantum particles for a faster response...':
+    'Entangling quantum particles for a faster response...',
-  'Polishing the chrome... on the algorithms.':
+    'Polishing the chrome... on the algorithms.',
-  'Are you not entertained? (Working on it!)':
+    'Are you not entertained? (Working on it!)',
-  'Summoning the code gremlins... to help, of course.':
+    'Summoning the code gremlins... to help, of course.',
-  'Just waiting for the dial-up tone to finish...':
+    'Just waiting for the dial-up tone to finish...',
-  'Recalibrating the humor-o-meter.': 'Recalibrating the humor-o-meter.',
-  'My other loading screen is even funnier.':
+    'Recalibrating the humor-o-meter.',
+    'My other loading screen is even funnier.',
-  "Pretty sure there's a cat walking on the keyboard somewhere...":
+    "Pretty sure there's a cat walking on the keyboard somewhere...",
-  'Enhancing... Enhancing... Still loading.':
+    'Enhancing... Enhancing... Still loading.',
-  "It's not a bug, it's a feature... of this loading screen.":
+    "It's not a bug, it's a feature... of this loading screen.",
-  'Have you tried turning it off and on again? (The loading screen, not me.)':
+    'Have you tried turning it off and on again? (The loading screen, not me.)',
-  'Constructing additional pylons...': 'Constructing additional pylons...',
+    'Constructing additional pylons...',
+  ],
};
@@ -103,8 +103,8 @@ export default {
'Theme "{{themeName}}" not found.': 'Тема "{{themeName}}" не найдена.',
'Theme "{{themeName}}" not found in selected scope.':
'Тема "{{themeName}}" не найдена в выбранной области.',
'clear the screen and conversation history':
'Очистка экрана и истории диалога',
'Clear conversation history and free up context':
'Очистить историю диалога и освободить контекст',
'Compresses the context by replacing it with a summary.':
'Сжатие контекста заменой на краткую сводку',
'open full Qwen Code documentation in your browser':
@@ -313,6 +313,7 @@ export default {
'Tool Output Truncation Lines': 'Лимит строк вывода инструментов',
'Folder Trust': 'Доверие к папке',
'Vision Model Preview': 'Визуальная модель (предпросмотр)',
'Tool Schema Compliance': 'Соответствие схеме инструмента',
// Варианты перечислений настроек
'Auto (detect from system)': 'Авто (определить из системы)',
Text: 'Текст',
@@ -341,8 +342,8 @@ export default {
'Установка предпочитаемого внешнего редактора',
'Manage extensions': 'Управление расширениями',
'List active extensions': 'Показать активные расширения',
'Update extensions. Usage: update |--all':
'Обновить расширения. Использование: update |--all',
'Update extensions. Usage: update <extension-names>|--all':
'Обновить расширения. Использование: update <extension-names>|--all',
'manage IDE integration': 'Управление интеграцией с IDE',
'check status of IDE integration': 'Проверить статус интеграции с IDE',
'install required IDE companion for {{ideName}}':
@@ -400,7 +401,8 @@ export default {
'Set LLM output language': 'Установка языка вывода LLM',
'Usage: /language ui [zh-CN|en-US]':
'Использование: /language ui [zh-CN|en-US|ru-RU]',
'Usage: /language output ': 'Использование: /language output ',
'Usage: /language output <language>':
'Использование: /language output <language>',
'Example: /language output 中文': 'Пример: /language output 中文',
'Example: /language output English': 'Пример: /language output English',
'Example: /language output 日本語': 'Пример: /language output 日本語',
@@ -417,9 +419,8 @@ export default {
'To request additional UI language packs, please open an issue on GitHub.':
'Для запроса дополнительных языковых пакетов интерфейса, пожалуйста, создайте обращение на GitHub.',
'Available options:': 'Доступные варианты:',
' - zh-CN: Simplified Chinese': ' - zh-CN: Упрощенный китайский',
' - en-US: English': ' - en-US: Английский',
' - ru-RU: Russian': ' - ru-RU: Русский',
' - zh-CN: Simplified Chinese': ' - zh-CN: Упрощенный китайский',
' - en-US: English': ' - en-US: Английский',
'Set UI language to Simplified Chinese (zh-CN)':
'Установить язык интерфейса на упрощенный китайский (zh-CN)',
'Set UI language to English (en-US)':
@@ -435,8 +436,8 @@ export default {
'Режим подтверждения изменен на: {{mode}}',
'Approval mode changed to: {{mode}} (saved to {{scope}} settings{{location}})':
'Режим подтверждения изменен на: {{mode}} (сохранено в настройках {{scope}}{{location}})',
'Usage: /approval-mode [--session|--user|--project]':
'Использование: /approval-mode [--session|--user|--project]',
'Usage: /approval-mode <mode> [--session|--user|--project]':
'Использование: /approval-mode <mode> [--session|--user|--project]',
'Scope subcommands do not accept additional arguments.':
'Подкоманды области не принимают дополнительных аргументов.',
'Plan mode - Analyze only, do not modify files or execute commands':
@@ -588,8 +589,8 @@ export default {
'Ошибка при экспорте диалога: {{error}}',
'Conversation shared to {{filePath}}': 'Диалог экспортирован в {{filePath}}',
'No conversation found to share.': 'Нет диалога для экспорта.',
'Share the current conversation to a markdown or json file. Usage: /chat share <путь-к-файлу>':
'Экспортировать текущий диалог в markdown или json файл. Использование: /chat share <путь-к-файлу>',
'Share the current conversation to a markdown or json file. Usage: /chat share <file>':
'Экспортировать текущий диалог в markdown или json файл. Использование: /chat share <файл>',

// ============================================================================
// Команды - Резюме
@@ -618,8 +619,9 @@ export default {
// ============================================================================
// Команды - Очистка
// ============================================================================
'Clearing terminal and resetting chat.': 'Очистка терминала и сброс чата.',
'Clearing terminal.': 'Очистка терминала.',
'Starting a new session, resetting chat, and clearing terminal.':
'Начало новой сессии, сброс чата и очистка терминала.',
'Starting a new session and clearing.': 'Начало новой сессии и очистка.',

// ============================================================================
// Команды - Сжатие
@@ -650,8 +652,8 @@ export default {
'Команда /directory add не поддерживается в ограничительных профилях песочницы. Пожалуйста, используйте --include-directories при запуске сессии.',
"Error adding '{{path}}': {{error}}":
"Ошибка при добавлении '{{path}}': {{error}}",
'Successfully added GEMINI.md files from the following directories if there are:\n- {{directories}}':
'Успешно добавлены файлы GEMINI.md из следующих директорий (если они есть):\n- {{directories}}',
'Successfully added QWEN.md files from the following directories if there are:\n- {{directories}}':
'Успешно добавлены файлы QWEN.md из следующих директорий (если они есть):\n- {{directories}}',
'Error refreshing memory: {{error}}':
'Ошибка при обновлении памяти: {{error}}',
'Successfully added directories:\n- {{directories}}':
@@ -884,6 +886,7 @@ export default {
// Экран выхода / Статистика
// ============================================================================
'Agent powering down. Goodbye!': 'Агент завершает работу. До свидания!',
'To continue this session, run': 'Для продолжения этой сессии, выполните',
'Interaction Summary': 'Сводка взаимодействия',
'Session ID:': 'ID сессии:',
'Tool Calls:': 'Вызовы инструментов:',
@@ -943,179 +946,140 @@ export default {
'Waiting for user confirmation...':
'Ожидание подтверждения от пользователя...',
'(esc to cancel, {{time}})': '(esc для отмены, {{time}})',
"I'm Feeling Lucky": 'Мне повезёт!',
'Shipping awesomeness... ': 'Доставляем крутизну... ',
'Painting the serifs back on...': 'Рисуем засечки на буквах...',
'Navigating the slime mold...': 'Пробираемся через слизевиков..',
'Consulting the digital spirits...': 'Советуемся с цифровыми духами...',
'Reticulating splines...': 'Сглаживание сплайнов...',
'Warming up the AI hamsters...': 'Разогреваем ИИ-хомячков...',
'Asking the magic conch shell...': 'Спрашиваем волшебную ракушку...',
'Generating witty retort...': 'Генерируем остроумный ответ...',
'Polishing the algorithms...': 'Полируем алгоритмы...',
"Don't rush perfection (or my code)...":

// ============================================================================

// ============================================================================
// Loading Phrases
// ============================================================================
WITTY_LOADING_PHRASES: [
'Мне повезёт!',
'Доставляем крутизну... ',
'Рисуем засечки на буквах...',
'Пробираемся через слизевиков..',
'Советуемся с цифровыми духами...',
'Сглаживание сплайнов...',
'Разогреваем ИИ-хомячков...',
'Спрашиваем волшебную ракушку...',
'Генерируем остроумный ответ...',
'Полируем алгоритмы...',
'Не торопите совершенство (или мой код)...',
'Brewing fresh bytes...': 'Завариваем свежие байты...',
'Counting electrons...': 'Пересчитываем электроны...',
'Engaging cognitive processors...': 'Задействуем когнитивные процессоры...',
'Checking for syntax errors in the universe...':
'Завариваем свежие байты...',
'Пересчитываем электроны...',
'Задействуем когнитивные процессоры...',
'Ищем синтаксические ошибки во вселенной...',
'One moment, optimizing humor...': 'Секундочку, оптимизируем юмор...',
'Shuffling punchlines...': 'Перетасовываем панчлайны...',
'Untangling neural nets...': 'Распутаваем нейросети...',
'Compiling brilliance...': 'Компилируем гениальность...',
'Loading wit.exe...': 'Загружаем yumor.exe...',
'Summoning the cloud of wisdom...': 'Призываем облако мудрости...',
'Preparing a witty response...': 'Готовим остроумный ответ...',
"Just a sec, I'm debugging reality...": 'Секунду, идёт отладка реальности...',
'Confuzzling the options...': 'Запутываем варианты...',
'Tuning the cosmic frequencies...': 'Настраиваем космические частоты...',
'Crafting a response worthy of your patience...':
'Секундочку, оптимизируем юмор...',
'Перетасовываем панчлайны...',
'Распутаваем нейросети...',
'Компилируем гениальность...',
'Загружаем yumor.exe...',
'Призываем облако мудрости...',
'Готовим остроумный ответ...',
'Секунду, идёт отладка реальности...',
'Запутываем варианты...',
'Настраиваем космические частоты...',
'Создаем ответ, достойный вашего терпения...',
'Compiling the 1s and 0s...': 'Компилируем единички и нолики...',
'Resolving dependencies... and existential crises...':
'Компилируем единички и нолики...',
'Разрешаем зависимости... и экзистенциальные кризисы...',
'Defragmenting memories... both RAM and personal...':
'Дефрагментация памяти... и оперативной, и личной...',
'Rebooting the humor module...': 'Перезагрузка модуля юмора...',
'Caching the essentials (mostly cat memes)...':
'Перезагрузка модуля юмора...',
'Кэшируем самое важное (в основном мемы с котиками)...',
'Optimizing for ludicrous speed': 'Оптимизация для безумной скорости',
"Swapping bits... don't tell the bytes...":
'Оптимизация для безумной скорости',
'Меняем биты... только байтам не говорите...',
'Garbage collecting... be right back...': 'Сборка мусора... скоро вернусь...',
'Assembling the interwebs...': 'Сборка интернетов...',
'Converting coffee into code...': 'Превращаем кофе в код...',
'Updating the syntax for reality...': 'Обновляем синтаксис реальности...',
'Rewiring the synapses...': 'Переподключаем синапсы...',
'Looking for a misplaced semicolon...': 'Ищем лишнюю точку с запятой...',
"Greasin' the cogs of the machine...": 'Смазываем шестерёнки машины...',
'Pre-heating the servers...': 'Разогреваем серверы...',
'Calibrating the flux capacitor...': 'Калибруем потоковый накопитель...',
'Engaging the improbability drive...': 'Включаем двигатель невероятности...',
'Channeling the Force...': 'Направляем Силу...',
'Aligning the stars for optimal response...':
'Сборка мусора... скоро вернусь...',
'Сборка интернетов...',
'Превращаем кофе в код...',
'Обновляем синтаксис реальности...',
'Переподключаем синапсы...',
'Ищем лишнюю точку с запятой...',
'Смазываем шестерёнки машины...',
'Разогреваем серверы...',
'Калибруем потоковый накопитель...',
'Включаем двигатель невероятности...',
'Направляем Силу...',
'Выравниваем звёзды для оптимального ответа...',
'So say we all...': 'Так скажем мы все...',
'Loading the next great idea...': 'Загрузка следующей великой идеи...',
"Just a moment, I'm in the zone...": 'Минутку, я в потоке...',
'Preparing to dazzle you with brilliance...':
'Так скажем мы все...',
'Загрузка следующей великой идеи...',
'Минутку, я в потоке...',
'Готовлюсь ослепить вас гениальностью...',
"Just a tick, I'm polishing my wit...": 'Секунду, полирую остроумие...',
"Hold tight, I'm crafting a masterpiece...": 'Держитесь, создаю шедевр...',
"Just a jiffy, I'm debugging the universe...":
'Секунду, полирую остроумие...',
'Держитесь, создаю шедевр...',
'Мигом, отлаживаю вселенную...',
"Just a moment, I'm aligning the pixels...": 'Момент, выравниваю пиксели...',
"Just a sec, I'm optimizing the humor...": 'Секунду, оптимизирую юмор...',
"Just a moment, I'm tuning the algorithms...":
'Момент, выравниваю пиксели...',
'Секунду, оптимизирую юмор...',
'Момент, настраиваю алгоритмы...',
'Warp speed engaged...': 'Варп-скорость включена...',
'Mining for more Dilithium crystals...': 'Добываем кристаллы дилития...',
"Don't panic...": 'Без паники...',
'Following the white rabbit...': 'Следуем за белым кроликом...',
'The truth is in here... somewhere...': 'Истина где-то здесь... внутри...',
'Blowing on the cartridge...': 'Продуваем картридж...',
'Loading... Do a barrel roll!': 'Загрузка... Сделай бочку!',
'Waiting for the respawn...': 'Ждем респауна...',
'Finishing the Kessel Run in less than 12 parsecs...':
'Варп-прыжок активирован...',
'Добываем кристаллы дилития...',
'Без паники...',
'Следуем за белым кроликом...',
'Истина где-то здесь... внутри...',
'Продуваем картридж...',
'Загрузка... Сделай бочку!',
'Ждем респауна...',
'Делаем Дугу Кесселя менее чем за 12 парсеков...',
"The cake is not a lie, it's just still loading...":
'Тортик — не ложь, он просто ещё грузится...',
'Fiddling with the character creation screen...':
'Возимся с экраном создания персонажа...',
"Just a moment, I'm finding the right meme...":
'Минутку, ищу подходящий мем...',
"Pressing 'A' to continue...": "Нажимаем 'A' для продолжения...",
'Herding digital cats...': 'Пасём цифровых котов...',
'Polishing the pixels...': 'Полируем пиксели...',
'Finding a suitable loading screen pun...':
"Нажимаем 'A' для продолжения...",
'Пасём цифровых котов...',
'Полируем пиксели...',
'Ищем подходящий каламбур для экрана загрузки...',
'Distracting you with this witty phrase...':
'Отвлекаем вас этой остроумной фразой...',
'Almost there... probably...': 'Почти готово... вроде...',
'Our hamsters are working as fast as they can...':
'Почти готово... вроде...',
'Наши хомячки работают изо всех сил...',
'Giving Cloudy a pat on the head...': 'Гладим Облачко по голове...',
'Petting the cat...': 'Гладим кота...',
'Rickrolling my boss...': 'Рикроллим начальника...',
'Never gonna give you up, never gonna let you down...':
'Гладим Облачко по голове...',
'Гладим кота...',
'Рикроллим начальника...',
'Never gonna give you up, never gonna let you down...',
'Slapping the bass...': 'Лабаем бас-гитару...',
'Tasting the snozberries...': 'Пробуем снузберри на вкус...',
"I'm going the distance, I'm going for speed...":
'Лабаем бас-гитару...',
'Пробуем снузберри на вкус...',
'Иду до конца, иду на скорость...',
'Is this the real life? Is this just fantasy?...':
'Is this the real life? Is this just fantasy?...',
"I've got a good feeling about this...": 'У меня хорошее предчувствие...',
'Poking the bear...': 'Дразним медведя... (Не лезь...)',
'Doing research on the latest memes...': 'Изучаем свежие мемы...',
'Figuring out how to make this more witty...':
'У меня хорошее предчувствие...',
'Дразним медведя... (Не лезь...)',
'Изучаем свежие мемы...',
'Думаем, как сделать это остроумнее...',
'Hmmm... let me think...': 'Хмм... дайте подумать...',
'What do you call a fish with no eyes? A fsh...':
'Хмм... дайте подумать...',
'Как называется бумеранг, который не возвращается? Палка...',
'Why did the computer go to therapy? It had too many bytes...':
'Почему компьютер простудился? Потому что оставил окна открытыми...',
"Why don't programmers like nature? It has too many bugs...":
'Почему программисты не любят гулять на улице? Там среда не настроена...',
'Why do programmers prefer dark mode? Because light attracts bugs...':
'Почему программисты предпочитают тёмную тему? Потому что в темноте не видно багов...',
'Why did the developer go broke? Because they used up all their cache...':
'Почему разработчик разорился? Потому что потратил весь свой кэш...',
"What can you do with a broken pencil? Nothing, it's pointless...":
'Что можно делать со сломанным карандашом? Ничего — он тупой...',
'Applying percussive maintenance...': 'Провожу настройку методом тыка...',
'Searching for the correct USB orientation...':
'Провожу настройку методом тыка...',
'Ищем, какой стороной вставлять флешку...',
'Ensuring the magic smoke stays inside the wires...':
'Следим, чтобы волшебный дым не вышел из проводов...',
'Rewriting in Rust for no particular reason...':
'Переписываем всё на Rust без особой причины...',
'Trying to exit Vim...': 'Пытаемся выйти из Vim...',
'Spinning up the hamster wheel...': 'Раскручиваем колесо для хомяка...',
"That's not a bug, it's an undocumented feature...": 'Это не баг, а фича...',
'Engage.': 'Поехали!',
"I'll be back... with an answer.": 'Я вернусь... с ответом.',
'My other process is a TARDIS...': 'Мой другой процесс — это ТАРДИС...',
'Communing with the machine spirit...': 'Общаемся с духом машины...',
'Letting the thoughts marinate...': 'Даем мыслям замариноваться...',
'Just remembered where I put my keys...':
'Пытаемся выйти из Vim...',
'Раскручиваем колесо для хомяка...',
'Это не баг, а фича...',
'Поехали!',
'Я вернусь... с ответом.',
'Мой другой процесс — это ТАРДИС...',
'Общаемся с духом машины...',
'Даем мыслям замариноваться...',
'Только что вспомнил, куда положил ключи...',
'Pondering the orb...': 'Размышляю над сферой...',
"I've seen things you people wouldn't believe... like a user who reads loading messages.":
'Я видел такое, во что вы, люди, просто не поверите... например, пользователя, читающего сообщения загрузки.',
'Initiating thoughtful gaze...': 'Инициируем задумчивый взгляд...',
"What's a computer's favorite snack? Microchips.":
'Размышляю над сферой...',
'Я видел такое, что вам, людям, и не снилось... пользователя, читающего эти сообщения.',
'Инициируем задумчивый взгляд...',
'Что сервер заказывает в баре? Пинг-коладу.',
"Why do Java developers wear glasses? Because they don't C#.":
'Почему Java-разработчики не убираются дома? Они ждут сборщик мусора...',
'Charging the laser... pew pew!': 'Заряжаем лазер... пиу-пиу!',
'Dividing by zero... just kidding!': 'Делим на ноль... шучу!',
'Looking for an adult superviso... I mean, processing.':
'Заряжаем лазер... пиу-пиу!',
'Делим на ноль... шучу!',
'Ищу взрослых для присмот... в смысле, обрабатываю.',
'Making it go beep boop.': 'Делаем бип-буп.',
'Buffering... because even AIs need a moment.':
'Буферизация... даже ИИ нужно мгновение.',
'Entangling quantum particles for a faster response...':
'Делаем бип-буп.',
'Буферизация... даже ИИ нужно время подумать.',
'Запутываем квантовые частицы для быстрого ответа...',
'Polishing the chrome... on the algorithms.':
'Полируем хром... на алгоритмах.',
'Are you not entertained? (Working on it!)':
'Вы ещё не развлеклись?! Разве вы не за этим сюда пришли?!',
'Summoning the code gremlins... to help, of course.':
'Призываем гремлинов кода... для помощи, конечно же.',
'Just waiting for the dial-up tone to finish...':
'Ждем, пока закончится звук dial-up модема...',
'Recalibrating the humor-o-meter.': 'Перекалибровка юморометра.',
'My other loading screen is even funnier.':
'Перекалибровка юморометра.',
'Мой другой экран загрузки ещё смешнее.',
"Pretty sure there's a cat walking on the keyboard somewhere...":
'Кажется, где-то по клавиатуре гуляет кот...',
'Enhancing... Enhancing... Still loading.':
'Улучшаем... Ещё улучшаем... Всё ещё грузится.',
"It's not a bug, it's a feature... of this loading screen.":
'Это не баг, это фича... экрана загрузки.',
'Have you tried turning it off and on again? (The loading screen, not me.)':
'Пробовали выключить и включить снова? (Экран загрузки, не меня!)',
'Constructing additional pylons...': 'Нужно построить больше пилонов...',
'Нужно построить больше пилонов...',
],
};

@@ -101,7 +101,7 @@ export default {
'Theme "{{themeName}}" not found.': '未找到主题 "{{themeName}}"。',
'Theme "{{themeName}}" not found in selected scope.':
'在所选作用域中未找到主题 "{{themeName}}"。',
'clear the screen and conversation history': '清屏并清除对话历史',
'Clear conversation history and free up context': '清除对话历史并释放上下文',
'Compresses the context by replacing it with a summary.':
'通过用摘要替换来压缩上下文',
'open full Qwen Code documentation in your browser':
@@ -573,8 +573,9 @@ export default {
// ============================================================================
// Commands - Clear
// ============================================================================
'Clearing terminal and resetting chat.': '正在清屏并重置聊天',
'Clearing terminal.': '正在清屏',
'Starting a new session, resetting chat, and clearing terminal.':
'正在开始新会话,重置聊天并清屏。',
'Starting a new session and clearing.': '正在开始新会话并清屏。',

// ============================================================================
// Commands - Compress
@@ -880,165 +881,39 @@ export default {
// ============================================================================
'Waiting for user confirmation...': '等待用户确认...',
'(esc to cancel, {{time}})': '(按 esc 取消,{{time}})',
"I'm Feeling Lucky": '我感觉很幸运',
'Shipping awesomeness... ': '正在运送精彩内容... ',
'Painting the serifs back on...': '正在重新绘制衬线...',
'Navigating the slime mold...': '正在导航粘液霉菌...',
'Consulting the digital spirits...': '正在咨询数字精灵...',
'Reticulating splines...': '正在网格化样条曲线...',
'Warming up the AI hamsters...': '正在预热 AI 仓鼠...',
'Asking the magic conch shell...': '正在询问魔法海螺壳...',
'Generating witty retort...': '正在生成机智的反驳...',
'Polishing the algorithms...': '正在打磨算法...',
"Don't rush perfection (or my code)...": '不要急于追求完美(或我的代码)...',
'Brewing fresh bytes...': '正在酿造新鲜字节...',
'Counting electrons...': '正在计算电子...',
'Engaging cognitive processors...': '正在启动认知处理器...',
'Checking for syntax errors in the universe...':
'正在检查宇宙中的语法错误...',
'One moment, optimizing humor...': '稍等片刻,正在优化幽默感...',
'Shuffling punchlines...': '正在洗牌笑点...',
'Untangling neural nets...': '正在解开神经网络...',
'Compiling brilliance...': '正在编译智慧...',
'Loading wit.exe...': '正在加载 wit.exe...',
'Summoning the cloud of wisdom...': '正在召唤智慧云...',
'Preparing a witty response...': '正在准备机智的回复...',
"Just a sec, I'm debugging reality...": '稍等片刻,我正在调试现实...',
'Confuzzling the options...': '正在混淆选项...',
'Tuning the cosmic frequencies...': '正在调谐宇宙频率...',
'Crafting a response worthy of your patience...':
'正在制作值得您耐心等待的回复...',
'Compiling the 1s and 0s...': '正在编译 1 和 0...',
'Resolving dependencies... and existential crises...':
'正在解决依赖关系...和存在主义危机...',
'Defragmenting memories... both RAM and personal...':
'正在整理记忆碎片...包括 RAM 和个人记忆...',
'Rebooting the humor module...': '正在重启幽默模块...',
'Caching the essentials (mostly cat memes)...':
'正在缓存必需品(主要是猫咪表情包)...',
'Optimizing for ludicrous speed': '正在优化到荒谬的速度',
"Swapping bits... don't tell the bytes...": '正在交换位...不要告诉字节...',
'Garbage collecting... be right back...': '正在垃圾回收...马上回来...',
'Assembling the interwebs...': '正在组装互联网...',
'Converting coffee into code...': '正在将咖啡转换为代码...',
'Updating the syntax for reality...': '正在更新现实的语法...',
'Rewiring the synapses...': '正在重新连接突触...',
'Looking for a misplaced semicolon...': '正在寻找放错位置的分号...',
"Greasin' the cogs of the machine...": '正在给机器的齿轮上油...',
'Pre-heating the servers...': '正在预热服务器...',
'Calibrating the flux capacitor...': '正在校准通量电容器...',
'Engaging the improbability drive...': '正在启动不可能性驱动器...',
'Channeling the Force...': '正在引导原力...',
'Aligning the stars for optimal response...': '正在对齐星星以获得最佳回复...',
'So say we all...': '我们都说...',
'Loading the next great idea...': '正在加载下一个伟大的想法...',
"Just a moment, I'm in the zone...": '稍等片刻,我正进入状态...',
'Preparing to dazzle you with brilliance...': '正在准备用智慧让您眼花缭乱...',
"Just a tick, I'm polishing my wit...": '稍等片刻,我正在打磨我的智慧...',
"Hold tight, I'm crafting a masterpiece...": '请稍等,我正在制作杰作...',
"Just a jiffy, I'm debugging the universe...": '稍等片刻,我正在调试宇宙...',
"Just a moment, I'm aligning the pixels...": '稍等片刻,我正在对齐像素...',
"Just a sec, I'm optimizing the humor...": '稍等片刻,我正在优化幽默感...',
"Just a moment, I'm tuning the algorithms...": '稍等片刻,我正在调整算法...',
'Warp speed engaged...': '曲速已启动...',
'Mining for more Dilithium crystals...': '正在挖掘更多二锂晶体...',
"Don't panic...": '不要惊慌...',
'Following the white rabbit...': '正在跟随白兔...',
'The truth is in here... somewhere...': '真相在这里...某个地方...',
'Blowing on the cartridge...': '正在吹卡带...',
'Loading... Do a barrel roll!': '正在加载...做个桶滚!',
'Waiting for the respawn...': '等待重生...',
'Finishing the Kessel Run in less than 12 parsecs...':
'正在以不到 12 秒差距完成凯塞尔航线...',
"The cake is not a lie, it's just still loading...":
'蛋糕不是谎言,只是还在加载...',
'Fiddling with the character creation screen...': '正在摆弄角色创建界面...',
"Just a moment, I'm finding the right meme...":
'稍等片刻,我正在寻找合适的表情包...',
"Pressing 'A' to continue...": "按 'A' 继续...",
'Herding digital cats...': '正在放牧数字猫...',
'Polishing the pixels...': '正在打磨像素...',
'Finding a suitable loading screen pun...': '正在寻找合适的加载屏幕双关语...',
'Distracting you with this witty phrase...':
'正在用这个机智的短语分散您的注意力...',
'Almost there... probably...': '快到了...可能...',
'Our hamsters are working as fast as they can...':
'我们的仓鼠正在尽可能快地工作...',
'Giving Cloudy a pat on the head...': '正在拍拍 Cloudy 的头...',
'Petting the cat...': '正在抚摸猫咪...',
'Rickrolling my boss...': '正在 Rickroll 我的老板...',
'Never gonna give you up, never gonna let you down...':
'永远不会放弃你,永远不会让你失望...',
'Slapping the bass...': '正在拍打低音...',
'Tasting the snozberries...': '正在品尝 snozberries...',
"I'm going the distance, I'm going for speed...":
'我要走得更远,我要追求速度...',
'Is this the real life? Is this just fantasy?...':
'这是真实的生活吗?还是只是幻想?...',
"I've got a good feeling about this...": '我对这个感觉很好...',
'Poking the bear...': '正在戳熊...',
'Doing research on the latest memes...': '正在研究最新的表情包...',
'Figuring out how to make this more witty...': '正在想办法让这更有趣...',
'Hmmm... let me think...': '嗯...让我想想...',
'What do you call a fish with no eyes? A fsh...':
'没有眼睛的鱼叫什么?一条鱼...',
'Why did the computer go to therapy? It had too many bytes...':
'为什么电脑去看心理医生?因为它有太多字节...',
"Why don't programmers like nature? It has too many bugs...":
'为什么程序员不喜欢大自然?因为虫子太多了...',
'Why do programmers prefer dark mode? Because light attracts bugs...':
'为什么程序员喜欢暗色模式?因为光会吸引虫子...',
'Why did the developer go broke? Because they used up all their cache...':
'为什么开发者破产了?因为他们用完了所有缓存...',
"What can you do with a broken pencil? Nothing, it's pointless...":
'你能用断了的铅笔做什么?什么都不能,因为它没有笔尖...',
'Applying percussive maintenance...': '正在应用敲击维护...',
'Searching for the correct USB orientation...': '正在寻找正确的 USB 方向...',
'Ensuring the magic smoke stays inside the wires...':
'确保魔法烟雾留在电线内...',
'Rewriting in Rust for no particular reason...':
'正在用 Rust 重写,没有特别的原因...',
'Trying to exit Vim...': '正在尝试退出 Vim...',
'Spinning up the hamster wheel...': '正在启动仓鼠轮...',
"That's not a bug, it's an undocumented feature...":
'这不是一个错误,这是一个未记录的功能...',
'Engage.': '启动。',
"I'll be back... with an answer.": '我会回来的...带着答案。',
'My other process is a TARDIS...': '我的另一个进程是 TARDIS...',
'Communing with the machine spirit...': '正在与机器精神交流...',
'Letting the thoughts marinate...': '让想法慢慢酝酿...',
'Just remembered where I put my keys...': '刚刚想起我把钥匙放在哪里了...',
'Pondering the orb...': '正在思考球体...',
"I've seen things you people wouldn't believe... like a user who reads loading messages.":
'我见过你们不会相信的事情...比如一个阅读加载消息的用户。',
'Initiating thoughtful gaze...': '正在启动深思凝视...',
"What's a computer's favorite snack? Microchips.":
'电脑最喜欢的零食是什么?微芯片。',
"Why do Java developers wear glasses? Because they don't C#.":
'为什么 Java 开发者戴眼镜?因为他们不会 C#。',
'Charging the laser... pew pew!': '正在给激光充电...砰砰!',
'Dividing by zero... just kidding!': '除以零...只是开玩笑!',
'Looking for an adult superviso... I mean, processing.':
'正在寻找成人监督...我是说,处理中。',
'Making it go beep boop.': '让它发出哔哔声。',
'Buffering... because even AIs need a moment.':
'正在缓冲...因为即使是 AI 也需要片刻。',
'Entangling quantum particles for a faster response...':
'正在纠缠量子粒子以获得更快的回复...',
'Polishing the chrome... on the algorithms.': '正在打磨铬...在算法上。',
'Are you not entertained? (Working on it!)': '你不觉得有趣吗?(正在努力!)',
'Summoning the code gremlins... to help, of course.':
'正在召唤代码小精灵...当然是来帮忙的。',
'Just waiting for the dial-up tone to finish...': '只是等待拨号音结束...',
'Recalibrating the humor-o-meter.': '正在重新校准幽默计。',
'My other loading screen is even funnier.': '我的另一个加载屏幕更有趣。',
"Pretty sure there's a cat walking on the keyboard somewhere...":
'很确定有只猫在某个地方键盘上走...',
'Enhancing... Enhancing... Still loading.':
'正在增强...正在增强...仍在加载。',
"It's not a bug, it's a feature... of this loading screen.":
'这不是一个错误,这是一个功能...这个加载屏幕的功能。',
'Have you tried turning it off and on again? (The loading screen, not me.)':
'你试过把它关掉再打开吗?(加载屏幕,不是我。)',
'Constructing additional pylons...': '正在建造额外的能量塔...',
WITTY_LOADING_PHRASES: [
// --- 职场搬砖系列 ---
'正在努力搬砖,请稍候...',
'老板在身后,快加载啊!',
'头发掉光前,一定能加载完...',
'服务器正在深呼吸,准备放大招...',
'正在向服务器投喂咖啡...',

// --- 大厂黑话系列 ---
'正在赋能全链路,寻找关键抓手...',
'正在降本增效,优化加载路径...',
'正在打破部门壁垒,沉淀方法论...',
'正在拥抱变化,迭代核心价值...',
'正在对齐颗粒度,打磨底层逻辑...',
'大力出奇迹,正在强行加载...',

// --- 程序员自嘲系列 ---
'只要我不写代码,代码就没有 Bug...',
'正在把 Bug 转化为 Feature...',
'只要我不尴尬,Bug 就追不上我...',
'正在试图理解去年的自己写了什么...',
'正在猿力觉醒中,请耐心等待...',

// --- 合作愉快系列 ---
'正在询问产品经理:这需求是真的吗?',
'正在给产品经理画饼,请稍等...',

// --- 温暖治愈系列 ---
'每一行代码,都在努力让世界变得更好一点点...',
'每一个伟大的想法,都值得这份耐心的等待...',
'别急,美好的事物总是需要一点时间去酝酿...',
'愿你的代码永无 Bug,愿你的梦想终将成真...',
'哪怕只有 0.1% 的进度,也是在向目标靠近...',
'加载的是字节,承载的是对技术的热爱...',
],
};

@@ -610,6 +610,8 @@ export abstract class BaseJsonOutputAdapter {
const errorText = parseAndFormatApiError(
event.value.error,
this.config.getContentGeneratorConfig()?.authType,
undefined,
this.config.getModel(),
);
this.appendText(state, errorText, null);
break;

@@ -221,6 +221,8 @@ export async function runNonInteractive(
const errorText = parseAndFormatApiError(
event.value.error,
config.getContentGeneratorConfig()?.authType,
undefined,
config.getModel(),
);
process.stderr.write(`${errorText}\n`);
}

@@ -28,7 +28,7 @@ const mockPrompt = {
{ name: 'trail', required: false, description: "The animal's trail." },
],
invoke: vi.fn().mockResolvedValue({
messages: [{ content: { type: 'text', text: 'Hello, world!' } }],
messages: [{ content: { text: 'Hello, world!' } }],
}),
};


@@ -123,10 +123,7 @@ export class McpPromptLoader implements ICommandLoader {
};
}

const firstMessage = result.messages?.[0];
const content = firstMessage?.content;

if (content?.type !== 'text') {
if (!result.messages?.[0]?.content?.['text']) {
return {
type: 'message',
messageType: 'error',
@@ -137,7 +134,7 @@ export class McpPromptLoader implements ICommandLoader {

return {
type: 'submit_prompt',
content: JSON.stringify(content.text),
content: JSON.stringify(result.messages[0].content.text),
};
} catch (error) {
return {

@@ -23,6 +23,7 @@ import {
} from '@qwen-code/qwen-code-core';
import type { LoadedSettings } from '../config/settings.js';
import type { InitializationResult } from '../core/initializer.js';
import { useQuotaAndFallback } from './hooks/useQuotaAndFallback.js';
import { UIStateContext, type UIState } from './contexts/UIStateContext.js';
import {
UIActionsContext,
@@ -55,6 +56,7 @@ vi.mock('./App.js', () => ({
App: TestContextConsumer,
}));

vi.mock('./hooks/useQuotaAndFallback.js');
vi.mock('./hooks/useHistoryManager.js');
vi.mock('./hooks/useThemeCommand.js');
vi.mock('./auth/useAuth.js');
@@ -120,6 +122,7 @@ describe('AppContainer State Management', () => {
let mockInitResult: InitializationResult;

// Create typed mocks for all hooks
const mockedUseQuotaAndFallback = useQuotaAndFallback as Mock;
const mockedUseHistory = useHistory as Mock;
const mockedUseThemeCommand = useThemeCommand as Mock;
const mockedUseAuthCommand = useAuthCommand as Mock;
@@ -161,6 +164,10 @@ describe('AppContainer State Management', () => {
capturedUIActions = null!;

// **Provide a default return value for EVERY mocked hook.**
mockedUseQuotaAndFallback.mockReturnValue({
proQuotaRequest: null,
handleProQuotaChoice: vi.fn(),
});
mockedUseHistory.mockReturnValue({
history: [],
addItem: vi.fn(),
@@ -560,6 +567,75 @@ describe('AppContainer State Management', () => {
});
});

describe('Quota and Fallback Integration', () => {
it('passes a null proQuotaRequest to UIStateContext by default', () => {
// The default mock from beforeEach already sets proQuotaRequest to null
render(
<AppContainer
config={mockConfig}
settings={mockSettings}
version="1.0.0"
initializationResult={mockInitResult}
/>,
);

// Assert that the context value is as expected
expect(capturedUIState.proQuotaRequest).toBeNull();
});

it('passes a valid proQuotaRequest to UIStateContext when provided by the hook', () => {
// Arrange: Create a mock request object that a UI dialog would receive
const mockRequest = {
failedModel: 'gemini-pro',
fallbackModel: 'gemini-flash',
resolve: vi.fn(),
};
mockedUseQuotaAndFallback.mockReturnValue({
proQuotaRequest: mockRequest,
handleProQuotaChoice: vi.fn(),
});

// Act: Render the container
render(
<AppContainer
config={mockConfig}
settings={mockSettings}
version="1.0.0"
initializationResult={mockInitResult}
/>,
);

// Assert: The mock request is correctly passed through the context
expect(capturedUIState.proQuotaRequest).toEqual(mockRequest);
});

it('passes the handleProQuotaChoice function to UIActionsContext', () => {
// Arrange: Create a mock handler function
const mockHandler = vi.fn();
mockedUseQuotaAndFallback.mockReturnValue({
proQuotaRequest: null,
handleProQuotaChoice: mockHandler,
});

// Act: Render the container
render(
<AppContainer
config={mockConfig}
settings={mockSettings}
version="1.0.0"
initializationResult={mockInitResult}
/>,
);

// Assert: The action in the context is the mock handler we provided
expect(capturedUIActions.handleProQuotaChoice).toBe(mockHandler);

// You can even verify that the plumbed function is callable
capturedUIActions.handleProQuotaChoice('auth');
expect(mockHandler).toHaveBeenCalledWith('auth');
});
});

describe('Terminal Title Update Feature', () => {
beforeEach(() => {
// Reset mock stdout for each test

@@ -32,6 +32,7 @@ import {
type Config,
type IdeInfo,
type IdeContext,
type UserTierId,
DEFAULT_GEMINI_FLASH_MODEL,
IdeClient,
ideContextStore,
@@ -47,6 +48,7 @@ import { useHistory } from './hooks/useHistoryManager.js';
import { useMemoryMonitor } from './hooks/useMemoryMonitor.js';
import { useThemeCommand } from './hooks/useThemeCommand.js';
import { useAuthCommand } from './auth/useAuth.js';
import { useQuotaAndFallback } from './hooks/useQuotaAndFallback.js';
import { useEditorSettings } from './hooks/useEditorSettings.js';
import { useSettingsCommand } from './hooks/useSettingsCommand.js';
import { useModelCommand } from './hooks/useModelCommand.js';
@@ -190,6 +192,8 @@ export const AppContainer = (props: AppContainerProps) => {

const [currentModel, setCurrentModel] = useState(getEffectiveModel());

const [userTier] = useState<UserTierId | undefined>(undefined);

const [isConfigInitialized, setConfigInitialized] = useState(false);

const [userMessages, setUserMessages] = useState<string[]>([]);
@@ -363,6 +367,14 @@ export const AppContainer = (props: AppContainerProps) => {
cancelAuthentication,
} = useAuthCommand(settings, config, historyManager.addItem);

const { proQuotaRequest, handleProQuotaChoice } = useQuotaAndFallback({
config,
historyManager,
userTier,
setAuthState,
setModelSwitchedFromQuotaError,
});

useInitializationAuthError(initializationResult.authError, onAuthError);

// Sync user tier from config when authentication changes
@@ -740,7 +752,8 @@ export const AppContainer = (props: AppContainerProps) => {
!initError &&
!isProcessing &&
(streamingState === StreamingState.Idle ||
streamingState === StreamingState.Responding);
streamingState === StreamingState.Responding) &&
!proQuotaRequest;

const [controlsHeight, setControlsHeight] = useState(0);

@@ -1193,6 +1206,7 @@ export const AppContainer = (props: AppContainerProps) => {
isAuthenticating ||
isEditorDialogOpen ||
showIdeRestartPrompt ||
!!proQuotaRequest ||
isSubagentCreateDialogOpen ||
isAgentsManagerDialogOpen ||
isApprovalModeDialogOpen ||
@@ -1263,6 +1277,8 @@ export const AppContainer = (props: AppContainerProps) => {
showWorkspaceMigrationDialog,
workspaceExtensions,
currentModel,
userTier,
proQuotaRequest,
contextFileNames,
errorCount,
availableTerminalHeight,
@@ -1351,6 +1367,8 @@ export const AppContainer = (props: AppContainerProps) => {
showAutoAcceptIndicator,
showWorkspaceMigrationDialog,
workspaceExtensions,
userTier,
proQuotaRequest,
contextFileNames,
errorCount,
availableTerminalHeight,
@@ -1412,6 +1430,7 @@ export const AppContainer = (props: AppContainerProps) => {
handleClearScreen,
onWorkspaceMigrationDialogOpen,
onWorkspaceMigrationDialogClose,
handleProQuotaChoice,
// Vision switch dialog
handleVisionSwitchSelect,
// Welcome back dialog
@@ -1449,6 +1468,7 @@ export const AppContainer = (props: AppContainerProps) => {
handleClearScreen,
onWorkspaceMigrationDialogOpen,
onWorkspaceMigrationDialogClose,
handleProQuotaChoice,
handleVisionSwitchSelect,
handleWelcomeBackSelection,
handleWelcomeBackClose,

@@ -168,7 +168,7 @@ describe('AuthDialog', () => {

it('should not show the GEMINI_API_KEY message if QWEN_DEFAULT_AUTH_TYPE is set to something else', () => {
process.env['GEMINI_API_KEY'] = 'foobar';
process.env['QWEN_DEFAULT_AUTH_TYPE'] = AuthType.USE_OPENAI;
process.env['QWEN_DEFAULT_AUTH_TYPE'] = AuthType.LOGIN_WITH_GOOGLE;

const settings: LoadedSettings = new LoadedSettings(
{
@@ -212,7 +212,7 @@ describe('AuthDialog', () => {

it('should show the GEMINI_API_KEY message if QWEN_DEFAULT_AUTH_TYPE is set to use api key', () => {
process.env['GEMINI_API_KEY'] = 'foobar';
process.env['QWEN_DEFAULT_AUTH_TYPE'] = AuthType.USE_OPENAI;
process.env['QWEN_DEFAULT_AUTH_TYPE'] = AuthType.USE_GEMINI;

const settings: LoadedSettings = new LoadedSettings(
{
@@ -504,12 +504,12 @@ describe('AuthDialog', () => {
},
{
settings: {
security: { auth: { selectedType: AuthType.USE_OPENAI } },
security: { auth: { selectedType: AuthType.LOGIN_WITH_GOOGLE } },
ui: { customThemes: {} },
mcpServers: {},
},
originalSettings: {
security: { auth: { selectedType: AuthType.USE_OPENAI } },
security: { auth: { selectedType: AuthType.LOGIN_WITH_GOOGLE } },
ui: { customThemes: {} },
mcpServers: {},
},

@@ -225,26 +225,16 @@ export const useAuthCommand = (
const defaultAuthType = process.env['QWEN_DEFAULT_AUTH_TYPE'];
if (
defaultAuthType &&
![
AuthType.QWEN_OAUTH,
AuthType.USE_OPENAI,
AuthType.USE_ANTHROPIC,
AuthType.USE_GEMINI,
AuthType.USE_VERTEX_AI,
].includes(defaultAuthType as AuthType)
![AuthType.QWEN_OAUTH, AuthType.USE_OPENAI].includes(
defaultAuthType as AuthType,
)
) {
onAuthError(
t(
'Invalid QWEN_DEFAULT_AUTH_TYPE value: "{{value}}". Valid values are: {{validValues}}',
{
value: defaultAuthType,
validValues: [
AuthType.QWEN_OAUTH,
AuthType.USE_OPENAI,
AuthType.USE_ANTHROPIC,
AuthType.USE_GEMINI,
AuthType.USE_VERTEX_AI,
].join(', '),
validValues: [AuthType.QWEN_OAUTH, AuthType.USE_OPENAI].join(', '),
},
),
);

@@ -15,6 +15,7 @@ vi.mock('@qwen-code/qwen-code-core', async (importOriginal) => {
const original = await importOriginal<typeof core>();
return {
...original,
getOauthClient: vi.fn(original.getOauthClient),
getIdeInstaller: vi.fn(original.getIdeInstaller),
IdeClient: {
getInstance: vi.fn(),

@@ -13,6 +13,16 @@ import { createMockCommandContext } from '../../test-utils/mockCommandContext.js
vi.mock('../../i18n/index.js', () => ({
setLanguageAsync: vi.fn().mockResolvedValue(undefined),
getCurrentLanguage: vi.fn().mockReturnValue('en'),
detectSystemLanguage: vi.fn().mockReturnValue('en'),
getLanguageNameFromLocale: vi.fn((locale: string) => {
const map: Record<string, string> = {
zh: 'Chinese',
en: 'English',
ru: 'Russian',
de: 'German',
};
return map[locale] || 'English';
}),
t: vi.fn((key: string) => key),
}));

@@ -61,7 +71,10 @@ vi.mock('@qwen-code/qwen-code-core', async (importOriginal) => {

// Import modules after mocking
import * as i18n from '../../i18n/index.js';
import { languageCommand } from './languageCommand.js';
import {
languageCommand,
initializeLlmOutputLanguage,
} from './languageCommand.js';

describe('languageCommand', () => {
let mockContext: CommandContext;
@@ -186,6 +199,39 @@ describe('languageCommand', () => {
content: expect.stringContaining('Chinese'),
});
});

it('should parse Unicode LLM output language from marker', async () => {
vi.mocked(fs.existsSync).mockReturnValue(true);
vi.mocked(fs.readFileSync).mockReturnValue(
[
'# ⚠️ CRITICAL: 中文 Output Language Rule - HIGHEST PRIORITY ⚠️',
'<!-- qwen-code:llm-output-language: 中文 -->',
'',
'Some other content...',
].join('\n'),
);

vi.mocked(i18n.t).mockImplementation(
(key: string, params?: Record<string, string>) => {
if (params && key.includes('{{lang}}')) {
return key.replace('{{lang}}', params['lang'] || '');
}
return key;
},
);

if (!languageCommand.action) {
throw new Error('The language command must have an action.');
}

const result = await languageCommand.action(mockContext, '');

expect(result).toEqual({
type: 'message',
messageType: 'info',
content: expect.stringContaining('中文'),
});
});
});

describe('main command action - config not available', () => {
@@ -400,6 +446,34 @@ describe('languageCommand', () => {
});
});

it('should normalize locale code "ru" to "Russian"', async () => {
if (!languageCommand.action) {
throw new Error('The language command must have an action.');
}

await languageCommand.action(mockContext, 'output ru');

expect(fs.writeFileSync).toHaveBeenCalledWith(
expect.stringContaining('output-language.md'),
expect.stringContaining('Russian'),
'utf-8',
);
});

it('should normalize locale code "de" to "German"', async () => {
if (!languageCommand.action) {
throw new Error('The language command must have an action.');
}

await languageCommand.action(mockContext, 'output de');

expect(fs.writeFileSync).toHaveBeenCalledWith(
expect.stringContaining('output-language.md'),
expect.stringContaining('German'),
'utf-8',
);
});

it('should handle file write errors gracefully', async () => {
vi.mocked(fs.writeFileSync).mockImplementation(() => {
throw new Error('Permission denied');
@@ -481,6 +555,8 @@ describe('languageCommand', () => {
const nestedNames = uiSubcommand?.subCommands?.map((c) => c.name);
expect(nestedNames).toContain('zh-CN');
expect(nestedNames).toContain('en-US');
expect(nestedNames).toContain('ru-RU');
expect(nestedNames).toContain('de-DE');
});

it('should have action that sets language', async () => {
@@ -542,16 +618,9 @@ describe('languageCommand', () => {
const enUSSubcommand = uiSubcommand?.subCommands?.find(
(c) => c.name === 'en-US',
);

it('zh-CN should have aliases', () => {
expect(zhCNSubcommand?.altNames).toContain('zh');
expect(zhCNSubcommand?.altNames).toContain('chinese');
});

it('en-US should have aliases', () => {
expect(enUSSubcommand?.altNames).toContain('en');
expect(enUSSubcommand?.altNames).toContain('english');
});
const deDESubcommand = uiSubcommand?.subCommands?.find(
(c) => c.name === 'de-DE',
);

it('zh-CN action should set Chinese', async () => {
if (!zhCNSubcommand?.action) {
@@ -583,6 +652,21 @@ describe('languageCommand', () => {
});
});

it('de-DE action should set German', async () => {
if (!deDESubcommand?.action) {
throw new Error('de-DE subcommand must have an action.');
}

const result = await deDESubcommand.action(mockContext, '');

expect(i18n.setLanguageAsync).toHaveBeenCalledWith('de');
expect(result).toEqual({
type: 'message',
messageType: 'info',
content: expect.stringContaining('UI language changed'),
});
});

it('should reject extra arguments', async () => {
if (!zhCNSubcommand?.action) {
throw new Error('zh-CN subcommand must have an action.');
@@ -597,4 +681,74 @@ describe('languageCommand', () => {
});
});
});

describe('initializeLlmOutputLanguage', () => {
beforeEach(() => {
vi.clearAllMocks();
vi.mocked(fs.existsSync).mockReturnValue(false);
vi.mocked(fs.mkdirSync).mockImplementation(() => undefined);
vi.mocked(fs.writeFileSync).mockImplementation(() => undefined);
});

it('should create file when it does not exist', () => {
vi.mocked(fs.existsSync).mockReturnValue(false);
vi.mocked(i18n.detectSystemLanguage).mockReturnValue('en');

initializeLlmOutputLanguage();

expect(fs.mkdirSync).toHaveBeenCalled();
expect(fs.writeFileSync).toHaveBeenCalledWith(
expect.stringContaining('output-language.md'),
expect.stringContaining('English'),
'utf-8',
);
});

it('should NOT overwrite existing file', () => {
vi.mocked(fs.existsSync).mockReturnValue(true);

initializeLlmOutputLanguage();

expect(fs.writeFileSync).not.toHaveBeenCalled();
});

it('should detect Chinese locale and create Chinese rule file', () => {
vi.mocked(fs.existsSync).mockReturnValue(false);
vi.mocked(i18n.detectSystemLanguage).mockReturnValue('zh');

initializeLlmOutputLanguage();

expect(fs.writeFileSync).toHaveBeenCalledWith(
expect.stringContaining('output-language.md'),
expect.stringContaining('Chinese'),
'utf-8',
);
});

it('should detect Russian locale and create Russian rule file', () => {
vi.mocked(fs.existsSync).mockReturnValue(false);
vi.mocked(i18n.detectSystemLanguage).mockReturnValue('ru');

initializeLlmOutputLanguage();

expect(fs.writeFileSync).toHaveBeenCalledWith(
expect.stringContaining('output-language.md'),
expect.stringContaining('Russian'),
'utf-8',
);
});

it('should detect German locale and create German rule file', () => {
vi.mocked(fs.existsSync).mockReturnValue(false);
vi.mocked(i18n.detectSystemLanguage).mockReturnValue('de');

initializeLlmOutputLanguage();

expect(fs.writeFileSync).toHaveBeenCalledWith(
expect.stringContaining('output-language.md'),
expect.stringContaining('German'),
'utf-8',
);
});
});
});

@@ -1,6 +1,6 @@
/**
* @license
* Copyright 2025 Google LLC
* Copyright 2025 Qwen team
* SPDX-License-Identifier: Apache-2.0
*/

@@ -15,51 +15,72 @@ import { SettingScope } from '../../config/settings.js';
import {
setLanguageAsync,
getCurrentLanguage,
detectSystemLanguage,
getLanguageNameFromLocale,
type SupportedLanguage,
t,
} from '../../i18n/index.js';
import {
SUPPORTED_LANGUAGES,
type LanguageDefinition,
} from '../../i18n/languages.js';
import * as fs from 'node:fs';
import * as path from 'node:path';
import { Storage } from '@qwen-code/qwen-code-core';

const LLM_OUTPUT_LANGUAGE_RULE_FILENAME = 'output-language.md';
const LLM_OUTPUT_LANGUAGE_MARKER_PREFIX = 'qwen-code:llm-output-language:';

function parseUiLanguageArg(input: string): SupportedLanguage | null {
const lowered = input.trim().toLowerCase();
if (!lowered) return null;
for (const lang of SUPPORTED_LANGUAGES) {
if (
lowered === lang.code ||
lowered === lang.id.toLowerCase() ||
lowered === lang.fullName.toLowerCase()
) {
return lang.code;
}
}
return null;
}
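// Illustrative usage (not part of this change; assumes a SUPPORTED_LANGUAGES
// entry shaped like { code: 'de', id: 'de-DE', fullName: 'German' }):
//   parseUiLanguageArg('de')      // 'de'  (matches the code)
//   parseUiLanguageArg('DE-de')   // 'de'  (matches the id, case-insensitively)
//   parseUiLanguageArg('german')  // 'de'  (matches the full name)
//   parseUiLanguageArg('klingon') // null  (unsupported input)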

function formatUiLanguageDisplay(lang: SupportedLanguage): string {
const option = SUPPORTED_LANGUAGES.find((o) => o.code === lang);
return option ? `${option.fullName}(${option.id})` : lang;
}

function sanitizeLanguageForMarker(language: string): string {
// HTML comments cannot contain "--" or end markers like "-->" or "--!>" safely.
// Also avoid newlines to keep the marker single-line and robust to parsing.
return language
.replace(/[\r\n]/g, ' ')
.replace(/--!?>/g, '')
.replace(/--/g, '');
}
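// A sketch of why the stripping matters (illustrative only): without it, a
// value containing "-->" would terminate the HTML comment early and spill the
// rest of the marker into the rendered rule file.
//   sanitizeLanguageForMarker('中文')      // '中文' (unchanged)
//   sanitizeLanguageForMarker('fr -->x')  // 'fr x' (comment terminator removed)
//   sanitizeLanguageForMarker('a--b\nc')  // 'ab c' (double dash and newline removed)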

/**
* Generates the LLM output language rule template based on the language name.
*/
function generateLlmOutputLanguageRule(language: string): string {
return `# ⚠️ CRITICAL: ${language} Output Language Rule - HIGHEST PRIORITY ⚠️
const markerLanguage = sanitizeLanguageForMarker(language);
return `# Output language preference: ${language}
<!-- ${LLM_OUTPUT_LANGUAGE_MARKER_PREFIX} ${markerLanguage} -->

## 🚨 MANDATORY RULE - NO EXCEPTIONS 🚨
## Goal
Prefer responding in **${language}** for normal assistant messages and explanations.

**YOU MUST RESPOND IN ${language.toUpperCase()} FOR EVERY SINGLE OUTPUT, REGARDLESS OF THE USER'S INPUT LANGUAGE.**
## Keep technical artifacts unchanged
Do **not** translate or rewrite:
- Code blocks, CLI commands, file paths, stack traces, logs, JSON keys, identifiers
- Exact quoted text from the user (keep quotes verbatim)

This is a **NON-NEGOTIABLE** requirement. Even if the user writes in English, says "hi", asks a simple question, or explicitly requests another language, **YOU MUST ALWAYS RESPOND IN ${language.toUpperCase()}.**
## When a conflict exists
If higher-priority instructions (system/developer) require a different behavior, follow them.

## What Must Be in ${language}

**EVERYTHING** you output: conversation replies, tool call descriptions, success/error messages, generated file content (comments, documentation), and all explanatory text.

**Tool outputs**: All descriptive text from \`read_file\`, \`write_file\`, \`codebase_search\`, \`run_terminal_cmd\`, \`todo_write\`, \`web_search\`, etc. MUST be in ${language}.

## Examples

### ✅ CORRECT:
- User says "hi" → Respond in ${language} (e.g., "Bonjour" if ${language} is French)
- Tool result → "已成功读取文件 config.json" (if ${language} is Chinese)
- Error → "无法找到指定的文件" (if ${language} is Chinese)

### ❌ WRONG:
- User says "hi" → "Hello" in English
- Tool result → "Successfully read file" in English
- Error → "File not found" in English

## Notes

- Code elements (variable/function names, syntax) can remain in English
- Comments, documentation, and all other text MUST be in ${language}

**THIS RULE IS ACTIVE NOW. ALL OUTPUTS MUST BE IN ${language.toUpperCase()}. NO EXCEPTIONS.**
## Tool / system outputs
Raw tool/system outputs may contain fixed-format English. Preserve them verbatim, and if needed, add a short **${language}** explanation below.
`;
}
|
||||
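For a concrete picture (this example is not part of the diff): with language = 'French', the new template above generates a rule file whose first two lines are

# Output language preference: French
<!-- qwen-code:llm-output-language: French -->

which is exactly the form the extraction helper in the next hunk looks for first.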

@@ -73,6 +94,80 @@ function getLlmOutputLanguageRulePath(): string {
  );
}

/**
 * Normalizes a language input to its full English name.
 * If the input is a known locale code (e.g., "ru", "zh"), converts it to the full name.
 * Otherwise, returns the input as-is (e.g., "Japanese" stays "Japanese").
 */
function normalizeLanguageName(language: string): string {
  const lowered = language.toLowerCase();
  // Check if it's a known locale code and convert to full name
  const fullName = getLanguageNameFromLocale(lowered);
  // If getLanguageNameFromLocale returned a different value, use it
  // Otherwise, use the original input (preserves case for unknown languages)
  if (fullName !== 'English' || lowered === 'en') {
    return fullName;
  }
  return language;
}

function extractLlmOutputLanguageFromRuleFileContent(
  content: string,
): string | null {
  // Preferred: machine-readable marker that supports Unicode and spaces.
  // Example: <!-- qwen-code:llm-output-language: 中文 -->
  const markerMatch = content.match(
    new RegExp(
      String.raw`<!--\s*${LLM_OUTPUT_LANGUAGE_MARKER_PREFIX}\s*(.*?)\s*-->`,
      'i',
    ),
  );
  if (markerMatch?.[1]) {
    const lang = markerMatch[1].trim();
    if (lang) return lang;
  }

  // Backward compatibility: parse the heading line.
  // Example: "# CRITICAL: Chinese Output Language Rule - HIGHEST PRIORITY"
  // Example: "# ⚠️ CRITICAL: 日本語 Output Language Rule - HIGHEST PRIORITY ⚠️"
  const headingMatch = content.match(
    /^#.*?CRITICAL:\s*(.*?)\s+Output Language Rule\b/im,
  );
  if (headingMatch?.[1]) {
    const lang = headingMatch[1].trim();
    if (lang) return lang;
  }

  return null;
}
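A minimal sketch of the marker round trip these helpers implement (TypeScript, not part of the diff; the prefix and regex mirror the ones above):

const MARKER_PREFIX = 'qwen-code:llm-output-language:';
const ruleFile = `# Output language preference: 中文\n<!-- ${MARKER_PREFIX} 中文 -->`;
const match = ruleFile.match(
  new RegExp(String.raw`<!--\s*${MARKER_PREFIX}\s*(.*?)\s*-->`, 'i'),
);
console.log(match?.[1]); // "中文" (Unicode and spaces survive the round trip)

String.raw keeps the \s escapes intact when the pattern is built at runtime, which is why the diff uses it as well.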

/**
 * Initializes the LLM output language rule file on first startup.
 * If the file already exists, it is not overwritten (respects user preference).
 */
export function initializeLlmOutputLanguage(): void {
  const filePath = getLlmOutputLanguageRulePath();

  // Skip if file already exists (user preference)
  if (fs.existsSync(filePath)) {
    return;
  }

  // Detect system language and map to language name
  const detectedLocale = detectSystemLanguage();
  const languageName = getLanguageNameFromLocale(detectedLocale);

  // Generate the rule file
  const content = generateLlmOutputLanguageRule(languageName);

  // Ensure directory exists
  const dir = path.dirname(filePath);
  fs.mkdirSync(dir, { recursive: true });

  // Write file
  fs.writeFileSync(filePath, content, 'utf-8');
}

/**
 * Gets the current LLM output language from the rule file if it exists.
 */
@@ -81,12 +176,7 @@ function getCurrentLlmOutputLanguage(): string | null {
  if (fs.existsSync(filePath)) {
    try {
      const content = fs.readFileSync(filePath, 'utf-8');
      // Extract language name from the first line
      // Template format: "# CRITICAL: Chinese Output Language Rule - HIGHEST PRIORITY"
      const match = content.match(/^#.*?(\w+)\s+Output Language Rule/i);
      if (match) {
        return match[1];
      }
      return extractLlmOutputLanguageFromRuleFileContent(content);
    } catch {
      // Ignore errors
    }
@@ -127,18 +217,11 @@ async function setUiLanguage(
  // Reload commands to update their descriptions with the new language
  context.ui.reloadCommands();

  // Map language codes to friendly display names
  const langDisplayNames: Partial<Record<SupportedLanguage, string>> = {
    zh: '中文(zh-CN)',
    en: 'English(en-US)',
    ru: 'Русский (ru-RU)',
  };

  return {
    type: 'message',
    messageType: 'info',
    content: t('UI language changed to {{lang}}', {
      lang: langDisplayNames[lang] || lang,
      lang: formatUiLanguageDisplay(lang),
    }),
  };
}
@@ -151,7 +234,9 @@ function generateLlmOutputLanguageRuleFile(
): Promise<MessageActionReturn> {
  try {
    const filePath = getLlmOutputLanguageRulePath();
    const content = generateLlmOutputLanguageRule(language);
    // Normalize locale codes (e.g., "ru" -> "Russian") to full language names
    const normalizedLanguage = normalizeLanguageName(language);
    const content = generateLlmOutputLanguageRule(normalizedLanguage);

    // Ensure directory exists
    const dir = path.dirname(filePath);
@@ -196,7 +281,6 @@ export const languageCommand: SlashCommand = {
    args: string,
  ): Promise<SlashCommandActionReturn> => {
    const { services } = context;

    if (!services.config) {
      return {
        type: 'message',
@@ -207,18 +291,37 @@ export const languageCommand: SlashCommand = {

    const trimmedArgs = args.trim();

    // Handle subcommands if called directly via action (for tests/backward compatibility)
    const parts = trimmedArgs.split(/\s+/);
    const firstArg = parts[0].toLowerCase();
    const subArgs = parts.slice(1).join(' ');

    if (firstArg === 'ui' || firstArg === 'output') {
      const subCommand = languageCommand.subCommands?.find(
        (s) => s.name === firstArg,
      );
      if (subCommand?.action) {
        return subCommand.action(
          context,
          subArgs,
        ) as Promise<SlashCommandActionReturn>;
      }
    }

    // If no arguments, show current language settings and usage
    if (!trimmedArgs) {
      const currentUiLang = getCurrentLanguage();
      const currentLlmLang = getCurrentLlmOutputLanguage();
      const message = [
        t('Current UI language: {{lang}}', { lang: currentUiLang }),
        t('Current UI language: {{lang}}', {
          lang: formatUiLanguageDisplay(currentUiLang as SupportedLanguage),
        }),
        currentLlmLang
          ? t('Current LLM output language: {{lang}}', { lang: currentLlmLang })
          : t('LLM output language not set'),
        '',
        t('Available subcommands:'),
        ` /language ui [zh-CN|en-US|ru-RU] - ${t('Set UI language')}`,
        ` /language ui [${SUPPORTED_LANGUAGES.map((o) => o.id).join('|')}] - ${t('Set UI language')}`,
        ` /language output <language> - ${t('Set LLM output language')}`,
      ].join('\n');

@@ -229,115 +332,21 @@ export const languageCommand: SlashCommand = {
      };
    }

    // Parse subcommand
    const parts = trimmedArgs.split(/\s+/);
    const subcommand = parts[0].toLowerCase();

    if (subcommand === 'ui') {
      // Handle /language ui [zh-CN|en-US|ru-RU]
      if (parts.length === 1) {
        // Show UI language subcommand help
        return {
          type: 'message',
          messageType: 'info',
          content: [
            t('Set UI language'),
            '',
            t('Usage: /language ui [zh-CN|en-US|ru-RU]'),
            '',
            t('Available options:'),
            t(' - zh-CN: Simplified Chinese'),
            t(' - en-US: English'),
            t(' - ru-RU: Russian'),
            '',
            t(
              'To request additional UI language packs, please open an issue on GitHub.',
            ),
          ].join('\n'),
        };
      }

      const langArg = parts[1].toLowerCase();
      let targetLang: SupportedLanguage | null = null;

      if (langArg === 'en' || langArg === 'english' || langArg === 'en-us') {
        targetLang = 'en';
      } else if (
        langArg === 'zh' ||
        langArg === 'chinese' ||
        langArg === '中文' ||
        langArg === 'zh-cn'
      ) {
        targetLang = 'zh';
      } else if (
        langArg === 'ru' ||
        langArg === 'ru-RU' ||
        langArg === 'russian' ||
        langArg === 'русский'
      ) {
        targetLang = 'ru';
      } else {
        return {
          type: 'message',
          messageType: 'error',
          content: t('Invalid language. Available: en-US, zh-CN, ru-RU'),
        };
      }

      return setUiLanguage(context, targetLang);
    } else if (subcommand === 'output') {
      // Handle /language output <language>
      if (parts.length === 1) {
        return {
          type: 'message',
          messageType: 'info',
          content: [
            t('Set LLM output language'),
            '',
            t('Usage: /language output <language>'),
            ` ${t('Example: /language output 中文')}`,
          ].join('\n'),
        };
      }

      // Join all parts after "output" as the language name
      const language = parts.slice(1).join(' ');
      return generateLlmOutputLanguageRuleFile(language);
    } else {
      // Backward compatibility: treat as UI language
      const langArg = trimmedArgs.toLowerCase();
      let targetLang: SupportedLanguage | null = null;

      if (langArg === 'en' || langArg === 'english' || langArg === 'en-us') {
        targetLang = 'en';
      } else if (
        langArg === 'zh' ||
        langArg === 'chinese' ||
        langArg === '中文' ||
        langArg === 'zh-cn'
      ) {
        targetLang = 'zh';
      } else if (
        langArg === 'ru' ||
        langArg === 'ru-RU' ||
        langArg === 'russian' ||
        langArg === 'русский'
      ) {
        targetLang = 'ru';
      } else {
        return {
          type: 'message',
          messageType: 'error',
          content: [
            t('Invalid command. Available subcommands:'),
            ' - /language ui [zh-CN|en-US|ru-RU] - ' + t('Set UI language'),
            ' - /language output <language> - ' + t('Set LLM output language'),
          ].join('\n'),
        };
      }

    // Handle backward compatibility for /language [lang]
    const targetLang = parseUiLanguageArg(trimmedArgs);
    if (targetLang) {
      return setUiLanguage(context, targetLang);
    }

    return {
      type: 'message',
      messageType: 'error',
      content: [
        t('Invalid command. Available subcommands:'),
        ` - /language ui [${SUPPORTED_LANGUAGES.map((o) => o.id).join('|')}] - ${t('Set UI language')}`,
        ' - /language output <language> - ' + t('Set LLM output language'),
      ].join('\n'),
    };
  },
  subCommands: [
    {
@@ -358,11 +367,14 @@ export const languageCommand: SlashCommand = {
          content: [
            t('Set UI language'),
            '',
            t('Usage: /language ui [zh-CN|en-US]'),
            t('Usage: /language ui [{{options}}]', {
              options: SUPPORTED_LANGUAGES.map((o) => o.id).join('|'),
            }),
            '',
            t('Available options:'),
            t(' - zh-CN: Simplified Chinese'),
            t(' - en-US: English'),
            ...SUPPORTED_LANGUAGES.map(
              (o) => ` - ${o.id}: ${t(o.fullName)}`,
            ),
            '',
            t(
              'To request additional UI language packs, please open an issue on GitHub.',
@@ -371,99 +383,20 @@ export const languageCommand: SlashCommand = {
          };
        }

        const langArg = trimmedArgs.toLowerCase();
        let targetLang: SupportedLanguage | null = null;

        if (langArg === 'en' || langArg === 'english' || langArg === 'en-us') {
          targetLang = 'en';
        } else if (
          langArg === 'zh' ||
          langArg === 'chinese' ||
          langArg === '中文' ||
          langArg === 'zh-cn'
        ) {
          targetLang = 'zh';
        } else {
        const targetLang = parseUiLanguageArg(trimmedArgs);
        if (!targetLang) {
          return {
            type: 'message',
            messageType: 'error',
            content: t('Invalid language. Available: en-US, zh-CN'),
            content: t('Invalid language. Available: {{options}}', {
              options: SUPPORTED_LANGUAGES.map((o) => o.id).join(','),
            }),
          };
        }

        return setUiLanguage(context, targetLang);
      },
      subCommands: [
        {
          name: 'zh-CN',
          altNames: ['zh', 'chinese', '中文'],
          get description() {
            return t('Set UI language to Simplified Chinese (zh-CN)');
          },
          kind: CommandKind.BUILT_IN,
          action: async (
            context: CommandContext,
            args: string,
          ): Promise<MessageActionReturn> => {
            if (args.trim().length > 0) {
              return {
                type: 'message',
                messageType: 'error',
                content: t(
                  'Language subcommands do not accept additional arguments.',
                ),
              };
            }
            return setUiLanguage(context, 'zh');
          },
        },
        {
          name: 'en-US',
          altNames: ['en', 'english'],
          get description() {
            return t('Set UI language to English (en-US)');
          },
          kind: CommandKind.BUILT_IN,
          action: async (
            context: CommandContext,
            args: string,
          ): Promise<MessageActionReturn> => {
            if (args.trim().length > 0) {
              return {
                type: 'message',
                messageType: 'error',
                content: t(
                  'Language subcommands do not accept additional arguments.',
                ),
              };
            }
            return setUiLanguage(context, 'en');
          },
        },
        {
          name: 'ru-RU',
          altNames: ['ru', 'russian', 'русский'],
          get description() {
            return t('Set UI language to Russian (ru-RU)');
          },
          kind: CommandKind.BUILT_IN,
          action: async (
            context: CommandContext,
            args: string,
          ): Promise<MessageActionReturn> => {
            if (args.trim().length > 0) {
              return {
                type: 'message',
                messageType: 'error',
                content: t(
                  'Language subcommands do not accept additional arguments.',
                ),
              };
            }
            return setUiLanguage(context, 'ru');
          },
        },
      ],
      subCommands: SUPPORTED_LANGUAGES.map(createUiLanguageSubCommand),
    },
    {
      name: 'output',
@@ -496,3 +429,28 @@ export const languageCommand: SlashCommand = {
    },
  ],
};

/**
 * Helper to create a UI language subcommand.
 */
function createUiLanguageSubCommand(option: LanguageDefinition): SlashCommand {
  return {
    name: option.id,
    get description() {
      return t('Set UI language to {{name}}', { name: option.fullName });
    },
    kind: CommandKind.BUILT_IN,
    action: async (context, args) => {
      if (args.trim().length > 0) {
        return {
          type: 'message',
          messageType: 'error',
          content: t(
            'Language subcommands do not accept additional arguments.',
          ),
        };
      }
      return setUiLanguage(context, option.code);
    },
  };
}

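For reference, a hedged sketch of what the new parseUiLanguageArg accepts, assuming SUPPORTED_LANGUAGES contains an entry like { code: 'zh', id: 'zh-CN', fullName: 'Simplified Chinese' } (the exact entries live in ../../i18n/languages.js and are not shown in this diff):

parseUiLanguageArg('zh');                 // 'zh'  (code match)
parseUiLanguageArg('ZH-CN');              // 'zh'  (id match, case-insensitive)
parseUiLanguageArg('simplified chinese'); // 'zh'  (full-name match)
parseUiLanguageArg('klingon');            // null  (falls through to the error message)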
@@ -17,6 +17,7 @@ import { AuthDialog } from '../auth/AuthDialog.js';
import { OpenAIKeyPrompt } from './OpenAIKeyPrompt.js';
import { EditorSettingsDialog } from './EditorSettingsDialog.js';
import { WorkspaceMigrationDialog } from './WorkspaceMigrationDialog.js';
import { ProQuotaDialog } from './ProQuotaDialog.js';
import { PermissionsModifyTrustDialog } from './PermissionsModifyTrustDialog.js';
import { ModelDialog } from './ModelDialog.js';
import { ApprovalModeDialog } from './ApprovalModeDialog.js';
@@ -86,6 +87,15 @@ export const DialogManager = ({
      />
    );
  }
  if (uiState.proQuotaRequest) {
    return (
      <ProQuotaDialog
        failedModel={uiState.proQuotaRequest.failedModel}
        fallbackModel={uiState.proQuotaRequest.fallbackModel}
        onChoice={uiActions.handleProQuotaChoice}
      />
    );
  }
  if (uiState.shouldShowIdePrompt) {
    return (
      <IdeIntegrationNudge

91
packages/cli/src/ui/components/ProQuotaDialog.test.tsx
Normal file
@@ -0,0 +1,91 @@
/**
 * @license
 * Copyright 2025 Google LLC
 * SPDX-License-Identifier: Apache-2.0
 */

import { render } from 'ink-testing-library';
import { describe, it, expect, vi, beforeEach, type Mock } from 'vitest';
import { ProQuotaDialog } from './ProQuotaDialog.js';
import { RadioButtonSelect } from './shared/RadioButtonSelect.js';

// Mock the child component to make it easier to test the parent
vi.mock('./shared/RadioButtonSelect.js', () => ({
  RadioButtonSelect: vi.fn(),
}));

describe('ProQuotaDialog', () => {
  beforeEach(() => {
    vi.clearAllMocks();
  });

  it('should render with correct title and options', () => {
    const { lastFrame } = render(
      <ProQuotaDialog
        failedModel="gemini-2.5-pro"
        fallbackModel="gemini-2.5-flash"
        onChoice={() => {}}
      />,
    );

    const output = lastFrame();
    expect(output).toContain('Pro quota limit reached for gemini-2.5-pro.');

    // Check that RadioButtonSelect was called with the correct items
    expect(RadioButtonSelect).toHaveBeenCalledWith(
      expect.objectContaining({
        items: [
          {
            label: 'Change auth (executes the /auth command)',
            value: 'auth',
            key: 'auth',
          },
          {
            label: `Continue with gemini-2.5-flash`,
            value: 'continue',
            key: 'continue',
          },
        ],
      }),
      undefined,
    );
  });

  it('should call onChoice with "auth" when "Change auth" is selected', () => {
    const mockOnChoice = vi.fn();
    render(
      <ProQuotaDialog
        failedModel="gemini-2.5-pro"
        fallbackModel="gemini-2.5-flash"
        onChoice={mockOnChoice}
      />,
    );

    // Get the onSelect function passed to RadioButtonSelect
    const onSelect = (RadioButtonSelect as Mock).mock.calls[0][0].onSelect;

    // Simulate the selection
    onSelect('auth');

    expect(mockOnChoice).toHaveBeenCalledWith('auth');
  });

  it('should call onChoice with "continue" when "Continue with flash" is selected', () => {
    const mockOnChoice = vi.fn();
    render(
      <ProQuotaDialog
        failedModel="gemini-2.5-pro"
        fallbackModel="gemini-2.5-flash"
        onChoice={mockOnChoice}
      />,
    );

    // Get the onSelect function passed to RadioButtonSelect
    const onSelect = (RadioButtonSelect as Mock).mock.calls[0][0].onSelect;

    // Simulate the selection
    onSelect('continue');

    expect(mockOnChoice).toHaveBeenCalledWith('continue');
  });
});
55
packages/cli/src/ui/components/ProQuotaDialog.tsx
Normal file
@@ -0,0 +1,55 @@
/**
 * @license
 * Copyright 2025 Google LLC
 * SPDX-License-Identifier: Apache-2.0
 */

import type React from 'react';
import { Box, Text } from 'ink';
import { RadioButtonSelect } from './shared/RadioButtonSelect.js';
import { theme } from '../semantic-colors.js';
import { t } from '../../i18n/index.js';

interface ProQuotaDialogProps {
  failedModel: string;
  fallbackModel: string;
  onChoice: (choice: 'auth' | 'continue') => void;
}

export function ProQuotaDialog({
  failedModel,
  fallbackModel,
  onChoice,
}: ProQuotaDialogProps): React.JSX.Element {
  const items = [
    {
      label: t('Change auth (executes the /auth command)'),
      value: 'auth' as const,
      key: 'auth',
    },
    {
      label: t('Continue with {{model}}', { model: fallbackModel }),
      value: 'continue' as const,
      key: 'continue',
    },
  ];

  const handleSelect = (choice: 'auth' | 'continue') => {
    onChoice(choice);
  };

  return (
    <Box borderStyle="round" flexDirection="column" paddingX={1}>
      <Text bold color={theme.status.warning}>
        {t('Pro quota limit reached for {{model}}.', { model: failedModel })}
      </Text>
      <Box marginTop={1}>
        <RadioButtonSelect
          items={items}
          initialIndex={1}
          onSelect={handleSelect}
        />
      </Box>
    </Box>
  );
}
@@ -55,6 +55,7 @@ export interface UIActions {
  handleClearScreen: () => void;
  onWorkspaceMigrationDialogOpen: () => void;
  onWorkspaceMigrationDialogClose: () => void;
  handleProQuotaChoice: (choice: 'auth' | 'continue') => void;
  // Vision switch dialog
  handleVisionSwitchSelect: (outcome: VisionSwitchOutcome) => void;
  // Welcome back dialog

@@ -22,13 +22,21 @@ import type {
  AuthType,
  IdeContext,
  ApprovalMode,
  UserTierId,
  IdeInfo,
  FallbackIntent,
} from '@qwen-code/qwen-code-core';
import type { DOMElement } from 'ink';
import type { SessionStatsState } from '../contexts/SessionContext.js';
import type { ExtensionUpdateState } from '../state/extensions.js';
import type { UpdateObject } from '../utils/updateCheck.js';

export interface ProQuotaDialogRequest {
  failedModel: string;
  fallbackModel: string;
  resolve: (intent: FallbackIntent) => void;
}

import { type UseHistoryManagerReturn } from '../hooks/useHistoryManager.js';
import { type RestartReason } from '../hooks/useIdeTrustListener.js';

@@ -91,6 +99,8 @@ export interface UIState {
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  workspaceExtensions: any[]; // Extension[]
  // Quota-related state
  userTier: UserTierId | undefined;
  proQuotaRequest: ProQuotaDialogRequest | null;
  currentModel: string;
  contextFileNames: string[];
  errorCount: number;

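The resolve callback on ProQuotaDialogRequest is what lets the quota flow pause on user input: the handler parks a resolver in UI state, DialogManager renders ProQuotaDialog while the promise stays pending, and onChoice resolves it. A minimal sketch of the pattern (the helper name is illustrative, not the hook's actual internals):

function requestProQuotaChoice(
  failedModel: string,
  fallbackModel: string,
  setRequest: (req: ProQuotaDialogRequest | null) => void,
): Promise<FallbackIntent> {
  return new Promise<FallbackIntent>((resolve) => {
    // Parking the resolver in state suspends the fallback flow until
    // the user picks an option in ProQuotaDialog.
    setRequest({ failedModel, fallbackModel, resolve });
  });
}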
@@ -1323,7 +1323,7 @@ describe('useGeminiStream', () => {
  it('should call parseAndFormatApiError with the correct authType on stream initialization failure', async () => {
    // 1. Setup
    const mockError = new Error('Rate limit exceeded');
    const mockAuthType = AuthType.USE_VERTEX_AI;
    const mockAuthType = AuthType.LOGIN_WITH_GOOGLE;
    mockParseAndFormatApiError.mockClear();
    mockSendMessageStream.mockReturnValue(
      (async function* () {
@@ -1374,6 +1374,9 @@ describe('useGeminiStream', () => {
    expect(mockParseAndFormatApiError).toHaveBeenCalledWith(
      'Rate limit exceeded',
      mockAuthType,
      undefined,
      'gemini-2.5-pro',
      'gemini-2.5-flash',
    );
  });
});
@@ -2490,6 +2493,9 @@ describe('useGeminiStream', () => {
    expect(mockParseAndFormatApiError).toHaveBeenCalledWith(
      { message: 'Test error' },
      expect.any(String),
      undefined,
      'gemini-2.5-pro',
      'gemini-2.5-flash',
    );
  });
});

@@ -26,6 +26,7 @@ import {
  GitService,
  UnauthorizedError,
  UserPromptEvent,
  DEFAULT_GEMINI_FLASH_MODEL,
  logConversationFinishedEvent,
  ConversationFinishedEvent,
  ApprovalMode,
@@ -526,15 +527,10 @@ export const useGeminiStream = (
        return currentThoughtBuffer;
      }

      let newThoughtBuffer = currentThoughtBuffer + thoughtText;

      const pendingType = pendingHistoryItemRef.current?.type;
      const isPendingThought =
        pendingType === 'gemini_thought' ||
        pendingType === 'gemini_thought_content';
      const newThoughtBuffer = currentThoughtBuffer + thoughtText;

      // If we're not already showing a thought, start a new one
      if (!isPendingThought) {
      if (pendingHistoryItemRef.current?.type !== 'gemini_thought') {
        // If there's a pending non-thought item, finalize it first
        if (pendingHistoryItemRef.current) {
          addItem(pendingHistoryItemRef.current, userMessageTimestamp);
@@ -542,37 +538,11 @@ export const useGeminiStream = (
        setPendingHistoryItem({ type: 'gemini_thought', text: '' });
      }

      // Split large thought messages for better rendering performance (same rationale
      // as regular content streaming). This helps avoid terminal flicker caused by
      // constantly re-rendering an ever-growing "pending" block.
      const splitPoint = findLastSafeSplitPoint(newThoughtBuffer);
      const nextPendingType: 'gemini_thought' | 'gemini_thought_content' =
        isPendingThought && pendingType === 'gemini_thought_content'
          ? 'gemini_thought_content'
          : 'gemini_thought';

      if (splitPoint === newThoughtBuffer.length) {
        // Update the existing thought message with accumulated content
        setPendingHistoryItem({
          type: nextPendingType,
          text: newThoughtBuffer,
        });
      } else {
        const beforeText = newThoughtBuffer.substring(0, splitPoint);
        const afterText = newThoughtBuffer.substring(splitPoint);
        addItem(
          {
            type: nextPendingType,
            text: beforeText,
          },
          userMessageTimestamp,
        );
        setPendingHistoryItem({
          type: 'gemini_thought_content',
          text: afterText,
        });
        newThoughtBuffer = afterText;
      }
      // Update the existing thought message with accumulated content
      setPendingHistoryItem({
        type: 'gemini_thought',
        text: newThoughtBuffer,
      });

      // Also update the thought state for the loading indicator
      mergeThought(eventValue);
@@ -630,6 +600,9 @@ export const useGeminiStream = (
              text: parseAndFormatApiError(
                eventValue.error,
                config.getContentGeneratorConfig()?.authType,
                undefined,
                config.getModel(),
                DEFAULT_GEMINI_FLASH_MODEL,
              ),
            },
            userMessageTimestamp,
@@ -681,9 +654,6 @@ export const useGeminiStream = (
          'Response stopped due to image safety violations.',
        [FinishReason.UNEXPECTED_TOOL_CALL]:
          'Response stopped due to unexpected tool call.',
        [FinishReason.IMAGE_PROHIBITED_CONTENT]:
          'Response stopped due to image prohibited content.',
        [FinishReason.NO_IMAGE]: 'Response stopped due to no image.',
      };

      const message = finishReasonMessages[finishReason];
@@ -800,17 +770,11 @@ export const useGeminiStream = (
      for await (const event of stream) {
        switch (event.type) {
          case ServerGeminiEventType.Thought:
            // If the thought has a subject, it's a discrete status update rather than
            // a streamed textual thought, so we update the thought state directly.
            if (event.value.subject) {
              setThought(event.value);
            } else {
              thoughtBuffer = handleThoughtEvent(
                event.value,
                thoughtBuffer,
                userMessageTimestamp,
              );
            }
            thoughtBuffer = handleThoughtEvent(
              event.value,
              thoughtBuffer,
              userMessageTimestamp,
            );
            break;
          case ServerGeminiEventType.Content:
            geminiMessageBuffer = handleContentEvent(
@@ -881,7 +845,6 @@ export const useGeminiStream = (
      handleMaxSessionTurnsEvent,
      handleSessionTokenLimitExceededEvent,
      handleCitationEvent,
      setThought,
    ],
  );

@@ -1024,6 +987,9 @@ export const useGeminiStream = (
          text: parseAndFormatApiError(
            getErrorMessage(error) || 'Unknown error',
            config.getContentGeneratorConfig()?.authType,
            undefined,
            config.getModel(),
            DEFAULT_GEMINI_FLASH_MODEL,
          ),
        },
        userMessageTimestamp,

@@ -8,19 +8,22 @@ import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
import { renderHook, act } from '@testing-library/react';
import { useLoadingIndicator } from './useLoadingIndicator.js';
import { StreamingState } from '../types.js';
import {
  WITTY_LOADING_PHRASES,
  PHRASE_CHANGE_INTERVAL_MS,
} from './usePhraseCycler.js';
import { PHRASE_CHANGE_INTERVAL_MS } from './usePhraseCycler.js';
import * as i18n from '../../i18n/index.js';

const MOCK_WITTY_PHRASES = ['Phrase 1', 'Phrase 2', 'Phrase 3'];

describe('useLoadingIndicator', () => {
  beforeEach(() => {
    vi.useFakeTimers();
    vi.spyOn(i18n, 'ta').mockReturnValue(MOCK_WITTY_PHRASES);
    vi.spyOn(i18n, 't').mockImplementation((key) => key);
  });

  afterEach(() => {
    vi.useRealTimers(); // Restore real timers after each test
    act(() => vi.runOnlyPendingTimers);
    vi.restoreAllMocks();
  });

  it('should initialize with default values when Idle', () => {
@@ -28,9 +31,7 @@ describe('useLoadingIndicator', () => {
      useLoadingIndicator(StreamingState.Idle),
    );
    expect(result.current.elapsedTime).toBe(0);
    expect(WITTY_LOADING_PHRASES).toContain(
      result.current.currentLoadingPhrase,
    );
    expect(MOCK_WITTY_PHRASES).toContain(result.current.currentLoadingPhrase);
  });

  it('should reflect values when Responding', async () => {
@@ -40,18 +41,14 @@ describe('useLoadingIndicator', () => {

    // Initial state before timers advance
    expect(result.current.elapsedTime).toBe(0);
    expect(WITTY_LOADING_PHRASES).toContain(
      result.current.currentLoadingPhrase,
    );
    expect(MOCK_WITTY_PHRASES).toContain(result.current.currentLoadingPhrase);

    await act(async () => {
      await vi.advanceTimersByTimeAsync(PHRASE_CHANGE_INTERVAL_MS + 1);
    });

    // Phrase should cycle if PHRASE_CHANGE_INTERVAL_MS has passed
    expect(WITTY_LOADING_PHRASES).toContain(
      result.current.currentLoadingPhrase,
    );
    expect(MOCK_WITTY_PHRASES).toContain(result.current.currentLoadingPhrase);
  });

  it('should show waiting phrase and retain elapsedTime when WaitingForConfirmation', async () => {
@@ -104,9 +101,7 @@ describe('useLoadingIndicator', () => {
      rerender({ streamingState: StreamingState.Responding });
    });
    expect(result.current.elapsedTime).toBe(0); // Should reset
    expect(WITTY_LOADING_PHRASES).toContain(
      result.current.currentLoadingPhrase,
    );
    expect(MOCK_WITTY_PHRASES).toContain(result.current.currentLoadingPhrase);

    await act(async () => {
      await vi.advanceTimersByTimeAsync(1000);
@@ -130,9 +125,7 @@ describe('useLoadingIndicator', () => {
    });

    expect(result.current.elapsedTime).toBe(0);
    expect(WITTY_LOADING_PHRASES).toContain(
      result.current.currentLoadingPhrase,
    );
    expect(MOCK_WITTY_PHRASES).toContain(result.current.currentLoadingPhrase);

    // Timer should not advance
    await act(async () => {

@@ -8,13 +8,17 @@ import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
import { renderHook, act } from '@testing-library/react';
import {
  usePhraseCycler,
  WITTY_LOADING_PHRASES,
  PHRASE_CHANGE_INTERVAL_MS,
} from './usePhraseCycler.js';
import * as i18n from '../../i18n/index.js';

const MOCK_WITTY_PHRASES = ['Phrase 1', 'Phrase 2', 'Phrase 3'];

describe('usePhraseCycler', () => {
  beforeEach(() => {
    vi.useFakeTimers();
    vi.spyOn(i18n, 'ta').mockReturnValue(MOCK_WITTY_PHRASES);
    vi.spyOn(i18n, 't').mockImplementation((key) => key);
  });

  afterEach(() => {
@@ -23,7 +27,7 @@ describe('usePhraseCycler', () => {

  it('should initialize with a witty phrase when not active and not waiting', () => {
    const { result } = renderHook(() => usePhraseCycler(false, false));
    expect(WITTY_LOADING_PHRASES).toContain(result.current);
    expect(MOCK_WITTY_PHRASES).toContain(result.current);
  });

  it('should show "Waiting for user confirmation..." when isWaiting is true', () => {
@@ -47,35 +51,30 @@ describe('usePhraseCycler', () => {
  it('should cycle through witty phrases when isActive is true and not waiting', () => {
    const { result } = renderHook(() => usePhraseCycler(true, false));
    // Initial phrase should be one of the witty phrases
    expect(WITTY_LOADING_PHRASES).toContain(result.current);
    expect(MOCK_WITTY_PHRASES).toContain(result.current);
    const _initialPhrase = result.current;

    act(() => {
      vi.advanceTimersByTime(PHRASE_CHANGE_INTERVAL_MS);
    });
    // Phrase should change and be one of the witty phrases
    expect(WITTY_LOADING_PHRASES).toContain(result.current);
    expect(MOCK_WITTY_PHRASES).toContain(result.current);

    const _secondPhrase = result.current;
    act(() => {
      vi.advanceTimersByTime(PHRASE_CHANGE_INTERVAL_MS);
    });
    expect(WITTY_LOADING_PHRASES).toContain(result.current);
    expect(MOCK_WITTY_PHRASES).toContain(result.current);
  });

  it('should reset to a witty phrase when isActive becomes true after being false (and not waiting)', () => {
    // Ensure there are at least two phrases for this test to be meaningful.
    if (WITTY_LOADING_PHRASES.length < 2) {
      return;
    }

    // Mock Math.random to make the test deterministic.
    let callCount = 0;
    vi.spyOn(Math, 'random').mockImplementation(() => {
      // Cycle through 0, 1, 0, 1, ...
      const val = callCount % 2;
      callCount++;
      return val / WITTY_LOADING_PHRASES.length;
      return val / MOCK_WITTY_PHRASES.length;
    });

    const { result, rerender } = renderHook(
@@ -86,9 +85,9 @@ describe('usePhraseCycler', () => {
    // Activate
    rerender({ isActive: true, isWaiting: false });
    const firstActivePhrase = result.current;
    expect(WITTY_LOADING_PHRASES).toContain(firstActivePhrase);
    expect(MOCK_WITTY_PHRASES).toContain(firstActivePhrase);
    // With our mock, this should be the first phrase.
    expect(firstActivePhrase).toBe(WITTY_LOADING_PHRASES[0]);
    expect(firstActivePhrase).toBe(MOCK_WITTY_PHRASES[0]);

    act(() => {
      vi.advanceTimersByTime(PHRASE_CHANGE_INTERVAL_MS);
@@ -96,18 +95,18 @@ describe('usePhraseCycler', () => {

    // Phrase should change to the second phrase.
    expect(result.current).not.toBe(firstActivePhrase);
    expect(result.current).toBe(WITTY_LOADING_PHRASES[1]);
    expect(result.current).toBe(MOCK_WITTY_PHRASES[1]);

    // Set to inactive - should reset to the default initial phrase
    rerender({ isActive: false, isWaiting: false });
    expect(WITTY_LOADING_PHRASES).toContain(result.current);
    expect(MOCK_WITTY_PHRASES).toContain(result.current);

    // Set back to active - should pick a random witty phrase (which our mock controls)
    act(() => {
      rerender({ isActive: true, isWaiting: false });
    });
    // The random mock will now return 0, so it should be the first phrase again.
    expect(result.current).toBe(WITTY_LOADING_PHRASES[0]);
    expect(result.current).toBe(MOCK_WITTY_PHRASES[0]);
  });

  it('should clear phrase interval on unmount when active', () => {
@@ -148,7 +147,7 @@ describe('usePhraseCycler', () => {

    rerender({ isActive: true, isWaiting: false, customPhrases: undefined });

    expect(WITTY_LOADING_PHRASES).toContain(result.current);
    expect(MOCK_WITTY_PHRASES).toContain(result.current);
  });

  it('should fall back to witty phrases if custom phrases are an empty array', () => {
@@ -164,7 +163,7 @@ describe('usePhraseCycler', () => {
      },
    );

    expect(WITTY_LOADING_PHRASES).toContain(result.current);
    expect(MOCK_WITTY_PHRASES).toContain(result.current);
  });

  it('should reset to a witty phrase when transitioning from waiting to active', () => {
@@ -174,16 +173,13 @@ describe('usePhraseCycler', () => {
    );

    const _initialPhrase = result.current;
    expect(WITTY_LOADING_PHRASES).toContain(_initialPhrase);
    expect(MOCK_WITTY_PHRASES).toContain(_initialPhrase);

    // Cycle to a different phrase (potentially)
    act(() => {
      vi.advanceTimersByTime(PHRASE_CHANGE_INTERVAL_MS);
    });
    if (WITTY_LOADING_PHRASES.length > 1) {
      // This check is probabilistic with random selection
    }
    expect(WITTY_LOADING_PHRASES).toContain(result.current);
    expect(MOCK_WITTY_PHRASES).toContain(result.current);

    // Go to waiting state
    rerender({ isActive: false, isWaiting: true });
@@ -191,6 +187,6 @@ describe('usePhraseCycler', () => {

    // Go back to active cycling - should pick a random witty phrase
    rerender({ isActive: true, isWaiting: false });
    expect(WITTY_LOADING_PHRASES).toContain(result.current);
    expect(MOCK_WITTY_PHRASES).toContain(result.current);
  });
});

@@ -5,139 +5,9 @@
 */

import { useState, useEffect, useRef, useMemo } from 'react';
import { t } from '../../i18n/index.js';
import { t, ta } from '../../i18n/index.js';

export const WITTY_LOADING_PHRASES = [
  "I'm Feeling Lucky",
  'Shipping awesomeness... ',
  'Painting the serifs back on...',
  'Navigating the slime mold...',
  'Consulting the digital spirits...',
  'Reticulating splines...',
  'Warming up the AI hamsters...',
  'Asking the magic conch shell...',
  'Generating witty retort...',
  'Polishing the algorithms...',
  "Don't rush perfection (or my code)...",
  'Brewing fresh bytes...',
  'Counting electrons...',
  'Engaging cognitive processors...',
  'Checking for syntax errors in the universe...',
  'One moment, optimizing humor...',
  'Shuffling punchlines...',
  'Untangling neural nets...',
  'Compiling brilliance...',
  'Loading wit.exe...',
  'Summoning the cloud of wisdom...',
  'Preparing a witty response...',
  "Just a sec, I'm debugging reality...",
  'Confuzzling the options...',
  'Tuning the cosmic frequencies...',
  'Crafting a response worthy of your patience...',
  'Compiling the 1s and 0s...',
  'Resolving dependencies... and existential crises...',
  'Defragmenting memories... both RAM and personal...',
  'Rebooting the humor module...',
  'Caching the essentials (mostly cat memes)...',
  'Optimizing for ludicrous speed',
  "Swapping bits... don't tell the bytes...",
  'Garbage collecting... be right back...',
  'Assembling the interwebs...',
  'Converting coffee into code...',
  'Updating the syntax for reality...',
  'Rewiring the synapses...',
  'Looking for a misplaced semicolon...',
  "Greasin' the cogs of the machine...",
  'Pre-heating the servers...',
  'Calibrating the flux capacitor...',
  'Engaging the improbability drive...',
  'Channeling the Force...',
  'Aligning the stars for optimal response...',
  'So say we all...',
  'Loading the next great idea...',
  "Just a moment, I'm in the zone...",
  'Preparing to dazzle you with brilliance...',
  "Just a tick, I'm polishing my wit...",
  "Hold tight, I'm crafting a masterpiece...",
  "Just a jiffy, I'm debugging the universe...",
  "Just a moment, I'm aligning the pixels...",
  "Just a sec, I'm optimizing the humor...",
  "Just a moment, I'm tuning the algorithms...",
  'Warp speed engaged...',
  'Mining for more Dilithium crystals...',
  "Don't panic...",
  'Following the white rabbit...',
  'The truth is in here... somewhere...',
  'Blowing on the cartridge...',
  'Loading... Do a barrel roll!',
  'Waiting for the respawn...',
  'Finishing the Kessel Run in less than 12 parsecs...',
  "The cake is not a lie, it's just still loading...",
  'Fiddling with the character creation screen...',
  "Just a moment, I'm finding the right meme...",
  "Pressing 'A' to continue...",
  'Herding digital cats...',
  'Polishing the pixels...',
  'Finding a suitable loading screen pun...',
  'Distracting you with this witty phrase...',
  'Almost there... probably...',
  'Our hamsters are working as fast as they can...',
  'Giving Cloudy a pat on the head...',
  'Petting the cat...',
  'Rickrolling my boss...',
  'Never gonna give you up, never gonna let you down...',
  'Slapping the bass...',
  'Tasting the snozberries...',
  "I'm going the distance, I'm going for speed...",
  'Is this the real life? Is this just fantasy?...',
  "I've got a good feeling about this...",
  'Poking the bear...',
  'Doing research on the latest memes...',
  'Figuring out how to make this more witty...',
  'Hmmm... let me think...',
  'What do you call a fish with no eyes? A fsh...',
  'Why did the computer go to therapy? It had too many bytes...',
  "Why don't programmers like nature? It has too many bugs...",
  'Why do programmers prefer dark mode? Because light attracts bugs...',
  'Why did the developer go broke? Because they used up all their cache...',
  "What can you do with a broken pencil? Nothing, it's pointless...",
  'Applying percussive maintenance...',
  'Searching for the correct USB orientation...',
  'Ensuring the magic smoke stays inside the wires...',
  'Rewriting in Rust for no particular reason...',
  'Trying to exit Vim...',
  'Spinning up the hamster wheel...',
  "That's not a bug, it's an undocumented feature...",
  'Engage.',
  "I'll be back... with an answer.",
  'My other process is a TARDIS...',
  'Communing with the machine spirit...',
  'Letting the thoughts marinate...',
  'Just remembered where I put my keys...',
  'Pondering the orb...',
  "I've seen things you people wouldn't believe... like a user who reads loading messages.",
  'Initiating thoughtful gaze...',
  "What's a computer's favorite snack? Microchips.",
  "Why do Java developers wear glasses? Because they don't C#.",
  'Charging the laser... pew pew!',
  'Dividing by zero... just kidding!',
  'Looking for an adult superviso... I mean, processing.',
  'Making it go beep boop.',
  'Buffering... because even AIs need a moment.',
  'Entangling quantum particles for a faster response...',
  'Polishing the chrome... on the algorithms.',
  'Are you not entertained? (Working on it!)',
  'Summoning the code gremlins... to help, of course.',
  'Just waiting for the dial-up tone to finish...',
  'Recalibrating the humor-o-meter.',
  'My other loading screen is even funnier.',
  "Pretty sure there's a cat walking on the keyboard somewhere...",
  'Enhancing... Enhancing... Still loading.',
  "It's not a bug, it's a feature... of this loading screen.",
  'Have you tried turning it off and on again? (The loading screen, not me.)',
  'Constructing additional pylons...',
  'New line? That’s Ctrl+J.',
];
export const WITTY_LOADING_PHRASES: string[] = ["I'm Feeling Lucky"];

export const PHRASE_CHANGE_INTERVAL_MS = 15000;

@@ -152,14 +22,16 @@ export const usePhraseCycler = (
  isWaiting: boolean,
  customPhrases?: string[],
) => {
  // Translate all phrases at once if using default phrases
  const loadingPhrases = useMemo(
    () =>
      customPhrases && customPhrases.length > 0
        ? customPhrases
        : WITTY_LOADING_PHRASES.map((phrase) => t(phrase)),
    [customPhrases],
  );
  // Get phrases from translations if available
  const loadingPhrases = useMemo(() => {
    if (customPhrases && customPhrases.length > 0) {
      return customPhrases;
    }
    const translatedPhrases = ta('WITTY_LOADING_PHRASES');
    return translatedPhrases.length > 0
      ? translatedPhrases
      : WITTY_LOADING_PHRASES;
  }, [customPhrases]);

  const [currentLoadingPhrase, setCurrentLoadingPhrase] = useState(
    loadingPhrases[0],

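The diff assumes a ta() helper exported from ../../i18n that returns a translated string array for a key; the tests above spy on it directly rather than relying on locale files. A hypothetical shape, for orientation only (resources and currentLocale are illustrative names, not the real i18n internals):

declare const resources: Record<string, Record<string, unknown>>;
declare const currentLocale: string;

function ta(key: string): string[] {
  const value = resources[currentLocale]?.[key];
  // Returning [] for a missing key lets callers fall back, exactly as
  // usePhraseCycler falls back to the built-in WITTY_LOADING_PHRASES.
  return Array.isArray(value) ? value : [];
}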
391
packages/cli/src/ui/hooks/useQuotaAndFallback.test.ts
Normal file
@@ -0,0 +1,391 @@
/**
 * @license
 * Copyright 2025 Google LLC
 * SPDX-License-Identifier: Apache-2.0
 */

import {
  vi,
  describe,
  it,
  expect,
  beforeEach,
  afterEach,
  type Mock,
} from 'vitest';
import { act, renderHook } from '@testing-library/react';
import {
  type Config,
  type FallbackModelHandler,
  UserTierId,
  AuthType,
  isGenericQuotaExceededError,
  isProQuotaExceededError,
  makeFakeConfig,
} from '@qwen-code/qwen-code-core';
import { useQuotaAndFallback } from './useQuotaAndFallback.js';
import type { UseHistoryManagerReturn } from './useHistoryManager.js';
import { AuthState, MessageType } from '../types.js';

// Mock the error checking functions from the core package to control test scenarios
vi.mock('@qwen-code/qwen-code-core', async (importOriginal) => {
  const original =
    await importOriginal<typeof import('@qwen-code/qwen-code-core')>();
  return {
    ...original,
    isGenericQuotaExceededError: vi.fn(),
    isProQuotaExceededError: vi.fn(),
  };
});

// Use a type alias for SpyInstance as it's not directly exported
type SpyInstance = ReturnType<typeof vi.spyOn>;

describe('useQuotaAndFallback', () => {
  let mockConfig: Config;
  let mockHistoryManager: UseHistoryManagerReturn;
  let mockSetAuthState: Mock;
  let mockSetModelSwitchedFromQuotaError: Mock;
  let setFallbackHandlerSpy: SpyInstance;

  const mockedIsGenericQuotaExceededError = isGenericQuotaExceededError as Mock;
  const mockedIsProQuotaExceededError = isProQuotaExceededError as Mock;

  beforeEach(() => {
    mockConfig = makeFakeConfig();

    // Spy on the method that requires the private field and mock its return.
    // This is cleaner than modifying the config class for tests.
    vi.spyOn(mockConfig, 'getContentGeneratorConfig').mockReturnValue({
      model: 'test-model',
      authType: AuthType.LOGIN_WITH_GOOGLE,
    });

    mockHistoryManager = {
      addItem: vi.fn(),
      history: [],
      updateItem: vi.fn(),
      clearItems: vi.fn(),
      loadHistory: vi.fn(),
    };
    mockSetAuthState = vi.fn();
    mockSetModelSwitchedFromQuotaError = vi.fn();

    setFallbackHandlerSpy = vi.spyOn(mockConfig, 'setFallbackModelHandler');
    vi.spyOn(mockConfig, 'setQuotaErrorOccurred');

    mockedIsGenericQuotaExceededError.mockReturnValue(false);
    mockedIsProQuotaExceededError.mockReturnValue(false);
  });

  afterEach(() => {
    vi.clearAllMocks();
  });

  it('should register a fallback handler on initialization', () => {
    renderHook(() =>
      useQuotaAndFallback({
        config: mockConfig,
        historyManager: mockHistoryManager,
        userTier: UserTierId.FREE,
        setAuthState: mockSetAuthState,
        setModelSwitchedFromQuotaError: mockSetModelSwitchedFromQuotaError,
      }),
    );

    expect(setFallbackHandlerSpy).toHaveBeenCalledTimes(1);
    expect(setFallbackHandlerSpy.mock.calls[0][0]).toBeInstanceOf(Function);
  });

  describe('Fallback Handler Logic', () => {
    // Helper function to render the hook and extract the registered handler
    const getRegisteredHandler = (
      userTier: UserTierId = UserTierId.FREE,
    ): FallbackModelHandler => {
      renderHook(
        (props) =>
          useQuotaAndFallback({
            config: mockConfig,
            historyManager: mockHistoryManager,
            userTier: props.userTier,
            setAuthState: mockSetAuthState,
            setModelSwitchedFromQuotaError: mockSetModelSwitchedFromQuotaError,
          }),
        { initialProps: { userTier } },
      );
      return setFallbackHandlerSpy.mock.calls[0][0] as FallbackModelHandler;
    };

    it('should return null and take no action if already in fallback mode', async () => {
      vi.spyOn(mockConfig, 'isInFallbackMode').mockReturnValue(true);
      const handler = getRegisteredHandler();
      const result = await handler('gemini-pro', 'gemini-flash', new Error());

      expect(result).toBeNull();
      expect(mockHistoryManager.addItem).not.toHaveBeenCalled();
    });

    it('should return null and take no action if authType is not LOGIN_WITH_GOOGLE', async () => {
      // Override the default mock from beforeEach for this specific test
      vi.spyOn(mockConfig, 'getContentGeneratorConfig').mockReturnValue({
        model: 'test-model',
        authType: AuthType.USE_GEMINI,
      });

      const handler = getRegisteredHandler();
      const result = await handler('gemini-pro', 'gemini-flash', new Error());

      expect(result).toBeNull();
      expect(mockHistoryManager.addItem).not.toHaveBeenCalled();
    });

    describe('Automatic Fallback Scenarios', () => {
      const testCases = [
        {
          errorType: 'generic',
          tier: UserTierId.FREE,
          expectedMessageSnippets: [
            'Automatically switching from model-A to model-B',
            'upgrade to a Gemini Code Assist Standard or Enterprise plan',
          ],
        },
        {
          errorType: 'generic',
          tier: UserTierId.STANDARD, // Paid tier
          expectedMessageSnippets: [
            'Automatically switching from model-A to model-B',
            'switch to using a paid API key from AI Studio',
          ],
        },
        {
          errorType: 'other',
          tier: UserTierId.FREE,
          expectedMessageSnippets: [
            'Automatically switching from model-A to model-B for faster responses',
            'upgrade to a Gemini Code Assist Standard or Enterprise plan',
          ],
        },
        {
          errorType: 'other',
          tier: UserTierId.LEGACY, // Paid tier
          expectedMessageSnippets: [
            'Automatically switching from model-A to model-B for faster responses',
            'switch to using a paid API key from AI Studio',
          ],
        },
      ];

      for (const { errorType, tier, expectedMessageSnippets } of testCases) {
        it(`should handle ${errorType} error for ${tier} tier correctly`, async () => {
          mockedIsGenericQuotaExceededError.mockReturnValue(
            errorType === 'generic',
          );

          const handler = getRegisteredHandler(tier);
          const result = await handler(
            'model-A',
            'model-B',
            new Error('quota exceeded'),
          );

          // Automatic fallbacks should return 'stop'
          expect(result).toBe('stop');

          expect(mockHistoryManager.addItem).toHaveBeenCalledWith(
            expect.objectContaining({ type: MessageType.INFO }),
            expect.any(Number),
          );

          const message = (mockHistoryManager.addItem as Mock).mock.calls[0][0]
            .text;
          for (const snippet of expectedMessageSnippets) {
            expect(message).toContain(snippet);
          }

          expect(mockSetModelSwitchedFromQuotaError).toHaveBeenCalledWith(true);
          expect(mockConfig.setQuotaErrorOccurred).toHaveBeenCalledWith(true);
        });
      }
    });

    describe('Interactive Fallback (Pro Quota Error)', () => {
      beforeEach(() => {
        mockedIsProQuotaExceededError.mockReturnValue(true);
      });

      it('should set an interactive request and wait for user choice', async () => {
        const { result } = renderHook(() =>
          useQuotaAndFallback({
            config: mockConfig,
            historyManager: mockHistoryManager,
            userTier: UserTierId.FREE,
            setAuthState: mockSetAuthState,
            setModelSwitchedFromQuotaError: mockSetModelSwitchedFromQuotaError,
          }),
        );

        const handler = setFallbackHandlerSpy.mock
          .calls[0][0] as FallbackModelHandler;

        // Call the handler but do not await it, to check the intermediate state
        const promise = handler(
          'gemini-pro',
          'gemini-flash',
          new Error('pro quota'),
        );

        await act(async () => {});

        // The hook should now have a pending request for the UI to handle
        expect(result.current.proQuotaRequest).not.toBeNull();
        expect(result.current.proQuotaRequest?.failedModel).toBe('gemini-pro');

        // Simulate the user choosing to continue with the fallback model
        act(() => {
          result.current.handleProQuotaChoice('continue');
        });

        // The original promise from the handler should now resolve
        const intent = await promise;
        expect(intent).toBe('retry');

        // The pending request should be cleared from the state
        expect(result.current.proQuotaRequest).toBeNull();
      });

      it('should handle race conditions by stopping subsequent requests', async () => {
        const { result } = renderHook(() =>
          useQuotaAndFallback({
            config: mockConfig,
            historyManager: mockHistoryManager,
            userTier: UserTierId.FREE,
            setAuthState: mockSetAuthState,
            setModelSwitchedFromQuotaError: mockSetModelSwitchedFromQuotaError,
          }),
        );

        const handler = setFallbackHandlerSpy.mock
          .calls[0][0] as FallbackModelHandler;

        const promise1 = handler(
          'gemini-pro',
          'gemini-flash',
          new Error('pro quota 1'),
        );
        await act(async () => {});

        const firstRequest = result.current.proQuotaRequest;
        expect(firstRequest).not.toBeNull();

        const result2 = await handler(
          'gemini-pro',
          'gemini-flash',
          new Error('pro quota 2'),
        );

        // The lock should have stopped the second request
        expect(result2).toBe('stop');
        expect(result.current.proQuotaRequest).toBe(firstRequest);

        act(() => {
          result.current.handleProQuotaChoice('continue');
        });

        const intent1 = await promise1;
        expect(intent1).toBe('retry');
        expect(result.current.proQuotaRequest).toBeNull();
      });
    });
  });

  describe('handleProQuotaChoice', () => {
    beforeEach(() => {
      mockedIsProQuotaExceededError.mockReturnValue(true);
    });

    it('should do nothing if there is no pending pro quota request', () => {
      const { result } = renderHook(() =>
        useQuotaAndFallback({
          config: mockConfig,
          historyManager: mockHistoryManager,
          userTier: UserTierId.FREE,
          setAuthState: mockSetAuthState,
          setModelSwitchedFromQuotaError: mockSetModelSwitchedFromQuotaError,
        }),
      );

      act(() => {
        result.current.handleProQuotaChoice('auth');
      });

      expect(mockSetAuthState).not.toHaveBeenCalled();
      expect(mockHistoryManager.addItem).not.toHaveBeenCalled();
    });

    it('should resolve intent to "auth" and trigger auth state update', async () => {
      const { result } = renderHook(() =>
        useQuotaAndFallback({
          config: mockConfig,
          historyManager: mockHistoryManager,
          userTier: UserTierId.FREE,
          setAuthState: mockSetAuthState,
          setModelSwitchedFromQuotaError: mockSetModelSwitchedFromQuotaError,
        }),
      );

      const handler = setFallbackHandlerSpy.mock
        .calls[0][0] as FallbackModelHandler;
      const promise = handler(
        'gemini-pro',
        'gemini-flash',
        new Error('pro quota'),
      );
      await act(async () => {}); // Allow state to update

      act(() => {
        result.current.handleProQuotaChoice('auth');
      });

      const intent = await promise;
      expect(intent).toBe('auth');
      expect(mockSetAuthState).toHaveBeenCalledWith(AuthState.Updating);
      expect(result.current.proQuotaRequest).toBeNull();
    });

    it('should resolve intent to "retry" and add info message on continue', async () => {
      const { result } = renderHook(() =>
        useQuotaAndFallback({
          config: mockConfig,
          historyManager: mockHistoryManager,
          userTier: UserTierId.FREE,
          setAuthState: mockSetAuthState,
          setModelSwitchedFromQuotaError: mockSetModelSwitchedFromQuotaError,
        }),
      );

      const handler = setFallbackHandlerSpy.mock
        .calls[0][0] as FallbackModelHandler;
      // The first `addItem` call is for the initial quota error message
      const promise = handler(
        'gemini-pro',
        'gemini-flash',
        new Error('pro quota'),
      );
      await act(async () => {}); // Allow state to update

      act(() => {
        result.current.handleProQuotaChoice('continue');
      });

      const intent = await promise;
      expect(intent).toBe('retry');
      expect(result.current.proQuotaRequest).toBeNull();

      // Check for the second "Switched to fallback model" message
      expect(mockHistoryManager.addItem).toHaveBeenCalledTimes(2);
      const lastCall = (mockHistoryManager.addItem as Mock).mock.calls[1][0];
      expect(lastCall.type).toBe(MessageType.INFO);
      expect(lastCall.text).toContain('Switched to fallback model.');
    });
  });
});
175
packages/cli/src/ui/hooks/useQuotaAndFallback.ts
Normal file
@@ -0,0 +1,175 @@
/**
 * @license
 * Copyright 2025 Google LLC
 * SPDX-License-Identifier: Apache-2.0
 */

import {
  AuthType,
  type Config,
  type FallbackModelHandler,
  type FallbackIntent,
  isGenericQuotaExceededError,
  isProQuotaExceededError,
  UserTierId,
} from '@qwen-code/qwen-code-core';
import { useCallback, useEffect, useRef, useState } from 'react';
import { type UseHistoryManagerReturn } from './useHistoryManager.js';
import { AuthState, MessageType } from '../types.js';
import { type ProQuotaDialogRequest } from '../contexts/UIStateContext.js';

interface UseQuotaAndFallbackArgs {
  config: Config;
  historyManager: UseHistoryManagerReturn;
  userTier: UserTierId | undefined;
  setAuthState: (state: AuthState) => void;
  setModelSwitchedFromQuotaError: (value: boolean) => void;
}

export function useQuotaAndFallback({
  config,
  historyManager,
  userTier,
  setAuthState,
  setModelSwitchedFromQuotaError,
}: UseQuotaAndFallbackArgs) {
  const [proQuotaRequest, setProQuotaRequest] =
    useState<ProQuotaDialogRequest | null>(null);
  const isDialogPending = useRef(false);

  // Set up Flash fallback handler
  useEffect(() => {
    const fallbackHandler: FallbackModelHandler = async (
      failedModel,
      fallbackModel,
      error,
    ): Promise<FallbackIntent | null> => {
      if (config.isInFallbackMode()) {
        return null;
      }

      // Fallbacks are currently only handled for OAuth users.
      const contentGeneratorConfig = config.getContentGeneratorConfig();
      if (
        !contentGeneratorConfig ||
        contentGeneratorConfig.authType !== AuthType.LOGIN_WITH_GOOGLE
      ) {
        return null;
      }

      // Use actual user tier if available; otherwise, default to FREE tier behavior (safe default)
      const isPaidTier =
        userTier === UserTierId.LEGACY || userTier === UserTierId.STANDARD;

      let message: string;

      if (error && isProQuotaExceededError(error)) {
        // Pro Quota specific messages (Interactive)
        if (isPaidTier) {
          message = `⚡ You have reached your daily ${failedModel} quota limit.
⚡ You can choose to authenticate with a paid API key or continue with the fallback model.
⚡ To continue accessing the ${failedModel} model today, consider using /auth to switch to using a paid API key from AI Studio at https://aistudio.google.com/apikey`;
        } else {
          message = `⚡ You have reached your daily ${failedModel} quota limit.
⚡ You can choose to authenticate with a paid API key or continue with the fallback model.
⚡ To increase your limits, upgrade to a Gemini Code Assist Standard or Enterprise plan with higher limits at https://goo.gle/set-up-gemini-code-assist
⚡ Or you can utilize a Gemini API Key. See: https://goo.gle/gemini-cli-docs-auth#gemini-api-key
⚡ You can switch authentication methods by typing /auth`;
        }
      } else if (error && isGenericQuotaExceededError(error)) {
        // Generic Quota (Automatic fallback)
        const actionMessage = `⚡ You have reached your daily quota limit.\n⚡ Automatically switching from ${failedModel} to ${fallbackModel} for the remainder of this session.`;

        if (isPaidTier) {
          message = `${actionMessage}
⚡ To continue accessing the ${failedModel} model today, consider using /auth to switch to using a paid API key from AI Studio at https://aistudio.google.com/apikey`;
        } else {
          message = `${actionMessage}
⚡ To increase your limits, upgrade to a Gemini Code Assist Standard or Enterprise plan with higher limits at https://goo.gle/set-up-gemini-code-assist
⚡ Or you can utilize a Gemini API Key. See: https://goo.gle/gemini-cli-docs-auth#gemini-api-key
⚡ You can switch authentication methods by typing /auth`;
        }
      } else {
        // Consecutive 429s or other errors (Automatic fallback)
        const actionMessage = `⚡ Automatically switching from ${failedModel} to ${fallbackModel} for faster responses for the remainder of this session.`;

        if (isPaidTier) {
          message = `${actionMessage}
⚡ Possible reasons for this are that you have received multiple consecutive capacity errors or you have reached your daily ${failedModel} quota limit
⚡ To continue accessing the ${failedModel} model today, consider using /auth to switch to using a paid API key from AI Studio at https://aistudio.google.com/apikey`;
        } else {
          message = `${actionMessage}
⚡ Possible reasons for this are that you have received multiple consecutive capacity errors or you have reached your daily ${failedModel} quota limit
⚡ To increase your limits, upgrade to a Gemini Code Assist Standard or Enterprise plan with higher limits at https://goo.gle/set-up-gemini-code-assist
⚡ Or you can utilize a Gemini API Key. See: https://goo.gle/gemini-cli-docs-auth#gemini-api-key
⚡ You can switch authentication methods by typing /auth`;
        }
      }

      // Add message to UI history
      historyManager.addItem(
        {
          type: MessageType.INFO,
          text: message,
        },
        Date.now(),
      );

      setModelSwitchedFromQuotaError(true);
      config.setQuotaErrorOccurred(true);

      // Interactive Fallback for Pro quota
      if (error && isProQuotaExceededError(error)) {
        if (isDialogPending.current) {
          return 'stop'; // A dialog is already active, so just stop this request.
        }
        isDialogPending.current = true;

        const intent: FallbackIntent = await new Promise<FallbackIntent>(
          (resolve) => {
            setProQuotaRequest({
              failedModel,
              fallbackModel,
              resolve,
            });
          },
        );

        return intent;
      }

      return 'stop';
    };

    config.setFallbackModelHandler(fallbackHandler);
  }, [config, historyManager, userTier, setModelSwitchedFromQuotaError]);

  const handleProQuotaChoice = useCallback(
    (choice: 'auth' | 'continue') => {
      if (!proQuotaRequest) return;

      const intent: FallbackIntent = choice === 'auth' ? 'auth' : 'retry';
      proQuotaRequest.resolve(intent);
      setProQuotaRequest(null);
      isDialogPending.current = false; // Reset the flag here

      if (choice === 'auth') {
        setAuthState(AuthState.Updating);
      } else {
        historyManager.addItem(
          {
            type: MessageType.INFO,
            text: 'Switched to fallback model. Tip: Press Ctrl+P (or Up Arrow) to recall your previous prompt and submit it again if you wish.',
          },
          Date.now(),
        );
      }
    },
    [proQuotaRequest, setAuthState, historyManager],
  );

  return {
    proQuotaRequest,
    handleProQuotaChoice,
  };
}
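For orientation, a minimal sketch of how a caller might consume this hook; the wrapper function below is hypothetical and not part of this change, and only the hook's own API (proQuotaRequest, handleProQuotaChoice) is assumed:

// Hypothetical consumer of useQuotaAndFallback. While `proQuotaRequest` is
// non-null, the UI should render a dialog; either callback resolves the
// promise that the fallback handler is awaiting inside the hook.
import { useQuotaAndFallback } from './useQuotaAndFallback.js';

export function useProQuotaDialog(
  args: Parameters<typeof useQuotaAndFallback>[0],
) {
  const { proQuotaRequest, handleProQuotaChoice } = useQuotaAndFallback(args);

  return {
    isDialogOpen: proQuotaRequest !== null,
    failedModel: proQuotaRequest?.failedModel,
    chooseAuth: () => handleProQuotaChoice('auth'), // resolves intent 'auth'
    chooseFallback: () => handleProQuotaChoice('continue'), // resolves 'retry'
  };
}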
@@ -411,7 +411,7 @@ describe('useQwenAuth', () => {
    expect(geminiResult.current.qwenAuthState.authStatus).toBe('idle');

    const { result: oauthResult } = renderHook(() =>
      useQwenAuth(AuthType.USE_OPENAI, true),
      useQwenAuth(AuthType.LOGIN_WITH_GOOGLE, true),
    );
    expect(oauthResult.current.qwenAuthState.authStatus).toBe('idle');
  });

@@ -62,7 +62,7 @@ const mockConfig = {
  getAllowedTools: vi.fn(() => []),
  getContentGeneratorConfig: () => ({
    model: 'test-model',
    authType: 'gemini-api-key',
    authType: 'oauth-personal',
  }),
  getUseSmartEdit: () => false,
  getUseModelRouter: () => false,

@@ -60,11 +60,6 @@ export function getOpenAIAvailableModelFromEnv(): AvailableModel | null {
  return id ? { id, label: id } : null;
}

export function getAnthropicAvailableModelFromEnv(): AvailableModel | null {
  const id = process.env['ANTHROPIC_MODEL']?.trim();
  return id ? { id, label: id } : null;
}

export function getAvailableModelsForAuthType(
  authType: AuthType,
): AvailableModel[] {

@@ -75,10 +70,6 @@ export function getAvailableModelsForAuthType(
      const openAIModel = getOpenAIAvailableModelFromEnv();
      return openAIModel ? [openAIModel] : [];
    }
    case AuthType.USE_ANTHROPIC: {
      const anthropicModel = getAnthropicAvailableModelFromEnv();
      return anthropicModel ? [anthropicModel] : [];
    }
    default:
      // For other auth types, return empty array for now
      // This can be expanded later according to the design doc

@@ -20,11 +20,6 @@ const makeConfig = (tools: Record<string, AnyDeclarativeTool>) =>
    getToolRegistry: () => ({
      getTool: (name: string) => tools[name],
    }),
    getContentGenerator: () => ({
      // Default to showing full thinking content during resume unless explicitly
      // summarized; tests don't care about summarized thinking behavior.
      useSummarizedThinking: () => false,
    }),
  }) as unknown as Config;

describe('resumeHistoryUtils', () => {

@@ -204,11 +204,7 @@ function convertToHistoryItems(
  const parts = record.message?.parts as Part[] | undefined;

  // Extract thought content
  const thoughtText = !config
    .getContentGenerator()
    .useSummarizedThinking()
    ? extractThoughtTextFromParts(parts)
    : '';
  const thoughtText = extractThoughtTextFromParts(parts);

  // Extract text content (non-function-call, non-thought)
  const text = extractTextFromParts(parts);

@@ -153,8 +153,7 @@ export async function getExtendedSystemInfo(

  // Get base URL if using OpenAI auth
  const baseUrl =
    baseInfo.selectedAuthType === AuthType.USE_OPENAI ||
    baseInfo.selectedAuthType === AuthType.USE_ANTHROPIC
    baseInfo.selectedAuthType === AuthType.USE_OPENAI
      ? context.services.config?.getContentGeneratorConfig()?.baseUrl
      : undefined;

@@ -19,9 +19,6 @@ describe('validateNonInterActiveAuth', () => {
  let originalEnvVertexAi: string | undefined;
  let originalEnvGcp: string | undefined;
  let originalEnvOpenAiApiKey: string | undefined;
  let originalEnvQwenOauth: string | undefined;
  let originalEnvGoogleApiKey: string | undefined;
  let originalEnvAnthropicApiKey: string | undefined;
  let consoleErrorSpy: ReturnType<typeof vi.spyOn>;
  let processExitSpy: ReturnType<typeof vi.spyOn<[code?: number], never>>;
  let refreshAuthMock: ReturnType<typeof vi.fn>;

@@ -32,16 +29,10 @@ describe('validateNonInterActiveAuth', () => {
    originalEnvVertexAi = process.env['GOOGLE_GENAI_USE_VERTEXAI'];
    originalEnvGcp = process.env['GOOGLE_GENAI_USE_GCA'];
    originalEnvOpenAiApiKey = process.env['OPENAI_API_KEY'];
    originalEnvQwenOauth = process.env['QWEN_OAUTH'];
    originalEnvGoogleApiKey = process.env['GOOGLE_API_KEY'];
    originalEnvAnthropicApiKey = process.env['ANTHROPIC_API_KEY'];
    delete process.env['GEMINI_API_KEY'];
    delete process.env['GOOGLE_GENAI_USE_VERTEXAI'];
    delete process.env['GOOGLE_GENAI_USE_GCA'];
    delete process.env['OPENAI_API_KEY'];
    delete process.env['QWEN_OAUTH'];
    delete process.env['GOOGLE_API_KEY'];
    delete process.env['ANTHROPIC_API_KEY'];
    consoleErrorSpy = vi.spyOn(console, 'error').mockImplementation(() => {});
    processExitSpy = vi.spyOn(process, 'exit').mockImplementation((code) => {
      throw new Error(`process.exit(${code}) called`);

@@ -89,21 +80,6 @@ describe('validateNonInterActiveAuth', () => {
    } else {
      delete process.env['OPENAI_API_KEY'];
    }
    if (originalEnvQwenOauth !== undefined) {
      process.env['QWEN_OAUTH'] = originalEnvQwenOauth;
    } else {
      delete process.env['QWEN_OAUTH'];
    }
    if (originalEnvGoogleApiKey !== undefined) {
      process.env['GOOGLE_API_KEY'] = originalEnvGoogleApiKey;
    } else {
      delete process.env['GOOGLE_API_KEY'];
    }
    if (originalEnvAnthropicApiKey !== undefined) {
      process.env['ANTHROPIC_API_KEY'] = originalEnvAnthropicApiKey;
    } else {
      delete process.env['ANTHROPIC_API_KEY'];
    }
    vi.restoreAllMocks();
  });

@@ -21,16 +21,6 @@ function getAuthTypeFromEnv(): AuthType | undefined {
    return AuthType.QWEN_OAUTH;
  }

  if (process.env['GEMINI_API_KEY']) {
    return AuthType.USE_GEMINI;
  }
  if (process.env['GOOGLE_API_KEY']) {
    return AuthType.USE_VERTEX_AI;
  }
  if (process.env['ANTHROPIC_API_KEY']) {
    return AuthType.USE_ANTHROPIC;
  }

  return undefined;
}

@@ -23,9 +23,8 @@
    "scripts/postinstall.js"
  ],
  "dependencies": {
    "@anthropic-ai/sdk": "^0.36.1",
    "@google/genai": "1.30.0",
    "@modelcontextprotocol/sdk": "^1.25.1",
    "@google/genai": "1.16.0",
    "@modelcontextprotocol/sdk": "^1.11.0",
    "@opentelemetry/api": "^1.9.0",
    "async-mutex": "^0.5.0",
    "@opentelemetry/exporter-logs-otlp-grpc": "^0.203.0",

@@ -35,6 +34,7 @@
    "@opentelemetry/exporter-trace-otlp-grpc": "^0.203.0",
    "@opentelemetry/exporter-trace-otlp-http": "^0.203.0",
    "@opentelemetry/instrumentation-http": "^0.203.0",
    "@opentelemetry/resource-detector-gcp": "^0.40.0",
    "@opentelemetry/sdk-node": "^0.203.0",
    "@types/html-to-text": "^9.0.4",
    "@xterm/headless": "5.5.0",

@@ -48,7 +48,7 @@
    "fdir": "^6.4.6",
    "fzf": "^0.5.2",
    "glob": "^10.5.0",
    "google-auth-library": "^10.5.0",
    "google-auth-library": "^9.11.0",
    "html-to-text": "^9.0.5",
    "https-proxy-agent": "^7.0.6",
    "ignore": "^7.0.0",
54
packages/core/src/code_assist/codeAssist.ts
Normal file
@@ -0,0 +1,54 @@
/**
 * @license
 * Copyright 2025 Google LLC
 * SPDX-License-Identifier: Apache-2.0
 */

import type { ContentGenerator } from '../core/contentGenerator.js';
import { AuthType } from '../core/contentGenerator.js';
import { getOauthClient } from './oauth2.js';
import { setupUser } from './setup.js';
import type { HttpOptions } from './server.js';
import { CodeAssistServer } from './server.js';
import type { Config } from '../config/config.js';
import { LoggingContentGenerator } from '../core/loggingContentGenerator.js';

export async function createCodeAssistContentGenerator(
  httpOptions: HttpOptions,
  authType: AuthType,
  config: Config,
  sessionId?: string,
): Promise<ContentGenerator> {
  if (
    authType === AuthType.LOGIN_WITH_GOOGLE ||
    authType === AuthType.CLOUD_SHELL
  ) {
    const authClient = await getOauthClient(authType, config);
    const userData = await setupUser(authClient);
    return new CodeAssistServer(
      authClient,
      userData.projectId,
      httpOptions,
      sessionId,
      userData.userTier,
    );
  }

  throw new Error(`Unsupported authType: ${authType}`);
}

export function getCodeAssistServer(
  config: Config,
): CodeAssistServer | undefined {
  let server = config.getContentGenerator();

  // Unwrap LoggingContentGenerator if present
  if (server instanceof LoggingContentGenerator) {
    server = server.getWrapped();
  }

  if (!(server instanceof CodeAssistServer)) {
    return undefined;
  }
  return server;
}
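A quick sketch of a call site for the helper above; the wrapper function is illustrative only, and it assumes an initialized Config instance is in scope:

// Sketch: getCodeAssistServer returns undefined unless the active generator
// is a CodeAssistServer (possibly unwrapped from a LoggingContentGenerator).
import type { Config } from '../config/config.js';
import { CodeAssistServer } from './server.js';
import { getCodeAssistServer } from './codeAssist.js';

function requireCodeAssistServer(config: Config): CodeAssistServer {
  const server = getCodeAssistServer(config);
  if (!server) {
    // Not a LOGIN_WITH_GOOGLE / CLOUD_SHELL session, or another generator is active.
    throw new Error('Code Assist server is not the active content generator');
  }
  return server; // Safe to use CodeAssistServer-specific APIs from here.
}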
456
packages/core/src/code_assist/converter.test.ts
Normal file
@@ -0,0 +1,456 @@
/**
 * @license
 * Copyright 2025 Google LLC
 * SPDX-License-Identifier: Apache-2.0
 */

import { describe, it, expect } from 'vitest';
import type { CaGenerateContentResponse } from './converter.js';
import {
  toGenerateContentRequest,
  fromGenerateContentResponse,
  toContents,
} from './converter.js';
import type {
  ContentListUnion,
  GenerateContentParameters,
} from '@google/genai';
import {
  GenerateContentResponse,
  FinishReason,
  BlockedReason,
  type Part,
} from '@google/genai';

describe('converter', () => {
  describe('toCodeAssistRequest', () => {
    it('should convert a simple request with project', () => {
      const genaiReq: GenerateContentParameters = {
        model: 'gemini-pro',
        contents: [{ role: 'user', parts: [{ text: 'Hello' }] }],
      };
      const codeAssistReq = toGenerateContentRequest(
        genaiReq,
        'my-prompt',
        'my-project',
        'my-session',
      );
      expect(codeAssistReq).toEqual({
        model: 'gemini-pro',
        project: 'my-project',
        request: {
          contents: [{ role: 'user', parts: [{ text: 'Hello' }] }],
          systemInstruction: undefined,
          cachedContent: undefined,
          tools: undefined,
          toolConfig: undefined,
          labels: undefined,
          safetySettings: undefined,
          generationConfig: undefined,
          session_id: 'my-session',
        },
        user_prompt_id: 'my-prompt',
      });
    });

    it('should convert a request without a project', () => {
      const genaiReq: GenerateContentParameters = {
        model: 'gemini-pro',
        contents: [{ role: 'user', parts: [{ text: 'Hello' }] }],
      };
      const codeAssistReq = toGenerateContentRequest(
        genaiReq,
        'my-prompt',
        undefined,
        'my-session',
      );
      expect(codeAssistReq).toEqual({
        model: 'gemini-pro',
        project: undefined,
        request: {
          contents: [{ role: 'user', parts: [{ text: 'Hello' }] }],
          systemInstruction: undefined,
          cachedContent: undefined,
          tools: undefined,
          toolConfig: undefined,
          labels: undefined,
          safetySettings: undefined,
          generationConfig: undefined,
          session_id: 'my-session',
        },
        user_prompt_id: 'my-prompt',
      });
    });

    it('should convert a request with sessionId', () => {
      const genaiReq: GenerateContentParameters = {
        model: 'gemini-pro',
        contents: [{ role: 'user', parts: [{ text: 'Hello' }] }],
      };
      const codeAssistReq = toGenerateContentRequest(
        genaiReq,
        'my-prompt',
        'my-project',
        'session-123',
      );
      expect(codeAssistReq).toEqual({
        model: 'gemini-pro',
        project: 'my-project',
        request: {
          contents: [{ role: 'user', parts: [{ text: 'Hello' }] }],
          systemInstruction: undefined,
          cachedContent: undefined,
          tools: undefined,
          toolConfig: undefined,
          labels: undefined,
          safetySettings: undefined,
          generationConfig: undefined,
          session_id: 'session-123',
        },
        user_prompt_id: 'my-prompt',
      });
    });

    it('should handle string content', () => {
      const genaiReq: GenerateContentParameters = {
        model: 'gemini-pro',
        contents: 'Hello',
      };
      const codeAssistReq = toGenerateContentRequest(
        genaiReq,
        'my-prompt',
        'my-project',
        'my-session',
      );
      expect(codeAssistReq.request.contents).toEqual([
        { role: 'user', parts: [{ text: 'Hello' }] },
      ]);
    });

    it('should handle Part[] content', () => {
      const genaiReq: GenerateContentParameters = {
        model: 'gemini-pro',
        contents: [{ text: 'Hello' }, { text: 'World' }],
      };
      const codeAssistReq = toGenerateContentRequest(
        genaiReq,
        'my-prompt',
        'my-project',
        'my-session',
      );
      expect(codeAssistReq.request.contents).toEqual([
        { role: 'user', parts: [{ text: 'Hello' }] },
        { role: 'user', parts: [{ text: 'World' }] },
      ]);
    });

    it('should handle system instructions', () => {
      const genaiReq: GenerateContentParameters = {
        model: 'gemini-pro',
        contents: 'Hello',
        config: {
          systemInstruction: 'You are a helpful assistant.',
        },
      };
      const codeAssistReq = toGenerateContentRequest(
        genaiReq,
        'my-prompt',
        'my-project',
        'my-session',
      );
      expect(codeAssistReq.request.systemInstruction).toEqual({
        role: 'user',
        parts: [{ text: 'You are a helpful assistant.' }],
      });
    });

    it('should handle generation config', () => {
      const genaiReq: GenerateContentParameters = {
        model: 'gemini-pro',
        contents: 'Hello',
        config: {
          temperature: 0.8,
          topK: 40,
        },
      };
      const codeAssistReq = toGenerateContentRequest(
        genaiReq,
        'my-prompt',
        'my-project',
        'my-session',
      );
      expect(codeAssistReq.request.generationConfig).toEqual({
        temperature: 0.8,
        topK: 40,
      });
    });

    it('should handle all generation config fields', () => {
      const genaiReq: GenerateContentParameters = {
        model: 'gemini-pro',
        contents: 'Hello',
        config: {
          temperature: 0.1,
          topP: 0.2,
          topK: 3,
          candidateCount: 4,
          maxOutputTokens: 5,
          stopSequences: ['a'],
          responseLogprobs: true,
          logprobs: 6,
          presencePenalty: 0.7,
          frequencyPenalty: 0.8,
          seed: 9,
          responseMimeType: 'application/json',
        },
      };
      const codeAssistReq = toGenerateContentRequest(
        genaiReq,
        'my-prompt',
        'my-project',
        'my-session',
      );
      expect(codeAssistReq.request.generationConfig).toEqual({
        temperature: 0.1,
        topP: 0.2,
        topK: 3,
        candidateCount: 4,
        maxOutputTokens: 5,
        stopSequences: ['a'],
        responseLogprobs: true,
        logprobs: 6,
        presencePenalty: 0.7,
        frequencyPenalty: 0.8,
        seed: 9,
        responseMimeType: 'application/json',
      });
    });
  });

  describe('fromCodeAssistResponse', () => {
    it('should convert a simple response', () => {
      const codeAssistRes: CaGenerateContentResponse = {
        response: {
          candidates: [
            {
              index: 0,
              content: {
                role: 'model',
                parts: [{ text: 'Hi there!' }],
              },
              finishReason: FinishReason.STOP,
              safetyRatings: [],
            },
          ],
        },
      };
      const genaiRes = fromGenerateContentResponse(codeAssistRes);
      expect(genaiRes).toBeInstanceOf(GenerateContentResponse);
      expect(genaiRes.candidates).toEqual(codeAssistRes.response.candidates);
    });

    it('should handle prompt feedback and usage metadata', () => {
      const codeAssistRes: CaGenerateContentResponse = {
        response: {
          candidates: [],
          promptFeedback: {
            blockReason: BlockedReason.SAFETY,
            safetyRatings: [],
          },
          usageMetadata: {
            promptTokenCount: 10,
            candidatesTokenCount: 20,
            totalTokenCount: 30,
          },
        },
      };
      const genaiRes = fromGenerateContentResponse(codeAssistRes);
      expect(genaiRes.promptFeedback).toEqual(
        codeAssistRes.response.promptFeedback,
      );
      expect(genaiRes.usageMetadata).toEqual(
        codeAssistRes.response.usageMetadata,
      );
    });

    it('should handle automatic function calling history', () => {
      const codeAssistRes: CaGenerateContentResponse = {
        response: {
          candidates: [],
          automaticFunctionCallingHistory: [
            {
              role: 'model',
              parts: [
                {
                  functionCall: {
                    name: 'test_function',
                    args: {
                      foo: 'bar',
                    },
                  },
                },
              ],
            },
          ],
        },
      };
      const genaiRes = fromGenerateContentResponse(codeAssistRes);
      expect(genaiRes.automaticFunctionCallingHistory).toEqual(
        codeAssistRes.response.automaticFunctionCallingHistory,
      );
    });

    it('should handle modelVersion', () => {
      const codeAssistRes: CaGenerateContentResponse = {
        response: {
          candidates: [],
          modelVersion: 'qwen3-coder-plus',
        },
      };
      const genaiRes = fromGenerateContentResponse(codeAssistRes);
      expect(genaiRes.modelVersion).toEqual('qwen3-coder-plus');
    });
  });

  describe('toContents', () => {
    it('should handle Content', () => {
      const content: ContentListUnion = {
        role: 'user',
        parts: [{ text: 'hello' }],
      };
      expect(toContents(content)).toEqual([
        { role: 'user', parts: [{ text: 'hello' }] },
      ]);
    });

    it('should handle array of Contents', () => {
      const contents: ContentListUnion = [
        { role: 'user', parts: [{ text: 'hello' }] },
        { role: 'model', parts: [{ text: 'hi' }] },
      ];
      expect(toContents(contents)).toEqual([
        { role: 'user', parts: [{ text: 'hello' }] },
        { role: 'model', parts: [{ text: 'hi' }] },
      ]);
    });

    it('should handle Part', () => {
      const part: ContentListUnion = { text: 'a part' };
      expect(toContents(part)).toEqual([
        { role: 'user', parts: [{ text: 'a part' }] },
      ]);
    });

    it('should handle array of Parts', () => {
      const parts = [{ text: 'part 1' }, 'part 2'];
      expect(toContents(parts)).toEqual([
        { role: 'user', parts: [{ text: 'part 1' }] },
        { role: 'user', parts: [{ text: 'part 2' }] },
      ]);
    });

    it('should handle string', () => {
      const str: ContentListUnion = 'a string';
      expect(toContents(str)).toEqual([
        { role: 'user', parts: [{ text: 'a string' }] },
      ]);
    });

    it('should handle array of strings', () => {
      const strings: ContentListUnion = ['string 1', 'string 2'];
      expect(toContents(strings)).toEqual([
        { role: 'user', parts: [{ text: 'string 1' }] },
        { role: 'user', parts: [{ text: 'string 2' }] },
      ]);
    });

    it('should convert thought parts to text parts for API compatibility', () => {
      const contentWithThought: ContentListUnion = {
        role: 'model',
        parts: [
          { text: 'regular text' },
          { thought: 'thinking about the problem' } as Part & {
            thought: string;
          },
          { text: 'more text' },
        ],
      };
      expect(toContents(contentWithThought)).toEqual([
        {
          role: 'model',
          parts: [
            { text: 'regular text' },
            { text: '[Thought: thinking about the problem]' },
            { text: 'more text' },
          ],
        },
      ]);
    });

    it('should combine text and thought for text parts with thoughts', () => {
      const contentWithTextAndThought: ContentListUnion = {
        role: 'model',
        parts: [
          {
            text: 'Here is my response',
            thought: 'I need to be careful here',
          } as Part & { thought: string },
        ],
      };
      expect(toContents(contentWithTextAndThought)).toEqual([
        {
          role: 'model',
          parts: [
            {
              text: 'Here is my response\n[Thought: I need to be careful here]',
            },
          ],
        },
      ]);
    });

    it('should preserve non-thought properties while removing thought', () => {
      const contentWithComplexPart: ContentListUnion = {
        role: 'model',
        parts: [
          {
            functionCall: { name: 'calculate', args: { x: 5, y: 10 } },
            thought: 'Performing calculation',
          } as Part & { thought: string },
        ],
      };
      expect(toContents(contentWithComplexPart)).toEqual([
        {
          role: 'model',
          parts: [
            {
              functionCall: { name: 'calculate', args: { x: 5, y: 10 } },
            },
          ],
        },
      ]);
    });

    it('should convert invalid text content to valid text part with thought', () => {
      const contentWithInvalidText: ContentListUnion = {
        role: 'model',
        parts: [
          {
            text: 123, // Invalid - should be string
            thought: 'Processing number',
          } as Part & { thought: string; text: number },
        ],
      };
      expect(toContents(contentWithInvalidText)).toEqual([
        {
          role: 'model',
          parts: [
            {
              text: '123\n[Thought: Processing number]',
            },
          ],
        },
      ]);
    });
  });
});
285
packages/core/src/code_assist/converter.ts
Normal file
@@ -0,0 +1,285 @@
/**
 * @license
 * Copyright 2025 Google LLC
 * SPDX-License-Identifier: Apache-2.0
 */

import type {
  Content,
  ContentListUnion,
  ContentUnion,
  GenerateContentConfig,
  GenerateContentParameters,
  CountTokensParameters,
  CountTokensResponse,
  GenerationConfigRoutingConfig,
  MediaResolution,
  Candidate,
  ModelSelectionConfig,
  GenerateContentResponsePromptFeedback,
  GenerateContentResponseUsageMetadata,
  Part,
  SafetySetting,
  PartUnion,
  SpeechConfigUnion,
  ThinkingConfig,
  ToolListUnion,
  ToolConfig,
} from '@google/genai';
import { GenerateContentResponse } from '@google/genai';

export interface CAGenerateContentRequest {
  model: string;
  project?: string;
  user_prompt_id?: string;
  request: VertexGenerateContentRequest;
}

interface VertexGenerateContentRequest {
  contents: Content[];
  systemInstruction?: Content;
  cachedContent?: string;
  tools?: ToolListUnion;
  toolConfig?: ToolConfig;
  labels?: Record<string, string>;
  safetySettings?: SafetySetting[];
  generationConfig?: VertexGenerationConfig;
  session_id?: string;
}

interface VertexGenerationConfig {
  temperature?: number;
  topP?: number;
  topK?: number;
  candidateCount?: number;
  maxOutputTokens?: number;
  stopSequences?: string[];
  responseLogprobs?: boolean;
  logprobs?: number;
  presencePenalty?: number;
  frequencyPenalty?: number;
  seed?: number;
  responseMimeType?: string;
  responseJsonSchema?: unknown;
  responseSchema?: unknown;
  routingConfig?: GenerationConfigRoutingConfig;
  modelSelectionConfig?: ModelSelectionConfig;
  responseModalities?: string[];
  mediaResolution?: MediaResolution;
  speechConfig?: SpeechConfigUnion;
  audioTimestamp?: boolean;
  thinkingConfig?: ThinkingConfig;
}

export interface CaGenerateContentResponse {
  response: VertexGenerateContentResponse;
}

interface VertexGenerateContentResponse {
  candidates: Candidate[];
  automaticFunctionCallingHistory?: Content[];
  promptFeedback?: GenerateContentResponsePromptFeedback;
  usageMetadata?: GenerateContentResponseUsageMetadata;
  modelVersion?: string;
}

export interface CaCountTokenRequest {
  request: VertexCountTokenRequest;
}

interface VertexCountTokenRequest {
  model: string;
  contents: Content[];
}

export interface CaCountTokenResponse {
  totalTokens: number;
}

export function toCountTokenRequest(
  req: CountTokensParameters,
): CaCountTokenRequest {
  return {
    request: {
      model: 'models/' + req.model,
      contents: toContents(req.contents),
    },
  };
}

export function fromCountTokenResponse(
  res: CaCountTokenResponse,
): CountTokensResponse {
  return {
    totalTokens: res.totalTokens,
  };
}

export function toGenerateContentRequest(
  req: GenerateContentParameters,
  userPromptId: string,
  project?: string,
  sessionId?: string,
): CAGenerateContentRequest {
  return {
    model: req.model,
    project,
    user_prompt_id: userPromptId,
    request: toVertexGenerateContentRequest(req, sessionId),
  };
}

export function fromGenerateContentResponse(
  res: CaGenerateContentResponse,
): GenerateContentResponse {
  const inres = res.response;
  const out = new GenerateContentResponse();
  out.candidates = inres.candidates;
  out.automaticFunctionCallingHistory = inres.automaticFunctionCallingHistory;
  out.promptFeedback = inres.promptFeedback;
  out.usageMetadata = inres.usageMetadata;
  out.modelVersion = inres.modelVersion;
  return out;
}

function toVertexGenerateContentRequest(
  req: GenerateContentParameters,
  sessionId?: string,
): VertexGenerateContentRequest {
  return {
    contents: toContents(req.contents),
    systemInstruction: maybeToContent(req.config?.systemInstruction),
    cachedContent: req.config?.cachedContent,
    tools: req.config?.tools,
    toolConfig: req.config?.toolConfig,
    labels: req.config?.labels,
    safetySettings: req.config?.safetySettings,
    generationConfig: toVertexGenerationConfig(req.config),
    session_id: sessionId,
  };
}

export function toContents(contents: ContentListUnion): Content[] {
  if (Array.isArray(contents)) {
    // it's a Content[] or a PartsUnion[]
    return contents.map(toContent);
  }
  // it's a Content or a PartsUnion
  return [toContent(contents)];
}

function maybeToContent(content?: ContentUnion): Content | undefined {
  if (!content) {
    return undefined;
  }
  return toContent(content);
}

function toContent(content: ContentUnion): Content {
  if (Array.isArray(content)) {
    // it's a PartsUnion[]
    return {
      role: 'user',
      parts: toParts(content),
    };
  }
  if (typeof content === 'string') {
    // it's a string
    return {
      role: 'user',
      parts: [{ text: content }],
    };
  }
  if ('parts' in content) {
    // it's a Content - process parts to handle thought filtering
    return {
      ...content,
      parts: content.parts
        ? toParts(content.parts.filter((p) => p != null))
        : [],
    };
  }
  // it's a Part
  return {
    role: 'user',
    parts: [toPart(content as Part)],
  };
}

export function toParts(parts: PartUnion[]): Part[] {
  return parts.map(toPart);
}

function toPart(part: PartUnion): Part {
  if (typeof part === 'string') {
    // it's a string
    return { text: part };
  }

  // Handle thought parts for CountToken API compatibility
  // The CountToken API expects parts to have certain required "oneof" fields initialized,
  // but thought parts don't conform to this schema and cause API failures
  if ('thought' in part && part.thought) {
    const thoughtText = `[Thought: ${part.thought}]`;

    const newPart = { ...part };
    delete (newPart as Record<string, unknown>)['thought'];

    const hasApiContent =
      'functionCall' in newPart ||
      'functionResponse' in newPart ||
      'inlineData' in newPart ||
      'fileData' in newPart;

    if (hasApiContent) {
      // It's a functionCall or other non-text part. Just strip the thought.
      return newPart;
    }

    // If no other valid API content, this must be a text part.
    // Combine existing text (if any) with the thought, preserving other properties.
    const text = (newPart as { text?: unknown }).text;
    const existingText = text ? String(text) : '';
    const combinedText = existingText
      ? `${existingText}\n${thoughtText}`
      : thoughtText;

    return {
      ...newPart,
      text: combinedText,
    };
  }

  return part;
}

function toVertexGenerationConfig(
  config?: GenerateContentConfig,
): VertexGenerationConfig | undefined {
  if (!config) {
    return undefined;
  }
  return {
    temperature: config.temperature,
    topP: config.topP,
    topK: config.topK,
    candidateCount: config.candidateCount,
    maxOutputTokens: config.maxOutputTokens,
    stopSequences: config.stopSequences,
    responseLogprobs: config.responseLogprobs,
    logprobs: config.logprobs,
    presencePenalty: config.presencePenalty,
    frequencyPenalty: config.frequencyPenalty,
    seed: config.seed,
    responseMimeType: config.responseMimeType,
    responseSchema: config.responseSchema,
    responseJsonSchema: config.responseJsonSchema,
    routingConfig: config.routingConfig,
    modelSelectionConfig: config.modelSelectionConfig,
    responseModalities: config.responseModalities,
    mediaResolution: config.mediaResolution,
    speechConfig: config.speechConfig,
    audioTimestamp: config.audioTimestamp,
    thinkingConfig: config.thinkingConfig,
  };
}
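To make the thought-part normalization above concrete, a small sketch (the behavior mirrors the converter tests earlier in this diff; the cast matches the style used there):

import type { Part } from '@google/genai';
import { toContents } from './converter.js';

// A bare string becomes a user Content; a thought-only part is rewritten as a
// "[Thought: ...]" text part so downstream APIs accept it.
const contents = toContents([
  'hello',
  { thought: 'planning the answer' } as Part & { thought: string },
]);
// contents === [
//   { role: 'user', parts: [{ text: 'hello' }] },
//   { role: 'user', parts: [{ text: '[Thought: planning the answer]' }] },
// ]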
217
packages/core/src/code_assist/oauth-credential-storage.test.ts
Normal file
@@ -0,0 +1,217 @@
/**
 * @license
 * Copyright 2025 Google LLC
 * SPDX-License-Identifier: Apache-2.0
 */

import { type Credentials } from 'google-auth-library';
import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
import { OAuthCredentialStorage } from './oauth-credential-storage.js';
import type { OAuthCredentials } from '../mcp/token-storage/types.js';

import * as path from 'node:path';
import * as os from 'node:os';
import { promises as fs } from 'node:fs';

// Mock external dependencies
const mockHybridTokenStorage = vi.hoisted(() => ({
  getCredentials: vi.fn(),
  setCredentials: vi.fn(),
  deleteCredentials: vi.fn(),
}));
vi.mock('../mcp/token-storage/hybrid-token-storage.js', () => ({
  HybridTokenStorage: vi.fn(() => mockHybridTokenStorage),
}));
vi.mock('node:fs', () => ({
  promises: {
    readFile: vi.fn(),
    rm: vi.fn(),
  },
}));
vi.mock('node:os');
vi.mock('node:path');

describe('OAuthCredentialStorage', () => {
  const mockCredentials: Credentials = {
    access_token: 'mock_access_token',
    refresh_token: 'mock_refresh_token',
    expiry_date: Date.now() + 3600 * 1000,
    token_type: 'Bearer',
    scope: 'email profile',
  };

  const mockMcpCredentials: OAuthCredentials = {
    serverName: 'main-account',
    token: {
      accessToken: 'mock_access_token',
      refreshToken: 'mock_refresh_token',
      tokenType: 'Bearer',
      scope: 'email profile',
      expiresAt: mockCredentials.expiry_date!,
    },
    updatedAt: expect.any(Number),
  };

  const oldFilePath = '/mock/home/.qwen/oauth.json';

  beforeEach(() => {
    vi.spyOn(mockHybridTokenStorage, 'getCredentials').mockResolvedValue(null);
    vi.spyOn(mockHybridTokenStorage, 'setCredentials').mockResolvedValue(
      undefined,
    );
    vi.spyOn(mockHybridTokenStorage, 'deleteCredentials').mockResolvedValue(
      undefined,
    );

    vi.spyOn(fs, 'readFile').mockRejectedValue(new Error('File not found'));
    vi.spyOn(fs, 'rm').mockResolvedValue(undefined);

    vi.spyOn(os, 'homedir').mockReturnValue('/mock/home');
    vi.spyOn(path, 'join').mockReturnValue(oldFilePath);
  });

  afterEach(() => {
    vi.restoreAllMocks();
  });

  describe('loadCredentials', () => {
    it('should load credentials from HybridTokenStorage if available', async () => {
      vi.spyOn(mockHybridTokenStorage, 'getCredentials').mockResolvedValue(
        mockMcpCredentials,
      );

      const result = await OAuthCredentialStorage.loadCredentials();

      expect(mockHybridTokenStorage.getCredentials).toHaveBeenCalledWith(
        'main-account',
      );
      expect(result).toEqual(mockCredentials);
    });

    it('should fallback to migrateFromFileStorage if no credentials in HybridTokenStorage', async () => {
      vi.spyOn(mockHybridTokenStorage, 'getCredentials').mockResolvedValue(
        null,
      );
      vi.spyOn(fs, 'readFile').mockResolvedValue(
        JSON.stringify(mockCredentials),
      );

      const result = await OAuthCredentialStorage.loadCredentials();

      expect(mockHybridTokenStorage.getCredentials).toHaveBeenCalledWith(
        'main-account',
      );
      expect(fs.readFile).toHaveBeenCalledWith(oldFilePath, 'utf-8');
      expect(mockHybridTokenStorage.setCredentials).toHaveBeenCalled(); // Verify credentials were saved
      expect(fs.rm).toHaveBeenCalledWith(oldFilePath, { force: true }); // Verify old file was removed
      expect(result).toEqual(mockCredentials);
    });

    it('should return null if no credentials found and no old file to migrate', async () => {
      vi.spyOn(fs, 'readFile').mockRejectedValue({
        message: 'File not found',
        code: 'ENOENT',
      });

      const result = await OAuthCredentialStorage.loadCredentials();

      expect(result).toBeNull();
    });

    it('should throw an error if loading fails', async () => {
      vi.spyOn(mockHybridTokenStorage, 'getCredentials').mockRejectedValue(
        new Error('Loading error'),
      );

      await expect(OAuthCredentialStorage.loadCredentials()).rejects.toThrow(
        'Failed to load OAuth credentials',
      );
    });

    it('should throw an error if read file fails', async () => {
      vi.spyOn(mockHybridTokenStorage, 'getCredentials').mockResolvedValue(
        null,
      );
      vi.spyOn(fs, 'readFile').mockRejectedValue(
        new Error('Permission denied'),
      );

      await expect(OAuthCredentialStorage.loadCredentials()).rejects.toThrow(
        'Failed to load OAuth credentials',
      );
    });

    it('should not throw error if migration file removal failed', async () => {
      vi.spyOn(mockHybridTokenStorage, 'getCredentials').mockResolvedValue(
        null,
      );
      vi.spyOn(fs, 'readFile').mockResolvedValue(
        JSON.stringify(mockCredentials),
      );
      vi.spyOn(OAuthCredentialStorage, 'saveCredentials').mockResolvedValue(
        undefined,
      );
      vi.spyOn(fs, 'rm').mockRejectedValue(new Error('Deletion failed'));

      const result = await OAuthCredentialStorage.loadCredentials();

      expect(result).toEqual(mockCredentials);
    });
  });

  describe('saveCredentials', () => {
    it('should save credentials to HybridTokenStorage', async () => {
      await OAuthCredentialStorage.saveCredentials(mockCredentials);

      expect(mockHybridTokenStorage.setCredentials).toHaveBeenCalledWith(
        mockMcpCredentials,
      );
    });

    it('should throw an error if access_token is missing', async () => {
      const invalidCredentials: Credentials = {
        ...mockCredentials,
        access_token: undefined,
      };
      await expect(
        OAuthCredentialStorage.saveCredentials(invalidCredentials),
      ).rejects.toThrow(
        'Attempted to save credentials without an access token.',
      );
    });
  });

  describe('clearCredentials', () => {
    it('should delete credentials from HybridTokenStorage', async () => {
      await OAuthCredentialStorage.clearCredentials();

      expect(mockHybridTokenStorage.deleteCredentials).toHaveBeenCalledWith(
        'main-account',
      );
    });

    it('should attempt to remove the old file-based storage', async () => {
      await OAuthCredentialStorage.clearCredentials();

      expect(fs.rm).toHaveBeenCalledWith(oldFilePath, { force: true });
    });

    it('should not throw an error if deleting old file fails', async () => {
      vi.spyOn(fs, 'rm').mockRejectedValue(new Error('File deletion failed'));

      await expect(
        OAuthCredentialStorage.clearCredentials(),
      ).resolves.toBeUndefined();
    });

    it('should throw an error if clearing from HybridTokenStorage fails', async () => {
      vi.spyOn(mockHybridTokenStorage, 'deleteCredentials').mockRejectedValue(
        new Error('Deletion error'),
      );

      await expect(OAuthCredentialStorage.clearCredentials()).rejects.toThrow(
        'Failed to clear OAuth credentials',
      );
    });
  });
});
130
packages/core/src/code_assist/oauth-credential-storage.ts
Normal file
@@ -0,0 +1,130 @@
/**
 * @license
 * Copyright 2025 Google LLC
 * SPDX-License-Identifier: Apache-2.0
 */

import { type Credentials } from 'google-auth-library';
import { HybridTokenStorage } from '../mcp/token-storage/hybrid-token-storage.js';
import { OAUTH_FILE } from '../config/storage.js';
import type { OAuthCredentials } from '../mcp/token-storage/types.js';
import * as path from 'node:path';
import * as os from 'node:os';
import { promises as fs } from 'node:fs';

const QWEN_DIR = '.qwen';
const KEYCHAIN_SERVICE_NAME = 'qwen-code-oauth';
const MAIN_ACCOUNT_KEY = 'main-account';

export class OAuthCredentialStorage {
  private static storage: HybridTokenStorage = new HybridTokenStorage(
    KEYCHAIN_SERVICE_NAME,
  );

  /**
   * Load cached OAuth credentials
   */
  static async loadCredentials(): Promise<Credentials | null> {
    try {
      const credentials = await this.storage.getCredentials(MAIN_ACCOUNT_KEY);

      if (credentials?.token) {
        const { accessToken, refreshToken, expiresAt, tokenType, scope } =
          credentials.token;
        // Convert from OAuthCredentials format to Google Credentials format
        const googleCreds: Credentials = {
          access_token: accessToken,
          refresh_token: refreshToken || undefined,
          token_type: tokenType || undefined,
          scope: scope || undefined,
        };

        if (expiresAt) {
          googleCreds.expiry_date = expiresAt;
        }

        return googleCreds;
      }

      // Fallback: Try to migrate from old file-based storage
      return await this.migrateFromFileStorage();
    } catch (error: unknown) {
      console.error(error);
      throw new Error('Failed to load OAuth credentials');
    }
  }

  /**
   * Save OAuth credentials
   */
  static async saveCredentials(credentials: Credentials): Promise<void> {
    if (!credentials.access_token) {
      throw new Error('Attempted to save credentials without an access token.');
    }

    // Convert Google Credentials to OAuthCredentials format
    const mcpCredentials: OAuthCredentials = {
      serverName: MAIN_ACCOUNT_KEY,
      token: {
        accessToken: credentials.access_token,
        refreshToken: credentials.refresh_token || undefined,
        tokenType: credentials.token_type || 'Bearer',
        scope: credentials.scope || undefined,
        expiresAt: credentials.expiry_date || undefined,
      },
      updatedAt: Date.now(),
    };

    await this.storage.setCredentials(mcpCredentials);
  }

  /**
   * Clear cached OAuth credentials
   */
  static async clearCredentials(): Promise<void> {
    try {
      await this.storage.deleteCredentials(MAIN_ACCOUNT_KEY);

      // Also try to remove the old file if it exists
      const oldFilePath = path.join(os.homedir(), QWEN_DIR, OAUTH_FILE);
      await fs.rm(oldFilePath, { force: true }).catch(() => {});
    } catch (error: unknown) {
      console.error(error);
      throw new Error('Failed to clear OAuth credentials');
    }
  }

  /**
   * Migrate credentials from old file-based storage to keychain
   */
  private static async migrateFromFileStorage(): Promise<Credentials | null> {
    const oldFilePath = path.join(os.homedir(), QWEN_DIR, OAUTH_FILE);

    let credsJson: string;
    try {
      credsJson = await fs.readFile(oldFilePath, 'utf-8');
    } catch (error: unknown) {
      if (
        typeof error === 'object' &&
        error !== null &&
        'code' in error &&
        error.code === 'ENOENT'
      ) {
        // File doesn't exist, so no migration.
        return null;
      }
      // Other read errors should propagate.
      throw error;
    }

    const credentials = JSON.parse(credsJson) as Credentials;

    // Save to new storage
    await this.saveCredentials(credentials);

    // Remove old file after successful migration
    await fs.rm(oldFilePath, { force: true }).catch(() => {});

    return credentials;
  }
}
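A minimal lifecycle sketch for the class above; the token values are placeholders, and the wrapper function is illustrative rather than part of this change. loadCredentials tries the keychain-backed storage first, then migrates the legacy ~/.qwen/oauth.json:

import { OAuthCredentialStorage } from './oauth-credential-storage.js';

async function refreshStoredCredentials(): Promise<void> {
  // Persist freshly obtained tokens (placeholder values shown).
  await OAuthCredentialStorage.saveCredentials({
    access_token: 'placeholder-access-token',
    refresh_token: 'placeholder-refresh-token',
    token_type: 'Bearer',
    expiry_date: Date.now() + 3600 * 1000,
  });

  // Keychain first, then one-time migration from the old file location.
  const creds = await OAuthCredentialStorage.loadCredentials();
  if (creds?.expiry_date && creds.expiry_date < Date.now()) {
    await OAuthCredentialStorage.clearCredentials(); // drop stale tokens
  }
}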
1166
packages/core/src/code_assist/oauth2.test.ts
Normal file
File diff suppressed because it is too large
563
packages/core/src/code_assist/oauth2.ts
Normal file
@@ -0,0 +1,563 @@
/**
 * @license
 * Copyright 2025 Google LLC
 * SPDX-License-Identifier: Apache-2.0
 */

import type { Credentials } from 'google-auth-library';
import {
  CodeChallengeMethod,
  Compute,
  OAuth2Client,
} from 'google-auth-library';
import crypto from 'node:crypto';
import { promises as fs } from 'node:fs';
import * as http from 'node:http';
import * as net from 'node:net';
import path from 'node:path';
import readline from 'node:readline';
import url from 'node:url';
import open from 'open';
import type { Config } from '../config/config.js';
import { Storage } from '../config/storage.js';
import { AuthType } from '../core/contentGenerator.js';
import { FatalAuthenticationError, getErrorMessage } from '../utils/errors.js';
import { UserAccountManager } from '../utils/userAccountManager.js';
import { OAuthCredentialStorage } from './oauth-credential-storage.js';
import { FORCE_ENCRYPTED_FILE_ENV_VAR } from '../mcp/token-storage/index.js';

const userAccountManager = new UserAccountManager();

// OAuth Client ID used to initiate OAuth2Client class.
const OAUTH_CLIENT_ID =
  '681255809395-oo8ft2oprdrnp9e3aqf6av3hmdib135j.apps.googleusercontent.com';

// OAuth Secret value used to initiate OAuth2Client class.
// Note: It's ok to save this in git because this is an installed application
// as described here: https://developers.google.com/identity/protocols/oauth2#installed
// "The process results in a client ID and, in some cases, a client secret,
// which you embed in the source code of your application. (In this context,
// the client secret is obviously not treated as a secret.)"
const OAUTH_CLIENT_SECRET = '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';

// OAuth Scopes for Cloud Code authorization.
const OAUTH_SCOPE = [
  'https://www.googleapis.com/auth/cloud-platform',
  'https://www.googleapis.com/auth/userinfo.email',
  'https://www.googleapis.com/auth/userinfo.profile',
];

const HTTP_REDIRECT = 301;
const SIGN_IN_SUCCESS_URL =
  'https://developers.google.com/gemini-code-assist/auth_success_gemini';
const SIGN_IN_FAILURE_URL =
  'https://developers.google.com/gemini-code-assist/auth_failure_gemini';

/**
 * An authentication URL for updating the credentials of an OAuth2Client,
 * as well as a promise that will resolve when the credentials have
 * been refreshed (or which throws an error when refreshing credentials failed).
 */
export interface OauthWebLogin {
  authUrl: string;
  loginCompletePromise: Promise<void>;
}

const oauthClientPromises = new Map<AuthType, Promise<OAuth2Client>>();

function getUseEncryptedStorageFlag() {
  return process.env[FORCE_ENCRYPTED_FILE_ENV_VAR] === 'true';
}

async function initOauthClient(
  authType: AuthType,
  config: Config,
): Promise<OAuth2Client> {
  const client = new OAuth2Client({
    clientId: OAUTH_CLIENT_ID,
    clientSecret: OAUTH_CLIENT_SECRET,
    transporterOptions: {
      proxy: config.getProxy(),
    },
  });
  const useEncryptedStorage = getUseEncryptedStorageFlag();

  if (
    process.env['GOOGLE_GENAI_USE_GCA'] &&
    process.env['GOOGLE_CLOUD_ACCESS_TOKEN']
  ) {
    client.setCredentials({
      access_token: process.env['GOOGLE_CLOUD_ACCESS_TOKEN'],
    });
    await fetchAndCacheUserInfo(client);
    return client;
  }

  client.on('tokens', async (tokens: Credentials) => {
    if (useEncryptedStorage) {
      await OAuthCredentialStorage.saveCredentials(tokens);
    } else {
      await cacheCredentials(tokens);
    }
  });

  // If there are cached creds on disk, they always take precedence
  if (await loadCachedCredentials(client)) {
    // Found valid cached credentials.
    // Check if we need to retrieve Google Account ID or Email
    if (!userAccountManager.getCachedGoogleAccount()) {
      try {
        await fetchAndCacheUserInfo(client);
      } catch (error) {
        // Non-fatal, continue with existing auth.
        console.warn('Failed to fetch user info:', getErrorMessage(error));
      }
    }
    console.log('Loaded cached credentials.');
    return client;
  }

  // In Google Cloud Shell, we can use Application Default Credentials (ADC)
  // provided via its metadata server to authenticate non-interactively using
  // the identity of the user logged into Cloud Shell.
  if (authType === AuthType.CLOUD_SHELL) {
    try {
      console.log("Attempting to authenticate via Cloud Shell VM's ADC.");
      const computeClient = new Compute({
        // We can leave this empty, since the metadata server will provide
        // the service account email.
      });
      await computeClient.getAccessToken();
      console.log('Authentication successful.');

      // Do not cache creds in this case; note that Compute client will handle its own refresh
      return computeClient;
    } catch (e) {
      throw new Error(
        `Could not authenticate using Cloud Shell credentials. Please select a different authentication method or ensure you are in a properly configured environment. Error: ${getErrorMessage(
          e,
        )}`,
      );
    }
  }

  if (config.isBrowserLaunchSuppressed()) {
    let success = false;
    const maxRetries = 2;
    for (let i = 0; !success && i < maxRetries; i++) {
      success = await authWithUserCode(client);
      if (!success) {
        console.error(
          '\nFailed to authenticate with user code.',
          i === maxRetries - 1 ? '' : 'Retrying...\n',
        );
      }
    }
    if (!success) {
      throw new FatalAuthenticationError(
        'Failed to authenticate with user code.',
      );
    }
  } else {
    const webLogin = await authWithWeb(client);

    console.log(
      `\n\nCode Assist login required.\n` +
        `Attempting to open authentication page in your browser.\n` +
        `Otherwise navigate to:\n\n${webLogin.authUrl}\n\n`,
    );
    try {
      // Attempt to open the authentication URL in the default browser.
      // We do not use the `wait` option here because the main script's execution
      // is already paused by `loginCompletePromise`, which awaits the server callback.
      const childProcess = await open(webLogin.authUrl);

      // IMPORTANT: Attach an error handler to the returned child process.
      // Without this, if `open` fails to spawn a process (e.g., `xdg-open` is not found
      // in a minimal Docker container), it will emit an unhandled 'error' event,
      // causing the entire Node.js process to crash.
      childProcess.on('error', (error) => {
        console.error(
          'Failed to open browser automatically. Please try running again with NO_BROWSER=true set.',
        );
        console.error('Browser error details:', getErrorMessage(error));
      });
    } catch (err) {
      console.error(
        'An unexpected error occurred while trying to open the browser:',
        getErrorMessage(err),
        '\nThis might be due to browser compatibility issues or system configuration.',
        '\nPlease try running again with NO_BROWSER=true set for manual authentication.',
      );
      throw new FatalAuthenticationError(
        `Failed to open browser: ${getErrorMessage(err)}`,
      );
    }
    console.log('Waiting for authentication...');

    // Add timeout to prevent infinite waiting when browser tab gets stuck
    const authTimeout = 5 * 60 * 1000; // 5 minutes timeout
    const timeoutPromise = new Promise<never>((_, reject) => {
      setTimeout(() => {
        reject(
          new FatalAuthenticationError(
            'Authentication timed out after 5 minutes. The browser tab may have gotten stuck in a loading state. ' +
              'Please try again or use NO_BROWSER=true for manual authentication.',
          ),
        );
      }, authTimeout);
    });

    await Promise.race([webLogin.loginCompletePromise, timeoutPromise]);
  }

  return client;
}

export async function getOauthClient(
  authType: AuthType,
  config: Config,
): Promise<OAuth2Client> {
  if (!oauthClientPromises.has(authType)) {
    oauthClientPromises.set(authType, initOauthClient(authType, config));
  }
  return oauthClientPromises.get(authType)!;
}

async function authWithUserCode(client: OAuth2Client): Promise<boolean> {
  const redirectUri = 'https://codeassist.google.com/authcode';
  const codeVerifier = await client.generateCodeVerifierAsync();
  const state = crypto.randomBytes(32).toString('hex');
  const authUrl: string = client.generateAuthUrl({
    redirect_uri: redirectUri,
    access_type: 'offline',
    scope: OAUTH_SCOPE,
    code_challenge_method: CodeChallengeMethod.S256,
    code_challenge: codeVerifier.codeChallenge,
    state,
  });
  console.log('Please visit the following URL to authorize the application:');
  console.log('');
  console.log(authUrl);
  console.log('');

  const code = await new Promise<string>((resolve) => {
    const rl = readline.createInterface({
      input: process.stdin,
      output: process.stdout,
    });
    rl.question('Enter the authorization code: ', (code) => {
      rl.close();
      resolve(code.trim());
    });
  });

  if (!code) {
    console.error('Authorization code is required.');
    return false;
  }

  try {
    const { tokens } = await client.getToken({
      code,
      codeVerifier: codeVerifier.codeVerifier,
      redirect_uri: redirectUri,
    });
    client.setCredentials(tokens);
  } catch (error) {
    console.error(
      'Failed to authenticate with authorization code:',
      getErrorMessage(error),
    );
    return false;
  }
  return true;
}

async function authWithWeb(client: OAuth2Client): Promise<OauthWebLogin> {
  const port = await getAvailablePort();
  // The hostname used for the HTTP server binding (e.g., '0.0.0.0' in Docker).
  const host = process.env['OAUTH_CALLBACK_HOST'] || 'localhost';
  // The `redirectUri` sent to Google's authorization server MUST use a loopback IP literal
  // (i.e., 'localhost' or '127.0.0.1'). This is a strict security policy for credentials of
  // type 'Desktop app' or 'Web application' (when using loopback flow) to mitigate
  // authorization code interception attacks.
  const redirectUri = `http://localhost:${port}/oauth2callback`;
  const state = crypto.randomBytes(32).toString('hex');
  const authUrl = client.generateAuthUrl({
    redirect_uri: redirectUri,
    access_type: 'offline',
    scope: OAUTH_SCOPE,
    state,
  });

  const loginCompletePromise = new Promise<void>((resolve, reject) => {
    const server = http.createServer(async (req, res) => {
      try {
        if (req.url!.indexOf('/oauth2callback') === -1) {
          res.writeHead(HTTP_REDIRECT, { Location: SIGN_IN_FAILURE_URL });
          res.end();
          reject(
            new FatalAuthenticationError(
              'OAuth callback not received. Unexpected request: ' + req.url,
            ),
          );
        }
        // acquire the code from the querystring, and close the web server.
        const qs = new url.URL(req.url!, 'http://localhost:3000').searchParams;
        if (qs.get('error')) {
          res.writeHead(HTTP_REDIRECT, { Location: SIGN_IN_FAILURE_URL });
          res.end();

          const errorCode = qs.get('error');
          const errorDescription =
            qs.get('error_description') || 'No additional details provided';
          reject(
            new FatalAuthenticationError(
              `Google OAuth error: ${errorCode}. ${errorDescription}`,
            ),
          );
        } else if (qs.get('state') !== state) {
          res.end('State mismatch. Possible CSRF attack');

          reject(
            new FatalAuthenticationError(
              'OAuth state mismatch. Possible CSRF attack or browser session issue.',
            ),
          );
        } else if (qs.get('code')) {
          try {
            const { tokens } = await client.getToken({
              code: qs.get('code')!,
              redirect_uri: redirectUri,
            });
            client.setCredentials(tokens);

            // Retrieve and cache Google Account ID during authentication
            try {
              await fetchAndCacheUserInfo(client);
            } catch (error) {
              console.warn(
                'Failed to retrieve Google Account ID during authentication:',
                getErrorMessage(error),
              );
              // Don't fail the auth flow if Google Account ID retrieval fails
            }

            res.writeHead(HTTP_REDIRECT, { Location: SIGN_IN_SUCCESS_URL });
            res.end();
            resolve();
          } catch (error) {
            res.writeHead(HTTP_REDIRECT, { Location: SIGN_IN_FAILURE_URL });
            res.end();
            reject(
              new FatalAuthenticationError(
                `Failed to exchange authorization code for tokens: ${getErrorMessage(error)}`,
              ),
            );
          }
        } else {
          reject(
            new FatalAuthenticationError(
              'No authorization code received from Google OAuth. Please try authenticating again.',
            ),
          );
        }
      } catch (e) {
        // Provide more specific error message for unexpected errors during OAuth flow
        if (e instanceof FatalAuthenticationError) {
          reject(e);
        } else {
          reject(
            new FatalAuthenticationError(
              `Unexpected error during OAuth authentication: ${getErrorMessage(e)}`,
            ),
          );
        }
      } finally {
        server.close();
      }
    });

    server.listen(port, host, () => {
      // Server started successfully
    });

    server.on('error', (err) => {
      reject(
        new FatalAuthenticationError(
          `OAuth callback server error: ${getErrorMessage(err)}`,
        ),
      );
    });
  });

  return {
    authUrl,
    loginCompletePromise,
  };
}

export function getAvailablePort(): Promise<number> {
  return new Promise((resolve, reject) => {
    let port = 0;
    try {
      const portStr = process.env['OAUTH_CALLBACK_PORT'];
      if (portStr) {
        port = parseInt(portStr, 10);
        if (isNaN(port) || port <= 0 || port > 65535) {
          return reject(
            new Error(`Invalid value for OAUTH_CALLBACK_PORT: "${portStr}"`),
          );
        }
        return resolve(port);
      }
      const server = net.createServer();
      server.listen(0, () => {
        const address = server.address()! as net.AddressInfo;
        port = address.port;
      });
      server.on('listening', () => {
        server.close();
        server.unref();
      });
      server.on('error', (e) => reject(e));
      server.on('close', () => resolve(port));
    } catch (e) {
      reject(e);
    }
  });
}

async function loadCachedCredentials(client: OAuth2Client): Promise<boolean> {
  const useEncryptedStorage = getUseEncryptedStorageFlag();
  if (useEncryptedStorage) {
    const credentials = await OAuthCredentialStorage.loadCredentials();
    if (credentials) {
      client.setCredentials(credentials);
      return true;
    }
    return false;
  }

  const pathsToTry = [
    Storage.getOAuthCredsPath(),
    process.env['GOOGLE_APPLICATION_CREDENTIALS'],
  ].filter((p): p is string => !!p);

  for (const keyFile of pathsToTry) {
    try {
      const creds = await fs.readFile(keyFile, 'utf-8');
      client.setCredentials(JSON.parse(creds));

      // This will verify locally that the credentials look good.
      const { token } = await client.getAccessToken();
      if (!token) {
        continue;
      }

      // This will check with the server to see if it hasn't been revoked.
      await client.getTokenInfo(token);

      return true;
    } catch (error) {
      // Log specific error for debugging, but continue trying other paths
      console.debug(
        `Failed to load credentials from ${keyFile}:`,
        getErrorMessage(error),
      );
    }
  }

  return false;
}

async function cacheCredentials(credentials: Credentials) {
  const filePath = Storage.getOAuthCredsPath();
  await fs.mkdir(path.dirname(filePath), { recursive: true });

  const credString = JSON.stringify(credentials, null, 2);
  await fs.writeFile(filePath, credString, { mode: 0o600 });
  try {
    await fs.chmod(filePath, 0o600);
  } catch {
    /* empty */
  }
}

export function clearOauthClientCache() {
  oauthClientPromises.clear();
}

export async function clearCachedCredentialFile() {
  try {
    const useEncryptedStorage = getUseEncryptedStorageFlag();
    if (useEncryptedStorage) {
      await OAuthCredentialStorage.clearCredentials();
    } else {
      await fs.rm(Storage.getOAuthCredsPath(), { force: true });
    }
    // Clear the Google Account ID cache when credentials are cleared
    await userAccountManager.clearCachedGoogleAccount();
    // Clear the in-memory OAuth client cache to force re-authentication
    clearOauthClientCache();

    /**
     * Also clear Qwen SharedTokenManager cache and credentials file to prevent stale credentials
     * when switching between auth types
     * TODO: We do not depend on code_assist, we'll have to build an independent auth-cleaning procedure.
     */
    try {
      const { SharedTokenManager } = await import(
        '../qwen/sharedTokenManager.js'
      );
      const { clearQwenCredentials } = await import('../qwen/qwenOAuth2.js');

      const sharedManager = SharedTokenManager.getInstance();
      sharedManager.clearCache();

      await clearQwenCredentials();
    } catch (qwenError) {
      console.debug('Could not clear Qwen credentials:', qwenError);
    }
  } catch (e) {
    console.error('Failed to clear cached credentials:', e);
  }
}

async function fetchAndCacheUserInfo(client: OAuth2Client): Promise<void> {
  try {
    const { token } = await client.getAccessToken();
    if (!token) {
      return;
    }

    const response = await fetch(
      'https://www.googleapis.com/oauth2/v2/userinfo',
      {
        headers: {
          Authorization: `Bearer ${token}`,
        },
      },
    );

    if (!response.ok) {
      console.error(
        'Failed to fetch user info:',
        response.status,
        response.statusText,
      );
      return;
    }

    const userInfo = await response.json();
    await userAccountManager.cacheGoogleAccount(userInfo.email);
  } catch (error) {
    console.error('Error retrieving user info:', error);
  }
}

// Helper to ensure test isolation
export function resetOauthClientForTesting() {
  oauthClientPromises.clear();
}
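A note on `getOauthClient` above: it memoizes the in-flight `initOauthClient` promise per `AuthType`, so concurrent callers share a single login flow instead of racing to open several browser windows. A minimal sketch of the same pattern, with hypothetical names rather than the real API, looks like this:

// Sketch only: hypothetical names, not part of the diff above.
const inflight = new Map<string, Promise<string>>();

function loginOnce(key: string, login: () => Promise<string>): Promise<string> {
  if (!inflight.has(key)) {
    inflight.set(key, login()); // first caller starts the flow
  }
  return inflight.get(key)!; // later callers await the same promise
}

// Two concurrent calls trigger a single login():
// const [a, b] = await Promise.all([
//   loginOnce('oauth', doLogin),
//   loginOnce('oauth', doLogin),
// ]);

One consequence worth noting: a rejected promise stays in the map, which appears to be why the file also exports `clearOauthClientCache()` and `resetOauthClientForTesting()` to force a fresh attempt.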
255
packages/core/src/code_assist/server.test.ts
Normal file
@@ -0,0 +1,255 @@
/**
 * @license
 * Copyright 2025 Google LLC
 * SPDX-License-Identifier: Apache-2.0
 */

import { beforeEach, describe, it, expect, vi } from 'vitest';
import { CodeAssistServer } from './server.js';
import { OAuth2Client } from 'google-auth-library';
import { UserTierId } from './types.js';

vi.mock('google-auth-library');

describe('CodeAssistServer', () => {
  beforeEach(() => {
    vi.resetAllMocks();
  });

  it('should be able to be constructed', () => {
    const auth = new OAuth2Client();
    const server = new CodeAssistServer(
      auth,
      'test-project',
      {},
      'test-session',
      UserTierId.FREE,
    );
    expect(server).toBeInstanceOf(CodeAssistServer);
  });

  it('should call the generateContent endpoint', async () => {
    const client = new OAuth2Client();
    const server = new CodeAssistServer(
      client,
      'test-project',
      {},
      'test-session',
      UserTierId.FREE,
    );
    const mockResponse = {
      response: {
        candidates: [
          {
            index: 0,
            content: {
              role: 'model',
              parts: [{ text: 'response' }],
            },
            finishReason: 'STOP',
            safetyRatings: [],
          },
        ],
      },
    };
    vi.spyOn(server, 'requestPost').mockResolvedValue(mockResponse);

    const response = await server.generateContent(
      {
        model: 'test-model',
        contents: [{ role: 'user', parts: [{ text: 'request' }] }],
      },
      'user-prompt-id',
    );

    expect(server.requestPost).toHaveBeenCalledWith(
      'generateContent',
      expect.any(Object),
      undefined,
    );
    expect(response.candidates?.[0]?.content?.parts?.[0]?.text).toBe(
      'response',
    );
  });

  it('should call the generateContentStream endpoint', async () => {
    const client = new OAuth2Client();
    const server = new CodeAssistServer(
      client,
      'test-project',
      {},
      'test-session',
      UserTierId.FREE,
    );
    const mockResponse = (async function* () {
      yield {
        response: {
          candidates: [
            {
              index: 0,
              content: {
                role: 'model',
                parts: [{ text: 'response' }],
              },
              finishReason: 'STOP',
              safetyRatings: [],
            },
          ],
        },
      };
    })();
    vi.spyOn(server, 'requestStreamingPost').mockResolvedValue(mockResponse);

    const stream = await server.generateContentStream(
      {
        model: 'test-model',
        contents: [{ role: 'user', parts: [{ text: 'request' }] }],
      },
      'user-prompt-id',
    );

    for await (const res of stream) {
      expect(server.requestStreamingPost).toHaveBeenCalledWith(
        'streamGenerateContent',
        expect.any(Object),
        undefined,
      );
      expect(res.candidates?.[0]?.content?.parts?.[0]?.text).toBe('response');
    }
  });

  it('should call the onboardUser endpoint', async () => {
    const client = new OAuth2Client();
    const server = new CodeAssistServer(
      client,
      'test-project',
      {},
      'test-session',
      UserTierId.FREE,
    );
    const mockResponse = {
      name: 'operations/123',
      done: true,
    };
    vi.spyOn(server, 'requestPost').mockResolvedValue(mockResponse);

    const response = await server.onboardUser({
      tierId: 'test-tier',
      cloudaicompanionProject: 'test-project',
      metadata: {},
    });

    expect(server.requestPost).toHaveBeenCalledWith(
      'onboardUser',
      expect.any(Object),
    );
    expect(response.name).toBe('operations/123');
  });

  it('should call the loadCodeAssist endpoint', async () => {
    const client = new OAuth2Client();
    const server = new CodeAssistServer(
      client,
      'test-project',
      {},
      'test-session',
      UserTierId.FREE,
    );
    const mockResponse = {
      currentTier: {
        id: UserTierId.FREE,
        name: 'Free',
        description: 'free tier',
      },
      allowedTiers: [],
      ineligibleTiers: [],
      cloudaicompanionProject: 'projects/test',
    };
    vi.spyOn(server, 'requestPost').mockResolvedValue(mockResponse);

    const response = await server.loadCodeAssist({
      metadata: {},
    });

    expect(server.requestPost).toHaveBeenCalledWith(
      'loadCodeAssist',
      expect.any(Object),
    );
    expect(response).toEqual(mockResponse);
  });

  it('should return the token count from countTokens', async () => {
    const client = new OAuth2Client();
    const server = new CodeAssistServer(
      client,
      'test-project',
      {},
      'test-session',
      UserTierId.FREE,
    );
    const mockResponse = {
      totalTokens: 100,
    };
    vi.spyOn(server, 'requestPost').mockResolvedValue(mockResponse);

    const response = await server.countTokens({
      model: 'test-model',
      contents: [{ role: 'user', parts: [{ text: 'request' }] }],
    });
    expect(response.totalTokens).toBe(100);
  });

  it('should throw an error for embedContent', async () => {
    const client = new OAuth2Client();
    const server = new CodeAssistServer(
      client,
      'test-project',
      {},
      'test-session',
      UserTierId.FREE,
    );
    await expect(
      server.embedContent({
        model: 'test-model',
        contents: [{ role: 'user', parts: [{ text: 'request' }] }],
      }),
    ).rejects.toThrow();
  });

  it('should handle VPC-SC errors when calling loadCodeAssist', async () => {
    const client = new OAuth2Client();
    const server = new CodeAssistServer(
      client,
      'test-project',
      {},
      'test-session',
      UserTierId.FREE,
    );
    const mockVpcScError = {
      response: {
        data: {
          error: {
            details: [
              {
                reason: 'SECURITY_POLICY_VIOLATED',
              },
            ],
          },
        },
      },
    };
    vi.spyOn(server, 'requestPost').mockRejectedValue(mockVpcScError);

    const response = await server.loadCodeAssist({
      metadata: {},
    });

    expect(server.requestPost).toHaveBeenCalledWith(
      'loadCodeAssist',
      expect.any(Object),
    );
    expect(response).toEqual({
      currentTier: { id: UserTierId.STANDARD },
    });
  });
});
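The streaming test above builds its mock as an immediately-invoked async generator (`(async function* () { ... })()`), which is what satisfies the `AsyncGenerator` return type of `requestStreamingPost`. A self-contained sketch of the trick, independent of the code above:

// Sketch: a canned async stream for tests, hypothetical helper name.
async function* makeStream<T>(items: T[]): AsyncGenerator<T> {
  for (const item of items) {
    yield item; // each yield is one "chunk" a consumer awaits
  }
}

async function demo() {
  for await (const chunk of makeStream(['a', 'b'])) {
    console.log(chunk); // 'a', then 'b'
  }
}

Because the generator is lazy, nothing runs until the code under test iterates it, which keeps assertions inside the `for await` loop valid.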
253
packages/core/src/code_assist/server.ts
Normal file
@@ -0,0 +1,253 @@
/**
 * @license
 * Copyright 2025 Google LLC
 * SPDX-License-Identifier: Apache-2.0
 */

import type { OAuth2Client } from 'google-auth-library';
import type {
  CodeAssistGlobalUserSettingResponse,
  GoogleRpcResponse,
  LoadCodeAssistRequest,
  LoadCodeAssistResponse,
  LongRunningOperationResponse,
  OnboardUserRequest,
  SetCodeAssistGlobalUserSettingRequest,
} from './types.js';
import type {
  CountTokensParameters,
  CountTokensResponse,
  EmbedContentParameters,
  EmbedContentResponse,
  GenerateContentParameters,
  GenerateContentResponse,
} from '@google/genai';
import * as readline from 'node:readline';
import type { ContentGenerator } from '../core/contentGenerator.js';
import { UserTierId } from './types.js';
import type {
  CaCountTokenResponse,
  CaGenerateContentResponse,
} from './converter.js';
import {
  fromCountTokenResponse,
  fromGenerateContentResponse,
  toCountTokenRequest,
  toGenerateContentRequest,
} from './converter.js';

/** HTTP options to be used in each of the requests. */
export interface HttpOptions {
  /** Additional HTTP headers to be sent with the request. */
  headers?: Record<string, string>;
}

export const CODE_ASSIST_ENDPOINT = 'https://localhost:0'; // Disable Google Code Assist API Request
export const CODE_ASSIST_API_VERSION = 'v1internal';

export class CodeAssistServer implements ContentGenerator {
  constructor(
    readonly client: OAuth2Client,
    readonly projectId?: string,
    readonly httpOptions: HttpOptions = {},
    readonly sessionId?: string,
    readonly userTier?: UserTierId,
  ) {}

  async generateContentStream(
    req: GenerateContentParameters,
    userPromptId: string,
  ): Promise<AsyncGenerator<GenerateContentResponse>> {
    const resps = await this.requestStreamingPost<CaGenerateContentResponse>(
      'streamGenerateContent',
      toGenerateContentRequest(
        req,
        userPromptId,
        this.projectId,
        this.sessionId,
      ),
      req.config?.abortSignal,
    );
    return (async function* (): AsyncGenerator<GenerateContentResponse> {
      for await (const resp of resps) {
        yield fromGenerateContentResponse(resp);
      }
    })();
  }

  async generateContent(
    req: GenerateContentParameters,
    userPromptId: string,
  ): Promise<GenerateContentResponse> {
    const resp = await this.requestPost<CaGenerateContentResponse>(
      'generateContent',
      toGenerateContentRequest(
        req,
        userPromptId,
        this.projectId,
        this.sessionId,
      ),
      req.config?.abortSignal,
    );
    return fromGenerateContentResponse(resp);
  }

  async onboardUser(
    req: OnboardUserRequest,
  ): Promise<LongRunningOperationResponse> {
    return await this.requestPost<LongRunningOperationResponse>(
      'onboardUser',
      req,
    );
  }

  async loadCodeAssist(
    req: LoadCodeAssistRequest,
  ): Promise<LoadCodeAssistResponse> {
    try {
      return await this.requestPost<LoadCodeAssistResponse>(
        'loadCodeAssist',
        req,
      );
    } catch (e) {
      if (isVpcScAffectedUser(e)) {
        return {
          currentTier: { id: UserTierId.STANDARD },
        };
      } else {
        throw e;
      }
    }
  }

  async getCodeAssistGlobalUserSetting(): Promise<CodeAssistGlobalUserSettingResponse> {
    return await this.requestGet<CodeAssistGlobalUserSettingResponse>(
      'getCodeAssistGlobalUserSetting',
    );
  }

  async setCodeAssistGlobalUserSetting(
    req: SetCodeAssistGlobalUserSettingRequest,
  ): Promise<CodeAssistGlobalUserSettingResponse> {
    return await this.requestPost<CodeAssistGlobalUserSettingResponse>(
      'setCodeAssistGlobalUserSetting',
      req,
    );
  }

  async countTokens(req: CountTokensParameters): Promise<CountTokensResponse> {
    const resp = await this.requestPost<CaCountTokenResponse>(
      'countTokens',
      toCountTokenRequest(req),
    );
    return fromCountTokenResponse(resp);
  }

  async embedContent(
    _req: EmbedContentParameters,
  ): Promise<EmbedContentResponse> {
    throw Error();
  }

  async requestPost<T>(
    method: string,
    req: object,
    signal?: AbortSignal,
  ): Promise<T> {
    const res = await this.client.request({
      url: this.getMethodUrl(method),
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
        ...this.httpOptions.headers,
      },
      responseType: 'json',
      body: JSON.stringify(req),
      signal,
    });
    return res.data as T;
  }

  async requestGet<T>(method: string, signal?: AbortSignal): Promise<T> {
    const res = await this.client.request({
      url: this.getMethodUrl(method),
      method: 'GET',
      headers: {
        'Content-Type': 'application/json',
        ...this.httpOptions.headers,
      },
      responseType: 'json',
      signal,
    });
    return res.data as T;
  }

  async requestStreamingPost<T>(
    method: string,
    req: object,
    signal?: AbortSignal,
  ): Promise<AsyncGenerator<T>> {
    const res = await this.client.request({
      url: this.getMethodUrl(method),
      method: 'POST',
      params: {
        alt: 'sse',
      },
      headers: {
        'Content-Type': 'application/json',
        ...this.httpOptions.headers,
      },
      responseType: 'stream',
      body: JSON.stringify(req),
      signal,
    });

    return (async function* (): AsyncGenerator<T> {
      const rl = readline.createInterface({
        input: res.data as NodeJS.ReadableStream,
        crlfDelay: Infinity, // Recognizes '\r\n' and '\n' as line breaks
      });

      let bufferedLines: string[] = [];
      for await (const line of rl) {
        // blank lines are used to separate JSON objects in the stream
        if (line === '') {
          if (bufferedLines.length === 0) {
            continue; // no data to yield
          }
          yield JSON.parse(bufferedLines.join('\n')) as T;
          bufferedLines = []; // Reset the buffer after yielding
        } else if (line.startsWith('data: ')) {
          bufferedLines.push(line.slice(6).trim());
        } else {
          throw new Error(`Unexpected line format in response: ${line}`);
        }
      }
    })();
  }

  getMethodUrl(method: string): string {
    const endpoint =
      process.env['CODE_ASSIST_ENDPOINT'] ?? CODE_ASSIST_ENDPOINT;
    return `${endpoint}/${CODE_ASSIST_API_VERSION}:${method}`;
  }
}

function isVpcScAffectedUser(error: unknown): boolean {
  if (error && typeof error === 'object' && 'response' in error) {
    const gaxiosError = error as {
      response?: {
        data?: unknown;
      };
    };
    const response = gaxiosError.response?.data as
      | GoogleRpcResponse
      | undefined;
    if (Array.isArray(response?.error?.details)) {
      return response.error.details.some(
        (detail) => detail.reason === 'SECURITY_POLICY_VIOLATED',
      );
    }
  }
  return false;
}
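`requestStreamingPost` above parses the `alt=sse` response by hand: payload arrives on `data: ` lines, one event may span several of them, and a blank line terminates the event, at which point the buffered payload is parsed as JSON. A standalone sketch of that framing, using an array of lines in place of the HTTP stream:

// Sketch: SSE-style framing over an in-memory list of lines.
// (Unlike the real parser above, this sketch skips unexpected lines
// instead of throwing.)
function parseSseLines(lines: string[]): unknown[] {
  const events: unknown[] = [];
  let buffered: string[] = [];
  for (const line of lines) {
    if (line === '') {
      if (buffered.length > 0) {
        events.push(JSON.parse(buffered.join('\n')));
        buffered = [];
      }
    } else if (line.startsWith('data: ')) {
      buffered.push(line.slice(6).trim());
    }
  }
  return events;
}

// parseSseLines(['data: {"n":', 'data: 1}', '']) yields [{ n: 1 }]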
224
packages/core/src/code_assist/setup.test.ts
Normal file
@@ -0,0 +1,224 @@
/**
 * @license
 * Copyright 2025 Google LLC
 * SPDX-License-Identifier: Apache-2.0
 */

import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
import { setupUser, ProjectIdRequiredError } from './setup.js';
import { CodeAssistServer } from '../code_assist/server.js';
import type { OAuth2Client } from 'google-auth-library';
import type { GeminiUserTier } from './types.js';
import { UserTierId } from './types.js';

vi.mock('../code_assist/server.js');

const mockPaidTier: GeminiUserTier = {
  id: UserTierId.STANDARD,
  name: 'paid',
  description: 'Paid tier',
  isDefault: true,
};

const mockFreeTier: GeminiUserTier = {
  id: UserTierId.FREE,
  name: 'free',
  description: 'Free tier',
  isDefault: true,
};

describe('setupUser for existing user', () => {
  let mockLoad: ReturnType<typeof vi.fn>;
  let mockOnboardUser: ReturnType<typeof vi.fn>;

  beforeEach(() => {
    vi.resetAllMocks();
    mockLoad = vi.fn();
    mockOnboardUser = vi.fn().mockResolvedValue({
      done: true,
      response: {
        cloudaicompanionProject: {
          id: 'server-project',
        },
      },
    });
    vi.mocked(CodeAssistServer).mockImplementation(
      () =>
        ({
          loadCodeAssist: mockLoad,
          onboardUser: mockOnboardUser,
        }) as unknown as CodeAssistServer,
    );
  });

  afterEach(() => {
    vi.unstubAllEnvs();
  });

  it('should use GOOGLE_CLOUD_PROJECT when set and project from server is undefined', async () => {
    vi.stubEnv('GOOGLE_CLOUD_PROJECT', 'test-project');
    mockLoad.mockResolvedValue({
      currentTier: mockPaidTier,
    });
    await setupUser({} as OAuth2Client);
    expect(CodeAssistServer).toHaveBeenCalledWith(
      {},
      'test-project',
      {},
      '',
      undefined,
    );
  });

  it('should ignore GOOGLE_CLOUD_PROJECT when project from server is set', async () => {
    vi.stubEnv('GOOGLE_CLOUD_PROJECT', 'test-project');
    mockLoad.mockResolvedValue({
      cloudaicompanionProject: 'server-project',
      currentTier: mockPaidTier,
    });
    const projectId = await setupUser({} as OAuth2Client);
    expect(CodeAssistServer).toHaveBeenCalledWith(
      {},
      'test-project',
      {},
      '',
      undefined,
    );
    expect(projectId).toEqual({
      projectId: 'server-project',
      userTier: 'standard-tier',
    });
  });

  it('should throw ProjectIdRequiredError when no project ID is available', async () => {
    vi.stubEnv('GOOGLE_CLOUD_PROJECT', '');
    // And the server itself requires a project ID internally
    vi.mocked(CodeAssistServer).mockImplementation(() => {
      throw new ProjectIdRequiredError();
    });

    await expect(setupUser({} as OAuth2Client)).rejects.toThrow(
      ProjectIdRequiredError,
    );
  });
});

describe('setupUser for new user', () => {
  let mockLoad: ReturnType<typeof vi.fn>;
  let mockOnboardUser: ReturnType<typeof vi.fn>;

  beforeEach(() => {
    vi.resetAllMocks();
    mockLoad = vi.fn();
    mockOnboardUser = vi.fn().mockResolvedValue({
      done: true,
      response: {
        cloudaicompanionProject: {
          id: 'server-project',
        },
      },
    });
    vi.mocked(CodeAssistServer).mockImplementation(
      () =>
        ({
          loadCodeAssist: mockLoad,
          onboardUser: mockOnboardUser,
        }) as unknown as CodeAssistServer,
    );
  });

  afterEach(() => {
    vi.unstubAllEnvs();
  });

  it('should use GOOGLE_CLOUD_PROJECT when set and onboard a new paid user', async () => {
    vi.stubEnv('GOOGLE_CLOUD_PROJECT', 'test-project');
    mockLoad.mockResolvedValue({
      allowedTiers: [mockPaidTier],
    });
    const userData = await setupUser({} as OAuth2Client);
    expect(CodeAssistServer).toHaveBeenCalledWith(
      {},
      'test-project',
      {},
      '',
      undefined,
    );
    expect(mockLoad).toHaveBeenCalled();
    expect(mockOnboardUser).toHaveBeenCalledWith({
      tierId: 'standard-tier',
      cloudaicompanionProject: 'test-project',
      metadata: {
        ideType: 'IDE_UNSPECIFIED',
        platform: 'PLATFORM_UNSPECIFIED',
        pluginType: 'GEMINI',
        duetProject: 'test-project',
      },
    });
    expect(userData).toEqual({
      projectId: 'server-project',
      userTier: 'standard-tier',
    });
  });

  it('should onboard a new free user when GOOGLE_CLOUD_PROJECT is not set', async () => {
    vi.stubEnv('GOOGLE_CLOUD_PROJECT', '');
    mockLoad.mockResolvedValue({
      allowedTiers: [mockFreeTier],
    });
    const userData = await setupUser({} as OAuth2Client);
    expect(CodeAssistServer).toHaveBeenCalledWith(
      {},
      undefined,
      {},
      '',
      undefined,
    );
    expect(mockLoad).toHaveBeenCalled();
    expect(mockOnboardUser).toHaveBeenCalledWith({
      tierId: 'free-tier',
      cloudaicompanionProject: undefined,
      metadata: {
        ideType: 'IDE_UNSPECIFIED',
        platform: 'PLATFORM_UNSPECIFIED',
        pluginType: 'GEMINI',
      },
    });
    expect(userData).toEqual({
      projectId: 'server-project',
      userTier: 'free-tier',
    });
  });

  it('should use GOOGLE_CLOUD_PROJECT when onboard response has no project ID', async () => {
    vi.stubEnv('GOOGLE_CLOUD_PROJECT', 'test-project');
    mockLoad.mockResolvedValue({
      allowedTiers: [mockPaidTier],
    });
    mockOnboardUser.mockResolvedValue({
      done: true,
      response: {
        cloudaicompanionProject: undefined,
      },
    });
    const userData = await setupUser({} as OAuth2Client);
    expect(userData).toEqual({
      projectId: 'test-project',
      userTier: 'standard-tier',
    });
  });

  it('should throw ProjectIdRequiredError when no project ID is available', async () => {
    vi.stubEnv('GOOGLE_CLOUD_PROJECT', '');
    mockLoad.mockResolvedValue({
      allowedTiers: [mockPaidTier],
    });
    mockOnboardUser.mockResolvedValue({
      done: true,
      response: {},
    });
    await expect(setupUser({} as OAuth2Client)).rejects.toThrow(
      ProjectIdRequiredError,
    );
  });
});
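These tests replace the whole `CodeAssistServer` class: `vi.mock(...)` swaps the module, and `vi.mocked(CodeAssistServer).mockImplementation(...)` returns a plain object cast to the class type, so only the two methods `setupUser` touches need to exist. A reduced sketch of the same pattern, with hypothetical module and class names:

// Sketch: class-constructor mocking with vitest (hypothetical module/class).
import { vi } from 'vitest';
import { Client } from './client.js';

vi.mock('./client.js');

const fakeFetch = vi.fn().mockResolvedValue({ ok: true });
vi.mocked(Client).mockImplementation(
  () => ({ fetch: fakeFetch }) as unknown as Client,
);

// Code under test that does `new Client().fetch()` now hits fakeFetch,
// and `expect(Client).toHaveBeenCalledWith(...)` can assert constructor args.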
124
packages/core/src/code_assist/setup.ts
Normal file
@@ -0,0 +1,124 @@
/**
 * @license
 * Copyright 2025 Google LLC
 * SPDX-License-Identifier: Apache-2.0
 */

import type {
  ClientMetadata,
  GeminiUserTier,
  LoadCodeAssistResponse,
  OnboardUserRequest,
} from './types.js';
import { UserTierId } from './types.js';
import { CodeAssistServer } from './server.js';
import type { OAuth2Client } from 'google-auth-library';

export class ProjectIdRequiredError extends Error {
  constructor() {
    super(
      'This account requires setting the GOOGLE_CLOUD_PROJECT env var. See https://goo.gle/gemini-cli-auth-docs#workspace-gca',
    );
  }
}

export interface UserData {
  projectId: string;
  userTier: UserTierId;
}

/**
 * @param client an authorized OAuth2 client
 * @returns the user's actual project id and tier
 */
export async function setupUser(client: OAuth2Client): Promise<UserData> {
  const projectId = process.env['GOOGLE_CLOUD_PROJECT'] || undefined;
  const caServer = new CodeAssistServer(client, projectId, {}, '', undefined);
  const coreClientMetadata: ClientMetadata = {
    ideType: 'IDE_UNSPECIFIED',
    platform: 'PLATFORM_UNSPECIFIED',
    pluginType: 'GEMINI',
  };

  const loadRes = await caServer.loadCodeAssist({
    cloudaicompanionProject: projectId,
    metadata: {
      ...coreClientMetadata,
      duetProject: projectId,
    },
  });

  if (loadRes.currentTier) {
    if (!loadRes.cloudaicompanionProject) {
      if (projectId) {
        return {
          projectId,
          userTier: loadRes.currentTier.id,
        };
      }
      throw new ProjectIdRequiredError();
    }
    return {
      projectId: loadRes.cloudaicompanionProject,
      userTier: loadRes.currentTier.id,
    };
  }

  const tier = getOnboardTier(loadRes);

  let onboardReq: OnboardUserRequest;
  if (tier.id === UserTierId.FREE) {
    // The free tier uses a managed google cloud project. Setting a project in the `onboardUser` request causes a `Precondition Failed` error.
    onboardReq = {
      tierId: tier.id,
      cloudaicompanionProject: undefined,
      metadata: coreClientMetadata,
    };
  } else {
    onboardReq = {
      tierId: tier.id,
      cloudaicompanionProject: projectId,
      metadata: {
        ...coreClientMetadata,
        duetProject: projectId,
      },
    };
  }

  // Poll onboardUser until the long-running operation is complete.
  let lroRes = await caServer.onboardUser(onboardReq);
  while (!lroRes.done) {
    await new Promise((f) => setTimeout(f, 5000));
    lroRes = await caServer.onboardUser(onboardReq);
  }

  if (!lroRes.response?.cloudaicompanionProject?.id) {
    if (projectId) {
      return {
        projectId,
        userTier: tier.id,
      };
    }
    throw new ProjectIdRequiredError();
  }

  return {
    projectId: lroRes.response.cloudaicompanionProject.id,
    userTier: tier.id,
  };
}

function getOnboardTier(res: LoadCodeAssistResponse): GeminiUserTier {
  for (const tier of res.allowedTiers || []) {
    if (tier.isDefault) {
      return tier;
    }
  }
  return {
    name: '',
    description: '',
    id: UserTierId.LEGACY,
    userDefinedCloudaicompanionProject: true,
  };
}
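`setupUser` above polls `onboardUser` every 5 seconds until the long-running operation reports `done`, with no upper bound on attempts. A generic sketch of that loop follows; the attempt cap is an addition of this sketch, not something the code above does:

// Sketch: poll a long-running operation until done.
// The maxAttempts cap is an assumption of this sketch; the real loop has no cap.
interface Lro<T> {
  done?: boolean;
  response?: T;
}

async function pollUntilDone<T>(
  start: () => Promise<Lro<T>>,
  intervalMs = 5000,
  maxAttempts = 60,
): Promise<Lro<T>> {
  let res = await start();
  for (let i = 1; !res.done && i < maxAttempts; i++) {
    await new Promise((resolve) => setTimeout(resolve, intervalMs));
    res = await start(); // re-issuing the request returns the operation's latest status
  }
  return res;
}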
201
packages/core/src/code_assist/types.ts
Normal file
@@ -0,0 +1,201 @@
/**
 * @license
 * Copyright 2025 Google LLC
 * SPDX-License-Identifier: Apache-2.0
 */

export interface ClientMetadata {
  ideType?: ClientMetadataIdeType;
  ideVersion?: string;
  pluginVersion?: string;
  platform?: ClientMetadataPlatform;
  updateChannel?: string;
  duetProject?: string;
  pluginType?: ClientMetadataPluginType;
  ideName?: string;
}

export type ClientMetadataIdeType =
  | 'IDE_UNSPECIFIED'
  | 'VSCODE'
  | 'INTELLIJ'
  | 'VSCODE_CLOUD_WORKSTATION'
  | 'INTELLIJ_CLOUD_WORKSTATION'
  | 'CLOUD_SHELL';
export type ClientMetadataPlatform =
  | 'PLATFORM_UNSPECIFIED'
  | 'DARWIN_AMD64'
  | 'DARWIN_ARM64'
  | 'LINUX_AMD64'
  | 'LINUX_ARM64'
  | 'WINDOWS_AMD64';
export type ClientMetadataPluginType =
  | 'PLUGIN_UNSPECIFIED'
  | 'CLOUD_CODE'
  | 'GEMINI'
  | 'AIPLUGIN_INTELLIJ'
  | 'AIPLUGIN_STUDIO';

export interface LoadCodeAssistRequest {
  cloudaicompanionProject?: string;
  metadata: ClientMetadata;
}

/**
 * Represents LoadCodeAssistResponse proto json field
 * http://google3/google/internal/cloud/code/v1internal/cloudcode.proto;l=224
 */
export interface LoadCodeAssistResponse {
  currentTier?: GeminiUserTier | null;
  allowedTiers?: GeminiUserTier[] | null;
  ineligibleTiers?: IneligibleTier[] | null;
  cloudaicompanionProject?: string | null;
}

/**
 * GeminiUserTier reflects the structure received from CodeAssist when calling LoadCodeAssist.
 */
export interface GeminiUserTier {
  id: UserTierId;
  name?: string;
  description?: string;
  // This value is used to declare whether a given tier requires the user to configure the project setting on the IDE settings or not.
  userDefinedCloudaicompanionProject?: boolean | null;
  isDefault?: boolean;
  privacyNotice?: PrivacyNotice;
  hasAcceptedTos?: boolean;
  hasOnboardedPreviously?: boolean;
}

/**
 * Includes information specifying the reasons for a user's ineligibility for a specific tier.
 * @param reasonCode mnemonic code representing the reason for ineligibility.
 * @param reasonMessage message to display to the user.
 * @param tierId id of the tier.
 * @param tierName name of the tier.
 */
export interface IneligibleTier {
  reasonCode: IneligibleTierReasonCode;
  reasonMessage: string;
  tierId: UserTierId;
  tierName: string;
}

/**
 * List of predefined reason codes when a user is blocked from a specific tier.
 * https://source.corp.google.com/piper///depot/google3/google/internal/cloud/code/v1internal/cloudcode.proto;l=378
 */
export enum IneligibleTierReasonCode {
  // go/keep-sorted start
  DASHER_USER = 'DASHER_USER',
  INELIGIBLE_ACCOUNT = 'INELIGIBLE_ACCOUNT',
  NON_USER_ACCOUNT = 'NON_USER_ACCOUNT',
  RESTRICTED_AGE = 'RESTRICTED_AGE',
  RESTRICTED_NETWORK = 'RESTRICTED_NETWORK',
  UNKNOWN = 'UNKNOWN',
  UNKNOWN_LOCATION = 'UNKNOWN_LOCATION',
  UNSUPPORTED_LOCATION = 'UNSUPPORTED_LOCATION',
  // go/keep-sorted end
}

/**
 * UserTierId represents IDs returned from the Cloud Code Private API representing a user's tier
 *
 * //depot/google3/cloud/developer_experience/cloudcode/pa/service/usertier.go;l=16
 */
export enum UserTierId {
  FREE = 'free-tier',
  LEGACY = 'legacy-tier',
  STANDARD = 'standard-tier',
}

/**
 * PrivacyNotice reflects the structure received from CodeAssist in regards to a tier's
 * privacy notice.
 */
export interface PrivacyNotice {
  showNotice: boolean;
  noticeText?: string;
}

/**
 * Proto signature of OnboardUserRequest as payload to the OnboardUser call
 */
export interface OnboardUserRequest {
  tierId: string | undefined;
  cloudaicompanionProject: string | undefined;
  metadata: ClientMetadata | undefined;
}

/**
 * Represents LongRunningOperation proto
 * http://google3/google/longrunning/operations.proto;rcl=698857719;l=107
 */
export interface LongRunningOperationResponse {
  name: string;
  done?: boolean;
  response?: OnboardUserResponse;
}

/**
 * Represents OnboardUserResponse proto
 * http://google3/google/internal/cloud/code/v1internal/cloudcode.proto;l=215
 */
export interface OnboardUserResponse {
  // tslint:disable-next-line:enforce-name-casing This is the name of the field in the proto.
  cloudaicompanionProject?: {
    id: string;
    name: string;
  };
}

/**
 * Status code of user license status.
 * It does not strictly correspond to the proto;
 * the Error value is an additional value assigned to error responses from OnboardUser.
 */
export enum OnboardUserStatusCode {
  Default = 'DEFAULT',
  Notice = 'NOTICE',
  Warning = 'WARNING',
  Error = 'ERROR',
}

/**
 * Status of a user onboarded to Gemini
 */
export interface OnboardUserStatus {
  statusCode: OnboardUserStatusCode;
  displayMessage: string;
  helpLink: HelpLinkUrl | undefined;
}

export interface HelpLinkUrl {
  description: string;
  url: string;
}

export interface SetCodeAssistGlobalUserSettingRequest {
  cloudaicompanionProject?: string;
  freeTierDataCollectionOptin: boolean;
}

export interface CodeAssistGlobalUserSettingResponse {
  cloudaicompanionProject?: string;
  freeTierDataCollectionOptin: boolean;
}

/**
 * Relevant fields that can be returned from a Google RPC response
 */
export interface GoogleRpcResponse {
  error?: {
    details?: GoogleRpcErrorInfo[];
  };
}

/**
 * Relevant fields that can be returned in the details of an error returned from Google RPCs
 */
interface GoogleRpcErrorInfo {
  reason?: string;
}
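Since `UserTierId` is a string enum, values arriving from the wire are plain strings; a small guard can narrow them before use. The helper below is hypothetical, not part of the diff:

// Sketch: narrowing a wire string to the UserTierId enum (hypothetical helper).
function isUserTierId(value: string): value is UserTierId {
  return (Object.values(UserTierId) as string[]).includes(value);
}

// isUserTierId('standard-tier') === true; isUserTierId('gold-tier') === false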
@@ -16,6 +16,7 @@ import {
  QwenLogger,
} from '../telemetry/index.js';
import type { ContentGeneratorConfig } from '../core/contentGenerator.js';
import { DEFAULT_DASHSCOPE_BASE_URL } from '../core/openaiContentGenerator/constants.js';
import {
  AuthType,
  createContentGeneratorConfig,
@@ -272,7 +273,7 @@ describe('Server Config (config.ts)', () => {
      authType,
      {
        model: MODEL,
        baseUrl: undefined,
        baseUrl: DEFAULT_DASHSCOPE_BASE_URL,
      },
    );
    // Verify that contentGeneratorConfig is updated
@@ -282,6 +283,23 @@ describe('Server Config (config.ts)', () => {
    expect(config.isInFallbackMode()).toBe(false);
  });

  it('should strip thoughts when switching from GenAI to Vertex', async () => {
    const config = new Config(baseParams);

    vi.mocked(createContentGeneratorConfig).mockImplementation(
      (_: Config, authType: AuthType | undefined) =>
        ({ authType }) as unknown as ContentGeneratorConfig,
    );

    await config.refreshAuth(AuthType.USE_GEMINI);

    await config.refreshAuth(AuthType.LOGIN_WITH_GOOGLE);

    expect(
      config.getGeminiClient().stripThoughtsFromHistory,
    ).toHaveBeenCalledWith();
  });

  it('should not strip thoughts when switching from Vertex to GenAI', async () => {
    const config = new Config(baseParams);

|
||||
import type {
|
||||
ContentGenerator,
|
||||
ContentGeneratorConfig,
|
||||
AuthType,
|
||||
} from '../core/contentGenerator.js';
|
||||
import type { FallbackModelHandler } from '../fallback/types.js';
|
||||
import type { MCPOAuthConfig } from '../mcp/oauth-provider.js';
|
||||
@@ -27,6 +26,7 @@ import type { AnyToolInvocation } from '../tools/tools.js';
|
||||
import { BaseLlmClient } from '../core/baseLlmClient.js';
|
||||
import { GeminiClient } from '../core/client.js';
|
||||
import {
|
||||
AuthType,
|
||||
createContentGenerator,
|
||||
createContentGeneratorConfig,
|
||||
} from '../core/contentGenerator.js';
|
||||
@@ -54,7 +54,6 @@ import { canUseRipgrep } from '../utils/ripgrepUtils.js';
|
||||
import { RipGrepTool } from '../tools/ripGrep.js';
|
||||
import { ShellTool } from '../tools/shell.js';
|
||||
import { SmartEditTool } from '../tools/smart-edit.js';
|
||||
import { SkillTool } from '../tools/skill.js';
|
||||
import { TaskTool } from '../tools/task.js';
|
||||
import { TodoWriteTool } from '../tools/todoWrite.js';
|
||||
import { ToolRegistry } from '../tools/tool-registry.js';
|
||||
@@ -66,7 +65,6 @@ import { WriteFileTool } from '../tools/write-file.js';
|
||||
import { ideContextStore } from '../ide/ideContext.js';
|
||||
import { InputFormat, OutputFormat } from '../output/types.js';
|
||||
import { PromptRegistry } from '../prompts/prompt-registry.js';
|
||||
import { SkillManager } from '../skills/skill-manager.js';
|
||||
import { SubagentManager } from '../subagents/subagent-manager.js';
|
||||
import type { SubagentConfig } from '../subagents/types.js';
|
||||
import {
|
||||
@@ -96,6 +94,7 @@ import {
|
||||
} from './constants.js';
|
||||
import { DEFAULT_QWEN_EMBEDDING_MODEL, DEFAULT_QWEN_MODEL } from './models.js';
|
||||
import { Storage } from './storage.js';
|
||||
import { DEFAULT_DASHSCOPE_BASE_URL } from '../core/openaiContentGenerator/constants.js';
|
||||
import { ChatRecordingService } from '../services/chatRecordingService.js';
|
||||
import {
|
||||
SessionService,
|
||||
@@ -306,7 +305,6 @@ export interface ConfigParameters {
  extensionContextFilePaths?: string[];
  maxSessionTurns?: number;
  sessionTokenLimit?: number;
  experimentalSkills?: boolean;
  experimentalZedIntegration?: boolean;
  listExtensions?: boolean;
  extensions?: GeminiCLIExtension[];
@@ -391,7 +389,6 @@ export class Config {
  private toolRegistry!: ToolRegistry;
  private promptRegistry!: PromptRegistry;
  private subagentManager!: SubagentManager;
  private skillManager!: SkillManager;
  private fileSystemService: FileSystemService;
  private contentGeneratorConfig!: ContentGeneratorConfig;
  private contentGenerator!: ContentGenerator;
@@ -461,7 +458,6 @@ export class Config {
    | undefined;
  private readonly cliVersion?: string;
  private readonly experimentalZedIntegration: boolean = false;
  private readonly experimentalSkills: boolean = false;
  private readonly chatRecordingEnabled: boolean;
  private readonly loadMemoryFromIncludeDirectories: boolean = false;
  private readonly webSearch?: {
@@ -561,7 +557,6 @@ export class Config {
    this.sessionTokenLimit = params.sessionTokenLimit ?? -1;
    this.experimentalZedIntegration =
      params.experimentalZedIntegration ?? false;
    this.experimentalSkills = params.experimentalSkills ?? false;
    this.listExtensions = params.listExtensions ?? false;
    this._extensions = params.extensions ?? [];
    this._blockedMcpServers = params.blockedMcpServers ?? [];
@@ -573,7 +568,7 @@ export class Config {
    this._generationConfig = {
      model: params.model,
      ...(params.generationConfig || {}),
      baseUrl: params.generationConfig?.baseUrl,
      baseUrl: params.generationConfig?.baseUrl || DEFAULT_DASHSCOPE_BASE_URL,
    };
    this.contentGeneratorConfig = this
      ._generationConfig as ContentGeneratorConfig;
@@ -649,7 +644,6 @@ export class Config {
    }
    this.promptRegistry = new PromptRegistry();
    this.subagentManager = new SubagentManager(this);
    this.skillManager = new SkillManager(this);

    // Load session subagents if they were provided before initialization
    if (this.sessionSubagents.length > 0) {
@@ -690,6 +684,16 @@ export class Config {
  }

  async refreshAuth(authMethod: AuthType, isInitialAuth?: boolean) {
    // Vertex and GenAI have incompatible encryption, and sending history with
    // thoughtSignature from GenAI to Vertex will fail, so we need to strip them
    if (
      this.contentGeneratorConfig?.authType === AuthType.USE_GEMINI &&
      authMethod === AuthType.LOGIN_WITH_GOOGLE
    ) {
      // Restore the conversation history to the new client
      this.geminiClient.stripThoughtsFromHistory();
    }

    const newContentGeneratorConfig = createContentGeneratorConfig(
      this,
      authMethod,
@@ -1072,10 +1076,6 @@ export class Config {
    return this.experimentalZedIntegration;
  }

  getExperimentalSkills(): boolean {
    return this.experimentalSkills;
  }

  getListExtensions(): boolean {
    return this.listExtensions;
  }
@@ -1306,10 +1306,6 @@ export class Config {
    return this.subagentManager;
  }

  getSkillManager(): SkillManager {
    return this.skillManager;
  }

  async createToolRegistry(
    sendSdkMcpMessage?: SendSdkMcpMessage,
  ): Promise<ToolRegistry> {
@@ -1352,9 +1348,6 @@ export class Config {
    };

    registerCoreTool(TaskTool, this);
    if (this.getExperimentalSkills()) {
      registerCoreTool(SkillTool, this);
    }
    registerCoreTool(LSTool, this);
    registerCoreTool(ReadFileTool, this);

@@ -31,7 +31,7 @@ describe('Flash Model Fallback Configuration', () => {
      config as unknown as { contentGeneratorConfig: unknown }
    ).contentGeneratorConfig = {
      model: DEFAULT_GEMINI_MODEL,
      authType: 'gemini-api-key',
      authType: 'oauth-personal',
    };
  });

@@ -126,10 +126,6 @@ export class Storage {
    return path.join(this.getExtensionsDir(), 'qwen-extension.json');
  }

  getUserSkillsDir(): string {
    return path.join(Storage.getGlobalQwenDir(), 'skills');
  }

  getHistoryFilePath(): string {
    return path.join(this.getProjectTempDir(), 'shell_history');
  }

@@ -73,7 +73,6 @@ describe('OpenAIContentGenerator Timeout Handling', () => {
      }),
      buildClient: vi.fn().mockReturnValue(mockOpenAIClient),
      buildRequest: vi.fn().mockImplementation((req) => req),
      getDefaultGenerationConfig: vi.fn().mockReturnValue({}),
    };

    // Create generator instance
@@ -300,7 +299,6 @@ describe('OpenAIContentGenerator Timeout Handling', () => {
      }),
      buildClient: vi.fn().mockReturnValue(mockOpenAIClient),
      buildRequest: vi.fn().mockImplementation((req) => req),
      getDefaultGenerationConfig: vi.fn().mockReturnValue({}),
    };

    new OpenAIContentGenerator(
@@ -335,7 +333,6 @@ describe('OpenAIContentGenerator Timeout Handling', () => {
      }),
      buildClient: vi.fn().mockReturnValue(mockOpenAIClient),
      buildRequest: vi.fn().mockImplementation((req) => req),
      getDefaultGenerationConfig: vi.fn().mockReturnValue({}),
    };

    new OpenAIContentGenerator(

@@ -1,500 +0,0 @@
/**
 * @license
 * Copyright 2025 Qwen
 * SPDX-License-Identifier: Apache-2.0
 */

import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
import type {
  CountTokensParameters,
  GenerateContentParameters,
} from '@google/genai';
import { FinishReason, GenerateContentResponse } from '@google/genai';

// Mock the request tokenizer module BEFORE importing the class that uses it.
const mockTokenizer = {
  calculateTokens: vi.fn(),
  dispose: vi.fn(),
};

vi.mock('../../utils/request-tokenizer/index.js', () => ({
  getDefaultTokenizer: vi.fn(() => mockTokenizer),
  DefaultRequestTokenizer: vi.fn(() => mockTokenizer),
  disposeDefaultTokenizer: vi.fn(),
}));

type AnthropicCreateArgs = [unknown, { signal?: AbortSignal }?];

const anthropicMockState: {
  constructorOptions?: Record<string, unknown>;
  lastCreateArgs?: AnthropicCreateArgs;
  createImpl: ReturnType<typeof vi.fn>;
} = {
  constructorOptions: undefined,
  lastCreateArgs: undefined,
  createImpl: vi.fn(),
};

vi.mock('@anthropic-ai/sdk', () => {
  class AnthropicMock {
    messages: { create: (...args: AnthropicCreateArgs) => unknown };

    constructor(options: Record<string, unknown>) {
      anthropicMockState.constructorOptions = options;
      this.messages = {
        create: (...args: AnthropicCreateArgs) => {
          anthropicMockState.lastCreateArgs = args;
          return anthropicMockState.createImpl(...args);
        },
      };
    }
  }

  return {
    default: AnthropicMock,
    __anthropicState: anthropicMockState,
  };
});

// Now import the modules that depend on the mocked modules.
import type { Config } from '../../config/config.js';

const importGenerator = async (): Promise<{
  AnthropicContentGenerator: typeof import('./anthropicContentGenerator.js').AnthropicContentGenerator;
}> => import('./anthropicContentGenerator.js');

const importConverter = async (): Promise<{
  AnthropicContentConverter: typeof import('./converter.js').AnthropicContentConverter;
}> => import('./converter.js');

describe('AnthropicContentGenerator', () => {
  let mockConfig: Config;
  let anthropicState: {
    constructorOptions?: Record<string, unknown>;
    lastCreateArgs?: AnthropicCreateArgs;
    createImpl: ReturnType<typeof vi.fn>;
  };

  beforeEach(async () => {
    vi.clearAllMocks();
    vi.resetModules();

    mockTokenizer.calculateTokens.mockResolvedValue({
      totalTokens: 50,
      breakdown: {
        textTokens: 50,
        imageTokens: 0,
        audioTokens: 0,
        otherTokens: 0,
      },
      processingTime: 1,
    });
    anthropicState = anthropicMockState;

    anthropicState.createImpl.mockReset();
    anthropicState.lastCreateArgs = undefined;
    anthropicState.constructorOptions = undefined;

    mockConfig = {
      getCliVersion: vi.fn().mockReturnValue('1.2.3'),
    } as unknown as Config;
  });

  afterEach(() => {
    vi.restoreAllMocks();
  });

  it('passes a QwenCode User-Agent header to the Anthropic SDK', async () => {
    const { AnthropicContentGenerator } = await importGenerator();
    void new AnthropicContentGenerator(
      {
        model: 'claude-test',
        apiKey: 'test-key',
        baseUrl: 'https://example.invalid',
        timeout: 10_000,
        maxRetries: 2,
        samplingParams: {},
        schemaCompliance: 'auto',
      },
      mockConfig,
    );

    const headers = (anthropicState.constructorOptions?.['defaultHeaders'] ||
      {}) as Record<string, string>;
    expect(headers['User-Agent']).toContain('QwenCode/1.2.3');
    expect(headers['User-Agent']).toContain(
      `(${process.platform}; ${process.arch})`,
    );
  });

  it('adds the effort beta header when reasoning.effort is set', async () => {
    const { AnthropicContentGenerator } = await importGenerator();
    void new AnthropicContentGenerator(
      {
        model: 'claude-test',
        apiKey: 'test-key',
        baseUrl: 'https://example.invalid',
        timeout: 10_000,
        maxRetries: 2,
        samplingParams: {},
        schemaCompliance: 'auto',
        reasoning: { effort: 'medium' },
      },
      mockConfig,
    );

    const headers = (anthropicState.constructorOptions?.['defaultHeaders'] ||
      {}) as Record<string, string>;
    expect(headers['anthropic-beta']).toContain('effort-2025-11-24');
  });

  it('does not add the effort beta header when reasoning.effort is not set', async () => {
    const { AnthropicContentGenerator } = await importGenerator();
    void new AnthropicContentGenerator(
      {
        model: 'claude-test',
        apiKey: 'test-key',
        baseUrl: 'https://example.invalid',
        timeout: 10_000,
        maxRetries: 2,
        samplingParams: {},
        schemaCompliance: 'auto',
      },
      mockConfig,
    );

    const headers = (anthropicState.constructorOptions?.['defaultHeaders'] ||
      {}) as Record<string, string>;
    expect(headers['anthropic-beta']).not.toContain('effort-2025-11-24');
  });

  it('omits the anthropic beta header when reasoning is disabled', async () => {
    const { AnthropicContentGenerator } = await importGenerator();
    void new AnthropicContentGenerator(
      {
        model: 'claude-test',
        apiKey: 'test-key',
        baseUrl: 'https://example.invalid',
        timeout: 10_000,
        maxRetries: 2,
        samplingParams: {},
        schemaCompliance: 'auto',
        reasoning: false,
      },
      mockConfig,
    );

    const headers = (anthropicState.constructorOptions?.['defaultHeaders'] ||
      {}) as Record<string, string>;
    expect(headers['anthropic-beta']).toBeUndefined();
  });

  describe('generateContent', () => {
    it('builds request with config sampling params (config overrides request) and thinking budget', async () => {
      const { AnthropicContentConverter } = await importConverter();
      const { AnthropicContentGenerator } = await importGenerator();

      const convertResponseSpy = vi
        .spyOn(
          AnthropicContentConverter.prototype,
          'convertAnthropicResponseToGemini',
        )
        .mockReturnValue(
          (() => {
            const r = new GenerateContentResponse();
            r.responseId = 'gemini-1';
            return r;
          })(),
        );

      anthropicState.createImpl.mockResolvedValue({
        id: 'anthropic-1',
        model: 'claude-test',
        content: [{ type: 'text', text: 'hi' }],
      });

      const generator = new AnthropicContentGenerator(
        {
          model: 'claude-test',
          apiKey: 'test-key',
          baseUrl: 'https://example.invalid',
          timeout: 10_000,
          maxRetries: 2,
          samplingParams: {
            temperature: 0.7,
            max_tokens: 1000,
            top_p: 0.9,
            top_k: 20,
          },
          schemaCompliance: 'auto',
          reasoning: { effort: 'high', budget_tokens: 1000 },
        },
        mockConfig,
      );

      const abortController = new AbortController();
      const request: GenerateContentParameters = {
        model: 'models/ignored',
        contents: 'Hello',
        config: {
          temperature: 0.1,
          maxOutputTokens: 200,
          topP: 0.5,
          topK: 5,
          abortSignal: abortController.signal,
        },
      };

      const result = await generator.generateContent(request);
      expect(result.responseId).toBe('gemini-1');

      expect(anthropicState.lastCreateArgs).toBeDefined();
      const [anthropicRequest, options] =
        anthropicState.lastCreateArgs as AnthropicCreateArgs;

      expect(options?.signal).toBe(abortController.signal);

      expect(anthropicRequest).toEqual(
        expect.objectContaining({
          model: 'claude-test',
          max_tokens: 1000,
          temperature: 0.7,
          top_p: 0.9,
          top_k: 20,
          thinking: { type: 'enabled', budget_tokens: 1000 },
          output_config: { effort: 'high' },
        }),
      );

      expect(convertResponseSpy).toHaveBeenCalledTimes(1);
    });

    it('omits thinking when request.config.thinkingConfig.includeThoughts is false', async () => {
      const { AnthropicContentGenerator } = await importGenerator();
      anthropicState.createImpl.mockResolvedValue({
        id: 'anthropic-1',
        model: 'claude-test',
        content: [{ type: 'text', text: 'hi' }],
      });

      const generator = new AnthropicContentGenerator(
        {
          model: 'claude-test',
          apiKey: 'test-key',
          timeout: 10_000,
          maxRetries: 2,
          samplingParams: { max_tokens: 500 },
          schemaCompliance: 'auto',
          reasoning: { effort: 'high' },
        },
        mockConfig,
      );

      await generator.generateContent({
        model: 'models/ignored',
        contents: 'Hello',
        config: { thinkingConfig: { includeThoughts: false } },
      } as unknown as GenerateContentParameters);

      const [anthropicRequest] =
        anthropicState.lastCreateArgs as AnthropicCreateArgs;
      expect(anthropicRequest).toEqual(
        expect.not.objectContaining({ thinking: expect.anything() }),
      );
    });
  });

  describe('countTokens', () => {
    it('counts tokens using the request tokenizer', async () => {
      const { AnthropicContentGenerator } = await importGenerator();
      const generator = new AnthropicContentGenerator(
        {
          model: 'claude-test',
          apiKey: 'test-key',
          timeout: 10_000,
          maxRetries: 2,
          samplingParams: {},
          schemaCompliance: 'auto',
        },
        mockConfig,
      );

      const request: CountTokensParameters = {
        contents: [{ role: 'user', parts: [{ text: 'Hello world' }] }],
        model: 'claude-test',
      };

      const result = await generator.countTokens(request);
      expect(mockTokenizer.calculateTokens).toHaveBeenCalledWith(request, {
        textEncoding: 'cl100k_base',
      });
      expect(result.totalTokens).toBe(50);
    });

    it('falls back to character approximation when tokenizer throws', async () => {
      const { AnthropicContentGenerator } = await importGenerator();
      mockTokenizer.calculateTokens.mockRejectedValueOnce(new Error('boom'));
      const generator = new AnthropicContentGenerator(
        {
          model: 'claude-test',
          apiKey: 'test-key',
          timeout: 10_000,
          maxRetries: 2,
          samplingParams: {},
          schemaCompliance: 'auto',
        },
        mockConfig,
      );

      const request: CountTokensParameters = {
        contents: [{ role: 'user', parts: [{ text: 'Hello' }] }],
        model: 'claude-test',
      };

      const content = JSON.stringify(request.contents);
      const expected = Math.ceil(content.length / 4);
      const result = await generator.countTokens(request);
      expect(result.totalTokens).toBe(expected);
    });
  });

  describe('generateContentStream', () => {
    it('requests stream=true and converts streamed events into Gemini chunks', async () => {
      const { AnthropicContentGenerator } = await importGenerator();
      anthropicState.createImpl.mockResolvedValue(
        (async function* () {
          yield {
            type: 'message_start',
            message: {
              id: 'msg-1',
              model: 'claude-test',
              usage: { cache_read_input_tokens: 2, input_tokens: 3 },
            },
          };

          yield {
            type: 'content_block_start',
            index: 0,
            content_block: { type: 'text' },
          };
          yield {
            type: 'content_block_delta',
            index: 0,
            delta: { type: 'text_delta', text: 'Hello' },
          };
          yield { type: 'content_block_stop', index: 0 };

          yield {
            type: 'content_block_start',
            index: 1,
            content_block: { type: 'thinking', signature: '' },
          };
          yield {
            type: 'content_block_delta',
            index: 1,
            delta: { type: 'thinking_delta', thinking: 'Think' },
          };
          yield {
            type: 'content_block_delta',
            index: 1,
            delta: { type: 'signature_delta', signature: 'abc' },
          };
          yield { type: 'content_block_stop', index: 1 };

          yield {
            type: 'content_block_start',
            index: 2,
            content_block: {
              type: 'tool_use',
              id: 't1',
              name: 'tool',
              input: {},
            },
          };
          yield {
            type: 'content_block_delta',
            index: 2,
            delta: { type: 'input_json_delta', partial_json: '{"x":' },
          };
          yield {
            type: 'content_block_delta',
            index: 2,
            delta: { type: 'input_json_delta', partial_json: '1}' },
          };
          yield { type: 'content_block_stop', index: 2 };

          yield {
            type: 'message_delta',
            delta: { stop_reason: 'end_turn' },
            usage: {
              output_tokens: 5,
              input_tokens: 7,
              cache_read_input_tokens: 2,
            },
          };
          yield { type: 'message_stop' };
        })(),
      );

      const generator = new AnthropicContentGenerator(
        {
          model: 'claude-test',
          apiKey: 'test-key',
          timeout: 10_000,
          maxRetries: 2,
          samplingParams: { max_tokens: 123 },
          schemaCompliance: 'auto',
        },
        mockConfig,
      );

      const stream = await generator.generateContentStream({
        model: 'models/ignored',
        contents: 'Hello',
      } as unknown as GenerateContentParameters);

      const chunks: GenerateContentResponse[] = [];
      for await (const chunk of stream) {
        chunks.push(chunk);
      }

      const [anthropicRequest] =
        anthropicState.lastCreateArgs as AnthropicCreateArgs;
      expect(anthropicRequest).toEqual(
        expect.objectContaining({ stream: true }),
      );

      // Text chunk.
      expect(chunks[0]?.candidates?.[0]?.content?.parts?.[0]).toEqual({
        text: 'Hello',
      });

      // Thinking chunk.
      expect(chunks[1]?.candidates?.[0]?.content?.parts?.[0]).toEqual({
        text: 'Think',
        thought: true,
      });

      // Signature chunk.
      expect(chunks[2]?.candidates?.[0]?.content?.parts?.[0]).toEqual({
        thought: true,
        thoughtSignature: 'abc',
      });

      // Tool call chunk.
      expect(chunks[3]?.candidates?.[0]?.content?.parts?.[0]).toEqual({
        functionCall: { id: 't1', name: 'tool', args: { x: 1 } },
      });

      // Usage/finish chunks exist; check the last one.
      const last = chunks[chunks.length - 1]!;
      expect(last.candidates?.[0]?.finishReason).toBe(FinishReason.STOP);
      expect(last.usageMetadata).toEqual({
        cachedContentTokenCount: 2,
        promptTokenCount: 9, // cached(2) + input(7)
        candidatesTokenCount: 5,
        totalTokenCount: 14,
      });
    });
  });
});

@@ -1,502 +0,0 @@
/**
 * @license
 * Copyright 2025 Qwen
 * SPDX-License-Identifier: Apache-2.0
 */

import Anthropic from '@anthropic-ai/sdk';
import type {
  CountTokensParameters,
  CountTokensResponse,
  EmbedContentParameters,
  EmbedContentResponse,
  GenerateContentParameters,
  GenerateContentResponseUsageMetadata,
  Part,
} from '@google/genai';
import { GenerateContentResponse } from '@google/genai';
import type { Config } from '../../config/config.js';
import type {
  ContentGenerator,
  ContentGeneratorConfig,
} from '../contentGenerator.js';
type Message = Anthropic.Message;
type MessageCreateParamsNonStreaming =
  Anthropic.MessageCreateParamsNonStreaming;
type MessageCreateParamsStreaming = Anthropic.MessageCreateParamsStreaming;
type RawMessageStreamEvent = Anthropic.RawMessageStreamEvent;
import { getDefaultTokenizer } from '../../utils/request-tokenizer/index.js';
import { safeJsonParse } from '../../utils/safeJsonParse.js';
import { AnthropicContentConverter } from './converter.js';

type StreamingBlockState = {
  type: string;
  id?: string;
  name?: string;
  inputJson: string;
  signature: string;
};

type MessageCreateParamsWithThinking = MessageCreateParamsNonStreaming & {
  thinking?: { type: 'enabled'; budget_tokens: number };
  // Anthropic beta feature: output_config.effort (requires beta header effort-2025-11-24).
  // This is not yet represented in the official SDK types we depend on.
  output_config?: { effort: 'low' | 'medium' | 'high' };
};

export class AnthropicContentGenerator implements ContentGenerator {
  private client: Anthropic;
  private converter: AnthropicContentConverter;

  constructor(
    private contentGeneratorConfig: ContentGeneratorConfig,
    private readonly cliConfig: Config,
  ) {
    const defaultHeaders = this.buildHeaders();
    const baseURL = contentGeneratorConfig.baseUrl;

    this.client = new Anthropic({
      apiKey: contentGeneratorConfig.apiKey,
      baseURL,
      timeout: contentGeneratorConfig.timeout,
      maxRetries: contentGeneratorConfig.maxRetries,
      defaultHeaders,
    });

    this.converter = new AnthropicContentConverter(
      contentGeneratorConfig.model,
      contentGeneratorConfig.schemaCompliance,
    );
  }

  async generateContent(
    request: GenerateContentParameters,
  ): Promise<GenerateContentResponse> {
    const anthropicRequest = await this.buildRequest(request);
    const response = (await this.client.messages.create(anthropicRequest, {
      signal: request.config?.abortSignal,
    })) as Message;

    return this.converter.convertAnthropicResponseToGemini(response);
  }

  async generateContentStream(
    request: GenerateContentParameters,
  ): Promise<AsyncGenerator<GenerateContentResponse>> {
    const anthropicRequest = await this.buildRequest(request);
    const streamingRequest: MessageCreateParamsStreaming & {
      thinking?: { type: 'enabled'; budget_tokens: number };
    } = {
      ...anthropicRequest,
      stream: true,
    };

    const stream = (await this.client.messages.create(
      streamingRequest as MessageCreateParamsStreaming,
      {
        signal: request.config?.abortSignal,
      },
    )) as AsyncIterable<RawMessageStreamEvent>;

    return this.processStream(stream);
  }

  async countTokens(
    request: CountTokensParameters,
  ): Promise<CountTokensResponse> {
    try {
      const tokenizer = getDefaultTokenizer();
      const result = await tokenizer.calculateTokens(request, {
        textEncoding: 'cl100k_base',
      });

      return {
        totalTokens: result.totalTokens,
      };
    } catch (error) {
      console.warn(
        'Failed to calculate tokens with tokenizer, ' +
          'falling back to simple method:',
        error,
      );

      const content = JSON.stringify(request.contents);
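      // Fallback heuristic: assume roughly 4 characters per token.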
      const totalTokens = Math.ceil(content.length / 4);
      return {
        totalTokens,
      };
    }
  }

  async embedContent(
    _request: EmbedContentParameters,
  ): Promise<EmbedContentResponse> {
    throw new Error('Anthropic does not support embeddings.');
  }

  useSummarizedThinking(): boolean {
    return false;
  }

  private buildHeaders(): Record<string, string> {
    const version = this.cliConfig.getCliVersion() || 'unknown';
    const userAgent = `QwenCode/${version} (${process.platform}; ${process.arch})`;

    const betas: string[] = [];
    const reasoning = this.contentGeneratorConfig.reasoning;

    // Interleaved thinking is used when we send the `thinking` field.
    if (reasoning !== false) {
      betas.push('interleaved-thinking-2025-05-14');
    }

    // Effort (beta) is enabled when reasoning.effort is set.
    if (reasoning !== false && reasoning?.effort !== undefined) {
      betas.push('effort-2025-11-24');
    }

    const headers: Record<string, string> = {
      'User-Agent': userAgent,
    };

    if (betas.length) {
      headers['anthropic-beta'] = betas.join(',');
    }

    return headers;
  }

  private async buildRequest(
    request: GenerateContentParameters,
  ): Promise<MessageCreateParamsWithThinking> {
    const { system, messages } =
      this.converter.convertGeminiRequestToAnthropic(request);

    const tools = request.config?.tools
      ? await this.converter.convertGeminiToolsToAnthropic(request.config.tools)
      : undefined;

    const sampling = this.buildSamplingParameters(request);
    const thinking = this.buildThinkingConfig(request);
    const outputConfig = this.buildOutputConfig();

    return {
      model: this.contentGeneratorConfig.model,
      system,
      messages,
      tools,
      ...sampling,
      ...(thinking ? { thinking } : {}),
      ...(outputConfig ? { output_config: outputConfig } : {}),
    };
  }

  private buildSamplingParameters(request: GenerateContentParameters): {
    max_tokens: number;
    temperature?: number;
    top_p?: number;
    top_k?: number;
  } {
    const configSamplingParams = this.contentGeneratorConfig.samplingParams;
    const requestConfig = request.config || {};

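    // Config-level sampling params take precedence over per-request values.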
    const getParam = <T>(
      configKey: keyof NonNullable<typeof configSamplingParams>,
      requestKey?: keyof NonNullable<typeof requestConfig>,
    ): T | undefined => {
      const configValue = configSamplingParams?.[configKey] as T | undefined;
      const requestValue = requestKey
        ? (requestConfig[requestKey] as T | undefined)
        : undefined;
      return configValue !== undefined ? configValue : requestValue;
    };

    const maxTokens =
      getParam<number>('max_tokens', 'maxOutputTokens') ?? 10_000;

    return {
      max_tokens: maxTokens,
      temperature: getParam<number>('temperature', 'temperature') ?? 1,
      top_p: getParam<number>('top_p', 'topP'),
      top_k: getParam<number>('top_k', 'topK'),
    };
  }

  private buildThinkingConfig(
    request: GenerateContentParameters,
  ): { type: 'enabled'; budget_tokens: number } | undefined {
    if (request.config?.thinkingConfig?.includeThoughts === false) {
      return undefined;
    }

    const reasoning = this.contentGeneratorConfig.reasoning;

    if (reasoning === false) {
      return undefined;
    }

    if (reasoning?.budget_tokens !== undefined) {
      return {
        type: 'enabled',
        budget_tokens: reasoning.budget_tokens,
      };
    }

    const effort = reasoning?.effort;
    // With interleaved thinking and tools, the budget may extend up to the
    // entire context window (200k tokens); these tiers stay well below that.
    const budgetTokens =
      effort === 'low' ? 16_000 : effort === 'high' ? 64_000 : 32_000;

    return {
      type: 'enabled',
      budget_tokens: budgetTokens,
    };
  }

  private buildOutputConfig():
    | { effort: 'low' | 'medium' | 'high' }
    | undefined {
    const reasoning = this.contentGeneratorConfig.reasoning;
    if (reasoning === false || reasoning === undefined) {
      return undefined;
    }

    if (reasoning.effort === undefined) {
      return undefined;
    }

    return { effort: reasoning.effort };
  }

  private async *processStream(
    stream: AsyncIterable<RawMessageStreamEvent>,
  ): AsyncGenerator<GenerateContentResponse> {
    let messageId: string | undefined;
    let model = this.contentGeneratorConfig.model;
    let cachedTokens = 0;
    let promptTokens = 0;
    let completionTokens = 0;
    let finishReason: string | undefined;

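    // Track in-flight content blocks by stream index so deltas can be accumulated.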
    const blocks = new Map<number, StreamingBlockState>();
    const collectedResponses: GenerateContentResponse[] = [];

    for await (const event of stream) {
      switch (event.type) {
        case 'message_start': {
          messageId = event.message.id ?? messageId;
          model = event.message.model ?? model;
          cachedTokens =
            event.message.usage?.cache_read_input_tokens ?? cachedTokens;
          promptTokens = event.message.usage?.input_tokens ?? promptTokens;
          break;
        }
        case 'content_block_start': {
          const index = event.index ?? 0;
          const type = String(event.content_block.type || 'text');
          const initialInput =
            type === 'tool_use' && 'input' in event.content_block
              ? JSON.stringify(event.content_block.input)
              : '';
          blocks.set(index, {
            type,
            id:
              'id' in event.content_block ? event.content_block.id : undefined,
            name:
              'name' in event.content_block
                ? event.content_block.name
                : undefined,
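            // tool_use input arrives via input_json_delta events; drop the empty "{}" placeholder.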
            inputJson: initialInput !== '{}' ? initialInput : '',
            signature:
              type === 'thinking' &&
              'signature' in event.content_block &&
              typeof event.content_block.signature === 'string'
                ? event.content_block.signature
                : '',
          });
          break;
        }
        case 'content_block_delta': {
          const index = event.index ?? 0;
          const deltaType = (event.delta as { type?: string }).type || '';
          const blockState = blocks.get(index);

          if (deltaType === 'text_delta') {
            const text = 'text' in event.delta ? event.delta.text : '';
            if (text) {
              const chunk = this.buildGeminiChunk({ text }, messageId, model);
              collectedResponses.push(chunk);
              yield chunk;
            }
          } else if (deltaType === 'thinking_delta') {
            const thinking =
              (event.delta as { thinking?: string }).thinking || '';
            if (thinking) {
              const chunk = this.buildGeminiChunk(
                { text: thinking, thought: true },
                messageId,
                model,
              );
              collectedResponses.push(chunk);
              yield chunk;
            }
          } else if (deltaType === 'signature_delta' && blockState) {
            const signature =
              (event.delta as { signature?: string }).signature || '';
            if (signature) {
              blockState.signature += signature;
              const chunk = this.buildGeminiChunk(
                { thought: true, thoughtSignature: signature },
                messageId,
                model,
              );
              collectedResponses.push(chunk);
              yield chunk;
            }
          } else if (deltaType === 'input_json_delta' && blockState) {
            const jsonDelta =
              (event.delta as { partial_json?: string }).partial_json || '';
            if (jsonDelta) {
              blockState.inputJson += jsonDelta;
            }
          }
          break;
        }
        case 'content_block_stop': {
          const index = event.index ?? 0;
          const blockState = blocks.get(index);
          if (blockState?.type === 'tool_use') {
            const args = safeJsonParse(blockState.inputJson || '{}', {});
            const chunk = this.buildGeminiChunk(
              {
                functionCall: {
                  id: blockState.id,
                  name: blockState.name,
                  args,
                },
              },
              messageId,
              model,
            );
            collectedResponses.push(chunk);
            yield chunk;
          }
          blocks.delete(index);
          break;
        }
        case 'message_delta': {
          const stopReasonValue = event.delta.stop_reason;
          if (stopReasonValue) {
            finishReason = stopReasonValue;
          }

          // Some Anthropic-compatible providers may include additional usage fields
          // (e.g. `input_tokens`, `cache_read_input_tokens`) even though the official
          // Anthropic SDK types only expose `output_tokens` here.
          const usageUnknown = event.usage as unknown;
          const usageRecord =
            usageUnknown && typeof usageUnknown === 'object'
              ? (usageUnknown as Record<string, unknown>)
              : undefined;

          if (event.usage?.output_tokens !== undefined) {
            completionTokens = event.usage.output_tokens;
          }
          if (usageRecord?.['input_tokens'] !== undefined) {
            const inputTokens = usageRecord['input_tokens'];
            if (typeof inputTokens === 'number') {
              promptTokens = inputTokens;
            }
          }
          if (usageRecord?.['cache_read_input_tokens'] !== undefined) {
            const cacheRead = usageRecord['cache_read_input_tokens'];
            if (typeof cacheRead === 'number') {
              cachedTokens = cacheRead;
            }
          }

          if (finishReason || event.usage) {
            const chunk = this.buildGeminiChunk(
              undefined,
              messageId,
              model,
              finishReason,
              {
                cachedContentTokenCount: cachedTokens,
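                // Anthropic's input_tokens excludes cache reads; Gemini's promptTokenCount includes them.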
                promptTokenCount: cachedTokens + promptTokens,
                candidatesTokenCount: completionTokens,
                totalTokenCount: cachedTokens + promptTokens + completionTokens,
              },
            );
            collectedResponses.push(chunk);
            yield chunk;
          }
          break;
        }
        case 'message_stop': {
          if (promptTokens || completionTokens) {
            const chunk = this.buildGeminiChunk(
              undefined,
              messageId,
              model,
              finishReason,
              {
                cachedContentTokenCount: cachedTokens,
                promptTokenCount: cachedTokens + promptTokens,
                candidatesTokenCount: completionTokens,
                totalTokenCount: cachedTokens + promptTokens + completionTokens,
              },
            );
            collectedResponses.push(chunk);
            yield chunk;
          }
          break;
        }
        default:
          break;
      }
    }
  }

  private buildGeminiChunk(
    part?: {
      text?: string;
      thought?: boolean;
      thoughtSignature?: string;
      functionCall?: unknown;
    },
    responseId?: string,
    model?: string,
    finishReason?: string,
    usageMetadata?: GenerateContentResponseUsageMetadata,
  ): GenerateContentResponse {
    const response = new GenerateContentResponse();
    response.responseId = responseId;
    response.createTime = Date.now().toString();
    response.modelVersion = model || this.contentGeneratorConfig.model;
    response.promptFeedback = { safetyRatings: [] };

    const candidateParts = part ? [part as unknown as Part] : [];
    const mappedFinishReason =
      finishReason !== undefined
        ? this.converter.mapAnthropicFinishReasonToGemini(finishReason)
        : undefined;
    response.candidates = [
      {
        content: {
          parts: candidateParts,
          role: 'model' as const,
        },
        index: 0,
        safetyRatings: [],
        ...(mappedFinishReason ? { finishReason: mappedFinishReason } : {}),
      },
    ];

    if (usageMetadata) {
      response.usageMetadata = usageMetadata;
    }

    return response;
  }
}

@@ -1,377 +0,0 @@
/**
 * @license
 * Copyright 2025 Qwen
 * SPDX-License-Identifier: Apache-2.0
 */

import { beforeEach, describe, expect, it, vi } from 'vitest';
import type { CallableTool, Content, Tool } from '@google/genai';
import { FinishReason } from '@google/genai';
import type Anthropic from '@anthropic-ai/sdk';

// Mock schema conversion so we can force edge-cases (e.g. missing `type`).
vi.mock('../../utils/schemaConverter.js', () => ({
  convertSchema: vi.fn((schema: unknown) => schema),
}));

import { convertSchema } from '../../utils/schemaConverter.js';
import { AnthropicContentConverter } from './converter.js';

describe('AnthropicContentConverter', () => {
  let converter: AnthropicContentConverter;

  beforeEach(() => {
    vi.clearAllMocks();
    converter = new AnthropicContentConverter('test-model', 'auto');
  });

  describe('convertGeminiRequestToAnthropic', () => {
    it('extracts systemInstruction text from string', () => {
      const { system } = converter.convertGeminiRequestToAnthropic({
        model: 'models/test',
        contents: 'hi',
        config: { systemInstruction: 'sys' },
      });

      expect(system).toBe('sys');
    });

    it('extracts systemInstruction text from parts and joins with newlines', () => {
      const { system } = converter.convertGeminiRequestToAnthropic({
        model: 'models/test',
        contents: 'hi',
        config: {
          systemInstruction: {
            role: 'system',
            parts: [{ text: 'a' }, { text: 'b' }],
          } as unknown as Content,
        },
      });

      expect(system).toBe('a\nb');
    });

    it('converts a plain string content into a user message', () => {
      const { messages } = converter.convertGeminiRequestToAnthropic({
        model: 'models/test',
        contents: 'Hello',
      });

      expect(messages).toEqual([
        { role: 'user', content: [{ type: 'text', text: 'Hello' }] },
      ]);
    });

    it('converts user content parts into a user message with text blocks', () => {
      const { messages } = converter.convertGeminiRequestToAnthropic({
        model: 'models/test',
        contents: [
          {
            role: 'user',
            parts: [{ text: 'Hello' }, { text: 'World' }],
          },
        ],
      });

      expect(messages).toEqual([
        {
          role: 'user',
          content: [
            { type: 'text', text: 'Hello' },
            { type: 'text', text: 'World' },
          ],
        },
      ]);
    });

    it('converts assistant thought parts into Anthropic thinking blocks', () => {
      const { messages } = converter.convertGeminiRequestToAnthropic({
        model: 'models/test',
        contents: [
          {
            role: 'model',
            parts: [
              { text: 'internal', thought: true, thoughtSignature: 'sig' },
              { text: 'visible' },
            ],
          },
        ],
      });

      expect(messages).toEqual([
        {
          role: 'assistant',
          content: [
            { type: 'thinking', thinking: 'internal', signature: 'sig' },
            { type: 'text', text: 'visible' },
          ],
        },
      ]);
    });

    it('converts functionCall parts from model role into tool_use blocks', () => {
      const { messages } = converter.convertGeminiRequestToAnthropic({
        model: 'models/test',
        contents: [
          {
            role: 'model',
            parts: [
              { text: 'preface' },
              {
                functionCall: {
                  id: 'call-1',
                  name: 'tool_name',
                  args: { a: 1 },
                },
              },
            ],
          },
        ],
      });

      expect(messages).toEqual([
        {
          role: 'assistant',
          content: [
            { type: 'text', text: 'preface' },
            {
              type: 'tool_use',
              id: 'call-1',
              name: 'tool_name',
              input: { a: 1 },
            },
          ],
        },
      ]);
    });

    it('converts functionResponse parts into user tool_result messages', () => {
      const { messages } = converter.convertGeminiRequestToAnthropic({
        model: 'models/test',
        contents: [
          {
            role: 'user',
            parts: [
              {
                functionResponse: {
                  id: 'call-1',
                  name: 'tool_name',
                  response: { output: 'ok' },
                },
              },
            ],
          },
        ],
      });

      expect(messages).toEqual([
        {
          role: 'user',
          content: [
            {
              type: 'tool_result',
              tool_use_id: 'call-1',
              content: 'ok',
            },
          ],
        },
      ]);
    });

    it('extracts function response error field when present', () => {
      const { messages } = converter.convertGeminiRequestToAnthropic({
        model: 'models/test',
        contents: [
          {
            role: 'user',
            parts: [
              {
                functionResponse: {
                  id: 'call-1',
                  name: 'tool_name',
                  response: { error: 'boom' },
                },
              },
            ],
          },
        ],
      });

      expect(messages[0]).toEqual({
        role: 'user',
        content: [
          {
            type: 'tool_result',
            tool_use_id: 'call-1',
            content: 'boom',
          },
        ],
      });
    });
  });

  describe('convertGeminiToolsToAnthropic', () => {
    it('converts Tool.functionDeclarations to Anthropic tools and runs schema conversion', async () => {
      const tools = [
        {
          functionDeclarations: [
            {
              name: 'get_weather',
              description: 'Get weather',
              parametersJsonSchema: {
                type: 'object',
                properties: { location: { type: 'string' } },
                required: ['location'],
              },
            },
          ],
        },
      ] as Tool[];

      const result = await converter.convertGeminiToolsToAnthropic(tools);

      expect(result).toHaveLength(1);
      expect(result[0]).toEqual({
        name: 'get_weather',
        description: 'Get weather',
        input_schema: {
          type: 'object',
          properties: { location: { type: 'string' } },
          required: ['location'],
        },
      });

      expect(vi.mocked(convertSchema)).toHaveBeenCalledTimes(1);
    });

    it('resolves CallableTool.tool() and converts its functionDeclarations', async () => {
      const callable = [
        {
          tool: async () =>
            ({
              functionDeclarations: [
                {
                  name: 'dynamic_tool',
                  description: 'resolved tool',
                  parametersJsonSchema: { type: 'object', properties: {} },
                },
              ],
            }) as unknown as Tool,
        },
      ] as CallableTool[];

      const result = await converter.convertGeminiToolsToAnthropic(callable);

      expect(result).toHaveLength(1);
      expect(result[0].name).toBe('dynamic_tool');
    });

    it('defaults missing parameters to an empty object schema', async () => {
      const tools = [
        {
          functionDeclarations: [
            { name: 'no_params', description: 'no params' },
          ],
        },
      ] as Tool[];

      const result = await converter.convertGeminiToolsToAnthropic(tools);

      expect(result).toHaveLength(1);
      expect(result[0]).toEqual({
        name: 'no_params',
        description: 'no params',
        input_schema: { type: 'object', properties: {} },
      });
    });

    it('forces input_schema.type to "object" when schema conversion yields no type', async () => {
      vi.mocked(convertSchema).mockImplementationOnce(() => ({
        properties: {},
      }));
      const tools = [
        {
          functionDeclarations: [
            {
              name: 'edge',
              description: 'edge',
              parametersJsonSchema: { type: 'object', properties: {} },
            },
          ],
        },
      ] as Tool[];

      const result = await converter.convertGeminiToolsToAnthropic(tools);
      expect(result[0]?.input_schema?.type).toBe('object');
    });
  });

  describe('convertAnthropicResponseToGemini', () => {
    it('converts text, tool_use, thinking, and redacted_thinking blocks', () => {
      const response = converter.convertAnthropicResponseToGemini({
        id: 'msg-1',
        model: 'claude-test',
        stop_reason: 'end_turn',
        content: [
          { type: 'thinking', thinking: 'thought', signature: 'sig' },
          { type: 'text', text: 'hello' },
          { type: 'tool_use', id: 't1', name: 'tool', input: { x: 1 } },
          { type: 'redacted_thinking' },
        ],
        usage: { input_tokens: 3, output_tokens: 5 },
      } as unknown as Anthropic.Message);

      expect(response.responseId).toBe('msg-1');
      expect(response.modelVersion).toBe('claude-test');
      expect(response.candidates?.[0]?.finishReason).toBe(FinishReason.STOP);
      expect(response.usageMetadata).toEqual({
        promptTokenCount: 3,
        candidatesTokenCount: 5,
        totalTokenCount: 8,
      });

      const parts = response.candidates?.[0]?.content?.parts || [];
      expect(parts).toEqual([
        { text: 'thought', thought: true, thoughtSignature: 'sig' },
        { text: 'hello' },
        { functionCall: { id: 't1', name: 'tool', args: { x: 1 } } },
        { text: '', thought: true },
      ]);
    });

    it('handles tool_use input that is a JSON string', () => {
      const response = converter.convertAnthropicResponseToGemini({
        id: 'msg-1',
        model: 'claude-test',
        stop_reason: null,
        content: [
          { type: 'tool_use', id: 't1', name: 'tool', input: '{"x":1}' },
        ],
      } as unknown as Anthropic.Message);

      const parts = response.candidates?.[0]?.content?.parts || [];
      expect(parts).toEqual([
        { functionCall: { id: 't1', name: 'tool', args: { x: 1 } } },
      ]);
    });
  });

  describe('mapAnthropicFinishReasonToGemini', () => {
    it('maps known reasons', () => {
      expect(converter.mapAnthropicFinishReasonToGemini('end_turn')).toBe(
        FinishReason.STOP,
      );
      expect(converter.mapAnthropicFinishReasonToGemini('max_tokens')).toBe(
        FinishReason.MAX_TOKENS,
      );
      expect(converter.mapAnthropicFinishReasonToGemini('content_filter')).toBe(
        FinishReason.SAFETY,
      );
    });

    it('returns undefined for null/empty', () => {
      expect(converter.mapAnthropicFinishReasonToGemini(null)).toBeUndefined();
      expect(converter.mapAnthropicFinishReasonToGemini('')).toBeUndefined();
    });
  });
});

@@ -1,448 +0,0 @@
/**
 * @license
 * Copyright 2025 Qwen
 * SPDX-License-Identifier: Apache-2.0
 */

import type {
  Candidate,
  CallableTool,
  Content,
  ContentListUnion,
  ContentUnion,
  FunctionCall,
  FunctionResponse,
  GenerateContentParameters,
  Part,
  PartUnion,
  Tool,
  ToolListUnion,
} from '@google/genai';
import { FinishReason, GenerateContentResponse } from '@google/genai';
import type Anthropic from '@anthropic-ai/sdk';
import { safeJsonParse } from '../../utils/safeJsonParse.js';
import {
  convertSchema,
  type SchemaComplianceMode,
} from '../../utils/schemaConverter.js';

type AnthropicMessageParam = Anthropic.MessageParam;
type AnthropicToolParam = Anthropic.Tool;
type AnthropicContentBlockParam = Anthropic.ContentBlockParam;

type ThoughtPart = { text: string; signature?: string };

interface ParsedParts {
  thoughtParts: ThoughtPart[];
  contentParts: string[];
  functionCalls: FunctionCall[];
  functionResponses: FunctionResponse[];
}

export class AnthropicContentConverter {
  private model: string;
  private schemaCompliance: SchemaComplianceMode;

  constructor(model: string, schemaCompliance: SchemaComplianceMode = 'auto') {
    this.model = model;
    this.schemaCompliance = schemaCompliance;
  }

  convertGeminiRequestToAnthropic(request: GenerateContentParameters): {
    system?: string;
    messages: AnthropicMessageParam[];
  } {
    const messages: AnthropicMessageParam[] = [];

    const system = this.extractTextFromContentUnion(
      request.config?.systemInstruction,
    );

    this.processContents(request.contents, messages);

    return {
      system: system || undefined,
      messages,
    };
  }

  async convertGeminiToolsToAnthropic(
    geminiTools: ToolListUnion,
  ): Promise<AnthropicToolParam[]> {
    const tools: AnthropicToolParam[] = [];

    for (const tool of geminiTools) {
      let actualTool: Tool;

      if ('tool' in tool) {
        actualTool = await (tool as CallableTool).tool();
      } else {
        actualTool = tool as Tool;
      }

      if (!actualTool.functionDeclarations) {
        continue;
      }

      for (const func of actualTool.functionDeclarations) {
        if (!func.name) continue;

        let inputSchema: Record<string, unknown> | undefined;
        if (func.parametersJsonSchema) {
          inputSchema = {
            ...(func.parametersJsonSchema as Record<string, unknown>),
          };
        } else if (func.parameters) {
          inputSchema = func.parameters as Record<string, unknown>;
        }

        if (!inputSchema) {
          inputSchema = { type: 'object', properties: {} };
        }

        inputSchema = convertSchema(inputSchema, this.schemaCompliance);
        if (typeof inputSchema['type'] !== 'string') {
          inputSchema['type'] = 'object';
        }

        tools.push({
          name: func.name,
          description: func.description,
          input_schema: inputSchema as Anthropic.Tool.InputSchema,
        });
      }
    }

    return tools;
  }

  convertAnthropicResponseToGemini(
    response: Anthropic.Message,
  ): GenerateContentResponse {
    const geminiResponse = new GenerateContentResponse();
    const parts: Part[] = [];

    for (const block of response.content || []) {
      const blockType = String((block as { type?: string })['type'] || '');
      if (blockType === 'text') {
        const text =
          typeof (block as { text?: string }).text === 'string'
            ? (block as { text?: string }).text
            : '';
        if (text) {
          parts.push({ text });
        }
      } else if (blockType === 'tool_use') {
        const toolUse = block as {
          id?: string;
          name?: string;
          input?: unknown;
        };
        parts.push({
          functionCall: {
            id: typeof toolUse.id === 'string' ? toolUse.id : undefined,
            name: typeof toolUse.name === 'string' ? toolUse.name : undefined,
            args: this.safeInputToArgs(toolUse.input),
          },
        });
      } else if (blockType === 'thinking') {
        const thinking =
          typeof (block as { thinking?: string }).thinking === 'string'
            ? (block as { thinking?: string }).thinking
            : '';
        const signature =
          typeof (block as { signature?: string }).signature === 'string'
            ? (block as { signature?: string }).signature
            : '';
        if (thinking || signature) {
          const thoughtPart: Part = {
            text: thinking,
            thought: true,
            thoughtSignature: signature,
          };
          parts.push(thoughtPart);
        }
      } else if (blockType === 'redacted_thinking') {
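        // Redacted thinking carries no recoverable text; keep a placeholder thought part.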
        parts.push({ text: '', thought: true });
      }
    }

    const candidate: Candidate = {
      content: {
        parts,
        role: 'model' as const,
      },
      index: 0,
      safetyRatings: [],
    };

    const finishReason = this.mapAnthropicFinishReasonToGemini(
      response.stop_reason,
    );
    if (finishReason) {
      candidate.finishReason = finishReason;
    }

    geminiResponse.candidates = [candidate];
    geminiResponse.responseId = response.id;
    geminiResponse.createTime = Date.now().toString();
    geminiResponse.modelVersion = response.model || this.model;
    geminiResponse.promptFeedback = { safetyRatings: [] };

    if (response.usage) {
      const promptTokens = response.usage.input_tokens || 0;
      const completionTokens = response.usage.output_tokens || 0;
      geminiResponse.usageMetadata = {
        promptTokenCount: promptTokens,
        candidatesTokenCount: completionTokens,
        totalTokenCount: promptTokens + completionTokens,
      };
    }

    return geminiResponse;
  }

  private processContents(
    contents: ContentListUnion,
    messages: AnthropicMessageParam[],
  ): void {
    if (Array.isArray(contents)) {
      for (const content of contents) {
        this.processContent(content, messages);
      }
    } else if (contents) {
      this.processContent(contents, messages);
    }
  }

  private processContent(
    content: ContentUnion | PartUnion,
    messages: AnthropicMessageParam[],
  ): void {
    if (typeof content === 'string') {
      messages.push({
        role: 'user',
        content: [{ type: 'text', text: content }],
      });
      return;
    }

    if (!this.isContentObject(content)) return;

    const parsed = this.parseParts(content.parts || []);

    if (parsed.functionResponses.length > 0) {
      for (const response of parsed.functionResponses) {
        messages.push({
          role: 'user',
          content: [
            {
              type: 'tool_result',
              tool_use_id: response.id || '',
              content: this.extractFunctionResponseContent(response.response),
            },
          ],
        });
      }
      return;
    }

    if (content.role === 'model' && parsed.functionCalls.length > 0) {
      const thinkingBlocks: AnthropicContentBlockParam[] =
        parsed.thoughtParts.map((part) => {
          const thinkingBlock: unknown = {
            type: 'thinking',
            thinking: part.text,
          };
          if (part.signature) {
            (thinkingBlock as { signature?: string }).signature =
              part.signature;
          }
          return thinkingBlock as AnthropicContentBlockParam;
        });
      const toolUses: AnthropicContentBlockParam[] = parsed.functionCalls.map(
        (call, index) => ({
          type: 'tool_use',
          id: call.id || `tool_${index}`,
          name: call.name || '',
          input: (call.args as Record<string, unknown>) || {},
        }),
      );

      const textBlocks: AnthropicContentBlockParam[] = parsed.contentParts.map(
        (text) => ({
          type: 'text' as const,
          text,
        }),
      );

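      // Order blocks thinking, then text, then tool_use, matching what Anthropic
      // expects for assistant turns that carry tool calls.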
      messages.push({
        role: 'assistant',
        content: [...thinkingBlocks, ...textBlocks, ...toolUses],
      });
      return;
    }

    const role = content.role === 'model' ? 'assistant' : 'user';
    const thinkingBlocks: AnthropicContentBlockParam[] =
      role === 'assistant'
        ? parsed.thoughtParts.map((part) => {
            const thinkingBlock: unknown = {
              type: 'thinking',
              thinking: part.text,
            };
            if (part.signature) {
              (thinkingBlock as { signature?: string }).signature =
                part.signature;
            }
            return thinkingBlock as AnthropicContentBlockParam;
          })
        : [];
    const textBlocks: AnthropicContentBlockParam[] = [
      ...thinkingBlocks,
      ...parsed.contentParts.map((text) => ({
        type: 'text' as const,
        text,
      })),
    ];
    if (textBlocks.length > 0) {
      messages.push({ role, content: textBlocks });
    }
  }

  private parseParts(parts: Part[]): ParsedParts {
    const thoughtParts: ThoughtPart[] = [];
    const contentParts: string[] = [];
    const functionCalls: FunctionCall[] = [];
    const functionResponses: FunctionResponse[] = [];

    for (const part of parts) {
      if (typeof part === 'string') {
        contentParts.push(part);
      } else if (
        'text' in part &&
        part.text &&
        !('thought' in part && part.thought)
      ) {
        contentParts.push(part.text);
      } else if ('text' in part && 'thought' in part && part.thought) {
        thoughtParts.push({
          text: part.text || '',
          signature:
            'thoughtSignature' in part &&
            typeof part.thoughtSignature === 'string'
              ? part.thoughtSignature
              : undefined,
        });
      } else if ('functionCall' in part && part.functionCall) {
        functionCalls.push(part.functionCall);
      } else if ('functionResponse' in part && part.functionResponse) {
        functionResponses.push(part.functionResponse);
      }
    }

    return {
      thoughtParts,
      contentParts,
      functionCalls,
      functionResponses,
    };
  }

  private extractTextFromContentUnion(contentUnion: unknown): string {
    if (typeof contentUnion === 'string') {
      return contentUnion;
    }

    if (Array.isArray(contentUnion)) {
      return contentUnion
        .map((item) => this.extractTextFromContentUnion(item))
        .filter(Boolean)
        .join('\n');
    }

    if (typeof contentUnion === 'object' && contentUnion !== null) {
      if ('parts' in contentUnion) {
        const content = contentUnion as Content;
        return (
          content.parts
            ?.map((part: Part) => {
              if (typeof part === 'string') return part;
              if ('text' in part) return part.text || '';
              return '';
            })
            .filter(Boolean)
            .join('\n') || ''
        );
      }
    }

    return '';
  }

  private extractFunctionResponseContent(response: unknown): string {
    if (response === null || response === undefined) {
      return '';
    }

    if (typeof response === 'string') {
      return response;
    }

    if (typeof response === 'object') {
      const responseObject = response as Record<string, unknown>;
      const output = responseObject['output'];
      if (typeof output === 'string') {
        return output;
      }

      const error = responseObject['error'];
      if (typeof error === 'string') {
        return error;
      }
    }

    try {
      const serialized = JSON.stringify(response);
      return serialized ?? String(response);
    } catch {
      return String(response);
    }
  }

  private safeInputToArgs(input: unknown): Record<string, unknown> {
    if (input && typeof input === 'object') {
      return input as Record<string, unknown>;
    }
    if (typeof input === 'string') {
      return safeJsonParse(input, {});
    }
    return {};
  }

  mapAnthropicFinishReasonToGemini(
    reason?: string | null,
  ): FinishReason | undefined {
    if (!reason) return undefined;
    const mapping: Record<string, FinishReason> = {
      end_turn: FinishReason.STOP,
      stop_sequence: FinishReason.STOP,
      tool_use: FinishReason.STOP,
      max_tokens: FinishReason.MAX_TOKENS,
      content_filter: FinishReason.SAFETY,
    };
    return mapping[reason] || FinishReason.FINISH_REASON_UNSPECIFIED;
  }

  private isContentObject(
    content: unknown,
  ): content is { role: string; parts: Part[] } {
    return (
      typeof content === 'object' &&
      content !== null &&
      'role' in content &&
      'parts' in content &&
      Array.isArray((content as Record<string, unknown>)['parts'])
    );
  }
}

@@ -1,21 +0,0 @@
/**
 * @license
 * Copyright 2025 Qwen
 * SPDX-License-Identifier: Apache-2.0
 */

import type {
  ContentGenerator,
  ContentGeneratorConfig,
} from '../contentGenerator.js';
import type { Config } from '../../config/config.js';
import { AnthropicContentGenerator } from './anthropicContentGenerator.js';

export { AnthropicContentGenerator } from './anthropicContentGenerator.js';

export function createAnthropicContentGenerator(
  contentGeneratorConfig: ContentGeneratorConfig,
  cliConfig: Config,
): ContentGenerator {
  return new AnthropicContentGenerator(contentGeneratorConfig, cliConfig);
}
@@ -146,11 +146,12 @@ describe('BaseLlmClient', () => {
    // Validate the parameters passed to the underlying generator
    expect(mockGenerateContent).toHaveBeenCalledTimes(1);
    expect(mockGenerateContent).toHaveBeenCalledWith(
      expect.objectContaining({
      {
        model: 'test-model',
        contents: defaultOptions.contents,
        config: expect.objectContaining({
        config: {
          abortSignal: defaultOptions.abortSignal,
          topP: 0.8,
          tools: [
            {
              functionDeclarations: [
@@ -162,8 +163,9 @@ describe('BaseLlmClient', () => {
              ],
            },
          ],
        }),
      }),
          // Crucial: systemInstruction should NOT be in the config object if not provided
        },
      },
      'test-prompt-id',
    );
  });
@@ -186,6 +188,7 @@ describe('BaseLlmClient', () => {
      expect.objectContaining({
        config: expect.objectContaining({
          temperature: 0.8,
          topP: 0.8, // Default should remain if not overridden
          topK: 10,
          tools: expect.any(Array),
        }),

@@ -64,6 +64,11 @@ export interface GenerateJsonOptions {
 * A client dedicated to stateless, utility-focused LLM calls.
 */
export class BaseLlmClient {
  // Default configuration for utility tasks
  private readonly defaultUtilityConfig: GenerateContentConfig = {
    topP: 0.8,
  };

  constructor(
    private readonly contentGenerator: ContentGenerator,
    private readonly config: Config,
@@ -84,6 +89,7 @@ export class BaseLlmClient {

    const requestConfig: GenerateContentConfig = {
      abortSignal,
      ...this.defaultUtilityConfig,
      ...options.config,
      ...(systemInstruction && { systemInstruction }),
    };

@@ -15,7 +15,11 @@ import {
} from 'vitest';

import type { Content, GenerateContentResponse, Part } from '@google/genai';
import { GeminiClient } from './client.js';
import {
  isThinkingDefault,
  isThinkingSupported,
  GeminiClient,
} from './client.js';
import { findCompressSplitPoint } from '../services/chatCompressionService.js';
import {
  AuthType,
@@ -243,6 +247,40 @@ describe('findCompressSplitPoint', () => {
  });
});

describe('isThinkingSupported', () => {
  it('should return true for gemini-2.5', () => {
    expect(isThinkingSupported('gemini-2.5')).toBe(true);
  });

  it('should return true for gemini-2.5-pro', () => {
    expect(isThinkingSupported('gemini-2.5-pro')).toBe(true);
  });

  it('should return false for other models', () => {
    expect(isThinkingSupported('gemini-1.5-flash')).toBe(false);
    expect(isThinkingSupported('some-other-model')).toBe(false);
  });
});

describe('isThinkingDefault', () => {
  it('should return false for gemini-2.5-flash-lite', () => {
    expect(isThinkingDefault('gemini-2.5-flash-lite')).toBe(false);
  });

  it('should return true for gemini-2.5', () => {
    expect(isThinkingDefault('gemini-2.5')).toBe(true);
  });

  it('should return true for gemini-2.5-pro', () => {
    expect(isThinkingDefault('gemini-2.5-pro')).toBe(true);
  });

  it('should return false for other models', () => {
    expect(isThinkingDefault('gemini-1.5-flash')).toBe(false);
    expect(isThinkingDefault('some-other-model')).toBe(false);
  });
});

describe('Gemini Client (client.ts)', () => {
  let mockContentGenerator: ContentGenerator;
  let mockConfig: Config;
@@ -2266,15 +2304,16 @@ ${JSON.stringify(
    );

    expect(mockContentGenerator.generateContent).toHaveBeenCalledWith(
      expect.objectContaining({
      {
        model: DEFAULT_GEMINI_FLASH_MODEL,
        config: expect.objectContaining({
        config: {
          abortSignal,
          systemInstruction: getCoreSystemPrompt(''),
          temperature: 0.5,
        }),
          topP: 0.8,
        },
        contents,
      }),
      },
      'test-session-id',
    );
  });

@@ -15,7 +15,11 @@ import type {

// Config
import { ApprovalMode, type Config } from '../config/config.js';
import { DEFAULT_GEMINI_FLASH_MODEL } from '../config/models.js';
import {
  DEFAULT_GEMINI_FLASH_MODEL,
  DEFAULT_GEMINI_MODEL_AUTO,
  DEFAULT_THINKING_MODE,
} from '../config/models.js';

// Core modules
import type { ContentGenerator } from './contentGenerator.js';
@@ -74,10 +78,24 @@ import { type File, type IdeContext } from '../ide/types.js';
// Fallback handling
import { handleFallback } from '../fallback/handler.js';

export function isThinkingSupported(model: string) {
  return model.startsWith('gemini-2.5') || model === DEFAULT_GEMINI_MODEL_AUTO;
}

export function isThinkingDefault(model: string) {
  if (model.startsWith('gemini-2.5-flash-lite')) {
    return false;
  }
  return model.startsWith('gemini-2.5') || model === DEFAULT_GEMINI_MODEL_AUTO;
}
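The two helpers above gate thinking support per model; a quick illustration consistent with the tests added in this change:

```ts
// Both gates accept the gemini-2.5 family plus the DEFAULT_GEMINI_MODEL_AUTO alias.
// isThinkingDefault additionally opts gemini-2.5-flash-lite out by default.
isThinkingSupported('gemini-2.5-pro'); // true
isThinkingSupported('gemini-1.5-flash'); // false
isThinkingDefault('gemini-2.5'); // true
isThinkingDefault('gemini-2.5-flash-lite'); // false (supported, but off by default)
```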
const MAX_TURNS = 100;

export class GeminiClient {
  private chat?: GeminiChat;
  private readonly generateContentConfig: GenerateContentConfig = {
    topP: 0.8,
  };
  private sessionTurnCount = 0;

  private readonly loopDetector: LoopDetectionService;
@@ -189,10 +207,20 @@ export class GeminiClient {
    const model = this.config.getModel();
    const systemInstruction = getCoreSystemPrompt(userMemory, model);

    const config: GenerateContentConfig = { ...this.generateContentConfig };

    if (isThinkingSupported(model)) {
      config.thinkingConfig = {
        includeThoughts: true,
        thinkingBudget: DEFAULT_THINKING_MODE,
      };
    }

    return new GeminiChat(
      this.config,
      {
        systemInstruction,
        ...config,
        tools,
      },
      history,
@@ -589,6 +617,11 @@ export class GeminiClient {
  ): Promise<GenerateContentResponse> {
    let currentAttemptModel: string = model;

    const configToUse: GenerateContentConfig = {
      ...this.generateContentConfig,
      ...generationConfig,
    };

    try {
      const userMemory = this.config.getUserMemory();
      const finalSystemInstruction = generationConfig.systemInstruction
@@ -597,7 +630,7 @@ export class GeminiClient {

      const requestConfig: GenerateContentConfig = {
        abortSignal,
        ...generationConfig,
        ...configToUse,
        systemInstruction: finalSystemInstruction,
      };

@@ -638,7 +671,7 @@ export class GeminiClient {
        `Error generating content via API with model ${currentAttemptModel}.`,
        {
          requestContents: contents,
          requestConfig: generationConfig,
          requestConfig: configToUse,
        },
        'generateContent-api',
      );

@@ -5,19 +5,42 @@
 */

import { describe, it, expect, vi } from 'vitest';
import type { ContentGenerator } from './contentGenerator.js';
import { createContentGenerator, AuthType } from './contentGenerator.js';
import { createCodeAssistContentGenerator } from '../code_assist/codeAssist.js';
import { GoogleGenAI } from '@google/genai';
import type { Config } from '../config/config.js';
import { LoggingContentGenerator } from './loggingContentGenerator/index.js';
import { LoggingContentGenerator } from './loggingContentGenerator.js';

vi.mock('../code_assist/codeAssist.js');
vi.mock('@google/genai');

const mockConfig = {
  getCliVersion: vi.fn().mockReturnValue('1.0.0'),
} as unknown as Config;

describe('createContentGenerator', () => {
  it('should create a Gemini content generator', async () => {
  it('should create a CodeAssistContentGenerator', async () => {
    const mockGenerator = {} as unknown as ContentGenerator;
    vi.mocked(createCodeAssistContentGenerator).mockResolvedValue(
      mockGenerator as never,
    );
    const generator = await createContentGenerator(
      {
        model: 'test-model',
        authType: AuthType.LOGIN_WITH_GOOGLE,
      },
      mockConfig,
    );
    expect(createCodeAssistContentGenerator).toHaveBeenCalled();
    expect(generator).toEqual(
      new LoggingContentGenerator(mockGenerator, mockConfig),
    );
  });

  it('should create a GoogleGenAI content generator', async () => {
    const mockConfig = {
      getUsageStatisticsEnabled: () => true,
      getContentGeneratorConfig: () => ({}),
      getCliVersion: () => '1.0.0',
    } as unknown as Config;

    const mockGenerator = {
@@ -42,17 +65,17 @@ describe('createContentGenerator', () => {
        },
      },
    });
    // We expect it to be a LoggingContentGenerator wrapping a GeminiContentGenerator
    expect(generator).toBeInstanceOf(LoggingContentGenerator);
    const wrapped = (generator as LoggingContentGenerator).getWrapped();
    expect(wrapped).toBeDefined();
    expect(generator).toEqual(
      new LoggingContentGenerator(
        (mockGenerator as GoogleGenAI).models,
        mockConfig,
      ),
    );
  });

  it('should create a Gemini content generator with client install id logging disabled', async () => {
  it('should create a GoogleGenAI content generator with client install id logging disabled', async () => {
    const mockConfig = {
      getUsageStatisticsEnabled: () => false,
      getContentGeneratorConfig: () => ({}),
      getCliVersion: () => '1.0.0',
    } as unknown as Config;
    const mockGenerator = {
      models: {},
@@ -75,6 +98,11 @@ describe('createContentGenerator', () => {
        },
      },
    });
    expect(generator).toBeInstanceOf(LoggingContentGenerator);
    expect(generator).toEqual(
      new LoggingContentGenerator(
        (mockGenerator as GoogleGenAI).models,
        mockConfig,
      ),
    );
  });
});

@@ -12,9 +12,14 @@ import type {
  GenerateContentParameters,
  GenerateContentResponse,
} from '@google/genai';
import { GoogleGenAI } from '@google/genai';
import { createCodeAssistContentGenerator } from '../code_assist/codeAssist.js';
import { DEFAULT_QWEN_MODEL } from '../config/models.js';
import type { Config } from '../config/config.js';
import { LoggingContentGenerator } from './loggingContentGenerator/index.js';

import type { UserTierId } from '../code_assist/types.js';
import { InstallationManager } from '../utils/installationManager.js';
import { LoggingContentGenerator } from './loggingContentGenerator.js';

/**
 * Interface abstracting the core functionalities for generating content and counting tokens.
@@ -34,15 +39,16 @@ export interface ContentGenerator {

  embedContent(request: EmbedContentParameters): Promise<EmbedContentResponse>;

  useSummarizedThinking(): boolean;
  userTier?: UserTierId;
}

export enum AuthType {
  LOGIN_WITH_GOOGLE = 'oauth-personal',
  USE_GEMINI = 'gemini-api-key',
  USE_VERTEX_AI = 'vertex-ai',
  CLOUD_SHELL = 'cloud-shell',
  USE_OPENAI = 'openai',
  QWEN_OAUTH = 'qwen-oauth',
  USE_GEMINI = 'gemini',
  USE_VERTEX_AI = 'vertex-ai',
  USE_ANTHROPIC = 'anthropic',
}

export type ContentGeneratorConfig = {
@@ -53,9 +59,12 @@ export type ContentGeneratorConfig = {
  authType?: AuthType | undefined;
  enableOpenAILogging?: boolean;
  openAILoggingDir?: string;
  timeout?: number; // Timeout configuration in milliseconds
  maxRetries?: number; // Maximum retries for failed requests
  disableCacheControl?: boolean; // Disable cache control for DashScope providers
  // Timeout configuration in milliseconds
  timeout?: number;
  // Maximum retries for failed requests
  maxRetries?: number;
  // Disable cache control for DashScope providers
  disableCacheControl?: boolean;
  samplingParams?: {
    top_p?: number;
    top_k?: number;
@@ -65,12 +74,6 @@
    temperature?: number;
    max_tokens?: number;
  };
  reasoning?:
    | false
    | {
        effort?: 'low' | 'medium' | 'high';
        budget_tokens?: number;
      };
  proxy?: string | undefined;
  userAgent?: string;
  // Schema compliance mode for tool definitions
@@ -82,7 +85,7 @@ export function createContentGeneratorConfig(
  authType: AuthType | undefined,
  generationConfig?: Partial<ContentGeneratorConfig>,
): ContentGeneratorConfig {
  let newContentGeneratorConfig: Partial<ContentGeneratorConfig> = {
  const newContentGeneratorConfig: Partial<ContentGeneratorConfig> = {
    ...(generationConfig || {}),
    authType,
    proxy: config?.getProxy(),
@@ -99,16 +102,8 @@
  }

  if (authType === AuthType.USE_OPENAI) {
    newContentGeneratorConfig = {
      ...newContentGeneratorConfig,
      apiKey: newContentGeneratorConfig.apiKey || process.env['OPENAI_API_KEY'],
      baseUrl:
        newContentGeneratorConfig.baseUrl || process.env['OPENAI_BASE_URL'],
      model: newContentGeneratorConfig.model || process.env['OPENAI_MODEL'],
    };

    if (!newContentGeneratorConfig.apiKey) {
      throw new Error('OPENAI_API_KEY environment variable not found.');
      throw new Error('OpenAI API key is required');
    }

    return {
@@ -117,62 +112,10 @@ export function createContentGeneratorConfig(
    } as ContentGeneratorConfig;
  }

  if (authType === AuthType.USE_ANTHROPIC) {
    newContentGeneratorConfig = {
      ...newContentGeneratorConfig,
      apiKey:
        newContentGeneratorConfig.apiKey || process.env['ANTHROPIC_API_KEY'],
      baseUrl:
        newContentGeneratorConfig.baseUrl || process.env['ANTHROPIC_BASE_URL'],
      model: newContentGeneratorConfig.model || process.env['ANTHROPIC_MODEL'],
    };

    if (!newContentGeneratorConfig.apiKey) {
      throw new Error('ANTHROPIC_API_KEY environment variable not found.');
    }

    if (!newContentGeneratorConfig.baseUrl) {
      throw new Error('ANTHROPIC_BASE_URL environment variable not found.');
    }

    if (!newContentGeneratorConfig.model) {
      throw new Error('ANTHROPIC_MODEL environment variable not found.');
    }
  }

  if (authType === AuthType.USE_GEMINI) {
    newContentGeneratorConfig = {
      ...newContentGeneratorConfig,
      apiKey: newContentGeneratorConfig.apiKey || process.env['GEMINI_API_KEY'],
      model: newContentGeneratorConfig.model || process.env['GEMINI_MODEL'],
    };

    if (!newContentGeneratorConfig.apiKey) {
      throw new Error('GEMINI_API_KEY environment variable not found.');
    }

    if (!newContentGeneratorConfig.model) {
      throw new Error('GEMINI_MODEL environment variable not found.');
    }
  }

  if (authType === AuthType.USE_VERTEX_AI) {
    newContentGeneratorConfig = {
      ...newContentGeneratorConfig,
      apiKey: newContentGeneratorConfig.apiKey || process.env['GOOGLE_API_KEY'],
      model: newContentGeneratorConfig.model || process.env['GOOGLE_MODEL'],
    };

    if (!newContentGeneratorConfig.apiKey) {
      throw new Error('GOOGLE_API_KEY environment variable not found.');
    }

    if (!newContentGeneratorConfig.model) {
      throw new Error('GOOGLE_MODEL environment variable not found.');
    }
  }

  return newContentGeneratorConfig as ContentGeneratorConfig;
  return {
    ...newContentGeneratorConfig,
    model: newContentGeneratorConfig?.model || DEFAULT_QWEN_MODEL,
  } as ContentGeneratorConfig;
}
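A small sketch of the model fallback at the end of createContentGeneratorConfig; the call shapes follow the signature above, and the explicit model name is illustrative:

```ts
// Sketch: when no model is supplied by the caller or generationConfig, the
// returned config falls back to DEFAULT_QWEN_MODEL; an explicit model wins.
const resolved = createContentGeneratorConfig(config, AuthType.QWEN_OAUTH, {});
// resolved.model === DEFAULT_QWEN_MODEL
const explicit = createContentGeneratorConfig(config, AuthType.QWEN_OAUTH, {
  model: 'qwen3-coder-plus', // illustrative model name
});
// explicit.model === 'qwen3-coder-plus'
```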
export async function createContentGenerator(
@@ -180,9 +123,53 @@ export async function createContentGenerator(
  gcConfig: Config,
  isInitialAuth?: boolean,
): Promise<ContentGenerator> {
  const version = process.env['CLI_VERSION'] || process.version;
  const userAgent = `QwenCode/${version} (${process.platform}; ${process.arch})`;
  const baseHeaders: Record<string, string> = {
    'User-Agent': userAgent,
  };

  if (
    config.authType === AuthType.LOGIN_WITH_GOOGLE ||
    config.authType === AuthType.CLOUD_SHELL
  ) {
    const httpOptions = { headers: baseHeaders };
    return new LoggingContentGenerator(
      await createCodeAssistContentGenerator(
        httpOptions,
        config.authType,
        gcConfig,
      ),
      gcConfig,
    );
  }

  if (
    config.authType === AuthType.USE_GEMINI ||
    config.authType === AuthType.USE_VERTEX_AI
  ) {
    let headers: Record<string, string> = { ...baseHeaders };
    if (gcConfig?.getUsageStatisticsEnabled()) {
      const installationManager = new InstallationManager();
      const installationId = installationManager.getInstallationId();
      headers = {
        ...headers,
        'x-gemini-api-privileged-user-id': `${installationId}`,
      };
    }
    const httpOptions = { headers };

    const googleGenAI = new GoogleGenAI({
      apiKey: config.apiKey === '' ? undefined : config.apiKey,
      vertexai: config.vertexai,
      httpOptions,
    });
    return new LoggingContentGenerator(googleGenAI.models, gcConfig);
  }

  if (config.authType === AuthType.USE_OPENAI) {
    if (!config.apiKey) {
      throw new Error('OPENAI_API_KEY environment variable not found.');
      throw new Error('OpenAI API key is required');
    }

    // Import OpenAIContentGenerator dynamically to avoid circular dependencies
@@ -191,8 +178,7 @@ export async function createContentGenerator(
    );

    // Always use OpenAIContentGenerator, logging is controlled by enableOpenAILogging flag
    const generator = createOpenAIContentGenerator(config, gcConfig);
    return new LoggingContentGenerator(generator, gcConfig);
    return createOpenAIContentGenerator(config, gcConfig);
  }

  if (config.authType === AuthType.QWEN_OAUTH) {
@@ -213,8 +199,7 @@ export async function createContentGenerator(
    );

    // Create the content generator with dynamic token management
    const generator = new QwenContentGenerator(qwenClient, config, gcConfig);
    return new LoggingContentGenerator(generator, gcConfig);
    return new QwenContentGenerator(qwenClient, config, gcConfig);
  } catch (error) {
    throw new Error(
      `${error instanceof Error ? error.message : String(error)}`,
@@ -222,30 +207,6 @@
    );
  }
}

  if (config.authType === AuthType.USE_ANTHROPIC) {
    if (!config.apiKey) {
      throw new Error('ANTHROPIC_API_KEY environment variable not found.');
    }

    const { createAnthropicContentGenerator } = await import(
      './anthropicContentGenerator/index.js'
    );

    const generator = createAnthropicContentGenerator(config, gcConfig);
    return new LoggingContentGenerator(generator, gcConfig);
  }

  if (
    config.authType === AuthType.USE_GEMINI ||
    config.authType === AuthType.USE_VERTEX_AI
  ) {
    const { createGeminiContentGenerator } = await import(
      './geminiContentGenerator/index.js'
    );
    const generator = createGeminiContentGenerator(config, gcConfig);
    return new LoggingContentGenerator(generator, gcConfig);
  }

  throw new Error(
    `Error creating contentGenerator: Unsupported authType: ${config.authType}`,
  );
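Whichever branch fires, callers receive a ContentGenerator; branches that wrap the result in LoggingContentGenerator can be peeled open in tests via getWrapped(). A minimal consumption sketch (the prompt id is illustrative):

```ts
// Sketch: consuming the factory result; the telemetry wrapper is transparent.
const generator = await createContentGenerator(config, gcConfig);
if (generator instanceof LoggingContentGenerator) {
  const inner = generator.getWrapped(); // the undecorated generator
}
const response = await generator.generateContent(request, 'prompt-id'); // illustrative id
```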
@@ -240,7 +240,7 @@ describe('CoreToolScheduler', () => {
      getAllowedTools: () => [],
      getContentGeneratorConfig: () => ({
        model: 'test-model',
        authType: 'gemini-api-key',
        authType: 'oauth-personal',
      }),
      getShellExecutionConfig: () => ({
        terminalWidth: 90,
@@ -318,7 +318,7 @@ describe('CoreToolScheduler', () => {
      getAllowedTools: () => [],
      getContentGeneratorConfig: () => ({
        model: 'test-model',
        authType: 'gemini-api-key',
        authType: 'oauth-personal',
      }),
      getShellExecutionConfig: () => ({
        terminalWidth: 90,
@@ -497,7 +497,7 @@ describe('CoreToolScheduler', () => {
      getExcludeTools: () => ['write_file', 'edit', 'run_shell_command'],
      getContentGeneratorConfig: () => ({
        model: 'test-model',
        authType: 'gemini-api-key',
        authType: 'oauth-personal',
      }),
      getShellExecutionConfig: () => ({
        terminalWidth: 90,
@@ -584,7 +584,7 @@ describe('CoreToolScheduler', () => {
      getExcludeTools: () => ['write_file', 'edit'], // Different excluded tools
      getContentGeneratorConfig: () => ({
        model: 'test-model',
        authType: 'gemini-api-key',
        authType: 'oauth-personal',
      }),
      getShellExecutionConfig: () => ({
        terminalWidth: 90,
@@ -674,7 +674,7 @@ describe('CoreToolScheduler with payload', () => {
      getAllowedTools: () => [],
      getContentGeneratorConfig: () => ({
        model: 'test-model',
        authType: 'gemini-api-key',
        authType: 'oauth-personal',
      }),
      getShellExecutionConfig: () => ({
        terminalWidth: 90,
@@ -1001,7 +1001,7 @@ describe('CoreToolScheduler edit cancellation', () => {
      getAllowedTools: () => [],
      getContentGeneratorConfig: () => ({
        model: 'test-model',
        authType: 'gemini-api-key',
        authType: 'oauth-personal',
      }),
      getShellExecutionConfig: () => ({
        terminalWidth: 90,
@@ -1108,7 +1108,7 @@ describe('CoreToolScheduler YOLO mode', () => {
      getAllowedTools: () => [],
      getContentGeneratorConfig: () => ({
        model: 'test-model',
        authType: 'gemini-api-key',
        authType: 'oauth-personal',
      }),
      getShellExecutionConfig: () => ({
        terminalWidth: 90,
@@ -1258,7 +1258,7 @@ describe('CoreToolScheduler cancellation during executing with live output', ()
      getApprovalMode: () => ApprovalMode.DEFAULT,
      getContentGeneratorConfig: () => ({
        model: 'test-model',
        authType: 'gemini-api-key',
        authType: 'oauth-personal',
      }),
      getToolRegistry: () => mockToolRegistry,
      getShellExecutionConfig: () => ({
@@ -1350,7 +1350,7 @@ describe('CoreToolScheduler request queueing', () => {
      getAllowedTools: () => [],
      getContentGeneratorConfig: () => ({
        model: 'test-model',
        authType: 'gemini-api-key',
        authType: 'oauth-personal',
      }),
      getShellExecutionConfig: () => ({
        terminalWidth: 90,
@@ -1482,7 +1482,7 @@ describe('CoreToolScheduler request queueing', () => {
      getToolRegistry: () => toolRegistry,
      getContentGeneratorConfig: () => ({
        model: 'test-model',
        authType: 'gemini-api-key',
        authType: 'oauth-personal',
      }),
      getShellExecutionConfig: () => ({
        terminalWidth: 80,
@@ -1586,7 +1586,7 @@ describe('CoreToolScheduler request queueing', () => {
      getAllowedTools: () => [],
      getContentGeneratorConfig: () => ({
        model: 'test-model',
        authType: 'gemini-api-key',
        authType: 'oauth-personal',
      }),
      getShellExecutionConfig: () => ({
        terminalWidth: 90,
@@ -1854,7 +1854,7 @@ describe('CoreToolScheduler Sequential Execution', () => {
      getAllowedTools: () => [],
      getContentGeneratorConfig: () => ({
        model: 'test-model',
        authType: 'gemini-api-key',
        authType: 'oauth-personal',
      }),
      getShellExecutionConfig: () => ({
        terminalWidth: 90,
@@ -1975,7 +1975,7 @@ describe('CoreToolScheduler Sequential Execution', () => {
      getAllowedTools: () => [],
      getContentGeneratorConfig: () => ({
        model: 'test-model',
        authType: 'gemini-api-key',
        authType: 'oauth-personal',
      }),
      getShellExecutionConfig: () => ({
        terminalWidth: 90,

@@ -100,7 +100,6 @@ describe('GeminiChat', () => {
      countTokens: vi.fn(),
      embedContent: vi.fn(),
      batchEmbedContents: vi.fn(),
      useSummarizedThinking: vi.fn().mockReturnValue(false),
    } as unknown as ContentGenerator;

    mockHandleFallback.mockClear();
@@ -112,7 +111,7 @@ describe('GeminiChat', () => {
      getUsageStatisticsEnabled: () => true,
      getDebugMode: () => false,
      getContentGeneratorConfig: vi.fn().mockReturnValue({
        authType: 'gemini-api-key', // Ensure this is set for fallback tests
        authType: 'oauth-personal', // Ensure this is set for fallback tests
        model: 'test-model',
      }),
      getModel: vi.fn().mockReturnValue('gemini-pro'),
@@ -719,39 +718,6 @@ describe('GeminiChat', () => {
        1,
      );
    });

    it('should keep parts with thoughtSignature when consolidating history', async () => {
      const stream = (async function* () {
        yield {
          candidates: [
            {
              content: {
                role: 'model',
                parts: [
                  {
                    text: 'p1',
                    thoughtSignature: 's1',
                  } as unknown as { text: string; thoughtSignature: string },
                ],
              },
              finishReason: 'STOP',
            },
          ],
        } as unknown as GenerateContentResponse;
      })();
      vi.mocked(mockContentGenerator.generateContentStream).mockResolvedValue(
        stream,
      );

      const res = await chat.sendMessageStream('m1', { message: 'h1' }, 'p1');
      for await (const _ of res);

      const history = chat.getHistory();
      expect(history[1].parts![0]).toEqual({
        text: 'p1',
        thoughtSignature: 's1',
      });
    });
  });

  describe('addHistory', () => {
@@ -1416,7 +1382,7 @@ describe('GeminiChat', () => {
    });

    it('should call handleFallback with the specific failed model and retry if handler returns true', async () => {
      const authType = AuthType.USE_GEMINI;
      const authType = AuthType.LOGIN_WITH_GOOGLE;
      vi.mocked(mockConfig.getContentGeneratorConfig).mockReturnValue({
        model: 'test-model',
        authType,
@@ -1566,7 +1532,7 @@ describe('GeminiChat', () => {
    });

  describe('stripThoughtsFromHistory', () => {
    it('should strip thoughts and thought signatures, and remove empty content objects', () => {
    it('should strip thought signatures', () => {
      chat.setHistory([
        {
          role: 'user',
@@ -1578,15 +1544,10 @@ describe('GeminiChat', () => {
            { text: 'thinking...', thought: true },
            { text: 'hi' },
            {
              text: 'hidden metadata',
              thoughtSignature: 'abc',
            } as unknown as { text: string; thoughtSignature: string },
              functionCall: { name: 'test', args: {} },
            },
          ],
        },
        {
          role: 'model',
          parts: [{ text: 'only thinking', thought: true }],
        },
      ]);

      chat.stripThoughtsFromHistory();
@@ -1598,7 +1559,7 @@ describe('GeminiChat', () => {
        },
        {
          role: 'model',
          parts: [{ text: 'hi' }, { text: 'hidden metadata' }],
          parts: [{ text: 'hi' }, { functionCall: { name: 'test', args: {} } }],
        },
      ]);
    });

@@ -92,7 +92,6 @@ export function isValidNonThoughtTextPart(part: Part): boolean {
  return (
    typeof part.text === 'string' &&
    !part.thought &&
    !part.thoughtSignature &&
    // Technically, the model should never generate parts that have text and
    // any of these but we don't trust them so check anyways.
    !part.functionCall &&
@@ -110,24 +109,18 @@ function isValidContent(content: Content): boolean {
    if (part === undefined || Object.keys(part).length === 0) {
      return false;
    }
    if (!isValidContentPart(part)) {
    if (
      !part.thought &&
      part.text !== undefined &&
      part.text === '' &&
      part.functionCall === undefined
    ) {
      return false;
    }
  }
  return true;
}

function isValidContentPart(part: Part): boolean {
  const isInvalid =
    !part.thought &&
    !part.thoughtSignature &&
    part.text !== undefined &&
    part.text === '' &&
    part.functionCall === undefined;

  return !isInvalid;
}

/**
 * Validates the history contains the correct roles.
 *
@@ -455,29 +448,15 @@ export class GeminiChat {
    if (!content.parts) return content;

    // Filter out thought parts entirely
    const filteredParts = content.parts
      .filter(
        (part) =>
          !(
            part &&
            typeof part === 'object' &&
            'thought' in part &&
            part.thought
          ),
      )
      .map((part) => {
        if (
    const filteredParts = content.parts.filter(
      (part) =>
        !(
          part &&
          typeof part === 'object' &&
          'thoughtSignature' in part
        ) {
          const newPart = { ...part };
          delete (newPart as { thoughtSignature?: string })
            .thoughtSignature;
          return newPart;
        }
        return part;
      });
          'thought' in part &&
          part.thought
        ),
    );

    return {
      ...content,
@@ -559,27 +538,12 @@ export class GeminiChat {
      yield chunk; // Yield every chunk to the UI immediately.
    }

    let thoughtContentPart: Part | undefined;
    const thoughtText = allModelParts
      .filter((part) => part.thought)
    const thoughtParts = allModelParts.filter((part) => part.thought);
    const thoughtText = thoughtParts
      .map((part) => part.text)
      .join('')
      .trim();

    if (thoughtText !== '') {
      thoughtContentPart = {
        text: thoughtText,
        thought: true,
      };

      const thoughtSignature = allModelParts.filter(
        (part) => part.thoughtSignature && part.thought,
      )?.[0]?.thoughtSignature;
      if (thoughtContentPart && thoughtSignature) {
        thoughtContentPart.thoughtSignature = thoughtSignature;
      }
    }

    const contentParts = allModelParts.filter((part) => !part.thought);
    const consolidatedHistoryParts: Part[] = [];
    for (const part of contentParts) {
@@ -591,7 +555,7 @@ export class GeminiChat {
        isValidNonThoughtTextPart(part)
      ) {
        lastPart.text += part.text;
      } else if (isValidContentPart(part)) {
      } else {
        consolidatedHistoryParts.push(part);
      }
    }
@@ -603,11 +567,11 @@ export class GeminiChat {
      .trim();

    // Record assistant turn with raw Content and metadata
    if (thoughtContentPart || contentText || hasToolCall || usageMetadata) {
    if (thoughtText || contentText || hasToolCall || usageMetadata) {
      this.chatRecordingService?.recordAssistantTurn({
        model,
        message: [
          ...(thoughtContentPart ? [thoughtContentPart] : []),
          ...(thoughtText ? [{ text: thoughtText, thought: true }] : []),
          ...(contentText ? [{ text: contentText }] : []),
          ...(hasToolCall
            ? contentParts
@@ -643,7 +607,7 @@ export class GeminiChat {
      this.history.push({
        role: 'model',
        parts: [
          ...(thoughtContentPart ? [thoughtContentPart] : []),
          ...(thoughtText ? [{ text: thoughtText, thought: true }] : []),
          ...consolidatedHistoryParts,
        ],
      });
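A compact sketch of the consolidation step above, using the same names as the code: streamed thought parts collapse into a single history part and adjacent plain-text parts are merged.

```ts
// Sketch: given streamed model parts, thought text is joined into one
// thought part and adjacent plain-text parts are concatenated.
const allModelParts: Part[] = [
  { text: 'planning... ', thought: true },
  { text: 'done planning', thought: true },
  { text: 'Hello, ' },
  { text: 'world' },
];
const thoughtText = allModelParts
  .filter((part) => part.thought)
  .map((part) => part.text)
  .join('')
  .trim(); // 'planning... done planning'
// History then receives [{ text: thoughtText, thought: true }, { text: 'Hello, world' }].
```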
@@ -1,173 +0,0 @@
/**
 * @license
 * Copyright 2025 Google LLC
 * SPDX-License-Identifier: Apache-2.0
 */

import { describe, it, expect, vi, beforeEach } from 'vitest';
import { GeminiContentGenerator } from './geminiContentGenerator.js';
import { GoogleGenAI } from '@google/genai';

vi.mock('@google/genai', () => {
  const mockGenerateContent = vi.fn();
  const mockGenerateContentStream = vi.fn();
  const mockCountTokens = vi.fn();
  const mockEmbedContent = vi.fn();

  return {
    GoogleGenAI: vi.fn().mockImplementation(() => ({
      models: {
        generateContent: mockGenerateContent,
        generateContentStream: mockGenerateContentStream,
        countTokens: mockCountTokens,
        embedContent: mockEmbedContent,
      },
    })),
  };
});

describe('GeminiContentGenerator', () => {
  let generator: GeminiContentGenerator;
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  let mockGoogleGenAI: any;

  beforeEach(() => {
    vi.clearAllMocks();
    generator = new GeminiContentGenerator({
      apiKey: 'test-api-key',
    });
    mockGoogleGenAI = vi.mocked(GoogleGenAI).mock.results[0].value;
  });

  it('should call generateContent on the underlying model', async () => {
    const request = { model: 'gemini-1.5-flash', contents: [] };
    const expectedResponse = { responseId: 'test-id' };
    mockGoogleGenAI.models.generateContent.mockResolvedValue(expectedResponse);

    const response = await generator.generateContent(request, 'prompt-id');

    expect(mockGoogleGenAI.models.generateContent).toHaveBeenCalledWith(
      expect.objectContaining({
        ...request,
        config: expect.objectContaining({
          temperature: 1,
          topP: 0.95,
          thinkingConfig: {
            includeThoughts: true,
            thinkingLevel: 'THINKING_LEVEL_UNSPECIFIED',
          },
        }),
      }),
    );
    expect(response).toBe(expectedResponse);
  });

  it('should call generateContentStream on the underlying model', async () => {
    const request = { model: 'gemini-1.5-flash', contents: [] };
    const mockStream = (async function* () {
      yield { responseId: '1' };
    })();
    mockGoogleGenAI.models.generateContentStream.mockResolvedValue(mockStream);

    const stream = await generator.generateContentStream(request, 'prompt-id');

    expect(mockGoogleGenAI.models.generateContentStream).toHaveBeenCalledWith(
      expect.objectContaining({
        ...request,
        config: expect.objectContaining({
          temperature: 1,
          topP: 0.95,
          thinkingConfig: {
            includeThoughts: true,
            thinkingLevel: 'THINKING_LEVEL_UNSPECIFIED',
          },
        }),
      }),
    );
    expect(stream).toBe(mockStream);
  });

  it('should call countTokens on the underlying model', async () => {
    const request = { model: 'gemini-1.5-flash', contents: [] };
    const expectedResponse = { totalTokens: 10 };
    mockGoogleGenAI.models.countTokens.mockResolvedValue(expectedResponse);

    const response = await generator.countTokens(request);

    expect(mockGoogleGenAI.models.countTokens).toHaveBeenCalledWith(request);
    expect(response).toBe(expectedResponse);
  });

  it('should call embedContent on the underlying model', async () => {
    const request = { model: 'embedding-model', contents: [] };
    const expectedResponse = { embeddings: [] };
    mockGoogleGenAI.models.embedContent.mockResolvedValue(expectedResponse);

    const response = await generator.embedContent(request);

    expect(mockGoogleGenAI.models.embedContent).toHaveBeenCalledWith(request);
    expect(response).toBe(expectedResponse);
  });

  it('should prioritize contentGeneratorConfig samplingParams over request config', async () => {
    const generatorWithParams = new GeminiContentGenerator({ apiKey: 'test' }, {
      model: 'gemini-1.5-flash',
      samplingParams: {
        temperature: 0.1,
        top_p: 0.2,
      },
      // eslint-disable-next-line @typescript-eslint/no-explicit-any
    } as any);

    const request = {
      model: 'gemini-1.5-flash',
      contents: [],
      config: {
        temperature: 0.9,
        topP: 0.9,
      },
    };

    await generatorWithParams.generateContent(request, 'prompt-id');

    expect(mockGoogleGenAI.models.generateContent).toHaveBeenCalledWith(
      expect.objectContaining({
        config: expect.objectContaining({
          temperature: 0.1,
          topP: 0.2,
        }),
      }),
    );
  });

  it('should map reasoning effort to thinkingConfig', async () => {
    const generatorWithReasoning = new GeminiContentGenerator(
      { apiKey: 'test' },
      {
        model: 'gemini-2.5-pro',
        reasoning: {
          effort: 'high',
        },
        // eslint-disable-next-line @typescript-eslint/no-explicit-any
      } as any,
    );

    const request = {
      model: 'gemini-2.5-pro',
      contents: [],
    };

    await generatorWithReasoning.generateContent(request, 'prompt-id');

    expect(mockGoogleGenAI.models.generateContent).toHaveBeenCalledWith(
      expect.objectContaining({
        config: expect.objectContaining({
          thinkingConfig: {
            includeThoughts: true,
            thinkingLevel: 'HIGH',
          },
        }),
      }),
    );
  });
});
@@ -1,161 +0,0 @@
/**
 * @license
 * Copyright 2025 Google LLC
 * SPDX-License-Identifier: Apache-2.0
 */

import type {
  CountTokensParameters,
  CountTokensResponse,
  EmbedContentParameters,
  EmbedContentResponse,
  GenerateContentParameters,
  GenerateContentResponse,
  GenerateContentConfig,
  ThinkingLevel,
} from '@google/genai';
import { GoogleGenAI } from '@google/genai';
import type {
  ContentGenerator,
  ContentGeneratorConfig,
} from '../contentGenerator.js';

/**
 * A wrapper for GoogleGenAI that implements the ContentGenerator interface.
 */
export class GeminiContentGenerator implements ContentGenerator {
  private readonly googleGenAI: GoogleGenAI;
  private readonly contentGeneratorConfig?: ContentGeneratorConfig;

  constructor(
    options: {
      apiKey?: string;
      vertexai?: boolean;
      httpOptions?: { headers: Record<string, string> };
    },
    contentGeneratorConfig?: ContentGeneratorConfig,
  ) {
    this.googleGenAI = new GoogleGenAI(options);
    this.contentGeneratorConfig = contentGeneratorConfig;
  }

  private buildGenerateContentConfig(
    request: GenerateContentParameters,
  ): GenerateContentConfig {
    const configSamplingParams = this.contentGeneratorConfig?.samplingParams;
    const requestConfig = request.config || {};

    // Helper function to get parameter value with priority: config > request > default
    const getParameterValue = <T>(
      configValue: T | undefined,
      requestKey: keyof GenerateContentConfig,
      defaultValue?: T,
    ): T | undefined => {
      const requestValue = requestConfig[requestKey] as T | undefined;

      if (configValue !== undefined) return configValue;
      if (requestValue !== undefined) return requestValue;
      return defaultValue;
    };

    return {
      ...requestConfig,
      temperature: getParameterValue<number>(
        configSamplingParams?.temperature,
        'temperature',
        1,
      ),
      topP: getParameterValue<number>(
        configSamplingParams?.top_p,
        'topP',
        0.95,
      ),
      topK: getParameterValue<number>(configSamplingParams?.top_k, 'topK', 64),
      maxOutputTokens: getParameterValue<number>(
        configSamplingParams?.max_tokens,
        'maxOutputTokens',
      ),
      presencePenalty: getParameterValue<number>(
        configSamplingParams?.presence_penalty,
        'presencePenalty',
      ),
      frequencyPenalty: getParameterValue<number>(
        configSamplingParams?.frequency_penalty,
        'frequencyPenalty',
      ),
      thinkingConfig: getParameterValue(
        this.buildThinkingConfig(),
        'thinkingConfig',
        {
          includeThoughts: true,
          thinkingLevel: 'THINKING_LEVEL_UNSPECIFIED' as ThinkingLevel,
        },
      ),
    };
  }

  private buildThinkingConfig():
    | { includeThoughts: boolean; thinkingLevel?: ThinkingLevel }
    | undefined {
    const reasoning = this.contentGeneratorConfig?.reasoning;

    if (reasoning === false) {
      return { includeThoughts: false };
    }

    if (reasoning) {
      const thinkingLevel = (
        reasoning.effort === 'low'
          ? 'LOW'
          : reasoning.effort === 'high'
            ? 'HIGH'
            : 'THINKING_LEVEL_UNSPECIFIED'
      ) as ThinkingLevel;

      return {
        includeThoughts: true,
        thinkingLevel,
      };
    }

    return undefined;
  }

  async generateContent(
    request: GenerateContentParameters,
    _userPromptId: string,
  ): Promise<GenerateContentResponse> {
    const finalRequest = {
      ...request,
      config: this.buildGenerateContentConfig(request),
    };
    return this.googleGenAI.models.generateContent(finalRequest);
  }

  async generateContentStream(
    request: GenerateContentParameters,
    _userPromptId: string,
  ): Promise<AsyncGenerator<GenerateContentResponse>> {
    const finalRequest = {
      ...request,
      config: this.buildGenerateContentConfig(request),
    };
    return this.googleGenAI.models.generateContentStream(finalRequest);
  }

  async countTokens(
    request: CountTokensParameters,
  ): Promise<CountTokensResponse> {
    return this.googleGenAI.models.countTokens(request);
  }

  async embedContent(
    request: EmbedContentParameters,
  ): Promise<EmbedContentResponse> {
    return this.googleGenAI.models.embedContent(request);
  }

  useSummarizedThinking(): boolean {
    return true;
  }
}
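The priority rule in buildGenerateContentConfig (generator config > request config > default) can be read off directly; a worked sketch mirroring the test above, with illustrative values:

```ts
// Sketch: samplingParams on the generator win over per-request values,
// which in turn win over the hard-coded defaults (temperature 1, topP 0.95, topK 64).
const generator = new GeminiContentGenerator(
  { apiKey: 'test' },
  {
    model: 'gemini-1.5-flash',
    samplingParams: { temperature: 0.1 },
  } as ContentGeneratorConfig,
);
// The request asks for temperature 0.9 and sets no topP:
// resolved temperature -> 0.1 (config wins); topP -> 0.95 (default); topK -> 64.
await generator.generateContent(
  { model: 'gemini-1.5-flash', contents: [], config: { temperature: 0.9 } },
  'prompt-id',
);
```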
@@ -1,41 +0,0 @@
/**
 * @license
 * Copyright 2025 Google LLC
 * SPDX-License-Identifier: Apache-2.0
 */

import { describe, it, expect, vi, beforeEach } from 'vitest';
import { createGeminiContentGenerator } from './index.js';
import { GeminiContentGenerator } from './geminiContentGenerator.js';
import type { Config } from '../../config/config.js';
import { AuthType } from '../contentGenerator.js';

vi.mock('./geminiContentGenerator.js', () => ({
  GeminiContentGenerator: vi.fn().mockImplementation(() => ({})),
}));

describe('createGeminiContentGenerator', () => {
  let mockConfig: Config;

  beforeEach(() => {
    vi.clearAllMocks();
    mockConfig = {
      getUsageStatisticsEnabled: vi.fn().mockReturnValue(false),
      getContentGeneratorConfig: vi.fn().mockReturnValue({}),
      getCliVersion: vi.fn().mockReturnValue('1.0.0'),
    } as unknown as Config;
  });

  it('should create a GeminiContentGenerator', () => {
    const config = {
      model: 'gemini-1.5-flash',
      apiKey: 'test-key',
      authType: AuthType.USE_GEMINI,
    };

    const generator = createGeminiContentGenerator(config, mockConfig);

    expect(GeminiContentGenerator).toHaveBeenCalled();
    expect(generator).toBeDefined();
  });
});
@@ -1,53 +0,0 @@
/**
 * @license
 * Copyright 2025 Google LLC
 * SPDX-License-Identifier: Apache-2.0
 */

import { GeminiContentGenerator } from './geminiContentGenerator.js';
import type {
  ContentGenerator,
  ContentGeneratorConfig,
} from '../contentGenerator.js';
import type { Config } from '../../config/config.js';
import { InstallationManager } from '../../utils/installationManager.js';

export { GeminiContentGenerator } from './geminiContentGenerator.js';

/**
 * Create a Gemini content generator.
 */
export function createGeminiContentGenerator(
  config: ContentGeneratorConfig,
  gcConfig: Config,
): ContentGenerator {
  const version = process.env['CLI_VERSION'] || process.version;
  const userAgent =
    config.userAgent ||
    `QwenCode/${version} (${process.platform}; ${process.arch})`;
  const baseHeaders: Record<string, string> = {
    'User-Agent': userAgent,
  };

  let headers: Record<string, string> = { ...baseHeaders };
  if (gcConfig?.getUsageStatisticsEnabled()) {
    const installationManager = new InstallationManager();
    const installationId = installationManager.getInstallationId();
    headers = {
      ...headers,
      'x-gemini-api-privileged-user-id': `${installationId}`,
    };
  }
  const httpOptions = { headers };

  const geminiContentGenerator = new GeminiContentGenerator(
    {
      apiKey: config.apiKey === '' ? undefined : config.apiKey,
      vertexai: config.vertexai,
      httpOptions,
    },
    config,
  );

  return geminiContentGenerator;
}
208
packages/core/src/core/loggingContentGenerator.ts
Normal file
@@ -0,0 +1,208 @@
/**
 * @license
 * Copyright 2025 Google LLC
 * SPDX-License-Identifier: Apache-2.0
 */

import type {
  Content,
  CountTokensParameters,
  CountTokensResponse,
  EmbedContentParameters,
  EmbedContentResponse,
  GenerateContentParameters,
  GenerateContentResponseUsageMetadata,
  GenerateContentResponse,
} from '@google/genai';
import {
  ApiRequestEvent,
  ApiResponseEvent,
  ApiErrorEvent,
} from '../telemetry/types.js';
import type { Config } from '../config/config.js';
import {
  logApiError,
  logApiRequest,
  logApiResponse,
} from '../telemetry/loggers.js';
import type { ContentGenerator } from './contentGenerator.js';
import { toContents } from '../code_assist/converter.js';
import { isStructuredError } from '../utils/quotaErrorDetection.js';

interface StructuredError {
  status: number;
}

/**
 * A decorator that wraps a ContentGenerator to add logging to API calls.
 */
export class LoggingContentGenerator implements ContentGenerator {
  constructor(
    private readonly wrapped: ContentGenerator,
    private readonly config: Config,
  ) {}

  getWrapped(): ContentGenerator {
    return this.wrapped;
  }

  private logApiRequest(
    contents: Content[],
    model: string,
    promptId: string,
  ): void {
    const requestText = JSON.stringify(contents);
    logApiRequest(
      this.config,
      new ApiRequestEvent(model, promptId, requestText),
    );
  }

  private _logApiResponse(
    responseId: string,
    durationMs: number,
    model: string,
    prompt_id: string,
    usageMetadata?: GenerateContentResponseUsageMetadata,
    responseText?: string,
  ): void {
    logApiResponse(
      this.config,
      new ApiResponseEvent(
        responseId,
        model,
        durationMs,
        prompt_id,
        this.config.getContentGeneratorConfig()?.authType,
        usageMetadata,
        responseText,
      ),
    );
  }

  private _logApiError(
    responseId: string | undefined,
    durationMs: number,
    error: unknown,
    model: string,
    prompt_id: string,
  ): void {
    const errorMessage = error instanceof Error ? error.message : String(error);
    const errorType = error instanceof Error ? error.name : 'unknown';

    logApiError(
      this.config,
      new ApiErrorEvent(
        responseId,
        model,
        errorMessage,
        durationMs,
        prompt_id,
        this.config.getContentGeneratorConfig()?.authType,
        errorType,
        isStructuredError(error)
          ? (error as StructuredError).status
          : undefined,
      ),
    );
  }

  async generateContent(
    req: GenerateContentParameters,
    userPromptId: string,
  ): Promise<GenerateContentResponse> {
    const startTime = Date.now();
    this.logApiRequest(toContents(req.contents), req.model, userPromptId);
    try {
      const response = await this.wrapped.generateContent(req, userPromptId);
      const durationMs = Date.now() - startTime;
      this._logApiResponse(
        response.responseId ?? '',
        durationMs,
        response.modelVersion || req.model,
        userPromptId,
        response.usageMetadata,
        JSON.stringify(response),
      );
      return response;
    } catch (error) {
      const durationMs = Date.now() - startTime;
      this._logApiError(undefined, durationMs, error, req.model, userPromptId);
      throw error;
    }
  }

  async generateContentStream(
    req: GenerateContentParameters,
    userPromptId: string,
  ): Promise<AsyncGenerator<GenerateContentResponse>> {
    const startTime = Date.now();
    this.logApiRequest(toContents(req.contents), req.model, userPromptId);

    let stream: AsyncGenerator<GenerateContentResponse>;
    try {
      stream = await this.wrapped.generateContentStream(req, userPromptId);
    } catch (error) {
      const durationMs = Date.now() - startTime;
      this._logApiError(undefined, durationMs, error, req.model, userPromptId);
      throw error;
    }

    return this.loggingStreamWrapper(
      stream,
      startTime,
      userPromptId,
      req.model,
    );
  }

  private async *loggingStreamWrapper(
    stream: AsyncGenerator<GenerateContentResponse>,
    startTime: number,
    userPromptId: string,
    model: string,
  ): AsyncGenerator<GenerateContentResponse> {
    const responses: GenerateContentResponse[] = [];

    let lastUsageMetadata: GenerateContentResponseUsageMetadata | undefined;
    try {
      for await (const response of stream) {
        responses.push(response);
        if (response.usageMetadata) {
          lastUsageMetadata = response.usageMetadata;
        }
        yield response;
      }
      // Only log successful API response if no error occurred
      const durationMs = Date.now() - startTime;
      this._logApiResponse(
        responses[0]?.responseId ?? '',
        durationMs,
        responses[0]?.modelVersion || model,
        userPromptId,
        lastUsageMetadata,
        JSON.stringify(responses),
      );
    } catch (error) {
      const durationMs = Date.now() - startTime;
      this._logApiError(
        undefined,
        durationMs,
        error,
        responses[0]?.modelVersion || model,
        userPromptId,
      );
      throw error;
    }
  }

  async countTokens(req: CountTokensParameters): Promise<CountTokensResponse> {
    return this.wrapped.countTokens(req);
  }

  async embedContent(
    req: EmbedContentParameters,
  ): Promise<EmbedContentResponse> {
    return this.wrapped.embedContent(req);
  }
}
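Because the decorator forwards every call to the wrapped generator while emitting telemetry events, usage is a one-line wrap; a sketch with an illustrative inner generator:

```ts
// Sketch: wrap any ContentGenerator to log API request/response/error events.
const logged = new LoggingContentGenerator(innerGenerator, config); // innerGenerator is illustrative
const response = await logged.generateContent(request, 'prompt-1');
// Streaming responses are logged once the stream is fully consumed: the
// wrapper buffers chunks so it can report the final usage metadata.
```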
@@ -1,7 +0,0 @@
/**
 * @license
 * Copyright 2025 Qwen
 * SPDX-License-Identifier: Apache-2.0
 */

export { LoggingContentGenerator } from './loggingContentGenerator.js';
@@ -1,371 +0,0 @@
/**
 * @license
 * Copyright 2025 Qwen
 * SPDX-License-Identifier: Apache-2.0
 */

import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
import type {
  GenerateContentParameters,
  GenerateContentResponseUsageMetadata,
} from '@google/genai';
import { GenerateContentResponse } from '@google/genai';
import type { Config } from '../../config/config.js';
import type { ContentGenerator } from '../contentGenerator.js';
import { LoggingContentGenerator } from './index.js';
import { OpenAIContentConverter } from '../openaiContentGenerator/converter.js';
import {
  logApiRequest,
  logApiResponse,
  logApiError,
} from '../../telemetry/loggers.js';
import { OpenAILogger } from '../../utils/openaiLogger.js';
import type OpenAI from 'openai';

vi.mock('../../telemetry/loggers.js', () => ({
  logApiRequest: vi.fn(),
  logApiResponse: vi.fn(),
  logApiError: vi.fn(),
}));

vi.mock('../../utils/openaiLogger.js', () => ({
  OpenAILogger: vi.fn().mockImplementation(() => ({
    logInteraction: vi.fn().mockResolvedValue(undefined),
  })),
}));

const convertGeminiRequestToOpenAISpy = vi
  .spyOn(OpenAIContentConverter.prototype, 'convertGeminiRequestToOpenAI')
  .mockReturnValue([{ role: 'user', content: 'converted' }]);
const convertGeminiToolsToOpenAISpy = vi
  .spyOn(OpenAIContentConverter.prototype, 'convertGeminiToolsToOpenAI')
  .mockResolvedValue([{ type: 'function', function: { name: 'tool' } }]);
const convertGeminiResponseToOpenAISpy = vi
  .spyOn(OpenAIContentConverter.prototype, 'convertGeminiResponseToOpenAI')
  .mockReturnValue({
    id: 'openai-response',
    object: 'chat.completion',
    created: 123456789,
    model: 'test-model',
    choices: [],
  } as OpenAI.Chat.ChatCompletion);

const createConfig = (overrides: Record<string, unknown> = {}): Config =>
  ({
    getContentGeneratorConfig: () => ({
      authType: 'openai',
      enableOpenAILogging: false,
      ...overrides,
    }),
  }) as Config;

const createWrappedGenerator = (
  generateContent: ContentGenerator['generateContent'],
  generateContentStream: ContentGenerator['generateContentStream'],
): ContentGenerator =>
  ({
    generateContent,
    generateContentStream,
    countTokens: vi.fn(),
    embedContent: vi.fn(),
    useSummarizedThinking: vi.fn().mockReturnValue(false),
  }) as ContentGenerator;

const createResponse = (
  responseId: string,
  modelVersion: string,
  parts: Array<Record<string, unknown>>,
  usageMetadata?: GenerateContentResponseUsageMetadata,
  finishReason?: string,
): GenerateContentResponse => {
  const response = new GenerateContentResponse();
  response.responseId = responseId;
  response.modelVersion = modelVersion;
  response.usageMetadata = usageMetadata;
  response.candidates = [
    {
      content: {
        role: 'model',
        parts: parts as never[],
      },
      finishReason: finishReason as never,
      index: 0,
      safetyRatings: [],
    },
  ];
  return response;
};

describe('LoggingContentGenerator', () => {
  beforeEach(() => {
    vi.clearAllMocks();
  });

  afterEach(() => {
    convertGeminiRequestToOpenAISpy.mockClear();
    convertGeminiToolsToOpenAISpy.mockClear();
    convertGeminiResponseToOpenAISpy.mockClear();
  });

  it('logs request/response, normalizes thought parts, and logs OpenAI interaction', async () => {
    const wrapped = createWrappedGenerator(
      vi.fn().mockResolvedValue(
        createResponse(
          'resp-1',
          'model-v2',
          [{ text: 'ok' }],
          {
            promptTokenCount: 3,
            candidatesTokenCount: 5,
            totalTokenCount: 8,
          },
          'STOP',
        ),
      ),
      vi.fn(),
    );
    const generator = new LoggingContentGenerator(
      wrapped,
      createConfig({
        enableOpenAILogging: true,
        openAILoggingDir: 'logs',
        schemaCompliance: 'openapi_30',
      }),
    );

    const request = {
      model: 'test-model',
      contents: [
        {
          role: 'user',
          parts: [
            { text: 'Hello', thought: 'internal' },
            {
              functionCall: { id: 'call-1', name: 'tool', args: '{}' },
              thought: 'strip-me',
            },
            null,
          ],
        },
      ],
      config: {
        temperature: 0.3,
        topP: 0.9,
        maxOutputTokens: 256,
        presencePenalty: 0.2,
        frequencyPenalty: 0.1,
        tools: [
          {
            functionDeclarations: [
              { name: 'tool', description: 'desc', parameters: {} },
            ],
          },
        ],
      },
    } as unknown as GenerateContentParameters;

    const response = await generator.generateContent(request, 'prompt-1');

    expect(response.responseId).toBe('resp-1');
    expect(logApiRequest).toHaveBeenCalledTimes(1);
    const [, requestEvent] = vi.mocked(logApiRequest).mock.calls[0];
    const loggedContents = JSON.parse(requestEvent.request_text || '[]');
    expect(loggedContents[0].parts[0]).toEqual({
      text: 'Hello\n[Thought: internal]',
    });
    expect(loggedContents[0].parts[1]).toEqual({
      functionCall: { id: 'call-1', name: 'tool', args: '{}' },
    });

    expect(logApiResponse).toHaveBeenCalledTimes(1);
    const [, responseEvent] = vi.mocked(logApiResponse).mock.calls[0];
    expect(responseEvent.response_id).toBe('resp-1');
    expect(responseEvent.model).toBe('model-v2');
    expect(responseEvent.prompt_id).toBe('prompt-1');
    expect(responseEvent.input_token_count).toBe(3);

    expect(convertGeminiRequestToOpenAISpy).toHaveBeenCalledTimes(1);
    expect(convertGeminiToolsToOpenAISpy).toHaveBeenCalledTimes(1);
    expect(convertGeminiResponseToOpenAISpy).toHaveBeenCalledTimes(1);

    const openaiLoggerInstance = vi.mocked(OpenAILogger).mock.results[0]
      ?.value as { logInteraction: ReturnType<typeof vi.fn> };
    expect(openaiLoggerInstance.logInteraction).toHaveBeenCalledTimes(1);
    const [openaiRequest, openaiResponse, openaiError] =
      openaiLoggerInstance.logInteraction.mock.calls[0];
    expect(openaiRequest).toEqual(
      expect.objectContaining({
        model: 'test-model',
        messages: [{ role: 'user', content: 'converted' }],
        tools: [{ type: 'function', function: { name: 'tool' } }],
        temperature: 0.3,
        top_p: 0.9,
        max_tokens: 256,
        presence_penalty: 0.2,
        frequency_penalty: 0.1,
      }),
    );
    expect(openaiResponse).toEqual({
      id: 'openai-response',
      object: 'chat.completion',
      created: 123456789,
      model: 'test-model',
      choices: [],
    });
    expect(openaiError).toBeUndefined();
  });

  it('logs errors with status code and request id, then rethrows', async () => {
    const error = Object.assign(new Error('boom'), {
      code: 429,
      request_id: 'req-99',
      type: 'rate_limit',
    });
    const wrapped = createWrappedGenerator(
      vi.fn().mockRejectedValue(error),
      vi.fn(),
    );
    const generator = new LoggingContentGenerator(
      wrapped,
      createConfig({ enableOpenAILogging: true }),
    );

    const request = {
      model: 'test-model',
      contents: 'Hello',
    } as unknown as GenerateContentParameters;

    await expect(
      generator.generateContent(request, 'prompt-2'),
    ).rejects.toThrow('boom');

    expect(logApiError).toHaveBeenCalledTimes(1);
    const [, errorEvent] = vi.mocked(logApiError).mock.calls[0];
    expect(errorEvent.response_id).toBe('req-99');
    expect(errorEvent.status_code).toBe(429);
    expect(errorEvent.error_type).toBe('rate_limit');
    expect(errorEvent.prompt_id).toBe('prompt-2');

    const openaiLoggerInstance = vi.mocked(OpenAILogger).mock.results[0]
      ?.value as { logInteraction: ReturnType<typeof vi.fn> };
    const [, , loggedError] = openaiLoggerInstance.logInteraction.mock.calls[0];
    expect(loggedError).toBeInstanceOf(Error);
    expect((loggedError as Error).message).toBe('boom');
  });

  it('logs streaming responses and consolidates tool calls', async () => {
    const usage1 = {
      promptTokenCount: 1,
    } as GenerateContentResponseUsageMetadata;
    const usage2 = {
      promptTokenCount: 2,
      candidatesTokenCount: 4,
      totalTokenCount: 6,
    } as GenerateContentResponseUsageMetadata;

    const response1 = createResponse(
      'resp-1',
      'model-stream',
      [
        { text: 'Hello' },
        { functionCall: { id: 'call-1', name: 'tool', args: '{}' } },
      ],
      usage1,
    );
    const response2 = createResponse(
      'resp-2',
      'model-stream',
      [
        { text: ' world' },
        { functionCall: { id: 'call-1', name: 'tool', args: '{"x":1}' } },
        { functionResponse: { name: 'tool', response: { output: 'ok' } } },
      ],
      usage2,
      'STOP',
    );

    const wrapped = createWrappedGenerator(
      vi.fn(),
      vi.fn().mockResolvedValue(
        (async function* () {
          yield response1;
          yield response2;
        })(),
      ),
    );
    const generator = new LoggingContentGenerator(
      wrapped,
      createConfig({ enableOpenAILogging: true }),
    );

    const request = {
      model: 'test-model',
      contents: 'Hello',
    } as unknown as GenerateContentParameters;

    const stream = await generator.generateContentStream(request, 'prompt-3');
    const seen: GenerateContentResponse[] = [];
    for await (const item of stream) {
      seen.push(item);
    }
    expect(seen).toHaveLength(2);

    expect(logApiResponse).toHaveBeenCalledTimes(1);
    const [, responseEvent] = vi.mocked(logApiResponse).mock.calls[0];
    expect(responseEvent.response_id).toBe('resp-1');
    expect(responseEvent.input_token_count).toBe(2);

    expect(convertGeminiResponseToOpenAISpy).toHaveBeenCalledTimes(1);
    const [consolidatedResponse] =
      convertGeminiResponseToOpenAISpy.mock.calls[0];
    const consolidatedParts =
      consolidatedResponse.candidates?.[0]?.content?.parts || [];
    expect(consolidatedParts).toEqual([
      { text: 'Hello' },
      { functionCall: { id: 'call-1', name: 'tool', args: '{"x":1}' } },
      { text: ' world' },
      { functionResponse: { name: 'tool', response: { output: 'ok' } } },
    ]);
    expect(consolidatedResponse.usageMetadata).toBe(usage2);
    expect(consolidatedResponse.responseId).toBe('resp-2');
    expect(consolidatedResponse.candidates?.[0]?.finishReason).toBe('STOP');
  });

  it('logs stream errors and skips response logging', async () => {
    const response1 = createResponse('resp-1', 'model-stream', [
      { text: 'partial' },
    ]);
    const streamError = new Error('stream-fail');
    const wrapped = createWrappedGenerator(
      vi.fn(),
      vi.fn().mockResolvedValue(
        (async function* () {
          yield response1;
          throw streamError;
        })(),
      ),
    );
    const generator = new LoggingContentGenerator(
      wrapped,
      createConfig({ enableOpenAILogging: true }),
    );

    const request = {
      model: 'test-model',
      contents: 'Hello',
    } as unknown as GenerateContentParameters;

    const stream = await generator.generateContentStream(request, 'prompt-4');
    await expect(async () => {
      for await (const _item of stream) {
        // Consume stream to trigger error.
      }
    }).rejects.toThrow('stream-fail');

    expect(logApiResponse).not.toHaveBeenCalled();
    expect(logApiError).toHaveBeenCalledTimes(1);
    const openaiLoggerInstance = vi.mocked(OpenAILogger).mock.results[0]
      ?.value as { logInteraction: ReturnType<typeof vi.fn> };
    expect(openaiLoggerInstance.logInteraction).toHaveBeenCalledTimes(1);
  });
});
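
Aside (not part of the diff): the deleted tests above repeatedly fake a streaming generator with an immediately-invoked async generator function. A minimal sketch of that pattern, distilled for reuse — the helper name, `chunks`, and `failAfter` are illustrative, not taken from the repository:

// Sketch only: resolves to an async iterator that yields canned chunks,
// then optionally throws to exercise the stream wrapper's error path.
import { vi } from 'vitest';
import type { GenerateContentResponse } from '@google/genai';

function fakeStream(
  chunks: GenerateContentResponse[],
  failAfter?: Error,
) {
  return vi.fn().mockResolvedValue(
    (async function* () {
      for (const chunk of chunks) {
        yield chunk;
      }
      if (failAfter) {
        throw failAfter;
      }
    })(),
  );
}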
@@ -1,507 +0,0 @@
/**
 * @license
 * Copyright 2025 Google LLC
 * SPDX-License-Identifier: Apache-2.0
 */

import {
  GenerateContentResponse,
  type Content,
  type CountTokensParameters,
  type CountTokensResponse,
  type EmbedContentParameters,
  type EmbedContentResponse,
  type GenerateContentParameters,
  type GenerateContentResponseUsageMetadata,
  type ContentListUnion,
  type ContentUnion,
  type Part,
  type PartUnion,
  type FinishReason,
} from '@google/genai';
import type OpenAI from 'openai';
import {
  ApiRequestEvent,
  ApiResponseEvent,
  ApiErrorEvent,
} from '../../telemetry/types.js';
import type { Config } from '../../config/config.js';
import {
  logApiError,
  logApiRequest,
  logApiResponse,
} from '../../telemetry/loggers.js';
import type { ContentGenerator } from '../contentGenerator.js';
import { isStructuredError } from '../../utils/quotaErrorDetection.js';
import { OpenAIContentConverter } from '../openaiContentGenerator/converter.js';
import { OpenAILogger } from '../../utils/openaiLogger.js';

interface StructuredError {
  status: number;
}

/**
 * A decorator that wraps a ContentGenerator to add logging to API calls.
 */
export class LoggingContentGenerator implements ContentGenerator {
  private openaiLogger?: OpenAILogger;
  private schemaCompliance?: 'auto' | 'openapi_30';

  constructor(
    private readonly wrapped: ContentGenerator,
    private readonly config: Config,
  ) {
    const generatorConfig = this.config.getContentGeneratorConfig();
    if (generatorConfig?.enableOpenAILogging) {
      this.openaiLogger = new OpenAILogger(generatorConfig.openAILoggingDir);
      this.schemaCompliance = generatorConfig.schemaCompliance;
    }
  }

  getWrapped(): ContentGenerator {
    return this.wrapped;
  }

  private logApiRequest(
    contents: Content[],
    model: string,
    promptId: string,
  ): void {
    const requestText = JSON.stringify(contents);
    logApiRequest(
      this.config,
      new ApiRequestEvent(model, promptId, requestText),
    );
  }

  private _logApiResponse(
    responseId: string,
    durationMs: number,
    model: string,
    prompt_id: string,
    usageMetadata?: GenerateContentResponseUsageMetadata,
    responseText?: string,
  ): void {
    logApiResponse(
      this.config,
      new ApiResponseEvent(
        responseId,
        model,
        durationMs,
        prompt_id,
        this.config.getContentGeneratorConfig()?.authType,
        usageMetadata,
        responseText,
      ),
    );
  }

  private _logApiError(
    responseId: string | undefined,
    durationMs: number,
    error: unknown,
    model: string,
    prompt_id: string,
  ): void {
    const errorMessage = error instanceof Error ? error.message : String(error);
    const errorType =
      (error as { type?: string })?.type ||
      (error instanceof Error ? error.name : 'unknown');
    const errorResponseId =
      (error as { requestID?: string; request_id?: string })?.requestID ||
      (error as { requestID?: string; request_id?: string })?.request_id ||
      responseId;
    const errorStatus =
      (error as { code?: string | number; status?: number })?.code ??
      (error as { status?: number })?.status ??
      (isStructuredError(error)
        ? (error as StructuredError).status
        : undefined);

    logApiError(
      this.config,
      new ApiErrorEvent(
        errorResponseId,
        model,
        errorMessage,
        durationMs,
        prompt_id,
        this.config.getContentGeneratorConfig()?.authType,
        errorType,
        errorStatus,
      ),
    );
  }

  async generateContent(
    req: GenerateContentParameters,
    userPromptId: string,
  ): Promise<GenerateContentResponse> {
    const startTime = Date.now();
    this.logApiRequest(this.toContents(req.contents), req.model, userPromptId);
    const openaiRequest = await this.buildOpenAIRequestForLogging(req);
    try {
      const response = await this.wrapped.generateContent(req, userPromptId);
      const durationMs = Date.now() - startTime;
      this._logApiResponse(
        response.responseId ?? '',
        durationMs,
        response.modelVersion || req.model,
        userPromptId,
        response.usageMetadata,
        JSON.stringify(response),
      );
      await this.logOpenAIInteraction(openaiRequest, response);
      return response;
    } catch (error) {
      const durationMs = Date.now() - startTime;
      this._logApiError(undefined, durationMs, error, req.model, userPromptId);
      await this.logOpenAIInteraction(openaiRequest, undefined, error);
      throw error;
    }
  }

  async generateContentStream(
    req: GenerateContentParameters,
    userPromptId: string,
  ): Promise<AsyncGenerator<GenerateContentResponse>> {
    const startTime = Date.now();
    this.logApiRequest(this.toContents(req.contents), req.model, userPromptId);
    const openaiRequest = await this.buildOpenAIRequestForLogging(req);

    let stream: AsyncGenerator<GenerateContentResponse>;
    try {
      stream = await this.wrapped.generateContentStream(req, userPromptId);
    } catch (error) {
      const durationMs = Date.now() - startTime;
      this._logApiError(undefined, durationMs, error, req.model, userPromptId);
      await this.logOpenAIInteraction(openaiRequest, undefined, error);
      throw error;
    }

    return this.loggingStreamWrapper(
      stream,
      startTime,
      userPromptId,
      req.model,
      openaiRequest,
    );
  }

  private async *loggingStreamWrapper(
    stream: AsyncGenerator<GenerateContentResponse>,
    startTime: number,
    userPromptId: string,
    model: string,
    openaiRequest?: OpenAI.Chat.ChatCompletionCreateParams,
  ): AsyncGenerator<GenerateContentResponse> {
    const responses: GenerateContentResponse[] = [];

    let lastUsageMetadata: GenerateContentResponseUsageMetadata | undefined;
    try {
      for await (const response of stream) {
        responses.push(response);
        if (response.usageMetadata) {
          lastUsageMetadata = response.usageMetadata;
        }
        yield response;
      }
      // Only log successful API response if no error occurred
      const durationMs = Date.now() - startTime;
      this._logApiResponse(
        responses[0]?.responseId ?? '',
        durationMs,
        responses[0]?.modelVersion || model,
        userPromptId,
        lastUsageMetadata,
        JSON.stringify(responses),
      );
      const consolidatedResponse =
        this.consolidateGeminiResponsesForLogging(responses);
      await this.logOpenAIInteraction(openaiRequest, consolidatedResponse);
    } catch (error) {
      const durationMs = Date.now() - startTime;
      this._logApiError(
        undefined,
        durationMs,
        error,
        responses[0]?.modelVersion || model,
        userPromptId,
      );
      await this.logOpenAIInteraction(openaiRequest, undefined, error);
      throw error;
    }
  }

  private async buildOpenAIRequestForLogging(
    request: GenerateContentParameters,
  ): Promise<OpenAI.Chat.ChatCompletionCreateParams | undefined> {
    if (!this.openaiLogger) {
      return undefined;
    }

    const converter = new OpenAIContentConverter(
      request.model,
      this.schemaCompliance,
    );
    const messages = converter.convertGeminiRequestToOpenAI(request, {
      cleanOrphanToolCalls: false,
    });

    const openaiRequest: OpenAI.Chat.ChatCompletionCreateParams = {
      model: request.model,
      messages,
    };

    if (request.config?.tools) {
      openaiRequest.tools = await converter.convertGeminiToolsToOpenAI(
        request.config.tools,
      );
    }

    if (request.config?.temperature !== undefined) {
      openaiRequest.temperature = request.config.temperature;
    }
    if (request.config?.topP !== undefined) {
      openaiRequest.top_p = request.config.topP;
    }
    if (request.config?.maxOutputTokens !== undefined) {
      openaiRequest.max_tokens = request.config.maxOutputTokens;
    }
    if (request.config?.presencePenalty !== undefined) {
      openaiRequest.presence_penalty = request.config.presencePenalty;
    }
    if (request.config?.frequencyPenalty !== undefined) {
      openaiRequest.frequency_penalty = request.config.frequencyPenalty;
    }

    return openaiRequest;
  }

  private async logOpenAIInteraction(
    openaiRequest: OpenAI.Chat.ChatCompletionCreateParams | undefined,
    response?: GenerateContentResponse,
    error?: unknown,
  ): Promise<void> {
    if (!this.openaiLogger || !openaiRequest) {
      return;
    }

    const openaiResponse = response
      ? this.convertGeminiResponseToOpenAIForLogging(response, openaiRequest)
      : undefined;

    await this.openaiLogger.logInteraction(
      openaiRequest,
      openaiResponse,
      error instanceof Error
        ? error
        : error
          ? new Error(String(error))
          : undefined,
    );
  }

  private convertGeminiResponseToOpenAIForLogging(
    response: GenerateContentResponse,
    openaiRequest: OpenAI.Chat.ChatCompletionCreateParams,
  ): OpenAI.Chat.ChatCompletion {
    const converter = new OpenAIContentConverter(
      openaiRequest.model,
      this.schemaCompliance,
    );

    return converter.convertGeminiResponseToOpenAI(response);
  }

  private consolidateGeminiResponsesForLogging(
    responses: GenerateContentResponse[],
  ): GenerateContentResponse | undefined {
    if (responses.length === 0) {
      return undefined;
    }

    const consolidated = new GenerateContentResponse();
    const combinedParts: Part[] = [];
    const functionCallIndex = new Map<string, number>();
    let finishReason: FinishReason | undefined;
    let usageMetadata: GenerateContentResponseUsageMetadata | undefined;

    for (const response of responses) {
      if (response.usageMetadata) {
        usageMetadata = response.usageMetadata;
      }

      const candidate = response.candidates?.[0];
      if (candidate?.finishReason) {
        finishReason = candidate.finishReason;
      }

      const parts = candidate?.content?.parts ?? [];
      for (const part of parts as Part[]) {
        if (typeof part === 'string') {
          combinedParts.push({ text: part });
          continue;
        }

        if ('text' in part) {
          if (part.text) {
            combinedParts.push({
              text: part.text,
              ...(part.thought ? { thought: true } : {}),
              ...(part.thoughtSignature
                ? { thoughtSignature: part.thoughtSignature }
                : {}),
            });
          }
          continue;
        }

        if ('functionCall' in part && part.functionCall) {
          const callKey =
            part.functionCall.id || part.functionCall.name || 'tool_call';
          const existingIndex = functionCallIndex.get(callKey);
          const functionPart = { functionCall: part.functionCall };
          if (existingIndex !== undefined) {
            combinedParts[existingIndex] = functionPart;
          } else {
            functionCallIndex.set(callKey, combinedParts.length);
            combinedParts.push(functionPart);
          }
          continue;
        }

        if ('functionResponse' in part && part.functionResponse) {
          combinedParts.push({ functionResponse: part.functionResponse });
          continue;
        }

        combinedParts.push(part);
      }
    }

    const lastResponse = responses[responses.length - 1];
    const lastCandidate = lastResponse.candidates?.[0];

    consolidated.responseId = lastResponse.responseId;
    consolidated.createTime = lastResponse.createTime;
    consolidated.modelVersion = lastResponse.modelVersion;
    consolidated.promptFeedback = lastResponse.promptFeedback;
    consolidated.usageMetadata = usageMetadata;

    consolidated.candidates = [
      {
        content: {
          role: lastCandidate?.content?.role || 'model',
          parts: combinedParts,
        },
        ...(finishReason ? { finishReason } : {}),
        index: 0,
        safetyRatings: lastCandidate?.safetyRatings || [],
      },
    ];

    return consolidated;
  }

  async countTokens(req: CountTokensParameters): Promise<CountTokensResponse> {
    return this.wrapped.countTokens(req);
  }

  async embedContent(
    req: EmbedContentParameters,
  ): Promise<EmbedContentResponse> {
    return this.wrapped.embedContent(req);
  }

  useSummarizedThinking(): boolean {
    return this.wrapped.useSummarizedThinking();
  }

  private toContents(contents: ContentListUnion): Content[] {
    if (Array.isArray(contents)) {
      // it's a Content[] or a PartsUnion[]
      return contents.map((c) => this.toContent(c));
    }
    // it's a Content or a PartsUnion
    return [this.toContent(contents)];
  }

  private toContent(content: ContentUnion): Content {
    if (Array.isArray(content)) {
      // it's a PartsUnion[]
      return {
        role: 'user',
        parts: this.toParts(content),
      };
    }
    if (typeof content === 'string') {
      // it's a string
      return {
        role: 'user',
        parts: [{ text: content }],
      };
    }
    if ('parts' in content) {
      // it's a Content - process parts to handle thought filtering
      return {
        ...content,
        parts: content.parts
          ? this.toParts(content.parts.filter((p) => p != null))
          : [],
      };
    }
    // it's a Part
    return {
      role: 'user',
      parts: [this.toPart(content as Part)],
    };
  }

  private toParts(parts: PartUnion[]): Part[] {
    return parts.map((p) => this.toPart(p));
  }

  private toPart(part: PartUnion): Part {
    if (typeof part === 'string') {
      // it's a string
      return { text: part };
    }

    // Handle thought parts for CountToken API compatibility
    // The CountToken API expects parts to have certain required "oneof" fields initialized,
    // but thought parts don't conform to this schema and cause API failures
    if ('thought' in part && part.thought) {
      const thoughtText = `[Thought: ${part.thought}]`;

      const newPart = { ...part };
      delete (newPart as Record<string, unknown>)['thought'];

      const hasApiContent =
        'functionCall' in newPart ||
        'functionResponse' in newPart ||
        'inlineData' in newPart ||
        'fileData' in newPart;

      if (hasApiContent) {
        // It's a functionCall or other non-text part. Just strip the thought.
        return newPart;
      }

      // If no other valid API content, this must be a text part.
      // Combine existing text (if any) with the thought, preserving other properties.
      const text = (newPart as { text?: unknown }).text;
      const existingText = text ? String(text) : '';
      const combinedText = existingText
        ? `${existingText}\n${thoughtText}`
        : thoughtText;

      return {
        ...newPart,
        text: combinedText,
      };
    }

    return part;
  }
}
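
For orientation, a brief usage sketch of the decorator deleted above — illustrative only: `baseGenerator` and `config` are assumed to be supplied by the host application, and the OpenAI-format log is only written when the config reports `enableOpenAILogging`:

// Hypothetical wiring; the variable names are not taken from the diff.
const generator = new LoggingContentGenerator(baseGenerator, config);

// Calls are forwarded to the wrapped generator while ApiRequestEvent /
// ApiResponseEvent / ApiErrorEvent telemetry is emitted around them.
const response = await generator.generateContent(
  { model: 'test-model', contents: 'Hello' },
  'prompt-1',
);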
@@ -47,7 +47,7 @@ describe('executeToolCall', () => {
     getDebugMode: () => false,
     getContentGeneratorConfig: () => ({
       model: 'test-model',
-      authType: 'gemini-api-key',
+      authType: 'oauth-personal',
     }),
     getShellExecutionConfig: () => ({
       terminalWidth: 90,
@@ -236,9 +236,8 @@ export class OpenAIContentConverter {
    */
   convertGeminiRequestToOpenAI(
     request: GenerateContentParameters,
-    options: { cleanOrphanToolCalls: boolean } = { cleanOrphanToolCalls: true },
   ): OpenAI.Chat.ChatCompletionMessageParam[] {
-    let messages: OpenAI.Chat.ChatCompletionMessageParam[] = [];
+    const messages: OpenAI.Chat.ChatCompletionMessageParam[] = [];

     // Handle system instruction from config
     this.addSystemInstructionMessage(request, messages);
@@ -247,89 +246,11 @@
     this.processContents(request.contents, messages);

     // Clean up orphaned tool calls and merge consecutive assistant messages
-    if (options.cleanOrphanToolCalls) {
-      messages = this.cleanOrphanedToolCalls(messages);
-    }
-    messages = this.mergeConsecutiveAssistantMessages(messages);
+    const cleanedMessages = this.cleanOrphanedToolCalls(messages);
+    const mergedMessages =
+      this.mergeConsecutiveAssistantMessages(cleanedMessages);

-    return messages;
-  }
-
-  /**
-   * Convert Gemini response to OpenAI completion format (for logging).
-   */
-  convertGeminiResponseToOpenAI(
-    response: GenerateContentResponse,
-  ): OpenAI.Chat.ChatCompletion {
-    const candidate = response.candidates?.[0];
-    const parts = (candidate?.content?.parts || []) as Part[];
-    const parsedParts = this.parseParts(parts);
-
-    const message: ExtendedCompletionMessage = {
-      role: 'assistant',
-      content: parsedParts.contentParts.join('') || null,
-      refusal: null,
-    };
-
-    const reasoningContent = parsedParts.thoughtParts.join('');
-    if (reasoningContent) {
-      message.reasoning_content = reasoningContent;
-    }
-
-    if (parsedParts.functionCalls.length > 0) {
-      message.tool_calls = parsedParts.functionCalls.map((call, index) => ({
-        id: call.id || `call_${index}`,
-        type: 'function' as const,
-        function: {
-          name: call.name || '',
-          arguments: JSON.stringify(call.args || {}),
-        },
-      }));
-    }
-
-    const finishReason = this.mapGeminiFinishReasonToOpenAI(
-      candidate?.finishReason,
-    );
-
-    const usageMetadata = response.usageMetadata;
-    const usage: OpenAI.CompletionUsage = {
-      prompt_tokens: usageMetadata?.promptTokenCount || 0,
-      completion_tokens: usageMetadata?.candidatesTokenCount || 0,
-      total_tokens: usageMetadata?.totalTokenCount || 0,
-    };
-
-    if (usageMetadata?.cachedContentTokenCount !== undefined) {
-      (
-        usage as OpenAI.CompletionUsage & {
-          prompt_tokens_details?: { cached_tokens?: number };
-        }
-      ).prompt_tokens_details = {
-        cached_tokens: usageMetadata.cachedContentTokenCount,
-      };
-    }
-
-    const createdMs = response.createTime
-      ? Number(response.createTime)
-      : Date.now();
-    const createdSeconds = Number.isFinite(createdMs)
-      ? Math.floor(createdMs / 1000)
-      : Math.floor(Date.now() / 1000);
-
-    return {
-      id: response.responseId || `gemini-${Date.now()}`,
-      object: 'chat.completion',
-      created: createdSeconds,
-      model: response.modelVersion || this.model,
-      choices: [
-        {
-          index: 0,
-          message,
-          finish_reason: finishReason,
-          logprobs: null,
-        },
-      ],
-      usage,
-    };
+    return mergedMessages;
   }

   /**
@@ -915,6 +836,84 @@
     return response;
   }

+  /**
+   * Convert Gemini response format to OpenAI chat completion format for logging
+   */
+  convertGeminiResponseToOpenAI(
+    response: GenerateContentResponse,
+  ): OpenAI.Chat.ChatCompletion {
+    const candidate = response.candidates?.[0];
+    const content = candidate?.content;
+
+    let messageContent: string | null = null;
+    const toolCalls: OpenAI.Chat.ChatCompletionMessageToolCall[] = [];
+
+    if (content?.parts) {
+      const textParts: string[] = [];
+
+      for (const part of content.parts) {
+        if ('text' in part && part.text) {
+          textParts.push(part.text);
+        } else if ('functionCall' in part && part.functionCall) {
+          toolCalls.push({
+            id: part.functionCall.id || `call_${toolCalls.length}`,
+            type: 'function' as const,
+            function: {
+              name: part.functionCall.name || '',
+              arguments: JSON.stringify(part.functionCall.args || {}),
+            },
+          });
+        }
+      }
+
+      messageContent = textParts.join('').trimEnd();
+    }
+
+    const choice: OpenAI.Chat.ChatCompletion.Choice = {
+      index: 0,
+      message: {
+        role: 'assistant',
+        content: messageContent,
+        refusal: null,
+      },
+      finish_reason: this.mapGeminiFinishReasonToOpenAI(
+        candidate?.finishReason,
+      ) as OpenAI.Chat.ChatCompletion.Choice['finish_reason'],
+      logprobs: null,
+    };
+
+    if (toolCalls.length > 0) {
+      choice.message.tool_calls = toolCalls;
+    }
+
+    const openaiResponse: OpenAI.Chat.ChatCompletion = {
+      id: response.responseId || `chatcmpl-${Date.now()}`,
+      object: 'chat.completion',
+      created: response.createTime
+        ? Number(response.createTime)
+        : Math.floor(Date.now() / 1000),
+      model: this.model,
+      choices: [choice],
+    };
+
+    // Add usage metadata if available
+    if (response.usageMetadata) {
+      openaiResponse.usage = {
+        prompt_tokens: response.usageMetadata.promptTokenCount || 0,
+        completion_tokens: response.usageMetadata.candidatesTokenCount || 0,
+        total_tokens: response.usageMetadata.totalTokenCount || 0,
+      };
+
+      if (response.usageMetadata.cachedContentTokenCount) {
+        openaiResponse.usage.prompt_tokens_details = {
+          cached_tokens: response.usageMetadata.cachedContentTokenCount,
+        };
+      }
+    }
+
+    return openaiResponse;
+  }
+
   /**
    * Map OpenAI finish reasons to Gemini finish reasons
    */
@@ -932,24 +931,29 @@
     return mapping[openaiReason] || FinishReason.FINISH_REASON_UNSPECIFIED;
   }

-  private mapGeminiFinishReasonToOpenAI(
-    geminiReason?: FinishReason,
-  ): 'stop' | 'length' | 'tool_calls' | 'content_filter' | 'function_call' {
-    if (!geminiReason) {
-      return 'stop';
-    }
+  /**
+   * Map Gemini finish reasons to OpenAI finish reasons
+   */
+  private mapGeminiFinishReasonToOpenAI(geminiReason?: unknown): string {
+    if (!geminiReason) return 'stop';

     switch (geminiReason) {
-      case FinishReason.STOP:
+      case 'STOP':
+      case 1: // FinishReason.STOP
         return 'stop';
-      case FinishReason.MAX_TOKENS:
+      case 'MAX_TOKENS':
+      case 2: // FinishReason.MAX_TOKENS
         return 'length';
-      case FinishReason.SAFETY:
+      case 'SAFETY':
+      case 3: // FinishReason.SAFETY
         return 'content_filter';
+      case 'RECITATION':
+      case 4: // FinishReason.RECITATION
+        return 'content_filter';
+      case 'OTHER':
+      case 5: // FinishReason.OTHER
+        return 'stop';
       default:
-        if (geminiReason === ('RECITATION' as FinishReason)) {
-          return 'content_filter';
-        }
         return 'stop';
     }
   }
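
The rewritten mapping above deliberately widens the parameter to `unknown`, so enum members, string names, and raw numeric wire codes all normalize the same way. Expected input/output pairs, read directly off the switch (illustrative only; the method itself is private):

// mapGeminiFinishReasonToOpenAI(undefined)        -> 'stop'  (default)
// mapGeminiFinishReasonToOpenAI('STOP') or 1      -> 'stop'
// mapGeminiFinishReasonToOpenAI('MAX_TOKENS') or 2 -> 'length'
// mapGeminiFinishReasonToOpenAI('SAFETY') or 3    -> 'content_filter'
// mapGeminiFinishReasonToOpenAI('RECITATION') or 4 -> 'content_filter'
// mapGeminiFinishReasonToOpenAI('OTHER') or 5     -> 'stop'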
@@ -7,7 +7,7 @@
 import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
 import type { GenerateContentParameters } from '@google/genai';
 import { EnhancedErrorHandler } from './errorHandler.js';
-import type { RequestContext } from './errorHandler.js';
+import type { RequestContext } from './telemetryService.js';

 describe('EnhancedErrorHandler', () => {
   let errorHandler: EnhancedErrorHandler;
Some files were not shown because too many files have changed in this diff.